ansys-pyensight-core 0.11.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- ansys/pyensight/core/__init__.py +41 -0
- ansys/pyensight/core/common.py +341 -0
- ansys/pyensight/core/deep_pixel_view.html +98 -0
- ansys/pyensight/core/dockerlauncher.py +1124 -0
- ansys/pyensight/core/dvs.py +872 -0
- ansys/pyensight/core/enscontext.py +345 -0
- ansys/pyensight/core/enshell_grpc.py +641 -0
- ansys/pyensight/core/ensight_grpc.py +874 -0
- ansys/pyensight/core/ensobj.py +515 -0
- ansys/pyensight/core/launch_ensight.py +296 -0
- ansys/pyensight/core/launcher.py +388 -0
- ansys/pyensight/core/libuserd.py +2110 -0
- ansys/pyensight/core/listobj.py +280 -0
- ansys/pyensight/core/locallauncher.py +579 -0
- ansys/pyensight/core/py.typed +0 -0
- ansys/pyensight/core/renderable.py +880 -0
- ansys/pyensight/core/session.py +1923 -0
- ansys/pyensight/core/sgeo_poll.html +24 -0
- ansys/pyensight/core/utils/__init__.py +21 -0
- ansys/pyensight/core/utils/adr.py +111 -0
- ansys/pyensight/core/utils/dsg_server.py +1220 -0
- ansys/pyensight/core/utils/export.py +606 -0
- ansys/pyensight/core/utils/omniverse.py +769 -0
- ansys/pyensight/core/utils/omniverse_cli.py +614 -0
- ansys/pyensight/core/utils/omniverse_dsg_server.py +1196 -0
- ansys/pyensight/core/utils/omniverse_glb_server.py +848 -0
- ansys/pyensight/core/utils/parts.py +1221 -0
- ansys/pyensight/core/utils/query.py +487 -0
- ansys/pyensight/core/utils/readers.py +300 -0
- ansys/pyensight/core/utils/resources/Materials/000_sky.exr +0 -0
- ansys/pyensight/core/utils/support.py +128 -0
- ansys/pyensight/core/utils/variables.py +2019 -0
- ansys/pyensight/core/utils/views.py +674 -0
- ansys_pyensight_core-0.11.0.dist-info/METADATA +309 -0
- ansys_pyensight_core-0.11.0.dist-info/RECORD +37 -0
- ansys_pyensight_core-0.11.0.dist-info/WHEEL +4 -0
- ansys_pyensight_core-0.11.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,872 @@
|
|
|
1
|
+
# Copyright (C) 2022 - 2026 ANSYS, Inc. and/or its affiliates.
|
|
2
|
+
# SPDX-License-Identifier: MIT
|
|
3
|
+
#
|
|
4
|
+
#
|
|
5
|
+
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
# of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
# in the Software without restriction, including without limitation the rights
|
|
8
|
+
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
# copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
# furnished to do so, subject to the following conditions:
|
|
11
|
+
#
|
|
12
|
+
# The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
# copies or substantial portions of the Software.
|
|
14
|
+
#
|
|
15
|
+
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
# SOFTWARE.
|
|
22
|
+
|
|
23
|
+
"""The module provides a concrete implementation for the DVS bindings in PyEnSight.
|
|
24
|
+
|
|
25
|
+
It also provides a collection of utilities to starts DVS servers, clients,
|
|
26
|
+
launch a local PyEnSight session, or connect to an existing one, and finally
|
|
27
|
+
to send data from the clients to the servers.
|
|
28
|
+
"""
|
|
29
|
+
import glob
|
|
30
|
+
import io
|
|
31
|
+
import logging
|
|
32
|
+
import os
|
|
33
|
+
import pathlib
|
|
34
|
+
import platform
|
|
35
|
+
import re
|
|
36
|
+
import sys
|
|
37
|
+
import tarfile
|
|
38
|
+
import tempfile
|
|
39
|
+
import threading
|
|
40
|
+
import time
|
|
41
|
+
import traceback
|
|
42
|
+
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
|
|
43
|
+
import warnings
|
|
44
|
+
|
|
45
|
+
from ansys.api.pyensight.dvs_api import dvs_base
|
|
46
|
+
from ansys.pyensight.core import DockerLauncher, LocalLauncher
|
|
47
|
+
from ansys.pyensight.core.common import GRPC_WARNING_MESSAGE, grpc_version_check, safe_extract
|
|
48
|
+
import numpy
|
|
49
|
+
|
|
50
|
+
if TYPE_CHECKING:
|
|
51
|
+
from ansys.pyensight.core import Session
|
|
52
|
+
|
|
53
|
+
|
|
54
|
+
class DVS(dvs_base):
|
|
55
|
+
"""Create an instance of the DVS module.
|
|
56
|
+
|
|
57
|
+
The module tries to look for the DVS Python bindings from the input
|
|
58
|
+
Ansys installation folder (which might also be the CEI folder) or the input
|
|
59
|
+
lib_folder. If not found, and if a PyEnSight session is provided,
|
|
60
|
+
the DVS commands will be launched on the remote EnSight Python interpreter.
|
|
61
|
+
|
|
62
|
+
Parameters
|
|
63
|
+
----------
|
|
64
|
+
|
|
65
|
+
session: Session
|
|
66
|
+
An optional PyEnSight session. This must be provided in case the
|
|
67
|
+
DVS modules needs to be used on a remote session of EnSight.
|
|
68
|
+
ansys_installation: str
|
|
69
|
+
The optional full path to a local Ansys installation, or the CEI folder
|
|
70
|
+
from an Ansys installation
|
|
71
|
+
lib_folder: str
|
|
72
|
+
The optional full path to a folder that contains the DVS libraries and Python
|
|
73
|
+
bindings.
|
|
74
|
+
"""
|
|
75
|
+
|
|
76
|
+
def __init__(
    self,
    session: Optional["Session"] = None,
    ansys_installation: Optional[str] = None,
    lib_folder: Optional[str] = None,
) -> None:
    """Set up the DVS wrapper, resolving the bindings location and the cache folder.

    Parameters
    ----------
    session: Session
        An optional PyEnSight session. This must be provided in case the
        DVS module needs to be used on a remote session of EnSight.
    ansys_installation: str
        The optional full path to a local Ansys installation, or the CEI folder
        from an Ansys installation.
    lib_folder: str
        The optional full path to a folder that contains the DVS libraries and
        Python bindings.

    Raises
    ------
    RuntimeError
        If neither a session, an installation path, nor a lib folder is supplied.
    """
    super().__init__(session=session)
    self._ansys_installation: Optional[str] = None
    if ansys_installation:
        self._ansys_installation = LocalLauncher.get_cei_install_directory(ansys_installation)
    self._lib_folder: Optional[str] = lib_folder
    if self._session:
        # A session launched locally knows its install path; reuse it.
        if not self._ansys_installation and hasattr(self._session._launcher, "_install_path"):
            self._ansys_installation = self._session._launcher._install_path
    if not self._session and not self._ansys_installation and not self._lib_folder:
        raise RuntimeError(
            "Either a PyEnSight session or an ansys installation path, or a folder containing the DVS Python modules need to be provided."
        )
    self._connect_session = self._session
    # Bookkeeping for the servers/clients started by this instance.
    self._servers: Dict[int, Dict[str, Union[str, int]]] = {}
    self._server_ids: List[int] = []
    self._clients: Dict[int, Dict[str, Union[str, int, bool]]] = {}
    self._client_count = 0
    self._attempt_dvs_python_bindings_import()
    self._parts: Dict[int, Any] = {}
    self._vars: Dict[int, Any] = {}
    self._update_num = 0
    self._current_update = 0
    # Map "number of vertices per face" -> DVS element type enum.
    self._elem_type_map = {
        2: self.ELEMTYPE_BAR_2,
        3: self.ELEMTYPE_TRIANGLE,
        4: self.ELEMTYPE_QUADRANGLE,
    }
    self._total_ranks = 0
    # BUG FIX: the original dereferenced self._session._launcher unconditionally,
    # which raised AttributeError when no session was provided (a documented,
    # legal configuration). Use a local temporary cache in that case as well;
    # the container path applies only when a session exists whose launcher has
    # no local install path (the DockerLauncher case).
    if self._session is None or hasattr(self._session._launcher, "_install_path"):
        self._temp_cache = tempfile.TemporaryDirectory(prefix="pyensight_dvs")
        os.mkdir(os.path.join(self._temp_cache.name, "dvs_cache"))
        self._cache_folder: str = os.path.join(self._temp_cache.name, "dvs_cache")
    else:
        # DockerLauncher case: the cache lives inside the container image.
        self._cache_folder = "/home/ensight/dvs_cache"
    self._dataset_name: Optional[str] = None
    self._secret_key: Optional[str] = None
@staticmethod
|
|
120
|
+
def _is_windows():
|
|
121
|
+
"""True if the platform being used is Windows."""
|
|
122
|
+
return "Win" in platform.system()
|
|
123
|
+
|
|
124
|
+
def launch_local_pyensight_session(
    self,
    use_egl=False,
    use_sos: Optional[int] = None,
    additional_command_line_options: Optional[List] = None,
    grpc_use_tcp_sockets: Optional[bool] = False,
    grpc_allow_network_connections: Optional[bool] = False,
    grpc_disable_tls: Optional[bool] = False,
    grpc_uds_pathname: Optional[str] = None,
):
    """Launch a local PyEnSight session and bind it to this DVS instance.

    If an Ansys installation has been provided, it is used to launch EnSight.

    Parameters
    ----------
    use_egl : bool, optional
        Whether to use EGL hardware for accelerated graphics. The platform
        must be able to support this hardware. Defaults to ``False``.
    use_sos : int, optional
        Number of EnSight servers to use for SOS (Server of Server) mode.
        Defaults to ``None``, in which case SOS mode is not used.
    additional_command_line_options: list, optional
        Additional command line options to be used to launch EnSight.
    grpc_use_tcp_sockets :
        If using gRPC, and if True, then allow TCP Socket based connections
        instead of only local connections.
    grpc_allow_network_connections :
        If using gRPC and using TCP Socket based connections, listen on all
        networks.
    grpc_disable_tls :
        If using gRPC and using TCP Socket based connections, disable TLS.
    grpc_uds_pathname :
        If using gRPC and using Unix Domain Socket based connections,
        explicitly set the pathname to the shared UDS file instead of using
        the default.
    """
    launcher_kwargs = {
        "ansys_installation": self._ansys_installation,
        "use_sos": use_sos,
        "use_egl": use_egl,
        "additional_command_line_options": additional_command_line_options,
        "grpc_allow_network_connections": grpc_allow_network_connections,
        "grpc_use_tcp_sockets": grpc_use_tcp_sockets,
        "grpc_disable_tls": grpc_disable_tls,
        "grpc_uds_pathname": grpc_uds_pathname,
    }
    launcher = LocalLauncher(**launcher_kwargs)
    # The freshly started session becomes the one driven by this instance.
    self._session = launcher.start()
def _check_ansys_version_grpc(self):
    """Check whether the gRPC security options apply to the EnSight install.

    Parses the BUILDINFO.txt file shipped with the installation for the
    EnSight version and internal version, then defers to
    ``grpc_version_check``.

    Raises
    ------
    RuntimeError
        If BUILDINFO.txt is missing or either version cannot be parsed.
    """
    buildinfo_path = os.path.join(self._ansys_installation, "BUILDINFO.txt")
    if not os.path.exists(buildinfo_path):
        raise RuntimeError("Couldn't find BUILDINFO file, cannot check installation.")
    with open(buildinfo_path, "r") as handle:
        contents = handle.read()
    internal_match = re.search("Internal: (.*)\n", contents)
    if not internal_match:
        raise RuntimeError("Couldn't parse EnSight internal version in BUILDINFO file.")
    full_match = re.search("Version: (.*)\n", contents)
    if not full_match:
        raise RuntimeError("Couldn't parse EnSight version in BUILDINFO file.")
    return grpc_version_check(internal_match.group(1), full_match.group(1))
def _attempt_dvs_python_bindings_import(self):
    """Attempt to load the actual DVS Python bindings.

    If an input lib folder has been provided, it will be tried first.
    If an ansys installation has been provided, it will be tried as the
    second choice (first its bundled Python, then the Python-CEI layout).

    Raises
    ------
    RuntimeError
        If the lib folder import fails, or if the installation layout is
        not recognizable.
    """

    def _try_import(folder):
        # Append the candidate folder to sys.path and attempt the import;
        # return the module on success, None on failure.
        sys.path.append(folder)
        try:
            import dynamic_visualization_store

            return dynamic_visualization_store
        except (ModuleNotFoundError, ImportError):
            return None

    if self._lib_folder:
        module = _try_import(self._lib_folder)
        if not module:
            raise RuntimeError("Cannot import DVS module from provided library folder.")
        self._dvs_module = module
    if self._ansys_installation:
        # Check if you are inside of an ansys install
        if not self._check_ansys_version_grpc():
            warnings.warn(GRPC_WARNING_MESSAGE.replace("EnSight", "DVS"))
        apex_path = glob.glob(os.path.join(self._ansys_installation, "apex???"))
        if not apex_path:
            # try dev path
            raise RuntimeError("Cannot find a valid EnSight install")
        apex_path = apex_path[-1]
        arch = "win64" if self._is_windows() else "linux_2.6_64"
        apex_libs = os.path.join(apex_path, "machines", arch)
        # BUG FIX: the original indexed the glob result without checking it,
        # raising an opaque IndexError when no Python-3.* folder exists.
        python_paths = glob.glob(os.path.join(apex_libs, "Python-3.*"))
        if not python_paths:
            raise RuntimeError(f"Cannot find a Python-3.* folder in {apex_libs}")
        python_path = python_paths[-1]
        # BUG FIX: escape the dot; the original pattern matched any character.
        # Also check the match before using it instead of crashing on None.
        version_match = re.search(r"Python-3\.([0-9]+)\.([0-9]+)", os.path.basename(python_path))
        if not version_match:
            raise RuntimeError(f"Cannot parse the Python version from {python_path}")
        # Renamed: this is Python's *minor* version (the N in 3.N.patch).
        py_minor_version = version_match.group(1)
        lib_path = os.path.join(python_path, "lib", f"python3.{py_minor_version}")
        if self._is_windows():
            lib_path = os.path.join(python_path, "DLLs")
        module = _try_import(lib_path)
        if module:
            self._dvs_module = module
            return
        # Fall back to the Python-CEI layout used by some installs.
        python_cei = os.path.join(apex_libs, "Python-CEI")
        if os.path.isdir(python_cei):
            python_cei_lib_path = os.path.join(python_cei, "lib", f"python3.{py_minor_version}")
            if self._is_windows():
                python_cei_lib_path = os.path.join(python_cei, "DLLs")
            module = _try_import(python_cei_lib_path)
            if module:
                self._dvs_module = module
                return
        # BUG FIX: the original stayed silent when Python-CEI did not exist;
        # always warn when the installation yields no importable bindings.
        warnings.warn("Cannot import DVS module from provided ansys installation folder.")
# Transport enums accepted by start_dvs_servers().
DVS_NULL_TRANSPORT = 0
DVS_GRPC_TRANSPORT = 1

@property
def session(self):
    """The PyEnSight session currently associated with this DVS instance."""
    return self._session

@session.setter
def session(self, session: "Session"):
    self._session = session
def start_dvs_servers(
    self, num_servers: int, transport: int = 0, ranks_per_server: int = 1, secret_key: str = ""
):
    """Start DVS servers using the Python bindings.

    The DVS servers will be started externally to the eventual EnSight
    session available. For simplicity, it is assumed that each server will
    receive the same number of ranks, declared in input.

    Parameters
    ----------
    num_servers: int
        the number of DVS servers to launch
    transport: int
        the kind of transport to be used. Defaults to null.
        Description of options as follows.

        ================== =========================================================
        Name               Query type
        ================== =========================================================
        DVS_NULL_TRANSPORT Start the servers with the null protocol. Default
        DVS_GRPC_TRANSPORT Start the servers with the grpc protocol.
        ================== =========================================================
    ranks_per_server: int
        the number of ranks that will be connected to each server. Defaults to 1
    secret_key: str
        the secret key that will be used for the eventual gRPC connection.
        Can be an empty string, that is also the default value.

    Raises
    ------
    RuntimeError
        If the servers fail to launch or do not come up within 60 seconds.
    """
    if not self._secret_key:
        self._secret_key = secret_key
    transport_string = "null" if transport == 0 else "grpc"
    uri = f"{transport_string}://"
    grpc = transport == self.DVS_GRPC_TRANSPORT
    options = {"CACHE_URI": f"hdf5:///{pathlib.Path(self._cache_folder).as_posix()}"}
    if grpc:
        # Port 0 lets the server pick a free port; it is read back below.
        uri += "127.0.0.1:0"
        # BUG FIX: use the stored key so that a key recorded by an earlier
        # call is honored when this call passes the default empty string.
        options.update(
            {
                "SERVER_SECURITY_SECRET": self._secret_key,
            }
        )
        if self._session:
            if self._session._grpc_allow_network_connections:
                options["SERVER_LISTEN_ALL_NETWORKS"] = "1"
            if self._session._grpc_disable_tls:
                options["SERVER_DISABLE_TLS"] = "1"
            if self._session._grpc_use_tcp_sockets:
                options["SERVER_USE_TCP_SOCKETS"] = "1"
            if self._session._grpc_uds_pathname:
                options["SERVER_UNIX_DOMAIN_SOCKET_PATH"] = self._session._grpc_uds_pathname
    try:
        for n in range(0, num_servers):
            # Assume ranks equally distributed
            server_id = self.server_create(uri=uri)
            self.server_start(
                server_id, server_num=n, local_ranks=ranks_per_server, options=options
            )
            self._server_ids.append(server_id)
            self._servers[n] = {
                "server_id": server_id,
                "ranks": ranks_per_server,
                "in_ensight": False,
            }
            if grpc:
                # Recover the actual port the server bound to.
                uri_to_save = self.server_get_uri(server_id)
                port = re.search(":([0-9]+)", uri_to_save)
                if port:
                    self._servers[n].update(
                        {"server_uri": uri_to_save, "port": int(port.group(1))}
                    )
        self._total_ranks = ranks_per_server * len(self._server_ids)
        started = False
        start = time.time()
        while not started and time.time() - start < 60:
            if not all([self.server_started(s) for s in self._server_ids]):
                time.sleep(0.5)
            else:
                started = True
        if not started:
            raise RuntimeError("The DVS servers have not started in 60 seconds.")
    except Exception as e:
        traceback.print_exc()
        raise RuntimeError(f"Couldn't start the servers, error: {e}")
def _start_dvs_client(self, server_id: int, rank: int, dedup=False):
    """Start a DVS client and connect it to an already-started server.

    The secret key recorded on this instance (``self._secret_key``) is used
    for the connection.

    Parameters
    ----------
    server_id: int
        the server ID to connect to
    rank: int
        the rank of the client to launch
    dedup: bool
        True to not send duplicate data to server

    Raises
    ------
    RuntimeError
        If the server was not started by this process or the connection fails.
    """
    # DOC FIX: the original docstring documented a nonexistent ``secret_key``
    # parameter; the key actually comes from self._secret_key.
    if server_id not in self._server_ids:
        raise RuntimeError(f"Server ID {server_id} not started in this process.")
    flags = self.FLAGS_BLOCK_FOR_SERVER
    if dedup:
        flags |= self.FLAGS_DEDUP
    try:
        client_id = self.connect(server_id=server_id, secret=self._secret_key, flags=flags)
    except Exception as e:
        traceback.print_exc()
        raise RuntimeError(f"Couldn't start the client, error {e}")
    self._clients[self._client_count] = {
        "client_id": client_id,
        "server_id": server_id,
        "rank": rank,
        "update_started": False,
    }
    self._client_count += 1
def start_dvs_servers_in_ensight(self, ranks_per_server: int, secret_key=""):
    """Launch the DVS servers in EnSight for an in-situ session.

    One DVS server is launched on each EnSight server.

    Parameters
    ----------
    ranks_per_server: int
        how many ranks will be sent to each server. This will be used
        in a later stage for the clients launch and connection.
    secret_key: str
        the secret key that will be used for the eventual gRPC connection.
        Can be an empty string, that is also the default value.
    """
    if not self._secret_key:
        self._secret_key = secret_key
    if not self._session:
        raise RuntimeError(
            "A PyEnSight session is required to start the DVS servers in EnSight."
        )
    # The returned thread handle is not needed; only the base port is.
    _, port = self._session.ensight.utils.readers.dvs.launch_live_dvs(
        secret_key=secret_key
    )
    num_servers = self._session._launcher._use_sos or 1
    base_uri = f"grpc://{self._session.hostname}"
    self._total_ranks = num_servers * ranks_per_server
    # Need to implement SOS support in session.ensight.utils.readers.dvs.launch_live_dvs
    for idx in range(num_servers):
        server_port = port + idx
        server_uri = f"{base_uri}:{server_port}"
        # Just create a server handle but do not start it: EnSight owns it.
        sid = self.server_create(uri=server_uri)
        self._server_ids.append(sid)
        self._servers[idx] = {
            "server_uri": server_uri,
            "port": server_port,
            "server_id": sid,
            "in_ensight": True,
            "ranks": ranks_per_server,
        }
def start_dvs_clients(self, dataset_name: str, dedup=False):
    """Launch the DVS clients and connect to the existing DVS servers.

    Ranks are distributed to servers in contiguous groups, honoring the
    ranks-per-server value the servers were created with.

    Parameters
    ----------
    dataset_name: str
        The dataset name required to initialize the following exports.
    dedup: bool
        True to not send duplicate data to the servers.
    """
    # DOC FIX: the original docstring omitted the ``dedup`` parameter.
    self._dataset_name = dataset_name
    rank_per_server = list(self._servers.values())[0].get("ranks")
    local_ranks = 0
    n = 0
    for rank in range(0, self._total_ranks):
        server = self._servers[n]
        local_ranks += 1
        if local_ranks == rank_per_server:
            # The current server is full; the next rank goes to the next one.
            local_ranks = 0
            n += 1
        self._start_dvs_client(int(server["server_id"]), rank, dedup=dedup)
def _begin_update(
    self, client_dict: Dict[str, Union[str, int, bool]], time: float, rank: int, chunk: int
):
    """Start an update on a single client and flag the client record.

    Parameters
    ----------
    client_dict: dict
        A dictionary holding the DVS client parameters
    time: float
        The time value for the current update. May be a time already used
    rank: int
        The rank of the update
    chunk: int
        The chunk of the update
    """
    try:
        self.begin_update(client_dict["client_id"], self._update_num, time, rank, chunk)
    except Exception as e:
        traceback.print_exc()
        raise RuntimeError(f"Couldn't begin update. Error: {e}")
    else:
        # Only mark the client once begin_update returned without raising.
        client_dict["update_started"] = True
def begin_updates(self, time: float):
    """Begin an update on all the clients available for the input time value.

    Each update is launched on a separate thread. The client associated
    to the update will be flagged once its update starts.

    Currently we are assuming one chunk. Chunking support will be added in
    a future release.

    Parameters
    ----------
    time: float
        The time value for the current update. May be a time already used
    """
    for client_vals in self._clients.values():
        worker = threading.Thread(
            target=self._begin_update,
            args=(client_vals, time, client_vals["rank"], 0),
        )
        worker.start()
def begin_initialization(self):
    """Begin initialization for all the clients."""
    for index in range(self._client_count):
        record = self._clients[index]
        self.begin_init(
            record["client_id"],
            dataset_name=f"Simba_{self._dataset_name}",
            rank=record["rank"],
            total_ranks=self._total_ranks,
            num_chunks=1,
        )
def end_initialization(self):
    """End initialization for all the clients."""
    for index in range(self._client_count):
        self.end_init(self._clients[index]["client_id"])
def create_part(self, part_id: int, part_name: str, metadata: Optional[Dict[str, str]] = None):
    """Create a part definition for the DVS export.

    Parameters
    ----------
    part_id: int
        the ID of the part to be exported
    part_name: str
        the name of the part to export
    metadata: dict
        An optional dictionary of metadata to attach to the part.
    """
    if self._parts.get(part_id):
        # Already registered; do not redefine the part.
        print("Part already created, skip")
        return
    part_record = {
        "id": part_id,
        "name": part_name,
        "structured": False,
        "chunking": False,
        "tags": metadata or {},
    }
    # Broadcast the part definition to every connected client.
    for index in range(self._client_count):
        self.add_part_info(self._clients[index]["client_id"], [part_record])
    self._parts[part_id] = part_record
def create_variable(
    self,
    var_id: int,
    var_name: str,
    var_type: int,
    location: int,
    unit: str = "",
    unit_label="",
    metadata: Optional[Dict[str, str]] = None,
):
    """Create a variable definition for the DVS export.

    Parameters
    ----------
    var_id: int
        the ID of the var to be exported
    var_name: str
        the name of the var to export
    var_type: int
        The variable type. Check the VARTYPE enums available with this module
    location: int
        The variable location. Check the LOCATION enums available with this module
    unit: str
        The variable units. See https://nexusdemo.ensight.com/docs/python/html/ENS_UNITSSchema.html
    unit_label: str
        The label for the variable units. See https://nexusdemo.ensight.com/docs/python/html/ENS_UNITSSchema.html
    metadata: dict
        An optional dictionary of metadata to attach to the var.
    """
    if self._vars.get(var_id):
        # Already registered; do not redefine the variable.
        print("Var already created, skip")
        return
    var_record = {
        "id": var_id,
        "name": var_name,
        "tags": metadata or {},
        "type": var_type,
        "location": location,
        "unit": unit,
        "unit_label": unit_label,
    }
    # Broadcast the variable definition to every connected client.
    for index in range(self._client_count):
        self.add_var_info(self._clients[index]["client_id"], [var_record])
    self._vars[var_id] = var_record
def _check_updates_started(self):
    """Check that all the (threaded) updates started successfully.

    This is required because the launch of the updates is threaded.
    Raises RuntimeError if any client has not begun within 60 seconds.
    """
    deadline = time.time() + 60
    all_started = all(vals["update_started"] for vals in self._clients.values())
    while not all_started and time.time() < deadline:
        time.sleep(0.5)
        all_started = all(vals["update_started"] for vals in self._clients.values())
    if not all_started:
        # Dump the per-client state before failing, to ease debugging.
        for c, vals in self._clients.items():
            update = vals["update_started"]
            logging.debug(f"Client {c}, update: {update}")
        raise RuntimeError("Not all clients have begun the updates.")
def send_coordinates(self, part_id: int, vertices: Union[List[float], numpy.ndarray]):
    """Send the coordinates data for the input part.

    The full coordinates array will be sent across all the ranks.
    The data will be used for building a mesh chunk in DVS.
    The data are assumed in the following format:
    [x0, y0, z0, x1, y1, z1, ...]

    Parameters
    ----------
    part_id: int
        the part to define the coordinates for
    vertices: List[float] or numpy array
        the coordinates array. The format is described above.
    """
    if not self._parts.get(part_id):
        raise RuntimeError(
            "Please create the part first via create_part() or the lower level add_part_info."
        )
    # Accept plain lists as well as arrays; view as an (N, 3) table.
    coords = numpy.asarray(vertices).reshape(-1, 3)
    xs = coords[:, 0]
    ys = coords[:, 1]
    zs = coords[:, 2]
    self._check_updates_started()
    for index in range(self._client_count):
        self.update_nodes(
            self._clients[index]["client_id"], part_id=part_id, x=xs, y=ys, z=zs
        )
def send_variable_data(
    self,
    var_id: int,
    part_id: int,
    values: Union[List[float], numpy.ndarray],
):
    """Send the variable data for the input variable.

    Parameters
    ----------
    var_id: int
        the ID of the variable that will get its values updated.
    part_id: int
        the ID of the part to update the variable for
    values: List[float] or numpy array
        the variable array. If the variable is a vector, the values are
        expected as [v1x, v1y, v1z, v2x, v2y, v2z ...]
    """
    if not self._vars.get(var_id):
        raise RuntimeError(
            "Please create the variable first via create_var() or the lower level add_var_info."
        )
    if not self._parts.get(part_id):
        raise RuntimeError(
            "Please create the part first via create_part() or the lower level add_part_info."
        )
    if not self._parts[part_id].get("dvs_elem_type"):
        raise RuntimeError(f"Please send first the part connectivity for part {part_id}")
    elem_type = self._parts[part_id]["dvs_elem_type"]
    data = values if isinstance(values, numpy.ndarray) else numpy.array(values)
    self._check_updates_started()
    var_type = self._vars[var_id]["type"]
    location = self._vars[var_id]["location"]
    # These isinstance guards only exist to satisfy mypy.
    if isinstance(var_type, (str, bool, dict)):
        raise RuntimeError("Var type is not an integer")
    if isinstance(location, (str, bool, dict)):
        raise RuntimeError("Location is not an integer")
    is_scalar = var_type == self.VARTYPE_SCALAR
    is_vector = var_type == self.VARTYPE_VECTOR
    at_node = location == self.LOCATION_NODE
    at_element = location == self.LOCATION_ELEMENT
    for index in range(self._client_count):
        cid = self._clients[index]["client_id"]
        if is_scalar and at_node:
            self.update_var_node_scalar(cid, var_id=var_id, part_id=part_id, values=data)
        elif is_scalar and at_element:
            self.update_var_element_scalar(
                cid,
                var_id=var_id,
                part_id=part_id,
                elem_type=elem_type,
                values=data,
            )
        elif is_vector and at_node:
            self.update_var_node_vector(cid, var_id=var_id, part_id=part_id, values=data)
        elif is_vector and at_element:
            self.update_var_element_vector(
                cid,
                var_id=var_id,
                part_id=part_id,
                elem_type=elem_type,
                values=data,
            )
@staticmethod
|
|
691
|
+
def _split_list(lst: Union[List[int], List[float]], num_parts: int):
|
|
692
|
+
"""Split the input list in n parts.
|
|
693
|
+
|
|
694
|
+
lst: list
|
|
695
|
+
the list to be split
|
|
696
|
+
num_parts: int
|
|
697
|
+
the number of parts to split the list into
|
|
698
|
+
|
|
699
|
+
Returns
|
|
700
|
+
-------
|
|
701
|
+
parts: list
|
|
702
|
+
A list containing the parts the original list was split into
|
|
703
|
+
"""
|
|
704
|
+
n = len(lst)
|
|
705
|
+
part_size = n // num_parts
|
|
706
|
+
remainder = n % num_parts
|
|
707
|
+
parts = []
|
|
708
|
+
start = 0
|
|
709
|
+
for i in range(num_parts):
|
|
710
|
+
end = start + part_size + (1 if i < remainder else 0)
|
|
711
|
+
parts.append(lst[start:end])
|
|
712
|
+
start = end
|
|
713
|
+
return parts
|
|
714
|
+
|
|
715
|
+
def send_connectivity(
    self,
    part_id,
    offsets: Union[List, numpy.ndarray],
    faces: Union[List, numpy.ndarray],
    ghost=False,
):
    """Send the connectivity data for the input part.

    The data will be used for building an element block in DVS.
    The connectivity array will be split among all the available ranks.
    The faces data are assumed in the following format:
    [n, i1, i2, ...in, m, j1, j2, ...jn, p, k1, k2, ...kp, ...]
    The offsets data instead:
    [0, n, n+m, n+m+p ....]
    The faces list indicates the IDs of the vertices of each face, in order.
    The offsets lists indicates the index where to find a specific face.

    Parameters
    ----------
    part_id: int
        the part to define the connectivity for
    offsets: List[int] or numpy array
        the offsets values. The format is described above.
    faces: List[int] or numpy array
        the connectivity value. The format is described above.
    ghost: bool
        True if the input data contains ghost elements.

    Raises
    ------
    RuntimeError
        if no DVS clients are available or the part has not been created yet.
    """
    if not self._clients:
        raise RuntimeError("No DVS clients started yet.")
    if not self._parts.get(part_id):
        raise RuntimeError(
            "Please create the part first via create_part() or the lower level add_part_info."
        )
    if not isinstance(faces, numpy.ndarray):
        faces = numpy.array(faces)
    if not isinstance(offsets, numpy.ndarray):
        offsets = numpy.array(offsets)
    # Vertex count of each face, from the difference of consecutive offsets.
    vertices_per_face = numpy.diff(offsets)
    # Split the flat faces array into one sub-array per face.
    connectivity_split = numpy.split(faces, numpy.cumsum(vertices_per_face[:-1]))
    elem_type = self.ELEMTYPE_N_SIDED_POLYGON
    # When every face has the same vertex count a fixed element type may be
    # used instead of the generic n-sided polygon. Guard against an empty
    # connectivity: indexing vertices_per_face[0] on an empty array would
    # raise an IndexError before any meaningful error could be reported.
    all_same = vertices_per_face.size > 0 and bool(
        numpy.all(vertices_per_face == vertices_per_face[0])
    )
    if all_same:
        num_vertices = vertices_per_face[0]
        _elem_type = self._elem_type_map.get(num_vertices)
        if _elem_type:
            elem_type = _elem_type
    if ghost:
        # The ghost variant of each element type is the next enum value.
        elem_type += 1
    self._check_updates_started()
    # Distribute the per-face arrays (and their vertex counts) across ranks.
    split_arrays = self._split_list(connectivity_split, self._total_ranks)
    split_num_faces = self._split_list(vertices_per_face, self._total_ranks)
    for c in range(self._client_count):
        client = self._clients[c]
        arrays = split_arrays[c]
        if len(arrays) > 1:
            indices = numpy.concatenate(arrays)
        elif arrays:
            indices = arrays[0]
        else:
            # This rank received no faces; send an empty index buffer.
            indices = numpy.array([])
        if elem_type not in [
            self.ELEMTYPE_N_SIDED_POLYGON,
            self.ELEMTYPE_N_SIDED_POLYGON_GHOST,
        ]:
            # Fixed-size elements: the flat index buffer is enough.
            self.update_elements(
                client["client_id"], part_id=part_id, elem_type=elem_type, indices=indices
            )
        else:
            # Variable-size polygons also need the per-polygon vertex counts.
            connectivity_num_faces = split_num_faces[c]
            self.update_elements_polygon(
                client["client_id"],
                part_id=part_id,
                elem_type=elem_type,
                nodes_per_polygon=numpy.array(connectivity_num_faces),
                indices=indices,
            )
    # Remember the element type so variable updates can reuse it later.
    self._parts[part_id]["dvs_elem_type"] = elem_type
|
|
794
|
+
|
|
795
|
+
def _check_timestep_count(self, timeout=120.0):
|
|
796
|
+
"""Check that there are no pending timesteps before loading data.
|
|
797
|
+
|
|
798
|
+
Parameters
|
|
799
|
+
----------
|
|
800
|
+
timeout: float
|
|
801
|
+
the timeout to set while checking for pending timesteps
|
|
802
|
+
"""
|
|
803
|
+
ready = False
|
|
804
|
+
start = time.time()
|
|
805
|
+
while not ready and time.time() - start < timeout:
|
|
806
|
+
vals = []
|
|
807
|
+
for server_id in self._server_ids:
|
|
808
|
+
num_pending, num_complete = self.server_timestep_count(server_id)
|
|
809
|
+
vals.append(num_pending == 0)
|
|
810
|
+
ready = all(vals)
|
|
811
|
+
if not ready:
|
|
812
|
+
time.sleep(0.5)
|
|
813
|
+
if not ready:
|
|
814
|
+
raise RuntimeError(
|
|
815
|
+
f"There are still pending timesteps within the input timeout of {timeout} seconds"
|
|
816
|
+
)
|
|
817
|
+
|
|
818
|
+
def load_dataset_in_ensight(self, timeout=120.0):
    """Load the cached DVS dataset into the attached EnSight session.

    Parameters
    ----------
    timeout: float
        the timeout to set while checking for pending timesteps

    Raises
    ------
    RuntimeError
        if no PyEnSight session is attached.
    """
    if not self._session:
        raise RuntimeError("A PyEnSight session must be available.")
    # Ensure all servers have flushed their timesteps before loading.
    self._check_timestep_count(timeout=timeout)
    dataset_file = os.path.join(self._cache_folder, f"Simba_{self._dataset_name}.dvs")
    self._session.load_data(dataset_file)
|
|
830
|
+
|
|
831
|
+
def end_updates(self):
    """Close the open update on every client and bump the update counter."""
    for idx in range(self._client_count):
        entry = self._clients[idx]
        self.end_update(entry["client_id"])
        entry["update_started"] = False
    self._update_num += 1
|
|
838
|
+
|
|
839
|
+
def delete_item_on_clients(self, update_num, filter=""):
    """Remove an update from the database on every running client.

    Parameters
    ----------
    update_num: int
        the update number to remove from the database
    filter: str
        the filter to apply when deleting the update number
    """
    for idx in range(self._client_count):
        entry = self._clients[idx]
        # Each client deletes the item for its own rank.
        self.delete_item(entry["client_id"], update_num, entry["rank"], filter)
|
|
852
|
+
|
|
853
|
+
def get_dvs_data_from_container(self, destination: str, use_docker=False):
    """Utility to save the data from the container to a local destination.

    Parameters
    ----------
    destination: str
        the folder where to copy the files to
    use_docker: bool
        if True, download is done using the docker CLI

    Raises
    ------
    RuntimeError
        if the session was not started by a DockerLauncher.
    """
    if not isinstance(self._session._launcher, DockerLauncher):
        raise RuntimeError("Method only available for DockerLauncher instances.")
    if not os.path.exists(destination):
        os.makedirs(destination)
    if use_docker:
        # Stream the cache folder out of the container as a tar archive.
        # get_archive returns (byte-chunk iterator, stat dict); the stat
        # metadata is not needed here.
        bits, _ = self._session._launcher._container.get_archive(self._cache_folder)
        with tarfile.open(fileobj=io.BytesIO(b"".join(bits)), mode="r") as tar:
            safe_extract(tar, destination)
        # The archive only ever lived in memory; there is no temporary file
        # to delete. (The previous os.remove(bits) call passed the byte
        # stream instead of a path and always raised a TypeError.)
    else:
        # copy_from_session expects a file:// URI for the local destination.
        posix_uri = pathlib.Path(destination).as_uri()
        self._session.copy_from_session(posix_uri, ["dvs_cache"])
|