ansys-pyensight-core 0.9.1__py3-none-any.whl → 0.9.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ansys-pyensight-core might be problematic. Click here for more details.

@@ -0,0 +1,792 @@
1
+ """The module provides a concrete implementation for the DVS bindings in PyEnSight.
2
+
3
+ It also provides a collection of utilities to start DVS servers and clients,
4
+ launch a local PyEnSight session, or connect to an existing one, and finally
5
+ to send data from the clients to the servers.
6
+ """
7
+ import glob
8
+ import io
9
+ import logging
10
+ import os
11
+ import pathlib
12
+ import platform
13
+ import re
14
+ import sys
15
+ import tarfile
16
+ import tempfile
17
+ import threading
18
+ import time
19
+ import traceback
20
+ from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
21
+ import warnings
22
+
23
+ from ansys.api.pyensight.dvs_api import dvs_base
24
+ from ansys.pyensight.core import DockerLauncher, LocalLauncher
25
+ from ansys.pyensight.core.common import safe_extract
26
+ import numpy
27
+
28
+ if TYPE_CHECKING:
29
+ from ansys.pyensight.core import Session
30
+
31
+
32
class DVS(dvs_base):
    """Concrete implementation of the DVS (Dynamic Visualization Store) bindings.

    The class tries to look for the DVS Python bindings in the input
    Ansys installation folder (which might also be the CEI folder) or in the
    input lib_folder. If not found, and if a PyEnSight session is provided,
    the DVS commands will be launched on the remote EnSight Python interpreter.

    Parameters
    ----------
    session: Session
        An optional PyEnSight session. This must be provided in case the
        DVS module needs to be used on a remote session of EnSight.
    ansys_installation: str
        The optional full path to a local Ansys installation, or the CEI folder
        from an Ansys installation.
    lib_folder: str
        The optional full path to a folder that contains the DVS libraries and
        Python bindings.
    """
+
54
+ def __init__(
55
+ self,
56
+ session: Optional["Session"] = None,
57
+ ansys_installation: Optional[str] = None,
58
+ lib_folder: Optional[str] = None,
59
+ ) -> None:
60
+ super().__init__(session=session)
61
+ self._ansys_installation: Optional[str] = None
62
+ if ansys_installation:
63
+ self._ansys_installation = LocalLauncher.get_cei_install_directory(ansys_installation)
64
+ self._lib_folder: Optional[str] = lib_folder
65
+ if self._session:
66
+ if not self._ansys_installation and hasattr(self._session._launcher, "_install_path"):
67
+ self._ansys_installation = self._session._launcher._install_path
68
+ if not self._session and not self._ansys_installation and not self._lib_folder:
69
+ raise RuntimeError(
70
+ "Either a PyEnSight session or an ansys installation path, or a folder containing the DVS Python modules need to be provided."
71
+ )
72
+ self._connect_session = self._session
73
+ self._servers: Dict[int, Dict[str, Union[str, int]]] = {}
74
+ self._server_ids: List[int] = []
75
+ self._clients: Dict[int, Dict[str, Union[str, int, bool]]] = {}
76
+ self._client_count = 0
77
+ self._attempt_dvs_python_bindings_import()
78
+ self._parts: Dict[int, Any] = {}
79
+ self._vars: Dict[int, Any] = {}
80
+ self._update_num = 0
81
+ self._current_update = 0
82
+ self._elem_type_map = {
83
+ 2: self.ELEMTYPE_BAR_2,
84
+ 3: self.ELEMTYPE_TRIANGLE,
85
+ 4: self.ELEMTYPE_QUADRANGLE,
86
+ }
87
+ self._total_ranks = 0
88
+ if hasattr(self._session._launcher, "_install_path"):
89
+ self._temp_cache = tempfile.TemporaryDirectory(prefix="pyensight_dvs")
90
+ os.mkdir(os.path.join(self._temp_cache.name, "dvs_cache"))
91
+ self._cache_folder: str = os.path.join(self._temp_cache.name, "dvs_cache")
92
+ else:
93
+ self._cache_folder = "/home/ensight/dvs_cache"
94
+ self._dataset_name: Optional[str] = None
95
+ self._secret_key: Optional[str] = None
96
+
97
+ @staticmethod
98
+ def _is_windows():
99
+ """True if the platform being used is Windows."""
100
+ return "Win" in platform.system()
101
+
102
+ def launch_local_pyensight_session(
103
+ self,
104
+ use_egl=False,
105
+ use_sos: Optional[int] = None,
106
+ additional_command_line_options: Optional[List] = None,
107
+ ):
108
+ """Launch a local PyEnSight session.
109
+
110
+ If an ansys installation has been provided, it will be used to launch EnSight.
111
+ The session will be associated to the current DVS module instance.
112
+
113
+ Parameters
114
+
115
+ use_egl : bool, optional
116
+ Whether to use EGL hardware for accelerated graphics. The platform
117
+ must be able to support this hardware. This parameter is defined on
118
+ the parent ``Launcher`` class, where the default is ``False``.
119
+ use_sos : int, optional
120
+ Number of EnSight servers to use for SOS (Server of Server) mode.
121
+ This parameter is defined on the parent ``Launcher`` class, where
122
+ the default is ``None``, in which case SOS mode is not used.
123
+ additional_command_line_options: list, optional
124
+ Additional command line options to be used to launch EnSight.
125
+ """
126
+ launcher = LocalLauncher(
127
+ ansys_installation=self._ansys_installation,
128
+ use_sos=use_sos,
129
+ use_egl=use_egl,
130
+ additional_command_line_options=additional_command_line_options,
131
+ )
132
+ session = launcher.start()
133
+ self._session = session
134
+
135
+ def _attempt_dvs_python_bindings_import(self):
136
+ """Attempt to load the actual DVS Python bindings.
137
+
138
+ If an input lib folder has been provided, it will be tried first.
139
+ If an ansys installation has been provided, it will be tried as second choice.
140
+ """
141
+ if self._lib_folder:
142
+ try:
143
+ sys.path.append(self._lib_folder)
144
+ import dynamic_visualization_store
145
+
146
+ self._dvs_module = dynamic_visualization_store
147
+ except (ModuleNotFoundError, ImportError):
148
+ raise RuntimeError("Cannot import DVS module from provided library folder.")
149
+ if self._ansys_installation:
150
+ # Check if you are inside of an ansys install
151
+ apex_path = glob.glob(os.path.join(self._ansys_installation, "apex???"))
152
+ if not apex_path:
153
+ # try dev path
154
+ raise RuntimeError("Cannot find a valid EnSight install")
155
+ apex_path = apex_path[-1]
156
+ arch = "win64" if self._is_windows() else "linux_2.6_64"
157
+ apex_libs = os.path.join(apex_path, "machines", arch)
158
+ python_path = glob.glob(os.path.join(apex_libs, "Python-*"))[-1]
159
+ apex_py_version = re.search("Python-3.([0-9]+).([0-9]+)", os.path.basename(python_path))
160
+ apex_py_major_version = apex_py_version.group(1)
161
+ lib_path = os.path.join(python_path, "lib", f"python3.{apex_py_major_version}")
162
+ if self._is_windows():
163
+ lib_path = os.path.join(python_path, "DLLs")
164
+ sys.path.append(lib_path)
165
+ try:
166
+ import dynamic_visualization_store
167
+
168
+ self._dvs_module = dynamic_visualization_store
169
+ except (ModuleNotFoundError, ImportError):
170
+ warnings.warn("Cannot import DVS module from provided ansys installation folder.")
171
+
172
    # Transport enums accepted by start_dvs_servers().
    DVS_NULL_TRANSPORT = 0  # "null" protocol, no networking
    DVS_GRPC_TRANSPORT = 1  # gRPC protocol over 127.0.0.1

    @property
    def session(self):
        """The PyEnSight session associated with this DVS instance (may be None)."""
        return self._session

    @session.setter
    def session(self, session: "Session"):
        self._session = session
+
183
+ def start_dvs_servers(
184
+ self, num_servers: int, transport: int = 0, ranks_per_server: int = 1, secret_key: str = ""
185
+ ):
186
+ """Start DVS servers using the Python bindings.
187
+
188
+ The DVS servers will be started externall to the eventual EnSigth session available.
189
+ For simplicity, it is assumed that each server will receive the same number of ranks,
190
+ declared in input.
191
+
192
+ Parameters
193
+ ----------
194
+ num_servers: int
195
+ the number of DVS servers to launch
196
+ transport: int
197
+ the kind of transport to be used. Defaults to null.
198
+ Description of options as follows.
199
+
200
+ ================== =========================================================
201
+ Name Query type
202
+ ================== =========================================================
203
+ DVS_NULL_TRANSPORT Start the servers with the null protocol. Default
204
+ DVS_GRPC_TRANSPORT Start the servers with the grpc protocol.
205
+ ================== =========================================================
206
+ ranks_per_server: int
207
+ the number or ranks that will be connected to each server. Defaults to 1
208
+ secret_key: str
209
+ the secret key that will be used for the eventual gRPC connection.
210
+ Can be an empty string, that is also the default value.
211
+ """
212
+ if not self._secret_key:
213
+ self._secret_key = secret_key
214
+ transport_string = "null" if transport == 0 else "grpc"
215
+ uri = f"{transport_string}://"
216
+ grpc = transport == self.DVS_GRPC_TRANSPORT
217
+ options = {"CACHE_URI": f"hdf5:///{pathlib.Path(self._cache_folder).as_posix()}"}
218
+ if grpc:
219
+ uri += "127.0.0.1:0"
220
+ options.update(
221
+ {
222
+ "SERVER_SECURITY_SECRET": secret_key,
223
+ }
224
+ )
225
+ try:
226
+ for n in range(0, num_servers):
227
+ # Assume ranks equally distributed
228
+ server_id = self.server_create(uri=uri)
229
+ self.server_start(
230
+ server_id, server_num=n, local_ranks=ranks_per_server, options=options
231
+ )
232
+ self._server_ids.append(server_id)
233
+ self._servers[n] = {
234
+ "server_id": server_id,
235
+ "ranks": ranks_per_server,
236
+ "in_ensight": False,
237
+ }
238
+ if grpc:
239
+ uri_to_save = self.server_get_uri(server_id)
240
+ port = re.search(":([0-9]+)", uri_to_save)
241
+ if port:
242
+ self._servers[n].update(
243
+ {"server_uri": uri_to_save, "port": int(port.group(1))}
244
+ )
245
+ self._total_ranks = ranks_per_server * len(self._server_ids)
246
+ started = False
247
+ start = time.time()
248
+ while not started and time.time() - start < 60:
249
+ if not all([self.server_started(s) for s in self._server_ids]):
250
+ time.sleep(0.5)
251
+ else:
252
+ started = True
253
+ if not started:
254
+ raise RuntimeError("The DVS servers have not started in 60 seconds.")
255
+ except Exception as e:
256
+ traceback.print_exc()
257
+ raise RuntimeError(f"Couldn't start the servers, error: {e}")
258
+
259
+ def _start_dvs_client(self, server_id: int, rank: int, dedup=False):
260
+ """Start a DVS client.
261
+
262
+ Parameters
263
+ ----------
264
+ server_id: int
265
+ the server ID to connect to
266
+ rank: int
267
+ the rank of the client to launch
268
+ secret_key: str
269
+ the secret key to be used to connect to the DVS server
270
+ dedup: bool
271
+ True to not send duplicate data to server
272
+ """
273
+ if server_id not in self._server_ids:
274
+ raise RuntimeError(f"Server ID {server_id} not started in this process.")
275
+ flags = self.FLAGS_BLOCK_FOR_SERVER
276
+ if dedup:
277
+ flags |= self.FLAGS_DEDUP
278
+ try:
279
+ client_id = self.connect(server_id=server_id, secret=self._secret_key, flags=flags)
280
+ except Exception as e:
281
+ traceback.print_exc()
282
+ raise RuntimeError(f"Couldn't start the client, error {e}")
283
+ self._clients[self._client_count] = {
284
+ "client_id": client_id,
285
+ "server_id": server_id,
286
+ "rank": rank,
287
+ "update_started": False,
288
+ }
289
+ self._client_count += 1
290
+
291
+ def start_dvs_servers_in_ensight(self, ranks_per_server: int, secret_key=""):
292
+ """Launch the DVS servers in EnSight for an in-situ session.
293
+
294
+ On each EnSight server a DVS server will be launched.
295
+
296
+ Parameters
297
+ ----------
298
+ ranks_per_server: int
299
+ how many ranks will be sent to each server. This will be used
300
+ in a later stage for the clients launch and connection.
301
+ secret_key: str
302
+ the secret key that will be used for the eventual gRPC connection.
303
+ Can be an empty string, that is also the default value.
304
+ """
305
+ if not self._secret_key:
306
+ self._secret_key = secret_key
307
+ if not self._session:
308
+ raise RuntimeError(
309
+ "A PyEnSight session is required to start the DVS servers in EnSight."
310
+ )
311
+ thread, port = self._session.ensight.utils.readers.dvs.launch_live_dvs(
312
+ secret_key=secret_key
313
+ )
314
+ num_servers = self._session._launcher._use_sos or 1
315
+ base_uri = f"grpc://{self._session.hostname}"
316
+ self._total_ranks = num_servers * ranks_per_server
317
+ # Need to implement SOS support in session.ensight.utils.readers.dvs.launch_live_dvs
318
+ for n in range(num_servers):
319
+ # Just create a server but not start it
320
+ server_id = self.server_create(uri=base_uri + f":{port+n}")
321
+ self._server_ids.append(server_id)
322
+ self._servers[n] = {
323
+ "server_uri": base_uri + ":{}".format(port + n),
324
+ "port": port + n,
325
+ "server_id": server_id,
326
+ "in_ensight": True,
327
+ "ranks": ranks_per_server,
328
+ }
329
+
330
+ def start_dvs_clients(self, dataset_name: str, dedup=False):
331
+ """Launch the DVS clients and connect to the existing DVS servers.
332
+
333
+ Parameters
334
+ ----------
335
+ dataset_name: str
336
+ The dataset name required to initialize the following exports.
337
+ """
338
+ self._dataset_name = dataset_name
339
+ rank_per_server = list(self._servers.values())[0].get("ranks")
340
+ local_ranks = 0
341
+ n = 0
342
+ for rank in range(0, self._total_ranks):
343
+ server = self._servers[n]
344
+ local_ranks += 1
345
+ if local_ranks == rank_per_server:
346
+ local_ranks = 0
347
+ n += 1
348
+ self._start_dvs_client(int(server["server_id"]), rank, dedup=dedup)
349
+
350
+ def _begin_update(
351
+ self, client_dict: Dict[str, Union[str, int, bool]], time: float, rank: int, chunk: int
352
+ ):
353
+ """Start an update.
354
+
355
+ Parameters
356
+ ----------
357
+ client_dict: dict
358
+ A dictionary holding the DVS client parameters
359
+ time: float
360
+ The time value for the current update. May be a time already used
361
+ rank: int
362
+ The rank of the update
363
+ chunk: int
364
+ The chunk of the update
365
+ """
366
+ try:
367
+ _ = self.begin_update(client_dict["client_id"], self._update_num, time, rank, chunk)
368
+ client_dict["update_started"] = True
369
+ except Exception as e:
370
+ traceback.print_exc()
371
+ raise RuntimeError(f"Couldn't begin update. Error: {e}")
372
+
373
+ def begin_updates(self, time: float):
374
+ """Begin an update on all the clients available for the input time value.
375
+
376
+ Each update will be launched on a separate thread. The client associated
377
+ to the update will be flagged for the update start.
378
+
379
+ Currently we are assuming one chunk. Chunking support will be added in a future
380
+ release.
381
+
382
+ Parameters
383
+ ----------
384
+ time: float
385
+ The time value for the current update. May be a time already used
386
+ """
387
+ for _, client_vals in self._clients.items():
388
+ thread = threading.Thread(
389
+ target=self._begin_update, args=(client_vals, time, client_vals["rank"], 0)
390
+ )
391
+ thread.start()
392
+
393
+ def begin_initialization(self):
394
+ """Begin initialization for all the clients."""
395
+ for c in range(self._client_count):
396
+ client = self._clients[c]
397
+ _ = self.begin_init(
398
+ client["client_id"],
399
+ dataset_name=f"Simba_{self._dataset_name}",
400
+ rank=client["rank"],
401
+ total_ranks=self._total_ranks,
402
+ num_chunks=1,
403
+ )
404
+
405
+ def end_initialization(self):
406
+ """End initialization for all the clients."""
407
+ for c in range(self._client_count):
408
+ client = self._clients[c]
409
+ _ = self.end_init(client["client_id"])
410
+
411
+ def create_part(self, part_id: int, part_name: str, metadata: Optional[Dict[str, str]] = None):
412
+ """Create a part definition for the DVS export.
413
+
414
+ Parameters
415
+ ----------
416
+ part_id: int
417
+ the ID of the part to be exported
418
+ part_name: str
419
+ the name of the part to export
420
+ metadata: dict
421
+ An optional dictionary of metadata to attach to the part.
422
+ """
423
+ if not metadata:
424
+ metadata = {}
425
+ if self._parts.get(part_id):
426
+ print("Part already created, skip")
427
+ return
428
+ part = {
429
+ "id": part_id,
430
+ "name": part_name,
431
+ "structured": False,
432
+ "chunking": False,
433
+ "tags": metadata,
434
+ }
435
+ for c in range(self._client_count):
436
+ client = self._clients[c]
437
+ self.add_part_info(client["client_id"], [part])
438
+ self._parts[part_id] = part
439
+
440
+ def create_variable(
441
+ self,
442
+ var_id: int,
443
+ var_name: str,
444
+ var_type: int,
445
+ location: int,
446
+ unit: str = "",
447
+ unit_label="",
448
+ metadata: Optional[Dict[str, str]] = None,
449
+ ):
450
+ """Create a variable definition for the DVS export.
451
+
452
+ Parameters
453
+ ----------
454
+ var_id: int
455
+ the ID of the var to be exported
456
+ var_name: str
457
+ the name of the var to export
458
+ var_type: int
459
+ The variable type. Check the VARTYPE enums available with this module
460
+ location: int
461
+ The variable location. Check the LOCATION enums available with this module
462
+ unit: str
463
+ The variable units. See https://nexusdemo.ensight.com/docs/python/html/ENS_UNITSSchema.html
464
+ unit_label: str
465
+ The label for the variable units. See https://nexusdemo.ensight.com/docs/python/html/ENS_UNITSSchema.html
466
+ metadata: dict
467
+ An optional dictionary of metadata to attach to the var.
468
+ """
469
+ if not metadata:
470
+ metadata = {}
471
+ if self._vars.get(var_id):
472
+ print("Var already created, skip")
473
+ return
474
+ var = {
475
+ "id": var_id,
476
+ "name": var_name,
477
+ "tags": metadata,
478
+ "type": var_type,
479
+ "location": location,
480
+ "unit": unit,
481
+ "unit_label": unit_label,
482
+ }
483
+ for c in range(self._client_count):
484
+ client = self._clients[c]
485
+ self.add_var_info(client["client_id"], [var])
486
+ self._vars[var_id] = var
487
+
488
+ def _check_updates_started(self):
489
+ """Check that all the updates started successfully.
490
+
491
+ This is required because the launch of the updates is threaded.
492
+ """
493
+ started = False
494
+ start = time.time()
495
+ while not started and time.time() - start < 60:
496
+ started = all([vals["update_started"] for c, vals in self._clients.items()])
497
+ if not started:
498
+ time.sleep(0.5)
499
+ if not started:
500
+ for c, vals in self._clients.items():
501
+ update = vals["update_started"]
502
+ logging.debug(f"Client {c}, update: {update}")
503
+ raise RuntimeError("Not all clients have begun the updates.")
504
+
505
+ def send_coordinates(self, part_id: int, vertices: Union[List[float], numpy.ndarray]):
506
+ """Send the coordinates data for the input part.
507
+
508
+ The full coordinates array will be sent across all the ranks.
509
+ The data will be used for building a mesh chunk in DVS.
510
+ The data are assumed in the following format:
511
+ [x0, y0, z0, x1, y1, z1, ...]
512
+
513
+ Parameters
514
+ ----------
515
+ part_id: int
516
+ the part to define the coordinates for
517
+ vertices: List[int] or numpy array
518
+ the coordinates array. The format is described above.
519
+ """
520
+ if not self._parts.get(part_id):
521
+ raise RuntimeError(
522
+ "Please create the part first via create_part() or the lower level add_part_info."
523
+ )
524
+ if not isinstance(vertices, numpy.ndarray):
525
+ vertices = numpy.array(vertices)
526
+ reshaped_vertices = vertices.reshape(-1, 3)
527
+ x_coords = reshaped_vertices[:, 0]
528
+ y_coords = reshaped_vertices[:, 1]
529
+ z_coords = reshaped_vertices[:, 2]
530
+ self._check_updates_started()
531
+ for c in range(self._client_count):
532
+ client = self._clients[c]
533
+ self.update_nodes(
534
+ client["client_id"], part_id=part_id, x=x_coords, y=y_coords, z=z_coords
535
+ )
536
+
537
    def send_variable_data(
        self,
        var_id: int,
        part_id: int,
        values: Union[List[float], numpy.ndarray],
    ):
        """Send the variable data for the input variable.

        The variable type (scalar/vector) and location (node/element) stored
        at creation time select which low-level update call is used.

        Parameters
        ----------
        var_id: int
            the ID of the variable that will get its values updated.
        part_id: int
            the ID of the part to update the variable for
        values: List[float] or numpy array
            the variable array. If the variable is a vector, the values are expected as
            [v1x, v1y, v1z, v2x, v2y, v2z ...]

        Raises
        ------
        RuntimeError
            If the variable or part was not created first, or the part
            connectivity has not been sent yet.
        """
        if not self._vars.get(var_id):
            raise RuntimeError(
                "Please create the variable first via create_var() or the lower level add_var_info."
            )
        if not self._parts.get(part_id):
            raise RuntimeError(
                "Please create the part first via create_part() or the lower level add_part_info."
            )
        # Element variables need the element type recorded by send_connectivity().
        if not self._parts[part_id].get("dvs_elem_type"):
            raise RuntimeError(f"Please send first the part connectivity for part {part_id}")
        elem_type = self._parts[part_id]["dvs_elem_type"]
        if not isinstance(values, numpy.ndarray):
            values = numpy.array(values)
        # Updates are begun on worker threads; wait until all have started.
        self._check_updates_started()
        var_type = self._vars[var_id]["type"]
        location = self._vars[var_id]["location"]
        # The following checks are there just to make mypy happy
        if isinstance(var_type, (str, bool, dict)):
            raise RuntimeError("Var type is not an integer")
        if isinstance(location, (str, bool, dict)):
            raise RuntimeError("Location is not an integer")
        # NOTE(review): variable types other than scalar/vector (and locations
        # other than node/element) are silently ignored here — confirm intended.
        for c in range(self._client_count):
            client = self._clients[c]
            if var_type == self.VARTYPE_SCALAR:
                if location == self.LOCATION_NODE:
                    self.update_var_node_scalar(
                        client["client_id"], var_id=var_id, part_id=part_id, values=values
                    )
                elif location == self.LOCATION_ELEMENT:
                    self.update_var_element_scalar(
                        client["client_id"],
                        var_id=var_id,
                        part_id=part_id,
                        elem_type=elem_type,
                        values=values,
                    )
            elif var_type == self.VARTYPE_VECTOR:
                if location == self.LOCATION_NODE:
                    self.update_var_node_vector(
                        client["client_id"], var_id=var_id, part_id=part_id, values=values
                    )
                elif location == self.LOCATION_ELEMENT:
                    self.update_var_element_vector(
                        client["client_id"],
                        var_id=var_id,
                        part_id=part_id,
                        elem_type=elem_type,
                        values=values,
                    )
+
605
+ @staticmethod
606
+ def _split_list(lst: Union[List[int], List[float]], num_parts: int):
607
+ """Split the input list in n parts.
608
+
609
+ lst: list
610
+ the list to be split
611
+ num_parts: int
612
+ the number of parts to split the list into
613
+
614
+ Returns
615
+ -------
616
+ parts: list
617
+ A list containing the parts the original list was split into
618
+ """
619
+ n = len(lst)
620
+ part_size = n // num_parts
621
+ remainder = n % num_parts
622
+ parts = []
623
+ start = 0
624
+ for i in range(num_parts):
625
+ end = start + part_size + (1 if i < remainder else 0)
626
+ parts.append(lst[start:end])
627
+ start = end
628
+ return parts
629
+
630
+ def _disassemble_simba_connectivity(self, faces):
631
+ i = 0
632
+ vertices_per_face = []
633
+ connectivity_1d = []
634
+ indices = []
635
+ while i < len(faces):
636
+ indices.append(i)
637
+ i += faces[i] + 1
638
+ mask = numpy.zeros(faces.shape, dtype=bool)
639
+ mask[indices] = True
640
+ vertices_per_face = numpy.array(faces[mask])
641
+ connectivity_1d = numpy.array(faces[~mask])
642
+ connectivity_split = numpy.split(connectivity_1d, numpy.cumsum(vertices_per_face[:-1]))
643
+ all_same = numpy.all(numpy.array(vertices_per_face) == vertices_per_face[0])
644
+ return connectivity_split, vertices_per_face, all_same
645
+
646
    def send_connectivity(self, part_id, faces: Union[List, numpy.ndarray], ghost=False):
        """Send the connectivity data for the input part.

        The data will be used for building an element block in DVS.
        The connectivity array will be split among all the available ranks.
        The data are assumed in the following format:
        [n, i1, i2, ...in, m, j1, j2, ...jn, p, k1, k2, ...kp, ...]
        i.e. the first element declares how many vertices are available on the face,
        then the following elements will be the indices of the vertices for the specific
        face, and so on.

        Parameters
        ----------
        part_id: int
            the part to define the connectivity for
        faces: List[int] or numpy array
            the connectivity value. The format is described above.
        ghost: bool
            True if the input data contains ghost elements.

        Raises
        ------
        RuntimeError
            If no clients have been started, or the part was not created first.
        """
        if not self._clients:
            raise RuntimeError("No DVS clients started yet.")
        if not self._parts.get(part_id):
            raise RuntimeError(
                "Please create the part first via create_part() or the lower level add_part_info."
            )
        if not isinstance(faces, numpy.ndarray):
            faces = numpy.array(faces)
        connectivity_split, vertices_per_face, all_same = self._disassemble_simba_connectivity(
            faces
        )
        # Default to the generic polygon type; when every face has the same
        # vertex count a fixed element type (bar/tri/quad) can be used instead.
        elem_type = self.ELEMTYPE_N_SIDED_POLYGON
        if all_same:
            num_vertices = vertices_per_face[0]
            _elem_type = self._elem_type_map.get(num_vertices)
            if _elem_type:
                elem_type = _elem_type
        if ghost:
            # NOTE(review): assumes each ghost enum value directly follows its
            # non-ghost counterpart — confirm against the DVS ELEMTYPE enums.
            elem_type += 1
        # Updates are begun on worker threads; wait until all have started.
        self._check_updates_started()
        # Distribute the faces (and their vertex counts) across all ranks.
        split_arrays = self._split_list(connectivity_split, self._total_ranks)
        split_num_faces = self._split_list(vertices_per_face, self._total_ranks)
        for c in range(self._client_count):
            client = self._clients[c]
            arrays = split_arrays[c]
            # Flatten this rank's share of per-face index arrays.
            if len(arrays) > 1:
                indices = numpy.concatenate(arrays)
            elif arrays:
                indices = arrays[0]
            else:
                indices = numpy.array([])
            if elem_type not in [
                self.ELEMTYPE_N_SIDED_POLYGON,
                self.ELEMTYPE_N_SIDED_POLYGON_GHOST,
            ]:
                self.update_elements(
                    client["client_id"], part_id=part_id, elem_type=elem_type, indices=indices
                )
            else:
                # Polygons additionally need the per-polygon vertex counts.
                connectivity_num_faces = split_num_faces[c]
                self.update_elements_polygon(
                    client["client_id"],
                    part_id=part_id,
                    elem_type=elem_type,
                    nodes_per_polygon=numpy.array(connectivity_num_faces),
                    indices=indices,
                )
        # Remember the element type so send_variable_data() can reuse it.
        self._parts[part_id]["dvs_elem_type"] = elem_type
+
715
+ def _check_timestep_count(self, timeout=120.0):
716
+ """Check that there are no pending timesteps before loading data.
717
+
718
+ Parameters
719
+ ----------
720
+ timeout: float
721
+ the timeout to set while checking for pending timesteps
722
+ """
723
+ ready = False
724
+ start = time.time()
725
+ while not ready and time.time() - start < timeout:
726
+ vals = []
727
+ for server_id in self._server_ids:
728
+ num_pending, num_complete = self.server_timestep_count(server_id)
729
+ vals.append(num_pending == 0)
730
+ ready = all(vals)
731
+ if not ready:
732
+ time.sleep(0.5)
733
+ if not ready:
734
+ raise RuntimeError(
735
+ f"There are still pending timesteps within the input timeout of {timeout} seconds"
736
+ )
737
+
738
+ def load_dataset_in_ensight(self, timeout=120.0):
739
+ """Launch the cached dataset in EnSight.
740
+
741
+ Parameters
742
+ ----------
743
+ timeout: float
744
+ the timeout to set while checking for pending timesteps
745
+ """
746
+ if not self._session:
747
+ raise RuntimeError("A PyEnSight session must be available.")
748
+ self._check_timestep_count(timeout=timeout)
749
+ self._session.load_data(os.path.join(self._cache_folder, f"Simba_{self._dataset_name}.dvs"))
750
+
751
+ def end_updates(self):
752
+ """End the current updates."""
753
+ for c in range(self._client_count):
754
+ client = self._clients[c]
755
+ _ = self.end_update(client["client_id"])
756
+ client["update_started"] = False
757
+ self._update_num += 1
758
+
759
+ def delete_item_on_clients(self, update_num, filter=""):
760
+ """Delete an item from all the running clients.
761
+
762
+ Parameters
763
+ ----------
764
+ update_num: int
765
+ the update number to remove from the database
766
+ filter: str
767
+ the filter to apply when deleting the update number
768
+ """
769
+ for c in range(self._client_count):
770
+ client = self._clients[c]
771
+ _ = self.delete_item(client["client_id"], update_num, client["rank"], filter)
772
+
773
+ def get_dvs_data_from_container(self, destination: str, use_docker=False):
774
+ """Utility to save the data from the container to a local destination.
775
+
776
+ destination: str
777
+ the folder where to copy the files to
778
+ use_docker: bool
779
+ if True, download is done using the docker CLI
780
+ """
781
+ if not isinstance(self._session._launcher, DockerLauncher):
782
+ raise RuntimeError("Method only available for DockerLauncher instances.")
783
+ if not os.path.exists(destination):
784
+ os.makedirs(destination)
785
+ posix_uri = pathlib.Path(destination).as_uri()
786
+ if use_docker:
787
+ bits, stat = self._session._launcher._container.get_archive(self._cache_folder)
788
+ with tarfile.open(fileobj=io.BytesIO(b"".join(bits)), mode="r") as tar:
789
+ safe_extract(tar, destination)
790
+ os.remove(bits)
791
+ else:
792
+ self._session.copy_from_session(posix_uri, ["dvs_cache"])