pyedb 0.60.0__py3-none-any.whl → 0.61.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (34)
  1. pyedb/__init__.py +1 -1
  2. pyedb/configuration/cfg_components.py +35 -7
  3. pyedb/dotnet/database/cell/hierarchy/component.py +8 -6
  4. pyedb/dotnet/database/cell/hierarchy/model.py +1 -28
  5. pyedb/dotnet/database/cell/hierarchy/s_parameter_model.py +10 -14
  6. pyedb/dotnet/database/cell/hierarchy/spice_model.py +13 -7
  7. pyedb/dotnet/database/components.py +5 -1
  8. pyedb/dotnet/database/edb_data/padstacks_data.py +5 -3
  9. pyedb/dotnet/database/modeler.py +2 -1
  10. pyedb/dotnet/database/padstack.py +187 -1
  11. pyedb/dotnet/edb.py +70 -1
  12. pyedb/generic/general_methods.py +21 -0
  13. pyedb/grpc/database/definition/materials.py +1 -1
  14. pyedb/grpc/database/definition/padstack_def.py +16 -9
  15. pyedb/grpc/database/padstacks.py +201 -6
  16. pyedb/grpc/database/primitive/padstack_instance.py +90 -0
  17. pyedb/grpc/edb.py +70 -1
  18. pyedb/grpc/rpc_session.py +16 -3
  19. pyedb/workflows/__init__.py +21 -0
  20. pyedb/workflows/job_manager/__init__.py +21 -0
  21. pyedb/workflows/job_manager/backend/__init__.py +21 -0
  22. pyedb/workflows/job_manager/backend/job_manager_handler.py +910 -0
  23. pyedb/workflows/job_manager/backend/job_submission.py +1169 -0
  24. pyedb/workflows/job_manager/backend/service.py +1663 -0
  25. pyedb/workflows/job_manager/backend/start_service.py +86 -0
  26. pyedb/workflows/job_manager/backend/submit_job_on_scheduler.py +168 -0
  27. pyedb/workflows/job_manager/backend/submit_local_job.py +166 -0
  28. pyedb/workflows/utilities/__init__.py +21 -0
  29. pyedb/workflows/utilities/cutout.py +1 -1
  30. pyedb/workflows/utilities/hfss_log_parser.py +446 -0
  31. {pyedb-0.60.0.dist-info → pyedb-0.61.0.dist-info}/METADATA +7 -4
  32. {pyedb-0.60.0.dist-info → pyedb-0.61.0.dist-info}/RECORD +34 -24
  33. {pyedb-0.60.0.dist-info → pyedb-0.61.0.dist-info}/WHEEL +0 -0
  34. {pyedb-0.60.0.dist-info → pyedb-0.61.0.dist-info}/licenses/LICENSE +0 -0
pyedb/workflows/job_manager/backend/job_submission.py
@@ -0,0 +1,1169 @@
1
+ # Copyright (C) 2023 - 2025 ANSYS, Inc. and/or its affiliates.
2
+ # SPDX-License-Identifier: MIT
3
+ #
4
+ #
5
+ # Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ # of this software and associated documentation files (the "Software"), to deal
7
+ # in the Software without restriction, including without limitation the rights
8
+ # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ # copies of the Software, and to permit persons to whom the Software is
10
+ # furnished to do so, subject to the following conditions:
11
+ #
12
+ # The above copyright notice and this permission notice shall be included in all
13
+ # copies or substantial portions of the Software.
14
+ #
15
+ # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ # SOFTWARE.
22
+
23
+
24
+ """
25
+ ``job_submission`` --- Cross-platform HFSS simulation runner with enterprise scheduler support
26
+ ==============================================================================================
27
+
28
+ This module provides a single entry point, :func:`create_hfss_config`, that
29
+ builds a validated, JSON-serialisable configuration object and submits it to one of the following back ends:
30
+
31
+ * local subprocess (default)
32
+ * SLURM
33
+ * LSF (IBM Platform)
34
+ * PBS / Torque
35
+ * Windows HPC Server
36
+
37
+ The configuration is a **validated** pydantic model and
38
+ can be round-tripped through JSON for persistence or REST transmission.
39
+
40
+ Examples
41
+ --------
42
+ Local simulation::
43
+
44
+ >>> cfg = create_hfss_config(
45
+ ... ansys_edt_path="/ansys/v241/Linux64/ansysedt",
46
+ ... jobid="patch_antenna",
47
+ ... project_path="/home/antenna.aedt")
48
+ >>> result = cfg.run_simulation(timeout=3600)
49
+ >>> result.returncode
50
+ 0
51
+
52
+ SLURM cluster::
53
+
54
+ >>> cfg = create_hfss_config(
55
+ ... jobid="array_001",
56
+ ... project_path="/shared/array.aedt",
57
+ ... scheduler_type=SchedulerType.SLURM,
58
+ ... scheduler_options=SchedulerOptions(
59
+ ... queue="compute",
60
+ ... nodes=4,
61
+ ... memory="64GB",
62
+ ... time="08:00:00"))
63
+ >>> job_id = cfg.run_simulation()
64
+ >>> print(job_id)
65
+ slurm_job_12345
66
+ """ # noqa: D205,D400 (summary line is intentionally long)
67
+
68
+ from datetime import datetime
69
+ import enum
70
+ import getpass
71
+ import logging
72
+ import os
73
+ import platform
74
+ import re
75
+ import shlex
76
+ import shutil
77
+ import subprocess # nosec B404
78
+ import tempfile
79
+ from typing import Any, Dict, List, Optional, Union
80
+
81
+ from pydantic import BaseModel, Field
82
+
83
+ from pyedb.generic.general_methods import installed_ansys_em_versions, is_linux
84
+
85
+ logger = logging.getLogger("JobManager")
86
+
87
+
88
+ class SchedulerType(enum.Enum):
89
+ """
90
+ Supported enterprise job schedulers.
91
+
92
+ Members
93
+ -------
94
+ NONE : str
95
+ Direct subprocess execution (default).
96
+ SLURM : str
97
+ Simple Linux Utility for Resource Management.
98
+ LSF : str
99
+ IBM Platform Load Sharing Facility.
100
+ PBS : str
101
+ Portable Batch System (Torque / PBS Pro).
102
+ WINDOWS_HPC : str
103
+ Microsoft Windows HPC Server.
104
+ """
105
+
106
+ NONE = "none"
107
+ LSF = "lsf"
108
+ SLURM = "slurm"
109
+ PBS = "pbs"
110
+ WINDOWS_HPC = "windows_hpc"
111
+
112
+
113
+ class SchedulerOptions(BaseModel):
114
+ """
115
+ Resource requirements and scheduler-specific directives.
116
+
117
+ All attributes can be checked with :meth:`validate_fields`, which
118
+ :class:`HFSSSimulationConfig` calls during its own validation.
119
+
120
+ Parameters
121
+ ----------
122
+ queue : str, optional
123
+ Partition or queue name. Defaults to ``"default"``.
124
+ time : str, optional
125
+ Wall-time limit in ``HH:MM:SS`` or ``D.HH:MM:SS``. Defaults to
126
+ ``"24:00:00"``.
127
+ nodes : int, optional
128
+ Number of compute nodes. Defaults to ``1``.
129
+ tasks_per_node : int, optional
130
+ Processes per node. Defaults to ``1``.
131
+ memory : str, optional
132
+ Memory per node, e.g. ``"4GB"``. Defaults to ``"4GB"``.
133
+ account : str, optional
134
+ Account / project to charge.
135
+ reservation : str, optional
136
+ Advance reservation name.
137
+ qos : str, optional
138
+ Quality-of-service level.
139
+ constraints : str, optional
140
+ Node features, e.g. ``"gpu"``.
141
+ exclusive : bool, optional
142
+ Request whole nodes. Defaults to ``False``.
143
+ gpus : int, optional
144
+ Number of GPUs. Defaults to ``0``.
145
+ gpu_type : str, optional
146
+ GPU model, e.g. ``"a100"``.
147
+ priority : str, optional
148
+ Job priority: ``Low``, ``BelowNormal``, ``Normal``, ``AboveNormal``,
149
+ ``High``. Defaults to ``"Normal"``.
150
+ email_notification : str, optional
151
+ Address for status mails.
152
+ run_as_administrator : bool, optional
153
+ Elevated privileges (Windows HPC only). Defaults to ``False``.
154
+
155
+ Raises
156
+ ------
157
+ ValueError
158
+ On any validation failure.
159
+ """
160
+
161
+ queue: str = "default"
162
+ time: str = "24:00:00"
163
+ nodes: int = 1
164
+ tasks_per_node: int = 1
165
+ cores_per_node: int = 0 # 0 → use scheduler default
166
+ memory: str = "4GB"
167
+ account: Optional[str] = None
168
+ reservation: Optional[str] = None
169
+ qos: Optional[str] = None
170
+ constraints: Optional[str] = None
171
+ exclusive: bool = False
172
+ gpus: int = 0
173
+ gpu_type: Optional[str] = None
174
+ priority: str = "Normal"
175
+ email_notification: Optional[str] = None
176
+ run_as_administrator: bool = False
177
+
178
+ def validate_fields(self) -> None:
179
+ """
180
+ Validate all scheduler options for correctness and consistency.
181
+
182
+ Raises
183
+ ------
184
+ ValueError
185
+ If any parameter is invalid or out of range.
186
+ """
187
+ if self.nodes < 1:
188
+ raise ValueError("Number of nodes must be at least 1")
189
+ if self.tasks_per_node < 1:
190
+ raise ValueError("Tasks per node must be at least 1")
191
+ if self.gpus < 0:
192
+ raise ValueError("GPU count cannot be negative")
193
+ if self.cores_per_node < 0:
194
+ raise ValueError("cores_per_node must be non-negative")
195
+
196
+ # Validate priority values
197
+ valid_priorities = ["Low", "BelowNormal", "Normal", "AboveNormal", "High"]
198
+ if self.priority not in valid_priorities:
199
+ raise ValueError(f"Priority must be one of: {valid_priorities}")
200
+
201
+ # Flexible time format validation for different schedulers
202
+ time_patterns = [
203
+ r"^\d+:\d{2}:\d{2}$", # HH:MM:SS
204
+ r"^\d+\.\d{2}:\d{2}:\d{2}$", # days.hours:minutes:seconds (Windows HPC)
205
+ r"^\d+$", # minutes only
206
+ ]
207
+
208
+ if not any(re.match(pattern, self.time) for pattern in time_patterns):
209
+ raise ValueError("Time must be in HH:MM:SS, days.HH:MM:SS, or minutes format")
210
+
211
+
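+ # Illustrative sketch (not part of the package source): how the time-format
+ # check in ``validate_fields`` above treats the three accepted patterns.
+ #
+ #     >>> SchedulerOptions(time="08:00:00").validate_fields()    # HH:MM:SS - accepted
+ #     >>> SchedulerOptions(time="1.12:00:00").validate_fields()  # D.HH:MM:SS - accepted
+ #     >>> SchedulerOptions(time="90").validate_fields()          # minutes - accepted
+ #     >>> SchedulerOptions(time="8h").validate_fields()          # raises ValueError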
212
+ class MachineNode(BaseModel):
213
+ """
214
+ Compute-node descriptor for distributed HFSS runs.
215
+
216
+ Parameters
217
+ ----------
218
+ hostname : str, optional
219
+ DNS name or IP. Defaults to ``"localhost"``.
220
+ cores : int, optional
221
+ Logical cores to use. ``-1`` means *all*. Defaults to ``-1``.
222
+ max_cores : int, optional
223
+ Physical cores available. Defaults to ``20``.
224
+ utilization : int, optional
225
+ CPU percentage to utilize (1–100). Defaults to ``90``.
226
+
227
+ Raises
228
+ ------
229
+ ValueError
230
+ If ``utilization`` or ``max_cores`` is out of range.
231
+ """
232
+
233
+ hostname: str = "localhost"
234
+ cores: int = -1
235
+ max_cores: int = 20
236
+ utilization: int = 90
237
+
238
+ def __init__(self, **data):
239
+ """Initialize and validate parameters."""
240
+ super().__init__(**data)
241
+ self.validate_fields()
242
+
243
+ def validate_fields(self) -> None:
244
+ """
245
+ Validate machine node parameters for correctness.
246
+
247
+ Ensures all node parameters are within valid ranges and formats.
248
+
249
+ Raises
250
+ ------
+ ValueError
+ If any parameter is invalid.
251
+ """
252
+ if not self.hostname:
253
+ raise ValueError("Hostname cannot be empty")
254
+ if self.utilization < 1 or self.utilization > 100:
255
+ raise ValueError("Utilization must be between 1 and 100")
256
+ if self.max_cores < 1:
257
+ raise ValueError("Max cores must be at least 1")
258
+
259
+ def __str__(self) -> str:
260
+ """
261
+ Return string representation in HFSS machinelist format.
262
+
263
+ Returns
264
+ -------
265
+ str
266
+ Node configuration in format ``hostname:cores:max_cores:util%``.
267
+ """
268
+ return f"{self.hostname}:{self.cores}:{self.max_cores}:{self.utilization}%"
269
+
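+ # Quick sketch of the machinelist token produced by ``MachineNode.__str__``
+ # (the hostname and core counts are made-up example values):
+ #
+ #     >>> str(MachineNode())
+ #     'localhost:-1:20:90%'
+ #     >>> str(MachineNode(hostname="node01", cores=8, max_cores=16, utilization=80))
+ #     'node01:8:16:80%'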
270
+
271
+ class HFSS3DLayoutBatchOptions(BaseModel):
272
+ """
273
+ HFSS-specific solver flags and environment settings.
274
+
275
+ The scratch-directory default follows the platform temp location; the remaining defaults are fixed.
276
+
277
+ Parameters
278
+ ----------
279
+ create_starting_mesh : bool, optional
280
+ Generate initial mesh. Defaults to ``False``.
281
+ default_process_priority : str, optional
282
+ OS process priority. Defaults to ``"Normal"``.
283
+ enable_gpu : bool, optional
284
+ GPU acceleration. Defaults to ``False``.
285
+ mpi_vendor : str, optional
286
+ MPI implementation. Defaults to ``"Intel"``.
287
+ mpi_version : str, optional
288
+ Version string. Defaults to ``"Default"``.
289
+ remote_spawn_command : str, optional
290
+ Remote shell command. Defaults to ``"Scheduler"``.
291
+ solve_adaptive_only : bool, optional
292
+ Skip frequency sweep. Defaults to ``False``.
293
+ validate_only : bool, optional
294
+ Check setup only. Defaults to ``False``.
295
+ temp_directory : str, optional
296
+ Scratch path. Defaults to the system temporary directory.
297
+ """
298
+
299
+ create_starting_mesh: bool = False
300
+ default_process_priority: str = "Normal"
301
+ enable_gpu: bool = False
302
+ mpi_vendor: str = "Intel"
303
+ mpi_version: str = "Default"
304
+ remote_spawn_command: str = "Scheduler"
305
+ solve_adaptive_only: bool = False
306
+ validate_only: bool = False
307
+ temp_directory: str = Field(default_factory=lambda: tempfile.gettempdir())
308
+
309
+ def __init__(self, **data):
310
+ """Initialize and validate options."""
311
+ super().__init__(**data)
312
+ self.validate_fields()
313
+
314
+ def validate_fields(self) -> None:
315
+ """
316
+ Validate all HFSS 3D layout options for correctness.
317
+
318
+ Performs comprehensive validation of HFSS-specific parameters including
319
+ priority levels, MPI vendors, and directory paths.
320
+
321
+ Raises
322
+ ------
323
+ ValueError
324
+ If any parameter is invalid or unsupported.
325
+ """
326
+ valid_priorities = ["Normal", "Low", "High", "Idle"]
327
+ if self.default_process_priority not in valid_priorities:
328
+ raise ValueError(f"Priority must be one of: {valid_priorities}")
329
+
330
+ # MPI vendor validation - ANSYS expects specific string values
331
+ valid_mpi_vendors = ["Microsoft", "Intel", "Open MPI"]
332
+ if self.mpi_vendor not in valid_mpi_vendors:
333
+ raise ValueError(f"MPI vendor must be one of: {valid_mpi_vendors}")
334
+
335
+ if not self.temp_directory:
336
+ raise ValueError("Temp directory cannot be empty")
337
+
338
+ def to_batch_options_dict(self) -> Dict[str, str]:
339
+ """
340
+ Convert options to HFSS batch options dictionary format.
341
+
342
+ Returns
343
+ -------
344
+ Dict[str, str]
345
+ Key-value pairs suitable for the ``-batchoptions`` switch.
346
+ """
347
+ return {
348
+ "HFSS 3D Layout Design/CreateStartingMesh": "1" if self.create_starting_mesh else "0",
349
+ "HFSS 3D Layout Design/DefaultProcessPriority": self.default_process_priority,
350
+ "HFSS 3D Layout Design/EnableGPU": "1" if self.enable_gpu else "0",
351
+ "HFSS 3D Layout Design/MPIVendor": self.mpi_vendor,
352
+ "HFSS 3D Layout Design/MPIVersion": self.mpi_version,
353
+ "HFSS 3D Layout Design/RemoteSpawnCommand": self.remote_spawn_command,
354
+ "HFSS 3D Layout Design/SolveAdaptiveOnly": "1" if self.solve_adaptive_only else "0",
355
+ "HFSS 3D Layout Design/ValidateOnly": "1" if self.validate_only else "0",
356
+ "HFSS 3D Layout Design/RAMLimitPercent": "90",
357
+ "HFSS/RAMLimitPercent": "90",
358
+ "Maxwell 2D/RAMLimitPercent": "90",
359
+ "Maxwell 3D/RAMLimitPercent": "90",
360
+ "Q3D Extractor/RAMLimitPercent": "90",
361
+ }
362
+
363
+ # ------------------------------------------------------------------
364
+ # HFSS3DLayoutBatchOptions
365
+ # ------------------------------------------------------------------
366
+ def to_batch_options_string(self) -> str:
367
+ """
368
+ Return the Windows-safe string:
369
+ "'key1'='value1' 'key2'='value2' ..."
370
+ """
371
+ tmp = self.to_batch_options_dict()
386
+ quoted_pairs = [f"'{k}'='{v}'" for k, v in tmp.items()]
387
+ return " ".join(quoted_pairs)
388
+
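+ # Sketch of the two renderings above. The string form is what follows the
+ # ``-batchoptions`` switch on the command line (output truncated here):
+ #
+ #     >>> opts = HFSS3DLayoutBatchOptions(enable_gpu=True)
+ #     >>> opts.to_batch_options_dict()["HFSS 3D Layout Design/EnableGPU"]
+ #     '1'
+ #     >>> opts.to_batch_options_string()
+ #     "'HFSS 3D Layout Design/CreateStartingMesh'='0' 'HFSS 3D Layout Design/DefaultProcessPriority'='Normal' ..."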
389
+
390
+ class HFSSSimulationConfig(BaseModel):
391
+ """
392
+ Complete, validated simulation configuration.
393
+
394
+ The class is a pydantic model, **validated** on creation, and can be
395
+ serialised to/from JSON via :meth:`to_dict` / :meth:`from_dict`.
396
+
397
+ Parameters
398
+ ----------
399
+ ansys_edt_path : str, optional
400
+ Path to ``ansysedt`` executable. Auto-detected from installed Ansys EM versions if omitted.
401
+ solver : str, optional
402
+ Solver name. Defaults to ``"Hfss3DLayout"``.
403
+ jobid : str, optional
404
+ Unique identifier. Auto-generated with timestamp if omitted.
405
+ distributed : bool, optional
406
+ Enable MPI distribution. Defaults to ``True``.
407
+ machine_nodes : list[MachineNode], optional
408
+ Compute nodes. Defaults to ``[MachineNode()]``.
409
+ auto : bool, optional
410
+ Non-interactive mode. Defaults to ``True``.
411
+ non_graphical : bool, optional
412
+ Hide GUI. Defaults to ``True``.
413
+ monitor : bool, optional
414
+ Stream solver log. Defaults to ``True``.
415
+ layout_options : HFSS3DLayoutBatchOptions, optional
416
+ Solver flags. Defaults to a new instance.
417
+ project_path : str, optional
418
+ ``.aedt`` or ``.aedb`` file. Defaults to platform temp.
419
+ design_name : str, optional
420
+ Design inside project. Defaults to ``""`` (use active).
421
+ design_mode : str, optional
422
+ Variation name. Defaults to ``""``.
423
+ setup_name : str, optional
424
+ Setup to solve. Defaults to ``""``.
425
+ scheduler_type : SchedulerType, optional
426
+ External scheduler. Defaults to :attr:`SchedulerType.NONE`.
427
+ scheduler_options : SchedulerOptions, optional
428
+ Scheduler directives. Defaults to a new instance.
429
+
430
+ Raises
431
+ ------
432
+ ValueError
433
+ On validation failure.
434
+ FileNotFoundError
435
+ If *project_path* does not exist.
436
+ """
437
+
438
+ model_config = {"populate_by_name": True, "exclude_defaults": False}
439
+ ansys_edt_path: Optional[str] = None
440
+ solver: str = "Hfss3DLayout"
441
+ jobid: Optional[str] = None
442
+ user: str = "unknown"
443
+ distributed: bool = True
444
+ machine_nodes: List[MachineNode] = Field(default_factory=lambda: [MachineNode()])
445
+ auto: bool = True
446
+ non_graphical: bool = True
447
+ monitor: bool = True
448
+ layout_options: HFSS3DLayoutBatchOptions = Field(default_factory=HFSS3DLayoutBatchOptions)
449
+ project_path: str = Field(default_factory=lambda: os.path.join(tempfile.gettempdir(), "simulation.aedt"))
450
+ design_name: str = ""
451
+ design_mode: str = ""
452
+ setup_name: str = ""
453
+ scheduler_type: SchedulerType = SchedulerType.NONE
454
+ scheduler_options: SchedulerOptions = Field(default_factory=SchedulerOptions)
455
+
456
+ def __init__(self, **data):
457
+ """Initialize and validate the configuration."""
458
+ super().__init__(**data)
459
+ if not self.ansys_edt_path:
460
+ installed_versions = installed_ansys_em_versions()
461
+ if not installed_versions:
462
+ raise ValueError(
463
+ "No installed Ansys EM versions found. Please specify ansys_edt_path. Or "
464
+ "add ansysedt full path to the configuration."
465
+ )
466
+ if is_linux:
467
+ self.ansys_edt_path = os.path.join(list(installed_versions.values())[-1], "ansysedt") # latest
468
+ else:
469
+ self.ansys_edt_path = os.path.join(list(installed_versions.values())[-1], "ansysedt.exe") # latest
470
+ if not self.jobid:
471
+ self.jobid = f"JOB_ID_{datetime.now().strftime('%Y%m%d_%H%M%S')}"
472
+ if "auto" not in data: # user did not touch it
473
+ data["auto"] = self.scheduler_type != SchedulerType.NONE
474
+ self.validate_fields()
475
+
476
+ def validate_fields(self) -> None:
477
+ """
478
+ Validate all options and raise ``ValueError`` on violation.
479
+ Checks ranges, formats, and scheduler-specific rules.
480
+ """
481
+ if not self.jobid:
482
+ raise ValueError("Job ID cannot be empty")
483
+
484
+ if not re.match(r"^[a-zA-Z0-9_-]+$", self.jobid):
485
+ raise ValueError("Job ID can only contain letters, numbers, underscores, and hyphens")
486
+
487
+ if not self.project_path.lower().endswith((".aedt", ".aedb")):
488
+ raise ValueError("Project path must be a .aedt or .aedb file")
489
+
490
+ if not os.path.exists(self.project_path):
491
+ raise FileNotFoundError(f"Project file not found: {self.project_path}")
492
+
493
+ # Platform-scheduler compatibility validation
494
+ # if self.scheduler_type == SchedulerType.WINDOWS_HPC and platform.system() != "Windows":
495
+ # raise ValueError("Windows HPC scheduler is only available on Windows platforms")
496
+
497
+ # Validate scheduler options
498
+ self.scheduler_options.validate_fields()
499
+
500
+ def _build_ansys_command_for_launcher(self) -> str:
501
+ """
502
+ Build the *inner* ANSYS command that will be executed by srun.
503
+ Quotes are chosen so that the string survives one shell expansion
504
+ performed by sbatch.
505
+ """
506
+ # 1. executable
507
+ ansys_root = os.path.dirname(self.ansys_edt_path)
508
+ cmd_parts = [shlex.quote(self.ansys_edt_path)]
509
+
510
+ # 2. mandatory HFSS flags
511
+ cmd_parts.extend(["-distributed", f"-machinelist numcores={self._num_cores_for_scheduler()}"])
512
+ if self.auto:
513
+ cmd_parts.append("-auto")
514
+ if self.non_graphical:
515
+ cmd_parts.append("-ng")
516
+ if self.monitor:
517
+ cmd_parts.append("-monitor")
518
+
519
+ # 3. batch options (quoted to protect spaces in keys)
520
+ batch_opts = self.generate_batch_options_string()  # "'key1'='val1' 'key2'='val2'"
521
+ cmd_parts.extend(["-batchoptions", shlex.quote(batch_opts)])
522
+
523
+ # 4. project & design
524
+ design_str = self.generate_design_string()
525
+ if self.design_name:
526
+ cmd_parts.extend(["-batchsolve", design_str, shlex.quote(self.project_path)])
527
+ else:
528
+ cmd_parts.extend(["-batchsolve", shlex.quote(self.project_path)])
529
+
530
+ # join to one string that srun will receive
531
+ return " ".join(cmd_parts)
532
+
533
+ def generate_machinelist_string(self) -> str:
534
+ """
535
+ Return HFSS ``-machinelist`` argument.
536
+
537
+ Returns
538
+ -------
539
+ str
540
+ Machine list string.
541
+ """
542
+ if not self.machine_nodes:
543
+ return ""
544
+
545
+ node_strings = [str(node) for node in self.machine_nodes]
546
+ return f"list={','.join(node_strings)}"
547
+
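+ # Illustrative result of ``generate_machinelist_string`` for the default
+ # single-node setup (assumes an Ansys EM installation is found and the
+ # project file exists, so construction passes validation):
+ #
+ #     >>> cfg = HFSSSimulationConfig(project_path="/tmp/board.aedt")
+ #     >>> cfg.generate_machinelist_string()
+ #     'list=localhost:-1:20:90%'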
548
+ def generate_batch_options_string(self) -> str:
549
+ """
550
+ Generate HFSS batch options string from layout options.
551
+ Converts :class:`HFSS3DLayoutBatchOptions` to the command-line batch options format.
552
+ Each pair is rendered as "'key'='value'" and pairs are space-separated.
553
+
554
+ Returns
555
+ -------
556
+ str
557
+ Batch options string with space-separated key=value pairs.
558
+
559
+ """
560
+ options_dict = self.layout_options.to_batch_options_dict()
561
+ # Space-separated pairs; each key and value is quoted so that keys
562
+ # containing spaces survive the command line.
563
+ options_list = [f"'{k}'='{v}'" for k, v in options_dict.items()]
564
+ return " ".join(options_list)
565
+
566
+ def generate_design_string(self) -> str:
567
+ """
568
+ Generate design specification string for HFSS command.
569
+
570
+ Returns
571
+ -------
572
+ str
573
+ Design string.
574
+ """
575
+ return f"{self.design_name}:{self.design_mode}:{self.setup_name}"
576
+
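+ # Note: with design_name="Ant1", design_mode="Nominal" and setup_name="Setup1"
+ # (hypothetical values) the specifier above renders as "Ant1:Nominal:Setup1";
+ # with the empty defaults it collapses to "::".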
577
+ def generate_slurm_script(self) -> str:
578
+ """
579
+ Returns a proper SLURM batch script with shebang.
580
+ This script can be written to a file and submitted via sbatch.
581
+ """
582
+ opts = self.scheduler_options
583
+ ansys_root = os.path.dirname(self.ansys_edt_path)
584
+ launcher = os.path.join(ansys_root, "schedulers/scripts/utils/ansysedt_launcher.sh")
585
+ wrapper = os.path.join(ansys_root, "schedulers/scripts/utils/slurm_srun_wrapper.sh")
586
+ common = os.path.join(ansys_root, "common")
587
+
588
+ # build the inner ANSYS command
589
+ ansys_cmd = self._build_ansys_command_for_launcher()
590
+
591
+ # Build SLURM directives
592
+ sbatch_directives = [
593
+ "#!/bin/bash",
594
+ f"#SBATCH --export=NONE",
595
+ f"#SBATCH --chdir={os.path.dirname(os.path.abspath(self.project_path))}",
596
+ f"#SBATCH --job-name={self.jobid}",
597
+ f"#SBATCH --partition={opts.queue}",
598
+ f"#SBATCH --nodes={opts.nodes}",
599
+ f"#SBATCH --cpus-per-task=1",
600
+ f"#SBATCH --ntasks={self._num_cores_for_scheduler()}",
601
+ ]
602
+ # Remove memory limitation - not in reference command
603
+ # if opts.memory:
604
+ # sbatch_directives.append(f"#SBATCH --mem={opts.memory}")
605
+ if opts.account:
606
+ sbatch_directives.append(f"#SBATCH --account={opts.account}")
607
+ if opts.reservation:
608
+ sbatch_directives.append(f"#SBATCH --reservation={opts.reservation}")
609
+ if opts.qos:
610
+ sbatch_directives.append(f"#SBATCH --qos={opts.qos}")
611
+ if opts.constraints:
612
+ sbatch_directives.append(f"#SBATCH --constraint={opts.constraints}")
613
+ if opts.exclusive:
614
+ sbatch_directives.append("#SBATCH --exclusive")
615
+ if opts.gpus > 0:
616
+ gpu_type = f":{opts.gpu_type}" if opts.gpu_type else ""
617
+ sbatch_directives.append(f"#SBATCH --gpus={opts.gpus}{gpu_type}")
618
+
619
+ # Build the execution command
620
+ exec_cmd = (
621
+ f"{launcher} /usr/bin/env ANSYSEM_GENERIC_MPI_WRAPPER={wrapper} ANSYSEM_COMMON_PREFIX={common} "
622
+ f"ANSOFT_PASS_DEBUG_ENV_TO_REMOTE_ENGINES=1 srun --overcommit --export=ALL -n 1 -N 1 "
623
+ f"--cpu-bind=none --mem-per-cpu=0 --overlap {ansys_cmd}"
624
+ )
625
+
626
+ # Combine directives and command
627
+ script_lines = sbatch_directives + ["", exec_cmd]
628
+ return "\n".join(script_lines)
629
+
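+ # Rough shape of the script emitted above for a 4-node SLURM job (job name,
+ # paths and the trailing launcher line are placeholders; directives follow
+ # the list built in ``generate_slurm_script``):
+ #
+ #     #!/bin/bash
+ #     #SBATCH --export=NONE
+ #     #SBATCH --chdir=/shared/project_dir
+ #     #SBATCH --job-name=array_001
+ #     #SBATCH --partition=compute
+ #     #SBATCH --nodes=4
+ #     #SBATCH --cpus-per-task=1
+ #     #SBATCH --ntasks=4
+ #
+ #     <launcher> /usr/bin/env ANSYSEM_GENERIC_MPI_WRAPPER=<wrapper> ... srun --overcommit ... <ansysedt command>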
630
+ def generate_lsf_script(self) -> str:
631
+ """
632
+ Return LSF batch script.
633
+
634
+ Returns
635
+ -------
636
+ str
637
+ Multi-line string.
638
+ """
639
+ opts = self.scheduler_options
640
+ script = [
641
+ "#!/bin/bash",
642
+ f"#BSUB -J {self.jobid}",
643
+ f"#BSUB -o {self.jobid}.%J.out",
644
+ f"#BSUB -e {self.jobid}.%J.err",
645
+ f"#BSUB -q {opts.queue}",
646
+ f"#BSUB -W {opts.time}",
647
+ f"#BSUB -n {opts.nodes * opts.tasks_per_node}",
648
+ f'#BSUB -R "rusage[ncpus={opts.cores_per_node or opts.tasks_per_node}]"',
649
+ ]
650
+
651
+ if opts.memory:
652
+ script.append(f"#BSUB -R 'rusage[mem={opts.memory}]'")
653
+ if opts.account:
654
+ script.append(f"#BSUB -P {opts.account}")
655
+ if opts.exclusive:
656
+ script.append("#BSUB -x")
657
+
658
+ script.extend(
659
+ [
660
+ "",
661
+ "# Load ANSYS environment",
662
+ "module load ansys",
663
+ "",
664
+ "# Execute HFSS simulation",
665
+ self.generate_command_string(),
666
+ "",
667
+ "echo 'LSF job completed successfully'",
668
+ ]
669
+ )
670
+
671
+ return "\n".join(script)
672
+
673
+ def generate_scheduler_script(self) -> str:
674
+ """
675
+ Delegate to the correct generator based on
676
+ :attr:`scheduler_type`.
677
+
678
+ Returns
679
+ -------
680
+ str
681
+ Batch script or PowerShell code.
682
+
683
+ Raises
684
+ ------
685
+ ValueError
686
+ If *scheduler_type* is unsupported.
687
+ """
688
+ if self.scheduler_type == SchedulerType.SLURM:
689
+ return self.generate_slurm_script()
690
+ elif self.scheduler_type == SchedulerType.LSF:
691
+ return self.generate_lsf_script()
692
+ else:
693
+ raise ValueError(f"Unsupported scheduler type: {self.scheduler_type}")
694
+
695
+ def _num_cores_for_scheduler(self) -> int:
696
+ """Total cores requested: nodes * cores_per_node (or tasks fallback)."""
697
+ opts = self.scheduler_options
698
+ # prefer explicit cores-per-node, else tasks-per-node, else 1
699
+ cpp = opts.cores_per_node if opts.cores_per_node > 0 else opts.tasks_per_node
700
+ return opts.nodes * cpp
701
+
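+ # Worked example of the core count above: nodes=4, cores_per_node=0,
+ # tasks_per_node=8 -> falls back to tasks_per_node, so 4 * 8 = 32 cores.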
702
+ def generate_command_string(self) -> str:
703
+ """
704
+ Platform-escaped command line.
705
+ Local → list=…
706
+ Scheduler → distributed numcores=… + -auto
707
+ """
708
+ parts = []
709
+
710
+ # 1. executable
711
+ ansysedt_path = self.ansys_edt_path
712
+ if platform.system() == "Windows":
713
+ parts.append(f'"{ansysedt_path}"')
714
+ else:
715
+ parts.append(shlex.quote(ansysedt_path))
716
+
717
+ # 2. jobid
718
+ parts.append(f"-jobid {self.jobid}")
719
+
720
+ # 3. machine list & distributed flag
721
+ if self.scheduler_type == SchedulerType.NONE: # LOCAL
722
+ if self.machine_nodes:
723
+ simplified = [f"{n.hostname}:{n.cores}:{n.max_cores}:{n.utilization}%" for n in self.machine_nodes]
724
+ parts.append(f"-machinelist list={','.join(simplified)}")
725
+ else: # SCHEDULER
726
+ if self.distributed:
727
+ parts.append("-distributed")
728
+ parts.append(f"-machinelist numcores={self._num_cores_for_scheduler()}")
729
+ if self.auto: # auto only for schedulers
730
+ parts.append("-auto")
731
+
732
+ # 4. common flags
733
+ if self.non_graphical:
734
+ parts.append("-ng")
735
+ if self.monitor:
736
+ parts.append("-monitor")
737
+
738
+ # 5. batch options
739
+ batch_opts = self.generate_batch_options_string()
740
+ if platform.system() == "Windows":
741
+ parts.append(f'-batchoptions "{batch_opts}"')
742
+ else:
743
+ parts.append(f"-batchoptions {shlex.quote(batch_opts)}")
744
+
745
+ # 6. design & project
746
+ design_str = self.generate_design_string()
747
+ if platform.system() == "Windows":
748
+ proj_quoted = f'"{self.project_path}"'
749
+ else:
750
+ proj_quoted = shlex.quote(self.project_path)
751
+
752
+ if self.design_name:
753
+ parts.append(f"-batchsolve {design_str} {proj_quoted}")
754
+ else:
755
+ parts.append(f"-batchsolve {proj_quoted}")
756
+
757
+ return " ".join(parts)
758
+
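+ # For a local run (SchedulerType.NONE) the string built above looks roughly
+ # like the following on Linux (paths are placeholders and the -batchoptions
+ # payload is abbreviated):
+ #
+ #     /ansys/v241/Linux64/ansysedt -jobid JOB_ID_20250101_120000 \
+ #         -machinelist list=localhost:-1:20:90% -ng -monitor \
+ #         -batchoptions '...' -batchsolve /home/user/board.aedt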
759
+ def submit_to_scheduler(self, script_path: Optional[str] = None) -> subprocess.CompletedProcess:
760
+ """
761
+ Write the batch script (if *script_path* given) and submit to the
762
+ configured scheduler.
763
+
764
+ Parameters
765
+ ----------
766
+ script_path : str, optional
767
+ Destination file name. Auto-generated if omitted.
768
+
769
+ Returns
770
+ -------
771
+ subprocess.CompletedProcess
772
+ Result of ``sbatch`` / ``bsub`` / ``qsub`` / PowerShell.
773
+
774
+ Raises
775
+ ------
776
+ ValueError
777
+ If *scheduler_type* is :attr:`SchedulerType.NONE` and no scheduler can be auto-detected.
778
+ subprocess.TimeoutExpired
+ If submission takes longer than 30 s.
779
+ """
780
+ if self.scheduler_type == SchedulerType.NONE:
781
+ # ---- auto-detect (avoids circular import) -----------------
782
+ if platform.system() == "Windows":
783
+ detected = SchedulerType.NONE
784
+ else:
785
+ detected = SchedulerType.NONE
786
+ for cmd, sched in (("sinfo", SchedulerType.SLURM), ("bhosts", SchedulerType.LSF)):
787
+ if shutil.which(cmd):
788
+ detected = sched
789
+ break
790
+ # --------------------------------------------------------------
791
+ if detected == SchedulerType.NONE:
792
+ raise ValueError(
793
+ "No scheduler configured and none auto-detected on this host "
794
+ "(SLURM / LSF binaries not found in PATH)."
795
+ )
796
+ self.scheduler_type = detected
797
+
798
+ # Generate scheduler-specific script
799
+ script_content = self.generate_scheduler_script()
800
+
801
+ project_dir = os.path.dirname(os.path.abspath(self.project_path))
802
+ if script_path is None:
803
+ script_ext = "sh"
804
+ script_name = f"{self.jobid}_{self.scheduler_type.value}.{script_ext}"
805
+ script_path = os.path.join(project_dir, script_name)
806
+ else:
807
+ # user gave a relative name → make it relative to project dir
808
+ script_path = os.path.join(project_dir, script_path)
809
+
810
+ # Ensure directory exists
811
+ os.makedirs(project_dir, exist_ok=True)
812
+
813
+ # Save batch script with proper permissions
814
+ with open(script_path, "w", encoding="utf-8") as f:
815
+ f.write(script_content)
816
+
817
+ # if self.scheduler_type != SchedulerType.WINDOWS_HPC:
818
+ os.chmod(script_path, 0o750) # nosec B103
819
+ if self.scheduler_type == SchedulerType.SLURM:
820
+ submit_cmd = ["sbatch", script_path]
821
+ elif self.scheduler_type == SchedulerType.LSF:
822
+ submit_cmd = ["bsub"]
823
+ else:
824
+ submit_cmd = ["bsub", "<", script_path]
825
+ try:
826
+ # Execute submission command with timeout
827
+ if self.scheduler_type == SchedulerType.LSF:
828
+ # For LSF, redirect script content via stdin instead of shell
829
+ with open(script_path, "r") as script_file:
830
+ result = subprocess.run( # nosec B603
831
+ submit_cmd,
832
+ stdin=script_file,
833
+ capture_output=True,
834
+ text=True,
835
+ timeout=30,
836
+ shell=False,
837
+ )
838
+ else:
839
+ result = subprocess.run( # nosec B603
840
+ submit_cmd,
841
+ capture_output=True,
842
+ text=True,
843
+ timeout=30,
844
+ shell=False,
845
+ )
846
+ if result.stderr:
847
+ logger.warning(f"🔍 DEBUG: Submission stderr: {result.stderr}")
848
+
849
+ return result
850
+ except subprocess.TimeoutExpired:
851
+ raise Exception(f"Scheduler submission timed out after 30 seconds")
852
+ except Exception as e:
853
+ raise Exception(f"Failed to submit to {self.scheduler_type.value}: {e}")
854
+
855
+ def run_simulation(self, **subprocess_kwargs) -> Union[subprocess.CompletedProcess, str]:
856
+ """
857
+ **Main entry point** — run the simulation **either**
858
+
859
+ * locally (subprocess), or
860
+ * by submitting to an external scheduler.
861
+
862
+ Parameters
863
+ ----------
864
+ **subprocess_kwargs
865
+ Forwarded to ``subprocess.run`` for local execution.
866
+
867
+ Returns
868
+ -------
869
+ subprocess.CompletedProcess
870
+ For local runs (contains ``stdout``, ``stderr``, ``returncode``).
871
+ str
872
+ For scheduler runs — external job ID such as ``"slurm_job_12345"``.
873
+
874
+ Raises
875
+ ------
876
+ Exception
877
+ On any failure (solver not found, submission error, timeout, …).
878
+ """
879
+ if self.scheduler_type != SchedulerType.NONE:
880
+ # Submit to configured scheduler
881
+ result = self.submit_to_scheduler()
882
+
883
+ if result.returncode == 0:
884
+ # Extract and return job ID from scheduler output
885
+ job_id = self._extract_job_id(result.stdout)
886
+ return (
887
+ f"{self.scheduler_type.value}_job_{job_id}"
888
+ if job_id
889
+ else f"submitted_to_{self.scheduler_type.value}"
890
+ )
891
+ else:
892
+ raise Exception(f"Scheduler submission failed: {result.stderr}")
893
+ else:
894
+ # Direct execution path
895
+ system = platform.system()
896
+
897
+ # Default subprocess options
898
+ default_kwargs = {
899
+ "capture_output": True,
900
+ "text": True,
901
+ "timeout": None,
902
+ "encoding": "utf-8",
903
+ "errors": "replace",
904
+ }
905
+
906
+ # Platform-specific command generation
907
+ if system == "Windows":
908
+ default_kwargs["shell"] = True
909
+ command = self.generate_command_string()
910
+ else:
911
+ default_kwargs["shell"] = False
912
+ command = self.generate_command_list()
913
+
914
+ # Apply user-provided kwargs
915
+ default_kwargs.update(subprocess_kwargs)
916
+
917
+ # extra safety
918
+ if self.scheduler_type == SchedulerType.NONE:
919
+ self.auto = False
920
+
921
+ try:
922
+ print(f"Starting HFSS simulation: {self.jobid}")
923
+ print(f"Command: {' '.join(command) if isinstance(command, list) else command}")
924
+
925
+ result = subprocess.run(command, **default_kwargs) # nosec B603
926
+
927
+ if result.returncode == 0:
928
+ print(f"✅ Simulation {self.jobid} completed successfully")
929
+ else:
930
+ print(f"❌ Simulation {self.jobid} failed with return code {result.returncode}")
931
+
932
+ return result
933
+
934
+ except subprocess.TimeoutExpired:
935
+ raise Exception(f"Simulation {self.jobid} timed out after {default_kwargs.get('timeout')} seconds")
936
+ except subprocess.CalledProcessError as e:
937
+ raise Exception(f"Simulation {self.jobid} failed with error: {e}")
938
+ except FileNotFoundError as e:
939
+ raise Exception(f"ANSYS executable not found for simulation {self.jobid}: {e}")
940
+ except Exception as e:
941
+ raise Exception(f"Failed to run simulation {self.jobid}: {e}")
942
+
943
+ def _extract_job_id(self, output: str) -> Optional[str]:
944
+ """
945
+ Parse scheduler stdout and extract the **external** job ID.
946
+
947
+ Parameters
948
+ ----------
949
+ output : str
950
+ Raw stdout of ``sbatch``, ``bsub``, ``qsub``, or PowerShell.
951
+
952
+ Returns
953
+ -------
954
+ str or None
955
+ Job ID if found, otherwise ``None``.
956
+ """
957
+ if self.scheduler_type == SchedulerType.SLURM:
958
+ # sbatch output: "Submitted batch job 12345"
959
+ match = re.search(r"Submitted batch job (\d+)", output)
960
+ return match.group(1) if match else None
961
+
962
+ elif self.scheduler_type == SchedulerType.LSF:
963
+ # bsub output: "Job <12345> is submitted to queue <normal>."
964
+ match = re.search(r"<(\d+)>", output)
965
+ return match.group(1) if match else None
966
+
967
+ elif self.scheduler_type == SchedulerType.PBS:
968
+ # qsub output: "12345.hostname"
969
+ return output.strip().split(".")[0] if output.strip() else None
970
+
971
+ elif self.scheduler_type == SchedulerType.WINDOWS_HPC:
972
+ # Windows HPC output: "Job submitted with ID: 12345"
973
+ match = re.search(r"ID:\s*(\d+)", output)
974
+ return match.group(1) if match else None
975
+
976
+ return None
977
+
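+ # Sketch of the job-ID extraction above against typical scheduler replies
+ # (the sample outputs are illustrative):
+ #
+ #     SLURM : "Submitted batch job 12345"             -> "12345"
+ #     LSF   : "Job <67890> is submitted to queue ..." -> "67890"
+ #     PBS   : "4242.headnode"                         -> "4242"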
978
+ def generate_command_list(self) -> List[str]:
979
+ """
980
+ List form for subprocess.run(shell=False).
981
+ Local → list=…
982
+ Scheduler → distributed numcores=… + -auto
983
+ """
984
+ cmd = [self.ansys_edt_path, "-jobid", self.jobid]
985
+
986
+ # machine list & distributed flag
987
+ if self.scheduler_type == SchedulerType.NONE: # LOCAL
988
+ if self.machine_nodes:
989
+ simplified = [f"{n.hostname}:{n.cores}:{n.max_cores}:{n.utilization}%" for n in self.machine_nodes]
990
+ cmd.extend(["-machinelist", f"list={','.join(simplified)}"])
991
+ else: # SCHEDULER
992
+ if self.distributed:
993
+ cmd.append("-distributed")
994
+ cmd.extend(["-machinelist", f"numcores={self._num_cores_for_scheduler()}"])
995
+ if self.auto:
996
+ cmd.append("-auto")
997
+
998
+ # common flags
999
+ if self.non_graphical:
1000
+ cmd.append("-ng")
1001
+ if self.monitor:
1002
+ cmd.append("-monitor")
1003
+
1004
+ # batch options
1005
+ cmd.extend(["-batchoptions", self.generate_batch_options_string()])
1006
+
1007
+ # design & project
1008
+ design_str = self.generate_design_string()
1009
+ cmd.extend(["-batchsolve", design_str, self.project_path])
1010
+
1011
+ return cmd
1012
+
1013
+ def to_dict(self) -> Dict[str, Any]:
1014
+ """
1015
+ Serialize the **complete** configuration to a JSON-safe dictionary.
1016
+
1017
+ Returns
1018
+ -------
1019
+ dict
1020
+ Contains all fields including nested BaseModels and enums.
1021
+ """
1022
+ return {
1023
+ "solver": self.solver,
1024
+ "ansys_edt_path": self.ansys_edt_path,
1025
+ "jobid": self.jobid,
1026
+ "distributed": self.distributed,
1027
+ "machine_nodes": [node.model_dump() for node in self.machine_nodes],
1028
+ "auto": self.auto,
1029
+ "non_graphical": self.non_graphical,
1030
+ "monitor": self.monitor,
1031
+ "layout_options": self.layout_options.model_dump(),
1032
+ "project_path": self.project_path,
1033
+ "design_name": self.design_name,
1034
+ "design_mode": self.design_mode,
1035
+ "setup_name": self.setup_name,
1036
+ "scheduler_type": self.scheduler_type.value,
1037
+ "scheduler_options": self.scheduler_options.model_dump(),
1038
+ "platform": platform.system(),
1039
+ "timestamp": datetime.now().isoformat(),
1040
+ "version": "1.0.0",
1041
+ }
1042
+
1043
+ @classmethod
1044
+ def from_dict(cls, data: Dict[str, Any]) -> "HFSSSimulationConfig":
1045
+ """
1046
+ Deserialize a dictionary produced by :meth:`to_dict`.
1047
+
1048
+ Parameters
1049
+ ----------
1050
+ data : dict
1051
+ Dictionary obtained via ``json.load`` or equivalent.
1052
+
1053
+ Returns
1054
+ -------
1055
+ HFSSSimulationConfig
1056
+ New validated instance.
1057
+ """
1058
+ machine_nodes = [MachineNode(**node_data) for node_data in data.get("machine_nodes", [])]
1059
+ layout_options = HFSS3DLayoutBatchOptions(**data.get("layout_options", {}))
1060
+
1061
+ # Handle scheduler_options creation with proper defaults
1062
+ scheduler_options_data = data.get("scheduler_options", {})
1063
+ if not scheduler_options_data:
1064
+ # Create default scheduler options for local execution
1065
+ scheduler_options = SchedulerOptions()
1066
+ else:
1067
+ scheduler_options = SchedulerOptions(**scheduler_options_data)
1068
+
1069
+ return cls(
1070
+ solver=data.get("solver", "Hfss3DLayout"),
1071
+ ansys_edt_path=data.get("ansys_edt_path", ""),
1072
+ jobid=data.get("jobid", f"RSM_{datetime.now().strftime('%Y%m%d_%H%M%S')}"),
1073
+ user=data.get("user", "unknown"),
1074
+ distributed=data.get("distributed", True),
1075
+ machine_nodes=machine_nodes,
1076
+ auto=data.get("auto", True),
1077
+ non_graphical=data.get("non_graphical", True),
1078
+ monitor=data.get("monitor", True),
1079
+ layout_options=layout_options,
1080
+ project_path=data.get("project_path", ""),
1081
+ design_name=data.get("design_name", "main"),
1082
+ design_mode=data.get("design_mode", "Nominal"),
1083
+ setup_name=data.get("setup_name", "Setup1"),
1084
+ scheduler_type=SchedulerType(data.get("scheduler_type", "none")),
1085
+ scheduler_options=scheduler_options,
1086
+ )
1087
+
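+ # Round-trip sketch for the two serialisation helpers above (assumes ``cfg``
+ # is a valid configuration whose project file still exists on disk):
+ #
+ #     >>> payload = cfg.to_dict()                      # JSON-safe, e.g. json.dumps(payload)
+ #     >>> clone = HFSSSimulationConfig.from_dict(payload)
+ #     >>> clone.jobid == cfg.jobid
+ #     True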
1088
+ def __str__(self) -> str:
1089
+ """
1090
+ String representation of the complete HFSS command.
1091
+
1092
+ Returns
1093
+ -------
1094
+ str
1095
+ Complete HFSS command string.
1096
+ """
1097
+ return self.generate_command_string()
1098
+
1099
+
1100
+ def create_hfss_config(
1101
+ project_path: str,
1102
+ jobid: Optional[str] = "",
1103
+ ansys_edt_path: Optional[str] = "",
1104
+ design_name: Optional[str] = "",
1105
+ setup_name: Optional[str] = "",
1106
+ machine_nodes: Optional[List[MachineNode]] = None,
1107
+ scheduler_type: SchedulerType = SchedulerType.NONE,
1108
+ scheduler_options: Optional[SchedulerOptions] = None,
1109
+ **kwargs,
1110
+ ) -> HFSSSimulationConfig:
1111
+ """
1112
+ **Convenience factory** that hides all boilerplate and produces a
1113
+ **validated** configuration in a single call.
1114
+
1115
+ Parameters
1116
+ ----------
1117
+ project_path : str
+ Absolute path to ``.aedt`` or ``.aedb`` project.
+ jobid : str, optional
+ Unique job identifier (letters, digits, ``_``, ``-`` only). Auto-generated if omitted.
+ ansys_edt_path : str, optional
+ Absolute path to ``ansysedt`` executable. If not provided, the latest
+ installed version is used.
1124
+ design_name : str, optional
1125
+ Design inside project. Default ``""`` (active design).
1126
+ setup_name : str, optional
1127
+ Setup name. Default ``""`` (first setup).
1128
+ machine_nodes : list[MachineNode], optional
1129
+ Compute nodes for MPI. Default ``[MachineNode()]``.
1130
+ scheduler_type : SchedulerType, optional
1131
+ External scheduler. Default :attr:`SchedulerType.NONE`.
1132
+ scheduler_options : SchedulerOptions, optional
1133
+ Scheduler directives. Default instance.
1134
+ **kwargs
1135
+ Additional fields passed directly to ``HFSSSimulationConfig``.
1136
+
1137
+ Returns
1138
+ -------
1139
+ HFSSSimulationConfig
1140
+ Ready-to-run configuration.
1141
+
1142
+ Examples
1143
+ --------
1144
+ >>> cfg = create_hfss_config(
1145
+ ... ansys_edt_path="/ansys/v241/Linux64/ansysedt",
1146
+ ... jobid="patch",
1147
+ ... project_path="/shared/patch.aedt",
1148
+ ... scheduler_type=SchedulerType.SLURM,
1149
+ ... scheduler_options=SchedulerOptions(nodes=4, memory="32GB"),
1150
+ ... )
1151
+ >>> job = cfg.run_simulation()
1152
+ """
1153
+ if machine_nodes is None:
1154
+ machine_nodes = [MachineNode()]
1155
+
1156
+ if scheduler_options is None:
1157
+ scheduler_options = SchedulerOptions()
1158
+
1159
+ return HFSSSimulationConfig(
1160
+ ansys_edt_path=ansys_edt_path,
1161
+ jobid=jobid,
1162
+ project_path=project_path,
1163
+ design_name=design_name,
1164
+ setup_name=setup_name,
1165
+ machine_nodes=machine_nodes,
1166
+ scheduler_type=scheduler_type,
1167
+ scheduler_options=scheduler_options,
1168
+ **kwargs,
1169
+ )