luminarycloud 0.22.0__py3-none-any.whl → 0.22.2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- luminarycloud/_client/authentication_plugin.py +49 -0
- luminarycloud/_client/client.py +38 -11
- luminarycloud/_client/http_client.py +1 -1
- luminarycloud/_client/retry_interceptor.py +64 -2
- luminarycloud/_helpers/__init__.py +9 -0
- luminarycloud/_helpers/_inference_jobs.py +227 -0
- luminarycloud/_helpers/_parse_iso_datetime.py +54 -0
- luminarycloud/_helpers/download.py +11 -0
- luminarycloud/_helpers/proto_decorator.py +38 -7
- luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2.py +152 -132
- luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2.pyi +66 -8
- luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2_grpc.py +34 -0
- luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2_grpc.pyi +12 -0
- luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2.py +142 -39
- luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2.pyi +300 -3
- luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2_grpc.py +34 -0
- luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2_grpc.pyi +12 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2.py +255 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2.pyi +466 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2_grpc.py +242 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2_grpc.pyi +95 -0
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2.py +29 -7
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2.pyi +39 -0
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2_grpc.py +36 -0
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2_grpc.pyi +18 -0
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2.py +88 -65
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2.pyi +42 -0
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2_grpc.py +34 -0
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2_grpc.pyi +12 -0
- luminarycloud/_proto/api/v0/luminarycloud/vis/vis_pb2.py +163 -153
- luminarycloud/_proto/api/v0/luminarycloud/vis/vis_pb2.pyi +37 -3
- luminarycloud/_proto/base/base_pb2.py +7 -6
- luminarycloud/_proto/base/base_pb2.pyi +4 -0
- luminarycloud/_proto/client/simulation_pb2.py +358 -339
- luminarycloud/_proto/client/simulation_pb2.pyi +89 -3
- luminarycloud/_proto/physicsaiinferenceservice/physicsaiinferenceservice_pb2.py +35 -0
- luminarycloud/_proto/physicsaiinferenceservice/physicsaiinferenceservice_pb2.pyi +7 -0
- luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2.py +6 -3
- luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2_grpc.py +68 -0
- luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2_grpc.pyi +24 -0
- luminarycloud/_wrapper.py +53 -7
- luminarycloud/enum/vis_enums.py +6 -0
- luminarycloud/feature_modification.py +25 -32
- luminarycloud/geometry.py +10 -6
- luminarycloud/geometry_version.py +4 -0
- luminarycloud/mesh.py +4 -0
- luminarycloud/meshing/mesh_generation_params.py +5 -6
- luminarycloud/meshing/sizing_strategy/sizing_strategies.py +1 -2
- luminarycloud/outputs/__init__.py +2 -0
- luminarycloud/outputs/output_definitions.py +3 -3
- luminarycloud/outputs/stopping_conditions.py +94 -0
- luminarycloud/params/enum/_enum_wrappers.py +16 -0
- luminarycloud/params/geometry/shapes.py +33 -33
- luminarycloud/params/simulation/adaptive_mesh_refinement/__init__.py +1 -0
- luminarycloud/params/simulation/adaptive_mesh_refinement/active_region_.py +83 -0
- luminarycloud/params/simulation/adaptive_mesh_refinement/boundary_layer_profile_.py +1 -1
- luminarycloud/params/simulation/adaptive_mesh_refinement_.py +8 -1
- luminarycloud/physics_ai/__init__.py +7 -0
- luminarycloud/physics_ai/inference.py +166 -199
- luminarycloud/physics_ai/models.py +22 -0
- luminarycloud/physics_ai/solution.py +4 -0
- luminarycloud/pipelines/api.py +143 -16
- luminarycloud/pipelines/core.py +1 -1
- luminarycloud/pipelines/stages.py +22 -9
- luminarycloud/project.py +61 -8
- luminarycloud/simulation.py +25 -0
- luminarycloud/types/__init__.py +2 -0
- luminarycloud/types/ids.py +2 -0
- luminarycloud/types/vector3.py +1 -2
- luminarycloud/vis/__init__.py +1 -0
- luminarycloud/vis/data_extraction.py +7 -7
- luminarycloud/vis/filters.py +97 -0
- luminarycloud/vis/interactive_report.py +163 -7
- luminarycloud/vis/report.py +113 -1
- luminarycloud/vis/visualization.py +3 -0
- luminarycloud/volume_selection.py +16 -8
- luminarycloud/workflow_utils.py +149 -0
- {luminarycloud-0.22.0.dist-info → luminarycloud-0.22.2.dist-info}/METADATA +1 -1
- {luminarycloud-0.22.0.dist-info → luminarycloud-0.22.2.dist-info}/RECORD +80 -76
- {luminarycloud-0.22.0.dist-info → luminarycloud-0.22.2.dist-info}/WHEEL +1 -1
- luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2.py +0 -61
- luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2.pyi +0 -85
- luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2_grpc.py +0 -67
- luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2_grpc.pyi +0 -26
- luminarycloud/_proto/inferenceservice/inferenceservice_pb2.py +0 -69
- luminarycloud/pipeline_util/dictable.py +0 -27
luminarycloud/pipelines/api.py
CHANGED

@@ -7,8 +7,10 @@ from time import time, sleep
 import logging
 
 from .arguments import PipelineArgValueType
+from .core import Stage
 from ..pipelines import Pipeline, PipelineArgs
 from .._client import get_default_client
+from .._helpers import parse_iso_datetime
 
 logger = logging.getLogger(__name__)
 
@@ -22,7 +24,7 @@ class LogLine:
     @classmethod
     def from_json(cls, json: dict) -> "LogLine":
         return cls(
-            timestamp=
+            timestamp=parse_iso_datetime(json["timestamp"]),
             level=json["level"],
             message=json["message"],
         )
@@ -53,8 +55,8 @@ class PipelineRecord:
             name=json["name"],
             description=json["description"],
             definition_yaml=json["definition_yaml"],
-            created_at=
-            updated_at=
+            created_at=parse_iso_datetime(json["created_at"]),
+            updated_at=parse_iso_datetime(json["updated_at"]),
         )
 
     def pipeline_jobs(self) -> "list[PipelineJobRecord]":
@@ -69,6 +71,25 @@ class PipelineRecord:
         res = get_default_client().http.get(f"/rest/v0/pipelines/{self.id}/pipeline_jobs")
         return [PipelineJobRecord.from_json(p) for p in res["data"]]
 
+    def delete(self) -> None:
+        """
+        Delete this pipeline.
+
+        This will permanently delete the pipeline and all associated pipeline jobs.
+        This operation cannot be undone.
+
+        Raises
+        ------
+        HTTPException
+            If the pipeline does not exist or if you do not have permission to delete it.
+
+        Examples
+        --------
+        >>> pipeline = pipelines.get_pipeline("pipeline-123")
+        >>> pipeline.delete()
+        """
+        get_default_client().http.delete(f"/rest/v0/pipelines/{self.id}")
+
 
 @dataclass
 class PipelineJobRecord:
@@ -80,11 +101,12 @@ class PipelineJobRecord:
     pipeline_id: str
     name: str
     description: str | None
-    status: Literal["pending", "running", "completed", "failed"]
+    status: Literal["pending", "running", "completed", "failed", "cancelled", "paused"]
    created_at: datetime
     updated_at: datetime
     started_at: datetime | None
     completed_at: datetime | None
+    paused_at: datetime | None
 
     @classmethod
     def from_json(cls, json: dict) -> "PipelineJobRecord":
@@ -92,14 +114,15 @@ class PipelineJobRecord:
             id=json["id"],
             pipeline_id=json["pipeline_id"],
             name=json["name"],
-            description=json
+            description=json.get("description"),
             status=json["status"],
-            created_at=
-            updated_at=
-            started_at=
+            created_at=parse_iso_datetime(json["created_at"]),
+            updated_at=parse_iso_datetime(json["updated_at"]),
+            started_at=(parse_iso_datetime(json["started_at"]) if json.get("started_at") else None),
             completed_at=(
-
+                parse_iso_datetime(json["completed_at"]) if json.get("completed_at") else None
             ),
+            paused_at=(parse_iso_datetime(json["paused_at"]) if json.get("paused_at") else None),
         )
 
     def pipeline(self) -> PipelineRecord:
@@ -156,18 +179,37 @@ class PipelineJobRecord:
         res = get_default_client().http.get(f"/rest/v0/pipeline_jobs/{self.id}/artifacts")
         return res["data"]
 
+    def delete(self) -> None:
+        """
+        Delete this pipeline job.
+
+        This will permanently delete the pipeline job and all associated runs and tasks.
+        This operation cannot be undone.
+
+        Raises
+        ------
+        HTTPException
+            If the pipeline job does not exist or if you do not have permission to delete it.
+
+        Examples
+        --------
+        >>> pipeline_job = pipelines.get_pipeline_job("pipelinejob-123")
+        >>> pipeline_job.delete()
+        """
+        get_default_client().http.delete(f"/rest/v0/pipeline_jobs/{self.id}")
+
     def wait(
         self,
         *,
         interval_seconds: float = 5,
         timeout_seconds: float = float("inf"),
         print_logs: bool = False,
-    ) -> Literal["completed", "failed"]:
+    ) -> Literal["completed", "failed", "cancelled"]:
         """
-        Wait for the pipeline job to complete or
+        Wait for the pipeline job to complete, fail, or be cancelled.
 
         This method polls the pipeline job status at regular intervals until it reaches
-        a terminal state (completed or
+        a terminal state (completed, failed, or cancelled).
 
         Parameters
         ----------
@@ -180,7 +222,7 @@ class PipelineJobRecord:
 
         Returns
         -------
-        Literal["completed", "failed"]
+        Literal["completed", "failed", "cancelled"]
             The final status of the pipeline job.
 
         Raises
@@ -216,6 +258,9 @@ class PipelineJobRecord:
             elif updated_job.status == "failed":
                 logger.warning(f"Pipeline job {self.id} failed")
                 return "failed"
+            elif updated_job.status == "cancelled":
+                logger.info(f"Pipeline job {self.id} was cancelled")
+                return "cancelled"
 
             # Check timeout
             if time() >= deadline:
@@ -233,13 +278,86 @@ class PipelineJobRecord:
         self.started_at = updated_job.started_at
         self.completed_at = updated_job.completed_at
 
+    def get_concurrency_limits(self) -> dict[str, int]:
+        """
+        Returns the concurrency limits for this pipeline job.
+
+        Returns
+        -------
+        dict[str, int]
+            A dictionary mapping stage IDs to their concurrency limits.
+        """
+        res = get_default_client().http.get(f"/rest/v0/pipeline_jobs/{self.id}/concurrency_limits")
+        return {k: v["limit"] for k, v in res["data"].items()}
+
+    def set_concurrency_limits(self, limits: dict[str, int]) -> None:
+        """
+        Sets the concurrency limits for this pipeline job.
+
+        Parameters
+        ----------
+        limits : dict[str, int]
+            A dictionary mapping stage IDs to their concurrency limits.
+        """
+        body = {k: {"limit": v} for k, v in limits.items()}
+        get_default_client().http.put(f"/rest/v0/pipeline_jobs/{self.id}/concurrency_limits", body)
+
+    def cancel(self) -> None:
+        """Cancel this running pipeline job.
+
+        This will request cancellation of the underlying Prefect flow run. The
+        job should eventually transition to a cancelled terminal state once
+        the backend processes the cancellation.
+
+        Raises
+        ------
+        HTTPError
+            If the pipeline job cannot be cancelled (e.g., not found, not
+            running, or lacks the necessary Prefect flow run ID).
+        """
+        get_default_client().http.post(f"/rest/v0/pipeline_jobs/{self.id}/cancel", {})
+        logger.info(f"Cancelled pipeline job {self.id}")
+
+    def pause(self) -> None:
+        """Pause this running pipeline job.
+
+        This will prevent new tasks from being scheduled while allowing
+        in-progress tasks to complete. The job status will be set to PAUSED
+        and all stage concurrency limits will be temporarily set to 0.
+
+        Call resume() to continue execution.
+
+        Raises
+        ------
+        HTTPError
+            If the pipeline job cannot be paused (e.g., not found or not in
+            RUNNING state).
+        """
+        get_default_client().http.post(f"/rest/v0/pipeline_jobs/{self.id}/pause", {})
+        logger.info(f"Paused pipeline job {self.id}")
+
+    def resume(self) -> None:
+        """Resume this paused pipeline job.
+
+        This will restore the job status to RUNNING and restore the original
+        concurrency limits, allowing new tasks to be scheduled again.
+
+        Raises
+        ------
+        HTTPError
+            If the pipeline job cannot be resumed (e.g., not found or not in
+            PAUSED state).
+        """
+        get_default_client().http.post(f"/rest/v0/pipeline_jobs/{self.id}/resume", {})
+        logger.info(f"Resumed pipeline job {self.id}")
+
 
 @dataclass
 class PipelineJobRunRecord:
     pipeline_job_id: str
     idx: int
     arguments: list[PipelineArgValueType]
-    status: Literal["pending", "running", "completed", "failed"]
+    status: Literal["pending", "running", "completed", "failed", "cancelled"]
 
     @classmethod
     def from_json(cls, json: dict) -> "PipelineJobRunRecord":
@@ -347,7 +465,11 @@ def get_pipeline(id: str) -> PipelineRecord:
 
 
 def create_pipeline_job(
-    pipeline_id: str,
+    pipeline_id: str,
+    args: PipelineArgs,
+    name: str,
+    description: str | None = None,
+    concurrency_limits: dict[str, int] | None = None,
 ) -> PipelineJobRecord:
     """
     Create a new pipeline job.
@@ -362,6 +484,8 @@ def create_pipeline_job(
         Name of the pipeline job.
     description : str, optional
        Description of the pipeline job.
+    concurrency_limits : dict[str, int], optional
+        A dictionary mapping stage IDs to their concurrency limits.
    """
 
     arg_rows = [row.row_values for row in args.rows]
@@ -373,7 +497,10 @@ def create_pipeline_job(
     }
 
     res = get_default_client().http.post(f"/rest/v0/pipelines/{pipeline_id}/pipeline_jobs", body)
-
+    pjr = PipelineJobRecord.from_json(res["data"])
+    if concurrency_limits is not None:
+        pjr.set_concurrency_limits(concurrency_limits)
+    return pjr
 
 
 def get_pipeline_job(id: str) -> PipelineJobRecord:

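Taken together, the api.py changes give PipelineJobRecord a full lifecycle API: delete(), cancel(), pause()/resume(), per-stage concurrency limits, and a new "cancelled" terminal state in wait(). A minimal usage sketch follows; the job ID and the "simulate" stage ID are hypothetical placeholders, and the import path simply mirrors the module layout shown in this diff.

# Minimal sketch of the new PipelineJobRecord lifecycle API in 0.22.2.
# "pipelinejob-123" and the "simulate" stage ID are hypothetical placeholders.
from luminarycloud.pipelines import api as pipelines

job = pipelines.get_pipeline_job("pipelinejob-123")

# New: per-stage concurrency limits, keyed by stage ID.
print(job.get_concurrency_limits())
job.set_concurrency_limits({"simulate": 2})

# New: pause() temporarily zeroes all stage limits; resume() restores them.
job.pause()
job.resume()

# wait() can now also return "cancelled" as a terminal state.
status = job.wait(print_logs=True)
if status == "cancelled":
    job.delete()  # permanently removes the job and all associated runs and tasks
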
luminarycloud/pipelines/core.py
CHANGED

@@ -292,7 +292,7 @@ class Pipeline:
     def pipeline_params(self) -> set[PipelineParameter]:
         return self._stages_dict_and_params()[1]
 
-    def
+    def get_stage_id(self, stage: Stage) -> str:
         return self._stage_ids[stage]
 
     def _stages_dict_and_params(self) -> tuple[dict, set[PipelineParameter]]:

luminarycloud/pipelines/stages.py
CHANGED

@@ -2,7 +2,7 @@
 from dataclasses import dataclass
 
 from .core import Stage, StageInputs, StageOutputs, PipelineOutput
-from .parameters import StringPipelineParameter, IntPipelineParameter
+from .parameters import BoolPipelineParameter, StringPipelineParameter, IntPipelineParameter
 from ..meshing import MeshGenerationParams
 
 
@@ -49,6 +49,20 @@ class ReadGeometry(Stage[ReadGeometryOutputs]):
     ----------
     geometry_id : str | StringPipelineParameter
         The ID of the Geometry to retrieve.
+    use_geo_without_copying : bool | BoolPipelineParameter
+        By default, this is False, meaning that each Geometry this stage references will be copied
+        and the PipelineJob will actually operate on the copied Geometry. This is done so that a
+        PipelineJob can be based on a single parametric Geometry which each PipelineJobRun modifies
+        by applying a NamedVariableSet. That modification mutates the Geometry, so those runs can
+        only happen in parallel without interfering with each other if they each operate on a
+        different copy of the Geometry.
+
+        However, if you've already prepared your Geometry in advance and you don't want the
+        PipelineJob to create copies, you can set this to True. In that case, the referenced
+        Geometry will be used directly without being copied.
+
+        IMPORTANT: If you set this to True, you must ensure no two PipelineJobRuns operate on the
+        same Geometry, i.e. no two PipelineArgs rows contain the same Geometry ID.
 
     Outputs
     -------
@@ -61,10 +75,11 @@ class ReadGeometry(Stage[ReadGeometryOutputs]):
         *,
         stage_name: str | None = None,
         geometry_id: str | StringPipelineParameter,
+        use_geo_without_copying: bool | BoolPipelineParameter = False,
     ):
         super().__init__(
             stage_name,
-            {"geometry_id": geometry_id},
+            {"geometry_id": geometry_id, "use_geo_without_copying": use_geo_without_copying},
             StageInputs(self),
             ReadGeometryOutputs._instantiate_for(self),
         )
@@ -202,13 +217,6 @@ class Mesh(Stage[MeshOutputs]):
             MeshOutputs._instantiate_for(self),
         )
 
-    # TODO: bring back the full MeshGenerationParams, but we need to be able to hydrate it from the
-    # pipeline YAML. I can probably bake that logic into PipelineDictable, `from_pipeline_dict` or
-    # something.
-    # @classmethod
-    # def _parse_params(cls, params: dict) -> dict:
-    #     return {"mesh_gen_params": MeshGenerationParams.from_pipeline_dict(**params["mesh_gen_params"])}
-
 
 @dataclass
 class SimulateOutputs(StageOutputs):
@@ -230,6 +238,9 @@ class Simulate(Stage[SimulateOutputs]):
         The name to assign to the Simulation. If None, a default name will be used.
     sim_template_id : str | StringPipelineParameter
         The ID of the SimulationTemplate to use for the Simulation.
+    batch_processing : bool | BoolPipelineParameter, default True
+        If True, the Simulation will run as a standard job. If False, the Simulation will run as a
+        priority job.
 
     Outputs
     -------
@@ -244,10 +255,12 @@ class Simulate(Stage[SimulateOutputs]):
         mesh: PipelineOutputMesh,
         sim_name: str | StringPipelineParameter | None = None,
         sim_template_id: str | StringPipelineParameter,
+        batch_processing: bool | BoolPipelineParameter = True,
     ):
         super().__init__(
             stage_name,
             {
+                "batch_processing": batch_processing,
                 "sim_name": sim_name,
                 "sim_template_id": sim_template_id,
             },

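The stages.py changes add two opt-in knobs: ReadGeometry's use_geo_without_copying and Simulate's batch_processing. Below is a minimal sketch of constructing a stage with the new parameter; the Geometry ID is a hypothetical placeholder, and wiring the stage into a Pipeline (whose get_stage_id method was made public in the core.py hunk above) is only indicated in comments.

# Minimal sketch of the new ReadGeometry parameter; "geometry-123" is a
# hypothetical placeholder.
from luminarycloud.pipelines.stages import ReadGeometry

read_geo = ReadGeometry(
    geometry_id="geometry-123",
    # Default False: each PipelineJobRun operates on its own copy of the
    # Geometry, so runs applying different NamedVariableSets cannot interfere.
    # Set True only if no two PipelineArgs rows share a Geometry ID.
    use_geo_without_copying=False,
)

# Once assembled into a Pipeline (construction elided here), the stage's ID is
# the key used for concurrency limits, e.g.:
#   pipeline.get_stage_id(read_geo)
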
luminarycloud/project.py
CHANGED

@@ -25,6 +25,10 @@ from ._helpers import (
     upload_file,
     upload_mesh,
     upload_table_as_json,
+    create_inference_job,
+    get_inference_job,
+    list_inference_jobs,
+    SurfaceForInference,
 )
 from ._helpers.warnings import deprecated
 from ._proto.api.v0.luminarycloud.geometry import geometry_pb2 as geometrypb
@@ -47,10 +51,20 @@ from ._proto.upload import upload_pb2 as uploadpb
 from ._wrapper import ProtoWrapper, ProtoWrapperBase
 from .enum import GPUType, MeshType, TableType
 from .meshing import MeshAdaptationParams, MeshGenerationParams
+from .named_variable_set import NamedVariableSet, get_named_variable_set
+from .physics_ai.inference import InferenceJob, VisualizationExport
 from .simulation_param import SimulationParam
 from .tables import RectilinearTable, create_rectilinear_table
-from .types import
-
+from .types import (
+    MeshID,
+    ProjectID,
+    SimulationTemplateID,
+    NamedVariableSetID,
+    Expression,
+    LcFloat,
+    PhysicsAiInferenceJobID,
+    PhysicsAiModelVersionID,
+)
 
 if TYPE_CHECKING:
     from .geometry import Geometry
@@ -485,15 +499,14 @@ class Project(ProtoWrapperBase):
         description : str, optional
             Simulation description.
         batch_processing : bool, default True
-            If True,
-
-            Use Batch Processing on simulations that are not time-sensitive to
-            save up to 65% in credits.
+            If True, this simulation will run as a standard job. If False, this simulation will run
+            as a priority job.
         gpu_type : GPUType, optional
             GPU type to use for the simulation.
         gpu_count : int, optional
-            Number of GPUs to use for the simulation.
-
+            Number of GPUs to use for the simulation. Only relevant if `gpu_type` is
+            specified. If this is set to 0 or omitted and `gpu_type` is specified, the number
+            of GPUs will be automatically determined.
         """
 
         named_variable_set_version_id: Optional[str] = None
@@ -702,6 +715,46 @@ class Project(ProtoWrapperBase):
         req = projectpb.UnshareProjectWithSupportRequest(id=self.id)
         get_default_client().UnshareProjectWithSupport(req)
 
+    def create_inference_job(
+        self,
+        geometry: str,
+        model_version_id: PhysicsAiModelVersionID,
+        synchronous: bool = False,
+        conditions: Optional[Dict[str, Any]] = None,
+        settings: Optional[Dict[str, Any]] = None,
+        surfaces: Optional[list[SurfaceForInference]] = None,
+        inference_fields: Optional[list[str]] = None,
+        per_surface_visualizations: Optional[list[VisualizationExport]] = None,
+        merged_visualizations: Optional[list[VisualizationExport]] = None,
+    ) -> InferenceJob:
+        """
+        Create a new Physics AI inference job.
+        """
+        return create_inference_job(
+            self.id,
+            geometry,
+            model_version_id,
+            synchronous,
+            conditions,
+            settings,
+            surfaces,
+            inference_fields,
+            per_surface_visualizations,
+            merged_visualizations,
+        )
+
+    def get_inference_job(self, job_id: PhysicsAiInferenceJobID) -> InferenceJob:
+        """
+        Get a Physics AI inference job by its ID.
+        """
+        return get_inference_job(job_id)
+
+    def list_inference_jobs(self) -> list[InferenceJob]:
+        """
+        List all inference jobs for the project.
+        """
+        return list_inference_jobs(self.id)
+
 
 def add_named_variables_from_csv(project: Project, csv_path: str) -> list[NamedVariableSet]:
     """

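The new Project methods are thin wrappers over the inference helpers imported from _helpers. A hedged usage sketch follows, assuming lc.get_project for the project lookup and an .id attribute on InferenceJob; every ID and the "pressure" field name are hypothetical placeholders.

# Sketch of the new Project-level Physics AI inference helpers.
import luminarycloud as lc

project = lc.get_project("project-123")  # hypothetical project ID

job = project.create_inference_job(
    geometry="geometry-123",                       # hypothetical Geometry ID
    model_version_id="physicsaimodelversion-123",  # hypothetical model version ID
    synchronous=False,              # return immediately rather than blocking
    inference_fields=["pressure"],  # assumed field name
)

# Jobs can later be looked up individually or listed per project.
same_job = project.get_inference_job(job.id)  # assumes InferenceJob exposes .id
all_jobs = project.list_inference_jobs()
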
luminarycloud/simulation.py
CHANGED

@@ -22,6 +22,7 @@ from .enum import (
     SimulationStatus,
     Vector3Component,
 )
+from .outputs.stopping_conditions import StoppingConditionStatusResult
 from .simulation_param import SimulationParam
 from .reference_values import ReferenceValues
 from .simulation_param import SimulationParam
@@ -324,6 +325,30 @@ class Simulation(ProtoWrapperBase):
         result = _get_workflow_ids([self.id])
         return result.get(self.id)
 
+    def get_stopping_condition_status(self) -> StoppingConditionStatusResult:
+        """
+        Retrieves the stopping condition status for a completed simulation.
+
+        This evaluates the stopping conditions defined in the simulation parameters
+        against the final simulation results to determine which conditions were satisfied.
+
+        Returns
+        -------
+        StoppingConditionStatusResult
+            The stopping condition status containing:
+            - overall_success: Whether the overall stopping criteria were met
+            - force_stopped: Whether a force-stop condition was triggered
+            - condition_results: Results for each individual condition (output name, threshold, value, satisfied)
+
+        Raises
+        ------
+        SDKException
+            If the simulation has not completed or the status cannot be retrieved.
+        """
+        req = simulationpb.GetStoppingConditionStatusRequest(id=self.id)
+        res = get_default_client().GetStoppingConditionStatus(req)
+        return StoppingConditionStatusResult._from_proto(res)
+
     @deprecated(
         "Use get_parameters() instead. This method will be removed in a future release.",
     )

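The new Simulation.get_stopping_condition_status() wraps the GetStoppingConditionStatus RPC. A short sketch follows, assuming lc.get_simulation for the lookup; the simulation ID is a hypothetical placeholder and the attribute names follow the docstring above.

# Sketch of querying stopping-condition status on a completed simulation.
import luminarycloud as lc

sim = lc.get_simulation("simulation-123")  # hypothetical ID

status = sim.get_stopping_condition_status()  # raises SDKException if not complete
print("overall success:", status.overall_success)
print("force stopped:", status.force_stopped)
for result in status.condition_results:
    # Each result reports the output name, threshold, final value, and
    # whether the condition was satisfied (per the docstring above).
    print(result)
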
luminarycloud/types/__init__.py
CHANGED

@@ -7,6 +7,8 @@ from .ids import (
     SimulationTemplateID as SimulationTemplateID,
     GeometryFeatureID as GeometryFeatureID,
     NamedVariableSetID as NamedVariableSetID,
+    PhysicsAiInferenceJobID as PhysicsAiInferenceJobID,
+    PhysicsAiModelVersionID as PhysicsAiModelVersionID,
 )
 
 from .adfloat import (

luminarycloud/types/ids.py
CHANGED

@@ -11,5 +11,7 @@ GeometryFeatureID = NewType("GeometryFeatureID", str)
 NamedVariableSetID = NewType("NamedVariableSetID", str)
 PhysicsAiArchitectureID = NewType("PhysicsAiArchitectureID", str)
 PhysicsAiArchitectureVersionID = NewType("PhysicsAiArchitectureVersionID", str)
+PhysicsAiInferenceJobID = NewType("PhysicsAiInferenceJobID", str)
 PhysicsAiModelID = NewType("PhysicsAiModelID", str)
 PhysicsAiModelVersionID = NewType("PhysicsAiModelVersionID", str)
+PhysicsAiTrainingJobID = NewType("PhysicsAiTrainingJobID", str)

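The new ID aliases are typing.NewType wrappers around str: ordinary strings at runtime, but distinct types to a static checker. A small illustration:

# NewType IDs are plain strings at runtime; the distinction exists only for
# static type checkers such as mypy or pyright. The ID value is a placeholder.
from luminarycloud.types import PhysicsAiInferenceJobID

job_id = PhysicsAiInferenceJobID("physicsaiinferencejob-123")
assert isinstance(job_id, str)  # no runtime wrapper is created

def fetch(job_id: PhysicsAiInferenceJobID) -> None:
    ...

fetch(job_id)          # accepted by a type checker
# fetch("raw-string")  # would be flagged statically, though fine at runtime
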
luminarycloud/types/vector3.py
CHANGED

@@ -8,11 +8,10 @@ from .adfloat import (
     _to_ad_proto as _float_to_ad_proto,
     _from_ad_proto as _float_from_ad_proto,
 )
-from ..pipeline_util.dictable import PipelineDictable
 
 
 @dataclass
-class Vector3(PipelineDictable):
+class Vector3:
     """Represents a 3-dimensional vector.
 
     Supports direct component access, indexing, iteration, and conversion to numpy arrays.

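With the PipelineDictable base removed (its module pipeline_util/dictable.py is deleted in this release), Vector3 is a plain dataclass. A hedged sketch of the conveniences its docstring names, assuming x/y/z fields, a keyword constructor, and that Vector3 is re-exported from luminarycloud.types:

# Sketch of the Vector3 conveniences named in its docstring; the x/y/z keyword
# constructor and the import path are assumptions.
import numpy as np
from luminarycloud.types import Vector3

v = Vector3(x=1.0, y=2.0, z=3.0)
print(v.x)               # direct component access
print(v[1])              # indexing
print(list(v))           # iteration
arr = np.array(list(v))  # conversion to a numpy array via iteration
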
luminarycloud/vis/data_extraction.py
CHANGED

@@ -602,7 +602,7 @@ class DataExtractor:
         code += f"    if sol.id == '{self._solution.id}':\n"
         code += f"        solution = sol\n"
         code += f"        break\n"
-        code += "
+        code += f"{obj_name} = vis.DataExtractor(solution)\n"
         code += "\n"
 
         code += "\n"
@@ -615,11 +615,11 @@ class DataExtractor:
         for extract in self._extracts:
             # Name objects numerically: slice0, slice1, etc.
             name = _data_extract_to_obj_name(extract)
-
-            name_map[
-            ids_to_obj_name[extract.id] =
-            code += extract._to_code_helper(
-            code += f"
+            extract_obj_name = f"{name}{name_map[name]}"
+            name_map[name] += 1
+            ids_to_obj_name[extract.id] = extract_obj_name
+            code += extract._to_code_helper(extract_obj_name, hide_defaults=hide_defaults)
+            code += f"{obj_name}.add_data_extract({extract_obj_name})\n"
             code += "\n"
 
         if include_imports:
@@ -649,7 +649,7 @@ class DataExtractor:
         code = "\n".join(filtered_lines)
 
         code += "\n"
-        code += "extract_output =
+        code += f"extract_output = {obj_name}.create_extracts(name='extract data', description='longer description')\n"
         code += "status = extract_output.wait()\n"
         code += "if status == ExtractStatusType.COMPLETED:\n"
         code += "    extract_output.save_files('data_extracts_prefix', True)\n"

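These hunks repair truncated string templates in DataExtractor's script generation. Under the templates above, the emitted script looks roughly like the sketch below; the slice0 name follows the "Name objects numerically" comment, "extractor" stands in for the generated obj_name, and vis.Slice(...) stands in for whatever extract._to_code_helper emits.

# Approximate shape of the script the repaired generator emits; the solution
# lookup loop and the extract construction are abbreviated.
extractor = vis.DataExtractor(solution)

slice0 = vis.Slice(...)              # emitted by extract._to_code_helper(...)
extractor.add_data_extract(slice0)   # one add_data_extract line per extract

extract_output = extractor.create_extracts(name='extract data', description='longer description')
status = extract_output.wait()
if status == ExtractStatusType.COMPLETED:
    extract_output.save_files('data_extracts_prefix', True)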