luminarycloud 0.21.2__py3-none-any.whl → 0.22.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- luminarycloud/__init__.py +3 -1
- luminarycloud/_client/authentication_plugin.py +49 -0
- luminarycloud/_client/client.py +38 -8
- luminarycloud/_client/http_client.py +1 -1
- luminarycloud/_client/retry_interceptor.py +64 -2
- luminarycloud/_feature_flag.py +22 -0
- luminarycloud/_helpers/_create_simulation.py +7 -2
- luminarycloud/_helpers/download.py +11 -0
- luminarycloud/_helpers/proto_decorator.py +13 -5
- luminarycloud/_proto/api/v0/luminarycloud/feature_flag/feature_flag_pb2.py +55 -0
- luminarycloud/_proto/api/v0/luminarycloud/feature_flag/feature_flag_pb2.pyi +52 -0
- luminarycloud/_proto/api/v0/luminarycloud/feature_flag/feature_flag_pb2_grpc.py +72 -0
- luminarycloud/_proto/api/v0/luminarycloud/feature_flag/feature_flag_pb2_grpc.pyi +35 -0
- luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2.py +132 -132
- luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2.pyi +36 -8
- luminarycloud/_proto/api/v0/luminarycloud/mesh/mesh_pb2.py +74 -73
- luminarycloud/_proto/api/v0/luminarycloud/mesh/mesh_pb2.pyi +8 -1
- luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2.py +53 -23
- luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2.pyi +54 -1
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2.py +195 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2.pyi +361 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2_grpc.py +172 -0
- luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2_grpc.pyi +66 -0
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2.py +97 -61
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2.pyi +68 -3
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2_grpc.py +34 -0
- luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2_grpc.pyi +12 -0
- luminarycloud/_proto/api/v0/luminarycloud/simulation_template/simulation_template_pb2.py +33 -31
- luminarycloud/_proto/api/v0/luminarycloud/simulation_template/simulation_template_pb2.pyi +23 -2
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2.py +88 -65
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2.pyi +42 -0
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2_grpc.py +34 -0
- luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2_grpc.pyi +12 -0
- luminarycloud/_proto/base/base_pb2.py +7 -6
- luminarycloud/_proto/base/base_pb2.pyi +4 -0
- luminarycloud/_proto/cad/shape_pb2.py +39 -19
- luminarycloud/_proto/cad/shape_pb2.pyi +86 -34
- luminarycloud/_proto/client/simulation_pb2.py +3 -3
- luminarycloud/_proto/geometry/geometry_pb2.py +77 -63
- luminarycloud/_proto/geometry/geometry_pb2.pyi +42 -3
- luminarycloud/_proto/hexmesh/hexmesh_pb2.py +22 -18
- luminarycloud/_proto/hexmesh/hexmesh_pb2.pyi +18 -2
- luminarycloud/_proto/physicsaiinferenceservice/physicsaiinferenceservice_pb2.py +30 -0
- luminarycloud/_proto/physicsaiinferenceservice/physicsaiinferenceservice_pb2.pyi +7 -0
- luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2.py +2 -2
- luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2_grpc.py +34 -0
- luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2_grpc.pyi +12 -0
- luminarycloud/enum/vis_enums.py +6 -0
- luminarycloud/feature_modification.py +32 -1
- luminarycloud/geometry.py +67 -7
- luminarycloud/geometry_version.py +4 -0
- luminarycloud/mesh.py +4 -0
- luminarycloud/meshing/mesh_generation_params.py +13 -14
- luminarycloud/meshing/sizing_strategy/sizing_strategies.py +1 -2
- luminarycloud/physics_ai/solution.py +4 -0
- luminarycloud/pipelines/api.py +99 -8
- luminarycloud/pipelines/core.py +12 -2
- luminarycloud/pipelines/stages.py +22 -9
- luminarycloud/project.py +5 -8
- luminarycloud/simulation.py +57 -0
- luminarycloud/types/vector3.py +1 -2
- luminarycloud/vis/data_extraction.py +7 -7
- luminarycloud/vis/interactive_report.py +163 -7
- luminarycloud/vis/report.py +113 -1
- luminarycloud/volume_selection.py +71 -7
- {luminarycloud-0.21.2.dist-info → luminarycloud-0.22.1.dist-info}/METADATA +1 -1
- {luminarycloud-0.21.2.dist-info → luminarycloud-0.22.1.dist-info}/RECORD +68 -58
- {luminarycloud-0.21.2.dist-info → luminarycloud-0.22.1.dist-info}/WHEEL +1 -1
- luminarycloud/pipeline_util/dictable.py +0 -27

luminarycloud/pipelines/stages.py
CHANGED

@@ -2,7 +2,7 @@
 from dataclasses import dataclass
 
 from .core import Stage, StageInputs, StageOutputs, PipelineOutput
-from .parameters import StringPipelineParameter, IntPipelineParameter
+from .parameters import BoolPipelineParameter, StringPipelineParameter, IntPipelineParameter
 from ..meshing import MeshGenerationParams
 
 
@@ -49,6 +49,20 @@ class ReadGeometry(Stage[ReadGeometryOutputs]):
     ----------
     geometry_id : str | StringPipelineParameter
         The ID of the Geometry to retrieve.
+    use_geo_without_copying : bool | BoolPipelineParameter
+        By default, this is False, meaning that each Geometry this stage references will be copied
+        and the PipelineJob will actually operate on the copied Geometry. This is done so that a
+        PipelineJob can be based on a single parametric Geometry which each PipelineJobRun modifies
+        by applying a NamedVariableSet. That modification mutates the Geometry, so those runs can
+        only happen in parallel without interfering with each other if they each operate on a
+        different copy of the Geometry.
+
+        However, if you've already prepared your Geometry in advance and you don't want the
+        PipelineJob to create copies, you can set this to True. In that case, the referenced
+        Geometry will be used directly without being copied.
+
+        IMPORTANT: If you set this to True, you must ensure no two PipelineJobRuns operate on the
+        same Geometry, i.e. no two PipelineArgs rows contain the same Geometry ID.
 
     Outputs
     -------
@@ -61,10 +75,11 @@ class ReadGeometry(Stage[ReadGeometryOutputs]):
         *,
         stage_name: str | None = None,
         geometry_id: str | StringPipelineParameter,
+        use_geo_without_copying: bool | BoolPipelineParameter = False,
     ):
         super().__init__(
             stage_name,
-            {"geometry_id": geometry_id},
+            {"geometry_id": geometry_id, "use_geo_without_copying": use_geo_without_copying},
             StageInputs(self),
             ReadGeometryOutputs._instantiate_for(self),
         )
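
For reference, a minimal sketch of how the new flag is passed when constructing the stage. The `luminarycloud.pipelines` import path and the ID strings are assumptions for illustration, not confirmed API:

    from luminarycloud.pipelines import ReadGeometry  # assumed import path

    # Default behavior: each PipelineJobRun gets its own copy of the Geometry,
    # so parallel runs can apply NamedVariableSets without interfering.
    read = ReadGeometry(stage_name="read_geo", geometry_id="geo_123")

    # Opt out of copying for pre-prepared Geometries; every PipelineArgs row
    # must then reference a distinct Geometry ID.
    read_direct = ReadGeometry(
        stage_name="read_geo_direct",
        geometry_id="geo_456",
        use_geo_without_copying=True,
    )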
@@ -202,13 +217,6 @@ class Mesh(Stage[MeshOutputs]):
             MeshOutputs._instantiate_for(self),
         )
 
-    # TODO: bring back the full MeshGenerationParams, but we need to be able to hydrate it from the
-    # pipeline YAML. I can probably bake that logic into PipelineDictable, `from_pipeline_dict` or
-    # something.
-    # @classmethod
-    # def _parse_params(cls, params: dict) -> dict:
-    #     return {"mesh_gen_params": MeshGenerationParams.from_pipeline_dict(**params["mesh_gen_params"])}
-
 
 @dataclass
 class SimulateOutputs(StageOutputs):
@@ -230,6 +238,9 @@ class Simulate(Stage[SimulateOutputs]):
         The name to assign to the Simulation. If None, a default name will be used.
     sim_template_id : str | StringPipelineParameter
         The ID of the SimulationTemplate to use for the Simulation.
+    batch_processing : bool | BoolPipelineParameter, default True
+        If True, the Simulation will run as a standard job. If False, the Simulation will run as a
+        priority job.
 
     Outputs
     -------
@@ -244,10 +255,12 @@
         mesh: PipelineOutputMesh,
         sim_name: str | StringPipelineParameter | None = None,
         sim_template_id: str | StringPipelineParameter,
+        batch_processing: bool | BoolPipelineParameter = True,
     ):
         super().__init__(
             stage_name,
             {
+                "batch_processing": batch_processing,
                 "sim_name": sim_name,
                 "sim_template_id": sim_template_id,
             },
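
A matching sketch for the updated Simulate stage; the way the upstream Mesh stage output is referenced (`mesh_stage.outputs.mesh`) is an assumption for illustration:

    # batch_processing=False requests a priority job; the default (True)
    # runs the Simulation as a standard job.
    sim = Simulate(
        stage_name="run_sim",
        mesh=mesh_stage.outputs.mesh,  # PipelineOutputMesh from an upstream stage (assumed)
        sim_template_id="simtpl_456",
        batch_processing=False,
    )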
luminarycloud/project.py
CHANGED

@@ -199,8 +199,6 @@ class Project(ProtoWrapperBase):
     def load_geometry_to_setup(self, geometry: "Geometry") -> None:
         """
         Load a geometry to the setup phase.
-        NOTE: this operation is irreversible and deletes all the existing meshes and simulations
-        in the project.
 
         Parameters
         ----------

@@ -487,15 +485,14 @@
         description : str, optional
             Simulation description.
         batch_processing : bool, default True
-            If True,
-
-            Use Batch Processing on simulations that are not time-sensitive to
-            save up to 65% in credits.
+            If True, this simulation will run as a standard job. If False, this simulation will run
+            as a priority job.
         gpu_type : GPUType, optional
             GPU type to use for the simulation.
         gpu_count : int, optional
-            Number of GPUs to use for the simulation.
-
+            Number of GPUs to use for the simulation. Only relevant if `gpu_type` is
+            specified. If this is set to 0 or omitted and `gpu_type` is specified, the number
+            of GPUs will be automatically determined.
         """
 
         named_variable_set_version_id: Optional[str] = None
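
A hedged sketch of the reworded parameters in use; the positional arguments to create_simulation and the GPUType value are assumptions based on the docstring above, not taken from this diff:

    sim = project.create_simulation(
        mesh.id,                 # assumed positional argument
        "priority run",          # simulation name
        sim_template.id,         # assumed positional argument
        batch_processing=False,  # False: priority job; True (default): standard job
        gpu_type=my_gpu_type,    # some GPUType value; optional
        gpu_count=0,             # 0 or omitted: count determined automatically
    )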
luminarycloud/simulation.py
CHANGED

@@ -311,6 +311,19 @@ class Simulation(ProtoWrapperBase):
         req = simulationpb.GetSimulationParametersRequest(id=self.id)
         return SimulationParam.from_proto(get_default_client().GetSimulationParameters(req))
 
+    def _get_workflow_id(self) -> Optional[str]:
+        """
+        Retrieves the workflow ID associated with the current simulation.
+
+        Returns
+        -------
+        str | None
+            The workflow ID corresponding to this simulation's ID, or None if the simulation
+            has no associated workflow.
+        """
+        result = _get_workflow_ids([self.id])
+        return result.get(self.id)
+
     @deprecated(
         "Use get_parameters() instead. This method will be removed in a future release.",
     )
@@ -361,3 +374,47 @@ def get_simulation(id: SimulationID) -> Simulation:
     req = simulationpb.GetSimulationRequest(id=id)
     res = get_default_client().GetSimulation(req)
     return Simulation(res.simulation)
+
+
+def _get_workflow_ids(simulation_ids: list[SimulationID]) -> dict[SimulationID, str]:
+    """
+    Get the workflow IDs corresponding to simulation IDs.
+
+    This is useful for mapping between UI-created simulations (which have workflow IDs)
+    and the simulation IDs used in the API.
+
+    Parameters
+    ----------
+    simulation_ids : list[SimulationID]
+        The simulation IDs to look up.
+
+    Returns
+    -------
+    dict[SimulationID, str]
+        A mapping from simulation ID to workflow ID. Only simulation IDs that were
+        successfully resolved to workflow IDs are present as keys. Simulation IDs are
+        omitted if:
+
+        - The simulation ID does not exist
+        - The user lacks access to the simulation's project
+        - The simulation has no associated workflow ID
+
+    Examples
+    --------
+    >>> import luminarycloud as lc
+    >>> sim_ids = [lc.SimulationID("sim_123"), lc.SimulationID("sim_456")]
+    >>> workflow_ids = lc._get_workflow_ids(sim_ids)
+    >>> print(workflow_ids)
+    {SimulationID('sim_123'): 'wf_abc', SimulationID('sim_456'): 'wf_def'}
+
+    >>> # Check if a simulation has a workflow
+    >>> sim_id = lc.SimulationID("sim_123")
+    >>> if sim_id in workflow_ids:
+    ...     print(f"Workflow ID: {workflow_ids[sim_id]}")
+    ... else:
+    ...     print("No workflow found")
+    """
+    req = simulationpb.GetWorkflowIDsRequest(simulation_ids=simulation_ids)
+    res = get_default_client().GetWorkflowIDs(req)
+    # Return dict with SimulationID keys (not str keys)
+    return {SimulationID(sim_id): wf_id for sim_id, wf_id in res.data.items()}
luminarycloud/types/vector3.py
CHANGED

@@ -8,11 +8,10 @@ from .adfloat import (
     _to_ad_proto as _float_to_ad_proto,
     _from_ad_proto as _float_from_ad_proto,
 )
-from ..pipeline_util.dictable import PipelineDictable
 
 
 @dataclass
-class Vector3
+class Vector3:
     """Represents a 3-dimensional vector.
 
     Supports direct component access, indexing, iteration, and conversion to numpy arrays.
luminarycloud/vis/data_extraction.py
CHANGED

@@ -602,7 +602,7 @@ class DataExtractor:
         code += f"    if sol.id == '{self._solution.id}':\n"
         code += f"        solution = sol\n"
         code += f"        break\n"
-        code += "
+        code += f"{obj_name} = vis.DataExtractor(solution)\n"
         code += "\n"
 
         code += "\n"

@@ -615,11 +615,11 @@ class DataExtractor:
         for extract in self._extracts:
             # Name objects numerically: slice0, slice1, etc.
             name = _data_extract_to_obj_name(extract)
-
-            name_map[
-            ids_to_obj_name[extract.id] =
-            code += extract._to_code_helper(
-            code += f"
+            extract_obj_name = f"{name}{name_map[name]}"
+            name_map[name] += 1
+            ids_to_obj_name[extract.id] = extract_obj_name
+            code += extract._to_code_helper(extract_obj_name, hide_defaults=hide_defaults)
+            code += f"{obj_name}.add_data_extract({extract_obj_name})\n"
         code += "\n"
 
         if include_imports:

@@ -649,7 +649,7 @@ class DataExtractor:
         code = "\n".join(filtered_lines)
 
         code += "\n"
-        code += "extract_output =
+        code += f"extract_output = {obj_name}.create_extracts(name='extract data', description='longer description')\n"
         code += "status = extract_output.wait()\n"
         code += "if status == ExtractStatusType.COMPLETED:\n"
         code += "    extract_output.save_files('data_extracts_prefix', True)\n"
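
With these fixes, DataExtractor's code generation emits complete statements instead of truncated strings. Reconstructed from the f-string templates above, the generated script ends up roughly like this (the solution ID, object names, and the per-extract constructor are placeholders, not confirmed output):

    #     if sol.id == 'SOLUTION_ID':
    #         solution = sol
    #         break
    extractor = vis.DataExtractor(solution)

    slice0 = vis.Slice(...)            # emitted by each extract's _to_code_helper (placeholder)
    extractor.add_data_extract(slice0)

    extract_output = extractor.create_extracts(name='extract data', description='longer description')
    status = extract_output.wait()
    if status == ExtractStatusType.COMPLETED:
        extract_output.save_files('data_extracts_prefix', True)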
luminarycloud/vis/interactive_report.py
CHANGED

@@ -1,6 +1,7 @@
 import io
+import numpy as np
 from .visualization import RenderOutput
-from .report import ReportEntry
+from .report import ReportEntry, ReportContext
 
 try:
     import luminarycloud_jupyter as lcj
@@ -8,6 +9,82 @@ except ImportError:
     lcj = None
 
 
+def _detect_outliers(
+    metadata: list[dict[str, str | float]],
+    output_fields: list[str],
+    percentile_threshold: float = 95.0,
+) -> list[int] | None:
+    """
+    Detect outliers using Mahalanobis distance.
+
+    Parameters
+    ----------
+    metadata : list[dict[str, str | float]]
+        List of metadata dictionaries for each row
+    output_fields : list[str]
+        List of output field names to use for outlier detection
+    percentile_threshold : float, optional
+        Percentile threshold for outlier detection (default: 95.0)
+
+    Returns
+    -------
+    list[int] | None
+        List of row indices that are outliers, or None if detection fails
+    """
+    # Need at least 2 fields for meaningful multivariate analysis
+    if len(output_fields) < 2:
+        return None
+
+    # Extract data for the specified output fields
+    try:
+        data = []
+        for row_metadata in metadata:
+            row_data = []
+            for field in output_fields:
+                value = row_metadata.get(field)
+                if value is None or isinstance(value, str):
+                    # Skip if field is missing or non-numeric
+                    return None
+                row_data.append(float(value))
+            data.append(row_data)
+
+        data_array = np.array(data)
+
+        # Need at least as many samples as dimensions for covariance matrix
+        if len(data_array) < len(output_fields):
+            return None
+
+        # Calculate mean and covariance matrix
+        mean_vec = np.mean(data_array, axis=0)
+        cov_matrix = np.cov(data_array.T)
+
+        # Check if covariance matrix is singular
+        if np.linalg.det(cov_matrix) == 0:
+            return None
+
+        # Invert covariance matrix
+        inv_cov_matrix = np.linalg.inv(cov_matrix)
+
+        # Calculate Mahalanobis distance for each point
+        distances = []
+        for point in data_array:
+            diff = point - mean_vec
+            distance = np.sqrt(diff @ inv_cov_matrix @ diff)
+            distances.append(distance)
+
+        # Determine outlier threshold using percentile
+        threshold = np.percentile(distances, percentile_threshold)
+
+        # Find outlier indices
+        outlier_indices = [i for i, d in enumerate(distances) if d > threshold]
+
+        return outlier_indices
+
+    except Exception:
+        # If anything goes wrong, return None (no outliers detected)
+        return None
+
+
 class InteractiveReport:
     """
     Interactive report widget with lazy loading for large datasets.
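
A minimal usage sketch of _detect_outliers with synthetic metadata; the column names and values are made up. With five rows, two output fields, and the default 95th-percentile threshold, the deliberately anomalous last row is the one flagged:

    rows = [
        {"aoa": 0.0, "lift": 0.10, "drag": 0.010},
        {"aoa": 2.0, "lift": 0.31, "drag": 0.012},
        {"aoa": 4.0, "lift": 0.52, "drag": 0.015},
        {"aoa": 6.0, "lift": 0.70, "drag": 0.020},
        {"aoa": 8.0, "lift": 2.90, "drag": 0.300},  # injected outlier
    ]
    # Only the output columns participate in the Mahalanobis distance.
    print(_detect_outliers(rows, ["lift", "drag"]))  # e.g. [4]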
@@ -29,7 +106,7 @@ class InteractiveReport:
     # TODO Will/Matt: this list of report entries could be how we store stuff in the DB
     # for interactive reports, to reference the post proc. extracts. A report is essentially
     # a bunch of extracts + metadata.
-    def __init__(self, entries: list[ReportEntry]) -> None:
+    def __init__(self, entries: list[ReportEntry], context: ReportContext | None = None) -> None:
         if not lcj:
             raise ImportError("InteractiveScene requires luminarycloud[jupyter] to be installed")
 
@@ -37,6 +114,13 @@
         if len(self.entries) == 0:
             raise ValueError("Invalid number of entries, must be > 0")
 
+        # Validate and store context if provided
+        if context is not None:
+            self._validate_context(context)
+            self.context = context
+        else:
+            self.context = None
+
         # Determine grid dimensions by downloading first entry
         # to understand the structure (number of columns)
         first_entry = self.entries[0]
@@ -54,12 +138,55 @@
 
         nrows = len(self.entries)
 
+        # Prepare report context for the widget
+        context_dict = None
+        if self.context is not None:
+            context_dict = self.context.to_dict()
+
+            # Compute outlier indices if we have outputs
+            if self.context.outputs and len(self.context.outputs) >= 2:
+                outlier_indices = _detect_outliers(
+                    [re._metadata for re in self.entries], self.context.outputs
+                )
+                if outlier_indices is not None:
+                    context_dict["outlier_indices"] = outlier_indices
+
         # Create widget with metadata but without data
-        self.widget = lcj.EnsembleWidget(
+        self.widget = lcj.EnsembleWidget(
+            [re._metadata for re in self.entries], nrows, ncols, report_context=context_dict
+        )
 
         # Set the callback for lazy loading row data
         self.widget.set_row_data_callback(self._load_row_data)
 
+    def _validate_context(self, context: ReportContext) -> None:
+        """
+        Validate that all inputs and outputs from the ReportContext exist in the
+        first report entry's metadata.
+
+        Raises
+        ------
+        ValueError
+            If any inputs or outputs are missing from the first entry's metadata.
+        """
+        first_entry = self.entries[0]
+        metadata_keys = set(first_entry._metadata.keys())
+
+        # Check for missing inputs
+        missing_inputs = [key for key in context.inputs if key not in metadata_keys]
+
+        # Check for missing outputs
+        missing_outputs = [key for key in context.outputs if key not in metadata_keys]
+
+        # Raise exception if any keys are missing
+        if missing_inputs or missing_outputs:
+            error_parts = []
+            if missing_inputs:
+                error_parts.append(f"Missing inputs: {missing_inputs}")
+            if missing_outputs:
+                error_parts.append(f"Missing outputs: {missing_outputs}")
+            raise ValueError(f"ReportContext validation failed. {', '.join(error_parts)}")
+
     def _load_row_data(self, row: int) -> None:
         """
         Load and send data for a specific row to the widget.
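
Putting the pieces together, a hypothetical sketch of constructing the widget with a context (requires luminarycloud[jupyter]; `entries` is a list of ReportEntry whose metadata contains these keys, and the import path is assumed):

    from luminarycloud.vis.report import ReportContext  # assumed import path

    context = ReportContext(
        inputs=["aoa", "mach"],    # varied conditions; must exist in entry metadata
        outputs=["lift", "drag"],  # extracted quantities; also drive outlier detection
    )
    # Raises ValueError if any key is missing from the first entry's metadata.
    report = InteractiveReport(entries, context=context)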
@@ -79,11 +206,29 @@
             image_and_label = extract.download_images()
             # Each image gets its own column
             for il in image_and_label:
-
+                # il is a tuple of (BytesIO, label)
+                # Use camera label for the name, fallback to "image" if empty
+                camera_label = il[1]
+                name = camera_label if camera_label else "image"
+                # For description: prefer extract.description, then camera label, then fallback message
+                description = (
+                    extract.description
+                    if extract.description
+                    else camera_label if camera_label else "no label or description provided"
+                )
+                self.widget.set_cell_data(
+                    row,
+                    col,
+                    il[0].getvalue(),
+                    "jpg",
+                    name=name,
+                    description=description,
+                )
                 col += 1
         else:
             plot_data = extract.download_data()
-            data = plot_data[0][0]
+            data = plot_data[0][0]  # The CSV data (rows)
+            plot_label = plot_data[0][1]  # The label from the extract
             all_axis_labels = data[0]
 
             axis_data = []
@@ -91,14 +236,25 @@
                 axis_values = [row[axis_idx] for row in data[1:]]
                 axis_data.append(axis_values)
 
+            # For plots: use extract.name, then plot_label, then "plot" as fallback
+            # For description: use extract.description, fallback to message if empty
+            name = extract.name if extract.name else (plot_label if plot_label else "plot")
+            description = (
+                extract.description
+                if extract.description
+                else "no label or description provided"
+            )
+
             self.widget.set_cell_scatter_plot(
                 row,
                 col,
-
+                name,  # Use the same name for the plot title
                 all_axis_labels,
                 axis_data,
-                plot_name=
+                plot_name=name,
                 plot_mode="markers",
+                name=name,
+                description=description,
             )
             col += 1
 
luminarycloud/vis/report.py
CHANGED

@@ -1,3 +1,4 @@
+import csv as csv_module
 import json
 import os
 from typing import TYPE_CHECKING
@@ -17,6 +18,79 @@ if TYPE_CHECKING:
     from .interactive_report import InteractiveReport
 
 
+class ReportContext:
+    """
+    Context for interactive reports that defines input and output metadata keys.
+    Inputs define which geometric and flow conditions are varied when running
+    data generation, and outputs define which quantities are extracted from
+    the simulations. For the report context to be valid, both the inputs and
+    outputs must be non-empty.
+
+    Attributes
+    ----------
+    inputs : list[str]
+        List of metadata keys (column names) that represent inputs to the report.
+    outputs : list[str]
+        List of metadata keys (column names) that represent outputs from the report.
+    """
+
+    def __init__(self, inputs: list[str], outputs: list[str]) -> None:
+        self.inputs = inputs
+        self.outputs = outputs
+
+    def to_dict(self) -> dict:
+        """Convert ReportContext to a dictionary for serialization."""
+        return {
+            "inputs": self.inputs,
+            "outputs": self.outputs,
+        }
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "ReportContext":
+        """Create a ReportContext from a dictionary.
+
+        Parameters
+        ----------
+        data : dict
+            Dictionary containing 'inputs' and 'outputs' keys.
+
+        Raises
+        ------
+        ValueError
+            If 'inputs' or 'outputs' keys are missing from the data.
+        """
+        if "inputs" not in data:
+            raise ValueError("ReportContext.from_dict: missing required key 'inputs'")
+        if "outputs" not in data:
+            raise ValueError("ReportContext.from_dict: missing required key 'outputs'")
+
+        inputs = data["inputs"]
+        outputs = data["outputs"]
+
+        if not isinstance(inputs, list):
+            raise ValueError(
+                f"ReportContext.from_dict: 'inputs' must be a list, got {type(inputs).__name__}"
+            )
+        if not isinstance(outputs, list):
+            raise ValueError(
+                f"ReportContext.from_dict: 'outputs' must be a list, got {type(outputs).__name__}"
+            )
+
+        if len(inputs) == 0:
+            raise ValueError("ReportContext.from_dict: 'inputs' must be non-empty")
+        if len(outputs) == 0:
+            raise ValueError("ReportContext.from_dict: 'outputs' must be non-empty")
+
+        return cls(inputs=inputs, outputs=outputs)
+
+
+def load_report_context_from_json(filepath: str) -> ReportContext:
+    """Load a ReportContext object from a JSON file at the given file path."""
+    with open(filepath, "r") as f:
+        data = json.load(f)
+    return ReportContext.from_dict(data)
+
+
 # TODO Will/Matt: this could be something like what we store in the DB
 # A report can contain a list of report entries that reference post proc.
 # extracts + styling info for how they should be displayed
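
The serialization helpers round-trip cleanly; a short sketch grounded in the methods above:

    import json

    ctx = ReportContext(inputs=["aoa"], outputs=["lift", "drag"])
    with open("context.json", "w") as f:
        json.dump(ctx.to_dict(), f)  # {"inputs": ["aoa"], "outputs": ["lift", "drag"]}

    restored = load_report_context_from_json("context.json")
    assert restored.inputs == ["aoa"]
    assert restored.outputs == ["lift", "drag"]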
@@ -86,7 +160,7 @@ class ReportEntry:
             if res.HasField("line_data")
             else RenderOutput(_InternalToken())
         )
-        extract._set_data(eid, self._project_id,
+        extract._set_data(eid, self._project_id, "", "", status)
         self._extracts.append(extract)
 
 
@@ -250,3 +324,41 @@ def load_report_from_json(filepath: str) -> "Report":
     with open(filepath, "r") as f:
         data = json.load(f)
     return Report.from_dict(data)
+
+
+def load_report_from_csv(filepath: str) -> "Report":
+    """Load a Report object from a CSV file at the given file path.
+
+    Each row in the CSV corresponds to a ReportEntry. Each column is converted
+    to metadata. No extracts are created when loading from CSV.
+
+    Parameters
+    ----------
+    filepath : str
+        Path to the CSV file to load.
+
+    Returns
+    -------
+    Report
+        A Report object with entries populated from the CSV rows.
+    """
+    entries = []
+    with open(filepath, "r") as f:
+        reader = csv_module.DictReader(f)
+        for row in reader:
+            # Convert all columns to metadata
+            metadata: dict[str, str | float] = {}
+            for key, value in row.items():
+                # Try to convert to float, otherwise keep as string
+                try:
+                    metadata[key] = float(value)
+                except (ValueError, TypeError):
+                    metadata[key] = value
+
+            # Create ReportEntry with placeholder project_id and no extracts
+            # We only need the project id for loading extracts, so we can omit it for CSV
+            # imports.
+            entry = ReportEntry(project_id="p-placeholder", extract_ids=[], metadata=metadata)
+            entries.append(entry)
+
+    return Report(entries)