luminarycloud 0.20.0__py3-none-any.whl → 0.21.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (58)
  1. luminarycloud/__init__.py +2 -0
  2. luminarycloud/_client/http_client.py +10 -8
  3. luminarycloud/_helpers/_upload_mesh.py +1 -0
  4. luminarycloud/_helpers/pagination.py +62 -0
  5. luminarycloud/_helpers/upload.py +18 -12
  6. luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2.py +168 -124
  7. luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2.pyi +125 -3
  8. luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2_grpc.py +66 -0
  9. luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2_grpc.pyi +20 -0
  10. luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2.py +8 -8
  11. luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2.pyi +5 -5
  12. luminarycloud/_proto/api/v0/luminarycloud/vis/vis_pb2.py +66 -19
  13. luminarycloud/_proto/api/v0/luminarycloud/vis/vis_pb2.pyi +92 -0
  14. luminarycloud/_proto/api/v0/luminarycloud/vis/vis_pb2_grpc.py +33 -0
  15. luminarycloud/_proto/api/v0/luminarycloud/vis/vis_pb2_grpc.pyi +10 -0
  16. luminarycloud/_proto/assistant/assistant_pb2.py +61 -41
  17. luminarycloud/_proto/assistant/assistant_pb2.pyi +43 -1
  18. luminarycloud/_proto/assistant/assistant_pb2_grpc.py +33 -0
  19. luminarycloud/_proto/assistant/assistant_pb2_grpc.pyi +10 -0
  20. luminarycloud/_proto/base/base_pb2.py +9 -6
  21. luminarycloud/_proto/base/base_pb2.pyi +12 -0
  22. luminarycloud/_proto/client/simulation_pb2.py +490 -348
  23. luminarycloud/_proto/client/simulation_pb2.pyi +570 -8
  24. luminarycloud/_proto/inferenceservice/inferenceservice_pb2.py +10 -10
  25. luminarycloud/_proto/inferenceservice/inferenceservice_pb2.pyi +5 -5
  26. luminarycloud/_proto/quantity/quantity_pb2.py +24 -15
  27. luminarycloud/_proto/quantity/quantity_pb2.pyi +10 -4
  28. luminarycloud/enum/__init__.py +1 -0
  29. luminarycloud/enum/quantity_type.py +9 -0
  30. luminarycloud/enum/vis_enums.py +23 -3
  31. luminarycloud/geometry.py +41 -1
  32. luminarycloud/geometry_version.py +57 -3
  33. luminarycloud/params/enum/_enum_wrappers.py +537 -30
  34. luminarycloud/params/simulation/adaptive_mesh_refinement_.py +4 -0
  35. luminarycloud/params/simulation/physics/__init__.py +0 -1
  36. luminarycloud/params/simulation/physics/periodic_pair_.py +12 -31
  37. luminarycloud/physics_ai/architectures.py +5 -5
  38. luminarycloud/physics_ai/inference.py +13 -13
  39. luminarycloud/pipelines/__init__.py +8 -0
  40. luminarycloud/pipelines/api.py +159 -4
  41. luminarycloud/pipelines/arguments.py +15 -0
  42. luminarycloud/pipelines/operators.py +74 -17
  43. luminarycloud/project.py +5 -44
  44. luminarycloud/simulation.py +9 -3
  45. luminarycloud/simulation_param.py +0 -9
  46. luminarycloud/vis/__init__.py +2 -0
  47. luminarycloud/vis/interactive_report.py +79 -93
  48. luminarycloud/vis/report.py +219 -65
  49. luminarycloud/vis/visualization.py +60 -0
  50. luminarycloud/volume_selection.py +58 -9
  51. {luminarycloud-0.20.0.dist-info → luminarycloud-0.21.1.dist-info}/METADATA +1 -1
  52. {luminarycloud-0.20.0.dist-info → luminarycloud-0.21.1.dist-info}/RECORD +53 -57
  53. luminarycloud/params/simulation/physics/periodic_pair/__init__.py +0 -2
  54. luminarycloud/params/simulation/physics/periodic_pair/periodicity_type/__init__.py +0 -2
  55. luminarycloud/params/simulation/physics/periodic_pair/periodicity_type/rotational_periodicity_.py +0 -31
  56. luminarycloud/params/simulation/physics/periodic_pair/periodicity_type/translational_periodicity_.py +0 -29
  57. luminarycloud/params/simulation/physics/periodic_pair/periodicity_type_.py +0 -25
  58. {luminarycloud-0.20.0.dist-info → luminarycloud-0.21.1.dist-info}/WHEEL +0 -0
@@ -195,6 +195,12 @@ class Simulation(ProtoWrapperBase):
  """
  Downloads surface outputs (e.g. lift, drag, ...) in csv format.

+ Unless `reference_values` is explicitly passed, the Simulation's reference values -- i.e.
+ the ones specified when the Simulation was created -- will be used for computing
+ non-dimensional output quantities. While the Luminary Cloud UI lets you update the reference
+ values on a Simulation result after it has run, those updates only affect the output
+ calculations seen in the UI; they have no effect on the ones retrieved by this method.
+
  Parameters
  ----------
  quantity_type : luminarycloud.enum.QuantityType
@@ -206,9 +212,9 @@ class Simulation(ProtoWrapperBase):
  Other Parameters
  ----------------
  reference_values : ReferenceValues, optional
- Reference values used for computing forces, moments and
- other non-dimensional output quantities. If not provided, default
- reference values will be used.
+ Reference values used for computing forces, moments, and other non-dimensional output
+ quantities. If not provided, the simulation's reference values will be used, i.e., the
+ ones specified when the simulation was created.
  calculation_type : CalculationType, optional
  Whether the calculation should be done for all the surfaces together or each surface
  individually. Default is AGGREGATE.
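A short usage sketch of the documented behavior. The downloading method's name is not visible in this hunk, so `download_surface_output` is assumed here, as are the `ReferenceValues` import path and field names and the specific `QuantityType` member; only the `reference_values` semantics come from the docstring above.

    from luminarycloud import ReferenceValues          # import path assumed
    from luminarycloud.enum import QuantityType

    # Default: uses the reference values the Simulation was created with.
    # Edits made later in the UI do not affect this download.
    csv_default = sim.download_surface_output(QuantityType.LIFT)        # method name assumed

    # Explicit override, applied to this call only.
    ref = ReferenceValues(area_ref=2.0, length_ref=0.5)                 # field names illustrative
    csv_custom = sim.download_surface_output(QuantityType.LIFT, reference_values=ref)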
@@ -232,15 +232,6 @@ class SimulationParam(_SimulationParam):
  f"physics {_stringify_identifier(v.physics_identifier)}. Overwriting..."
  ),
  )
- _remove_from_list_with_warning(
- _list=volume_physics_pairs,
- _accessor=lambda v: get_id(v.physics_identifier),
- _to_remove=get_id(physics.physics_identifier),
- _warning_message=lambda v: (
- f"Physics {_stringify_identifier(physics.physics_identifier)} has already been "
- f"assigned to volume {_stringify_identifier(v.volume_identifier)}. Overwriting..."
- ),
- )

  if volume_identifier.id not in (get_id(v.volume_identifier) for v in self.volume_entity):
  self.volume_entity.append(VolumeEntity(volume_identifier=volume_identifier))
@@ -6,6 +6,8 @@ from .visualization import (
  list_quantities as list_quantities,
  list_quantities as list_quantities,
  list_cameras as list_cameras,
+ RangeResult as RangeResult,
+ range_query as range_query,
  get_camera as get_camera,
  DirectionalCamera as DirectionalCamera,
  LookAtCamera as LookAtCamera,
@@ -1,10 +1,6 @@
  import io
- from . import ExtractOutput
- from .vis_util import _InternalToken, _get_status
  from .visualization import RenderOutput
- from ..enum import ExtractStatusType
- from .._client import get_default_client
- from .._proto.api.v0.luminarycloud.vis import vis_pb2
+ from .report import ReportEntry

  try:
  import luminarycloud_jupyter as lcj
@@ -12,42 +8,24 @@ except ImportError:
  lcj = None


- # TODO Will/Matt: this could be something like what we store in the DB
- # A report can contain a list of report entries that reference post proc.
- # extracts + styling info for how they should be displayed
- class ReportEntry:
- def __init__(
- self, project_id: str, extract_ids: list[str] = [], metadata: dict[str, str | float] = {}
- ) -> None:
- self._project_id = project_id
- self._extract_ids = extract_ids
- self._extracts: list[ExtractOutput | RenderOutput] = []
- self._metadata = metadata
-
- # Download all extracts for this report entry
- def download_extracts(self) -> None:
- self._extracts = []
- for eid in self._extract_ids:
- status = _get_status(self._project_id, eid)
- if status != ExtractStatusType.COMPLETED:
- raise Exception(f"Extract {eid} is not complete")
- req = vis_pb2.DownloadExtractRequest()
- req.extract_id = eid
- req.project_id = self._project_id
- # TODO: This is a bit awkward in that we download the extract to figure out what type
- # it is, but this is just a temporary thing, later we'll have a report DB table that
- # stores the extracts for a report and their types, etc.
- res: vis_pb2.DownloadExtractResponse = get_default_client().DownloadExtract(req)
- extract = (
- ExtractOutput(_InternalToken())
- if res.HasField("line_data")
- else RenderOutput(_InternalToken())
- )
- extract._set_data(eid, self._project_id, eid, eid, status)
- self._extracts.append(extract)
+ class InteractiveReport:
+ """
+ Interactive report widget with lazy loading for large datasets.

+ How it works:
+ 1. on initialization:
+ - sends metadata for all rows (for filtering/selection)
+ - downloads the first row (to determine grid dimensions)
+ - other rows remain unloaded
+
+ 2. on user request:
+ - _load_row_data() is called with row index
+ - downloads and sends images/plots for that specific row
+ - python sets row_states to 'loading' -> 'loaded' (or 'error')
+
+ This allows working with 1000+ row datasets without waiting for all data upfront.
+ """

- class InteractiveReport:
  # TODO Will/Matt: this list of report entries could be how we store stuff in the DB
  # for interactive reports, to reference the post proc. extracts. A report is essentially
  # a bunch of extracts + metadata.
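From a notebook, the lazy-loading flow described in the docstring above looks roughly like this; `load_report_from_json` comes from the reworked report.py later in this diff, and the file name is a placeholder:

    from luminarycloud.vis.report import load_report_from_json

    report = load_report_from_json("wing_study_lcreport.json")   # placeholder file name
    report.wait_for_completion()     # extracts must be COMPLETED before interacting
    ir = report.interact()           # builds an InteractiveReport: metadata for every row,
                                     # but only the first row's data is downloaded
    ir                               # displaying the widget lets it call _load_row_data(row)
                                     # on demand as rows are requested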
@@ -59,62 +37,70 @@ class InteractiveReport:
  if len(self.entries) == 0:
  raise ValueError("Invalid number of entries, must be > 0")

- report_data = []
- for row, re in enumerate(self.entries):
- row_data = []
- re.download_extracts()
- for extract in re._extracts:
- if isinstance(extract, RenderOutput):
- image_and_label = extract.download_images()
- row_data.extend([il[0] for il in image_and_label])
- else:
- plot_data = extract.download_data()
- # Plot absolute pressure for each intersection curve we have
- # TODO will: make these params of the extract/report entry
- # We'll pick the first item that's not x/y/z coordinates to be the data we plot
- x_axis = "x"
- y_axis = [n for n in plot_data[0][0][0] if n != "x" and n != "y" and n != "z"][
- 0
- ]
- scatter_plots = []
- for p in plot_data:
- data = p[0]
- x_idx = data[0].index(x_axis)
- y_idx = data[0].index(y_axis)
-
- scatter_data = lcj.ScatterPlotData()
- scatter_data.name = f"plot-{row}"
- for r in data[1:]:
- scatter_data.x.append(r[x_idx])
- scatter_data.y.append(r[y_idx])
- scatter_plots.append(scatter_data)
- row_data.append(scatter_plots)
-
- report_data.append(row_data)
-
- # TODO Validate the grid configuration is valid, all report entries should have the
- # same # of extract IDs and metadata keys
- # maybe not needed, b/c this is something we'd control internally later?
- nrows = len(report_data)
- ncols = len(report_data[0]) if len(report_data) > 0 else 0
-
- for i, r in enumerate(report_data):
- if len(r) != ncols:
- raise ValueError(
- f"Invalid report configuration: row {i} does not have {ncols} columns"
- )
+ # Determine grid dimensions by downloading first entry
+ # to understand the structure (number of columns)
+ first_entry = self.entries[0]
+ first_entry.download_extracts()
+
+ # Calculate actual number of columns by counting how many cells
+ # each extract produces (RenderOutput can produce multiple images)
+ ncols = 0
+ for extract in first_entry._extracts:
+ if isinstance(extract, RenderOutput):
+ image_and_label = extract.download_images()
+ ncols += len(image_and_label)
+ else:
+ ncols += 1 # Plot data extracts produce one cell

+ nrows = len(self.entries)
+
+ # Create widget with metadata but without data
  self.widget = lcj.EnsembleWidget([re._metadata for re in self.entries], nrows, ncols)
- for row, row_data in enumerate(report_data):
- for col, col_data in enumerate(row_data):
- if isinstance(col_data, list) and isinstance(col_data[0], lcj.ScatterPlotData):
- x_axis = "x"
- y_axis = "Absolute Pressure (Pa)"
- self.widget.set_cell_scatter_plot(
- row, col, f"{row} {y_axis} v {x_axis}", x_axis, y_axis, col_data
- )
- elif isinstance(col_data, io.BytesIO):
- self.widget.set_cell_data(row, col, col_data.getvalue(), "jpg")
+
+ # Set the callback for lazy loading row data
+ self.widget.set_row_data_callback(self._load_row_data)
+
+ def _load_row_data(self, row: int) -> None:
+ """
+ Load and send data for a specific row to the widget.
+ This is called on-demand when the user requests data for a row.
+ """
+ re = self.entries[row]
+
+ # Download extracts if not already downloaded
+ if len(re._extracts) == 0:
+ re.download_extracts()
+
+ # Process each extract and send to widget
+ # Track the actual column index as we may have multiple cells per extract
+ col = 0
+ for extract in re._extracts:
+ if isinstance(extract, RenderOutput):
+ image_and_label = extract.download_images()
+ # Each image gets its own column
+ for il in image_and_label:
+ self.widget.set_cell_data(row, col, il[0].getvalue(), "jpg")
+ col += 1
+ else:
+ plot_data = extract.download_data()
+ data = plot_data[0][0]
+ all_axis_labels = data[0]
+
+ axis_data = []
+ for axis_idx in range(len(all_axis_labels)):
+ axis_values = [row[axis_idx] for row in data[1:]]
+ axis_data.append(axis_values)
+
+ self.widget.set_cell_scatter_plot(
+ row,
+ col,
+ f"Row #{row} - Multi-axis Plot",
+ all_axis_labels,
+ axis_data,
+ plot_name=f"plot-{row}",
+ plot_mode="markers",
+ )
+ col += 1

  def _ipython_display_(self) -> None:
  """
@@ -1,19 +1,162 @@
- from .visualization import Scene, RenderOutput
+ import json
+ import os
+ from typing import TYPE_CHECKING
+
+ from .visualization import Scene, RenderOutput, range_query
  from .data_extraction import DataExtractor, ExtractOutput
- from ..enum import RenderStatusType, ExtractStatusType
+ from ..enum import RenderStatusType, ExtractStatusType, FieldAssociation
  from ..solution import Solution
+ from .vis_util import _InternalToken, _get_status
  from time import sleep
+ from .._proto.api.v0.luminarycloud.vis import vis_pb2
+ from .._client import get_default_client
+ from .._helpers._get_project_id import _get_project_id
+ import luminarycloud.enum.quantity_type as quantity_type
+
+ if TYPE_CHECKING:
+ from .interactive_report import InteractiveReport
+
+
+ # TODO Will/Matt: this could be something like what we store in the DB
+ # A report can contain a list of report entries that reference post proc.
+ # extracts + styling info for how they should be displayed
+ class ReportEntry:
+ """
+ A single entry in a report, containing references to extracts and metadata.
+ Each extract can have multiple pieces of data (e.g. multiple images for a
+ RenderOutput, or multiple curves for an ExtractOutput). Metadata is a
+ dictionary of key/value pairs that can be used to store additional
+ information about the report entry. Typically, the metadata would include
+ things like simulation ID, lift/drag values, and scalar ranges for each
+ solution. The metadata is used to filter and sort data in the ensemble
+ widget.
+ """
+
+ def __init__(
+ self, project_id: str, extract_ids: list[str] = [], metadata: dict[str, str | float] = {}
+ ) -> None:
+ self._project_id = project_id
+ self._extract_ids = extract_ids
+ self._extracts: list[ExtractOutput | RenderOutput] = []
+ self._metadata = metadata
+ self._statuses = [RenderStatusType.INVALID] * len(self._extract_ids)
+
+ def to_dict(self) -> dict:
+ return {
+ "project_id": self._project_id,
+ "extract_ids": self._extract_ids,
+ "metadata": self._metadata,
+ }
+
+ @classmethod
+ def from_dict(cls, data: dict) -> "ReportEntry":
+ return cls(
+ project_id=data["project_id"],
+ extract_ids=data.get("extract_ids", []),
+ metadata=data.get("metadata", {}),
+ )
+
+ def refresh_statuses(self) -> None:
+ for i, eid in enumerate(self._extract_ids):
+ self._statuses[i] = _get_status(self._project_id, eid)
+
+ def is_complete(self) -> bool:
+ self.refresh_statuses()
+ return all(
+ (status == RenderStatusType.COMPLETED or status == RenderStatusType.FAILED)
+ for status in self._statuses
+ )
+
+ # Download all extracts for this report entry
+ def download_extracts(self) -> None:
+ self._extracts = []
+ for eid in self._extract_ids:
+ status = _get_status(self._project_id, eid)
+ if status != ExtractStatusType.COMPLETED:
+ raise Exception(f"Extract {eid} is not complete")
+ req = vis_pb2.DownloadExtractRequest()
+ req.extract_id = eid
+ req.project_id = self._project_id
+ # TODO: This is a bit awkward in that we download the extract to figure out what type
+ # it is, but this is just a temporary thing, later we'll have a report DB table that
+ # stores the extracts for a report and their types, etc.
+ res: vis_pb2.DownloadExtractResponse = get_default_client().DownloadExtract(req)
+ extract = (
+ ExtractOutput(_InternalToken())
+ if res.HasField("line_data")
+ else RenderOutput(_InternalToken())
+ )
+ extract._set_data(eid, self._project_id, eid, eid, status)
+ self._extracts.append(extract)


- # Notes(matt): we need a good way to pass "legend" information to the report.
- # The legend is list of scalar values that are associated with each solution in
- # the report. Examples include outputs like lift or drag, scalar ranges in the
- # solutions, or any user provided data. We could add a helper class to auto-produce
- # the legend data for common use cases or the user could provide their own. The data
- # would look like a csv file or a dictionary keyed on the solution/sim id, where each
- # entry is a list of scalar values. We would also need a header to describe what the values
- # are.
  class Report:
+ """
+ A report containing multiple report entries. There is support for
+ serialization and deserialization to/from JSON since generating the extracts
+ and metadata can be expensive.
+ """
+
+ def __init__(self, entries: list[ReportEntry]):
+ self._entries = entries
+
+ def to_dict(self) -> dict:
+ return {"entries": [entry.to_dict() for entry in self._entries]}
+
+ @classmethod
+ def from_dict(cls, data: dict) -> "Report":
+ entries = [ReportEntry.from_dict(e) for e in data.get("entries", [])]
+ return cls(entries)
+
+ def _check_status(self) -> bool:
+ """Check the status of all ReportEntries and their extracts, grouped by entry."""
+ still_pending = False
+ print("\n" + "=" * 60)
+ print("STATUS CHECK".center(60))
+ print("=" * 60)
+
+ if not self._entries:
+ raise RuntimeError("No report entries to check status.")
+
+ print(f"{'Entry':<8} | {'Extract ID':<20} | {'Status':<15}")
+ print("-" * 60)
+ for idx, entry in enumerate(self._entries):
+ entry.refresh_statuses()
+ for eid, status in zip(entry._extract_ids, entry._statuses):
+ if status != RenderStatusType.COMPLETED and status != RenderStatusType.FAILED:
+ still_pending = True
+ print(f"{idx:<8} | {eid:<20} | {status.name:<15}")
+ print("=" * 60)
+ return still_pending
+
+ def wait_for_completion(self):
+ """Wait for all report entries' extracts to complete."""
+ if not self._entries:
+ raise RuntimeError("No report entries to wait for.")
+ while self._check_status():
+ sleep(5)
+ print("All report entries' extracts have completed.")
+
+ def interact(self) -> "InteractiveReport":
+ from .interactive_report import InteractiveReport
+
+ if self._check_status():
+ raise ValueError("Error: report entries are still pending")
+ return InteractiveReport(self._entries)
+
+
+ class ReportGenerator:
+ """
+ A helper for generating reports from multiple solutions, scenes, data extractors, and
+ per-solution metadata.
+
+ Attributes:
+ -----------
+ calculate_ranges: bool
+ Whether to auto-calculate solution quantity ranges and add them to the
+ metadata. Default is False.
+ """
+
  def __init__(self, solutions: list[Solution]):
  self._scenes: list[Scene] = []
  self._data_extractors: list[DataExtractor] = []
@@ -21,6 +164,10 @@ class Report:
  # When we fire off requests we use these objects to track the progress.
  self._extract_outputs: list[ExtractOutput] = []
  self._render_outputs: list[RenderOutput] = []
+ # Controls if we should calculate solution quantity ranges
+ self.calculate_ranges: bool = False
+ # Key is solution ID, value is the metadata dict
+ self._metadata: dict[str, dict[str, str | float]] = {}
  for solution in solutions:
  if not isinstance(solution, Solution):
  raise TypeError("Expected a list of Solution objects.")
@@ -30,69 +177,76 @@ class Report:
  raise TypeError("Expected a Scene object.")
  self._scenes.append(scene)

+ # TODO(Matt): we could just make this a single data extract then control how they
+ # are added to each solution.
  def add_data_extractor(self, data_extractor: DataExtractor):
  if not isinstance(data_extractor, DataExtractor):
  raise TypeError("Expected a DataExtractor object.")
  self._data_extractors.append(data_extractor)

- def _check_status(self) -> bool:
- """Check the status of all render outputs and extract outputs."""
- still_pending = False
- print("\n" + "=" * 60)
- print("STATUS CHECK".center(60))
- print("=" * 60)
-
- if not self._render_outputs and not self._extract_outputs:
- raise RuntimeError("No render outputs or extract outputs to check status.")
-
- # Check render outputs
- if self._render_outputs:
- print(f"{'Type':<8} | {'ID':<20} | {'Status':<15}")
- print("-" * 60)
-
- for output in self._render_outputs:
- if (
- output.status != RenderStatusType.COMPLETED
- and output.status != RenderStatusType.FAILED
- ):
- output.refresh()
- still_pending = True
- print(f"{'Render':<8} | {str(output._extract_id):<20} | {output.status.name:<15}")
-
- # Check extract outputs
- for output in self._extract_outputs:
- if (
- output.status != ExtractStatusType.COMPLETED
- and output.status != ExtractStatusType.FAILED
- ):
- output.refresh()
- still_pending = True
- print(f"{'Extract':<8} | {str(output._extract_id):<20} | {output.status.name:<15}")
-
- print("=" * 60)
- return still_pending
+ def add_metadata(self, solution_id: str, metadata: dict[str, str | float]):
+ if solution_id not in self._metadata:
+ self._metadata[solution_id] = {}
+ self._metadata[solution_id].update(metadata)

- def create_report_data(self):
+ def create_report(self) -> Report:
+ entries = []
  for solution in self._solution:
- for scene in self._scenes:
- sol_scene = scene.clone(solution)
- self._render_outputs.append(
- sol_scene.render_images(
- name="Report Scene", description="Generated Report Scene"
- )
- )
+ extract_ids = []
+ project_id = _get_project_id(solution)
+ if not project_id:
+ raise ValueError("Solution does not have a project_id.")
+ metadata = self._metadata.get(solution.id, {})
+ metadata["solution id"] = solution.id
+ if self.calculate_ranges:
+ print(f"Calculating solution quantity ranges {solution.id}")
+ ranges = range_query(solution, FieldAssociation.CELLS)
+ for range_res in ranges:
+ if not quantity_type._is_vector(range_res.quantity):
+ metadata[f"{range_res.field_name} min"] = range_res.ranges[0].min_value
+ metadata[f"{range_res.field_name} max"] = range_res.ranges[0].max_value
+ else:
+ for r in range(len(range_res.ranges)):
+ if r == 0:
+ comp = "x"
+ elif r == 1:
+ comp = "y"
+ elif r == 2:
+ comp = "z"
+ elif r == 3:
+ comp = "mag"
+ comp_range = range_res.ranges[r]
+ metadata[f"{range_res.field_name} min ({comp})"] = comp_range.min_value
+ metadata[f"{range_res.field_name} max ({comp})"] = comp_range.max_value
  for extractor in self._data_extractors:
  sol_extractor = extractor.clone(solution)
- self._extract_outputs.append(
- sol_extractor.create_extracts(
- name="Report Extract", description="Generated Report Extract"
- )
+ extract = sol_extractor.create_extracts(
+ name="Report Extract", description="Generated Report Extract"
  )
+ extract_ids.append(extract._extract_id)

- def wait_for_completion(self):
- """Wait for all render and extract outputs to complete."""
- if not self._render_outputs and not self._extract_outputs:
- raise RuntimeError("No render outputs or extract outputs to wait for.")
- while self._check_status():
- sleep(5)
- print("All render and extract outputs have completed.")
+ for scene in self._scenes:
+ sol_scene = scene.clone(solution)
+ render_extract = sol_scene.render_images(
+ name="Report Scene", description="Generated Report Scene"
+ )
+ extract_ids.append(render_extract._extract_id)
+
+ entries.append(ReportEntry(project_id, extract_ids, metadata))
+ return Report(entries)
+
+
+ def save_report_to_json(report: Report, name: str, directory: str = ".") -> str:
+ """Save a Report object to a JSON file named {name}_lcreport.json in the specified directory."""
+ filename = f"{name}_lcreport.json"
+ filepath = os.path.join(directory, filename)
+ with open(filepath, "w") as f:
+ json.dump(report.to_dict(), f, indent=2)
+ return filepath
+
+
+ def load_report_from_json(filepath: str) -> "Report":
+ """Load a Report object from a JSON file at the given file path."""
+ with open(filepath, "r") as f:
+ data = json.load(f)
+ return Report.from_dict(data)
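A sketch of the end-to-end generation flow built from the pieces added above. `solutions`, `scene`, and `extractor` are placeholders; `add_scene` is assumed to mirror `add_data_extractor`, since only its type check appears as context in this hunk:

    from luminarycloud.vis.report import ReportGenerator, save_report_to_json, load_report_from_json

    gen = ReportGenerator(solutions)              # solutions: list[Solution]
    gen.calculate_ranges = True                   # also record per-field min/max in each entry's metadata
    gen.add_scene(scene)                          # assumed name; cloned per solution
    gen.add_data_extractor(extractor)
    gen.add_metadata(solutions[0].id, {"angle of attack": 4.0})

    report = gen.create_report()                  # one ReportEntry per solution, extracts kicked off
    report.wait_for_completion()                  # polls extract statuses every 5 seconds
    path = save_report_to_json(report, "wing_study")   # -> ./wing_study_lcreport.json
    report = load_report_from_json(path)          # later: reload without regenerating extracts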
@@ -24,6 +24,7 @@ from luminarycloud.enum import (
  SceneMode,
  VisQuantity,
  QuantityType,
+ FieldAssociation,
  )
  from ..exceptions import NotFoundError
  from ..geometry import Geometry, get_geometry
@@ -1094,6 +1095,65 @@ def list_quantities(solution: Solution) -> List[VisQuantity]:
  return result


+ @dc.dataclass
+ class RangeResult:
+ ranges: List[DataRange]
+ """
+ A list of ranges per component. Scalars have a single range and vector ranges are in
+ x, y, z, magnitude order.
+ """
+ quantity: VisQuantity
+ """ The quantity type for the field, if available. """
+ field_name: str
+ """ Name of the field. """
+
+
+ def range_query(solution: Solution, field_association: FieldAssociation) -> List[RangeResult]:
+ """
+ The range query returns the min/max values for all fields in a solution. Two
+ types of ranges can be chosen: cell-centered and point-centered data. The
+ results will vary based on the choice. The solver natively outputs cell-centered
+ data, so the cell based query will return the actual min/max values
+ from the solver run. Visualization operates on point-centered data, which is
+ recentered from the cell-centered data.
+
+ Parameters
+ ----------
+ solution: Solution
+ The solution object to query.
+ field_association: FieldAssociation
+ The type of data to query, either cell-centered or point-centered.
+ """
+ if not isinstance(solution, Solution):
+ raise TypeError(f"Expected 'Solution', got {type(solution).__name__}")
+
+ if not isinstance(field_association, FieldAssociation):
+ raise TypeError(f"Expected 'FieldAssociation', got {type(field_association).__name__}")
+
+ sim = get_simulation(solution.simulation_id)
+ req = vis_pb2.RangeQueryRequest()
+ req.entity.simulation.id = solution.simulation_id
+ req.entity.simulation.solution_id = solution.id
+ req.project_id = sim.project_id
+ if field_association == FieldAssociation.POINTS:
+ req.field_association = vis_pb2.FieldAssociation.FIELD_ASSOCIATION_POINTS
+ else:
+ req.field_association = vis_pb2.FieldAssociation.FIELD_ASSOCIATION_CELLS
+ res: vis_pb2.RangeQueryReply = get_default_client().RangeQuery(req)
+ result: List[RangeResult] = []
+ for r in res.range:
+ ranges = []
+ for r_range in r.range:
+ data_range = DataRange()
+ data_range.min_value = r_range.min
+ data_range.max_value = r_range.max
+ ranges.append(data_range)
+ result.append(
+ RangeResult(ranges=ranges, quantity=VisQuantity(r.quantity), field_name=r.field_name)
+ )
+ return result
+
+
  def list_renders(entity: Geometry | Mesh | Solution) -> List[RenderOutput]:
  """
  Lists all previously created renders associated with a project and an entity.
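A minimal sketch of calling the new range_query; the `solution` object is a placeholder obtained elsewhere through the SDK:

    from luminarycloud.enum import FieldAssociation
    from luminarycloud.vis import range_query

    results = range_query(solution, FieldAssociation.CELLS)   # actual solver min/max values
    for res in results:
        if len(res.ranges) == 1:
            # scalar field: a single min/max pair
            print(res.field_name, res.ranges[0].min_value, res.ranges[0].max_value)
        else:
            # vector field: ranges are ordered x, y, z, magnitude
            for comp, r in zip(("x", "y", "z", "mag"), res.ranges):
                print(f"{res.field_name} ({comp})", r.min_value, r.max_value)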