luminarycloud 0.22.0__py3-none-any.whl → 0.22.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (86)
  1. luminarycloud/_client/authentication_plugin.py +49 -0
  2. luminarycloud/_client/client.py +38 -11
  3. luminarycloud/_client/http_client.py +1 -1
  4. luminarycloud/_client/retry_interceptor.py +64 -2
  5. luminarycloud/_helpers/__init__.py +9 -0
  6. luminarycloud/_helpers/_inference_jobs.py +227 -0
  7. luminarycloud/_helpers/_parse_iso_datetime.py +54 -0
  8. luminarycloud/_helpers/download.py +11 -0
  9. luminarycloud/_helpers/proto_decorator.py +38 -7
  10. luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2.py +152 -132
  11. luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2.pyi +66 -8
  12. luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2_grpc.py +34 -0
  13. luminarycloud/_proto/api/v0/luminarycloud/geometry/geometry_pb2_grpc.pyi +12 -0
  14. luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2.py +142 -39
  15. luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2.pyi +300 -3
  16. luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2_grpc.py +34 -0
  17. luminarycloud/_proto/api/v0/luminarycloud/physics_ai/physics_ai_pb2_grpc.pyi +12 -0
  18. luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2.py +255 -0
  19. luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2.pyi +466 -0
  20. luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2_grpc.py +242 -0
  21. luminarycloud/_proto/api/v0/luminarycloud/physicsaiinference/physicsaiinference_pb2_grpc.pyi +95 -0
  22. luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2.py +29 -7
  23. luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2.pyi +39 -0
  24. luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2_grpc.py +36 -0
  25. luminarycloud/_proto/api/v0/luminarycloud/simulation/simulation_pb2_grpc.pyi +18 -0
  26. luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2.py +88 -65
  27. luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2.pyi +42 -0
  28. luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2_grpc.py +34 -0
  29. luminarycloud/_proto/api/v0/luminarycloud/thirdpartyintegration/onshape/onshape_pb2_grpc.pyi +12 -0
  30. luminarycloud/_proto/api/v0/luminarycloud/vis/vis_pb2.py +163 -153
  31. luminarycloud/_proto/api/v0/luminarycloud/vis/vis_pb2.pyi +37 -3
  32. luminarycloud/_proto/base/base_pb2.py +7 -6
  33. luminarycloud/_proto/base/base_pb2.pyi +4 -0
  34. luminarycloud/_proto/client/simulation_pb2.py +358 -339
  35. luminarycloud/_proto/client/simulation_pb2.pyi +89 -3
  36. luminarycloud/_proto/physicsaiinferenceservice/physicsaiinferenceservice_pb2.py +35 -0
  37. luminarycloud/_proto/physicsaiinferenceservice/physicsaiinferenceservice_pb2.pyi +7 -0
  38. luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2.py +6 -3
  39. luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2_grpc.py +68 -0
  40. luminarycloud/_proto/physicsaitrainingservice/physicsaitrainingservice_pb2_grpc.pyi +24 -0
  41. luminarycloud/_wrapper.py +53 -7
  42. luminarycloud/enum/vis_enums.py +6 -0
  43. luminarycloud/feature_modification.py +25 -32
  44. luminarycloud/geometry.py +10 -6
  45. luminarycloud/geometry_version.py +4 -0
  46. luminarycloud/mesh.py +4 -0
  47. luminarycloud/meshing/mesh_generation_params.py +5 -6
  48. luminarycloud/meshing/sizing_strategy/sizing_strategies.py +1 -2
  49. luminarycloud/outputs/__init__.py +2 -0
  50. luminarycloud/outputs/output_definitions.py +3 -3
  51. luminarycloud/outputs/stopping_conditions.py +94 -0
  52. luminarycloud/params/enum/_enum_wrappers.py +16 -0
  53. luminarycloud/params/geometry/shapes.py +33 -33
  54. luminarycloud/params/simulation/adaptive_mesh_refinement/__init__.py +1 -0
  55. luminarycloud/params/simulation/adaptive_mesh_refinement/active_region_.py +83 -0
  56. luminarycloud/params/simulation/adaptive_mesh_refinement/boundary_layer_profile_.py +1 -1
  57. luminarycloud/params/simulation/adaptive_mesh_refinement_.py +8 -1
  58. luminarycloud/physics_ai/__init__.py +7 -0
  59. luminarycloud/physics_ai/inference.py +166 -199
  60. luminarycloud/physics_ai/models.py +22 -0
  61. luminarycloud/physics_ai/solution.py +4 -0
  62. luminarycloud/pipelines/api.py +143 -16
  63. luminarycloud/pipelines/core.py +1 -1
  64. luminarycloud/pipelines/stages.py +22 -9
  65. luminarycloud/project.py +61 -8
  66. luminarycloud/simulation.py +25 -0
  67. luminarycloud/types/__init__.py +2 -0
  68. luminarycloud/types/ids.py +2 -0
  69. luminarycloud/types/vector3.py +1 -2
  70. luminarycloud/vis/__init__.py +1 -0
  71. luminarycloud/vis/data_extraction.py +7 -7
  72. luminarycloud/vis/filters.py +97 -0
  73. luminarycloud/vis/interactive_report.py +163 -7
  74. luminarycloud/vis/report.py +113 -1
  75. luminarycloud/vis/visualization.py +3 -0
  76. luminarycloud/volume_selection.py +16 -8
  77. luminarycloud/workflow_utils.py +149 -0
  78. {luminarycloud-0.22.0.dist-info → luminarycloud-0.22.2.dist-info}/METADATA +1 -1
  79. {luminarycloud-0.22.0.dist-info → luminarycloud-0.22.2.dist-info}/RECORD +80 -76
  80. {luminarycloud-0.22.0.dist-info → luminarycloud-0.22.2.dist-info}/WHEEL +1 -1
  81. luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2.py +0 -61
  82. luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2.pyi +0 -85
  83. luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2_grpc.py +0 -67
  84. luminarycloud/_proto/api/v0/luminarycloud/inference/inference_pb2_grpc.pyi +0 -26
  85. luminarycloud/_proto/inferenceservice/inferenceservice_pb2.py +0 -69
  86. luminarycloud/pipeline_util/dictable.py +0 -27
@@ -139,6 +139,101 @@ class Slice(Filter):
139
139
  self.plane._from_proto(filter.slice.plane)
140
140
 
141
141
 
142
+ class MultiSlice(Filter):
143
+ """
144
+ Creates multiple parallel slice planes between two positions.
145
+ Primarily useful as a convenience wrapper when combined with child filters like Threshold, avoiding manual slice filter loops.
146
+
147
+ .. warning:: This feature is experimental and may change or be removed in the future.
148
+
149
+ Attributes:
150
+ -----------
151
+ start_position : Vector3Like
152
+ The position of the first slice plane.
153
+ end_position : Vector3Like
154
+ The position of the last slice plane.
155
+ n_slices : int
156
+ The number of slice planes to create between start and end positions.
157
+ name : str
158
+ A user provided name for the filter.
159
+ project_vectors: bool
160
+ When true, vector fields will be projected onto the plane of each slice. This is often
161
+ useful for visualizing vector fields by removing the vector components in the normal
162
+ direction of the planes. Default: False
163
+ display_attrs : DisplayAttributes
164
+ Specifies this filter's appearance.
165
+ """
166
+
167
+ def __init__(self, name: str = "") -> None:
168
+ super().__init__(generate_id("multi-slice-"))
169
+ self._start_position: Vector3Like = Vector3(x=0, y=0, z=0)
170
+ self._end_position: Vector3Like = Vector3(x=1, y=0, z=0)
171
+ self._n_slices: int = 10
172
+ self._project_vectors: bool = False
173
+ self.name = name
174
+
175
+ @property
176
+ def start_position(self) -> Vector3Like:
177
+ return self._start_position
178
+
179
+ @start_position.setter
180
+ def start_position(self, new_start_position: Vector3Like) -> None:
181
+ self._start_position = _to_vector3(new_start_position)
182
+
183
+ @property
184
+ def end_position(self) -> Vector3Like:
185
+ return self._end_position
186
+
187
+ @end_position.setter
188
+ def end_position(self, new_end_position: Vector3Like) -> None:
189
+ self._end_position = _to_vector3(new_end_position)
190
+
191
+ @property
192
+ def n_slices(self) -> int:
193
+ return self._n_slices
194
+
195
+ @n_slices.setter
196
+ def n_slices(self, new_n_slices: int) -> None:
197
+ if not isinstance(new_n_slices, int):
198
+ raise TypeError(f"Expected 'int', got {type(new_n_slices).__name__}")
199
+ if new_n_slices < 2:
200
+ raise ValueError("n_slices must be at least 2")
201
+ self._n_slices = new_n_slices
202
+
203
+ @property
204
+ def project_vectors(self) -> bool:
205
+ return self._project_vectors
206
+
207
+ @project_vectors.setter
208
+ def project_vectors(self, new_project_vectors: bool) -> None:
209
+ if not isinstance(new_project_vectors, bool):
210
+ raise TypeError(f"Expected 'bool', got {type(new_project_vectors).__name__}")
211
+ self._project_vectors = new_project_vectors
212
+
213
+ def _to_proto(self) -> vis_pb2.Filter:
214
+ vis_filter = vis_pb2.Filter()
215
+ vis_filter.id = self.id
216
+ vis_filter.name = self.name
217
+ vis_filter.multi_slice.start_position.CopyFrom(_to_vector3(self.start_position)._to_proto())
218
+ vis_filter.multi_slice.end_position.CopyFrom(_to_vector3(self.end_position)._to_proto())
219
+ vis_filter.multi_slice.n_slices = self.n_slices
220
+ vis_filter.multi_slice.project_vectors = self.project_vectors
221
+ return vis_filter
222
+
223
+ def _from_proto(self, filter: vis_pb2.Filter) -> None:
224
+ typ = filter.WhichOneof("value")
225
+ if typ != "multi_slice":
226
+ raise TypeError(f"Expected 'multi_slice', got {typ}")
227
+ self.id = filter.id
228
+ self.name = filter.name
229
+ self.start_position = Vector3()
230
+ self.start_position._from_proto(filter.multi_slice.start_position)
231
+ self.end_position = Vector3()
232
+ self.end_position._from_proto(filter.multi_slice.end_position)
233
+ self.n_slices = filter.multi_slice.n_slices
234
+ self.project_vectors = filter.multi_slice.project_vectors
235
+
236
+
142
237
  class Isosurface(Filter):
143
238
  """
144
239
  Isosurface is used to evaluate scalar fields at constant values, known as
@@ -1130,6 +1225,8 @@ def _filter_to_obj_name(filter: Filter) -> str:
1130
1225
  raise TypeError(f"Expected 'Filter', got {type(filter).__name__}")
1131
1226
  if isinstance(filter, Slice):
1132
1227
  return "slice"
1228
+ elif isinstance(filter, MultiSlice):
1229
+ return "multi_slice"
1133
1230
  elif isinstance(filter, Isosurface):
1134
1231
  return "isosurface"
1135
1232
  elif isinstance(filter, PlaneClip):
@@ -1,6 +1,7 @@
1
1
  import io
2
+ import numpy as np
2
3
  from .visualization import RenderOutput
3
- from .report import ReportEntry
4
+ from .report import ReportEntry, ReportContext
4
5
 
5
6
  try:
6
7
  import luminarycloud_jupyter as lcj
@@ -8,6 +9,82 @@ except ImportError:
8
9
  lcj = None
9
10
 
10
11
 
12
+ def _detect_outliers(
13
+ metadata: list[dict[str, str | float]],
14
+ output_fields: list[str],
15
+ percentile_threshold: float = 95.0,
16
+ ) -> list[int] | None:
17
+ """
18
+ Detect outliers using Mahalanobis distance.
19
+
20
+ Parameters
21
+ ----------
22
+ metadata : list[dict[str, str | float]]
23
+ List of metadata dictionaries for each row
24
+ output_fields : list[str]
25
+ List of output field names to use for outlier detection
26
+ percentile_threshold : float, optional
27
+ Percentile threshold for outlier detection (default: 95.0)
28
+
29
+ Returns
30
+ -------
31
+ list[int] | None
32
+ List of row indices that are outliers, or None if detection fails
33
+ """
34
+ # Need at least 2 fields for meaningful multivariate analysis
35
+ if len(output_fields) < 2:
36
+ return None
37
+
38
+ # Extract data for the specified output fields
39
+ try:
40
+ data = []
41
+ for row_metadata in metadata:
42
+ row_data = []
43
+ for field in output_fields:
44
+ value = row_metadata.get(field)
45
+ if value is None or isinstance(value, str):
46
+ # Skip if field is missing or non-numeric
47
+ return None
48
+ row_data.append(float(value))
49
+ data.append(row_data)
50
+
51
+ data_array = np.array(data)
52
+
53
+ # Need at least as many samples as dimensions for covariance matrix
54
+ if len(data_array) < len(output_fields):
55
+ return None
56
+
57
+ # Calculate mean and covariance matrix
58
+ mean_vec = np.mean(data_array, axis=0)
59
+ cov_matrix = np.cov(data_array.T)
60
+
61
+ # Check if covariance matrix is singular
62
+ if np.linalg.det(cov_matrix) == 0:
63
+ return None
64
+
65
+ # Invert covariance matrix
66
+ inv_cov_matrix = np.linalg.inv(cov_matrix)
67
+
68
+ # Calculate Mahalanobis distance for each point
69
+ distances = []
70
+ for point in data_array:
71
+ diff = point - mean_vec
72
+ distance = np.sqrt(diff @ inv_cov_matrix @ diff)
73
+ distances.append(distance)
74
+
75
+ # Determine outlier threshold using percentile
76
+ threshold = np.percentile(distances, percentile_threshold)
77
+
78
+ # Find outlier indices
79
+ outlier_indices = [i for i, d in enumerate(distances) if d > threshold]
80
+
81
+ return outlier_indices
82
+
83
+ except Exception:
84
+ # If anything goes wrong, return None (no outliers detected)
85
+ return None
86
+
87
+
11
88
  class InteractiveReport:
12
89
  """
13
90
  Interactive report widget with lazy loading for large datasets.
@@ -29,7 +106,7 @@ class InteractiveReport:
29
106
  # TODO Will/Matt: this list of report entries could be how we store stuff in the DB
30
107
  # for interactive reports, to reference the post proc. extracts. A report is essentially
31
108
  # a bunch of extracts + metadata.
32
- def __init__(self, entries: list[ReportEntry]) -> None:
109
+ def __init__(self, entries: list[ReportEntry], context: ReportContext | None = None) -> None:
33
110
  if not lcj:
34
111
  raise ImportError("InteractiveScene requires luminarycloud[jupyter] to be installed")
35
112
 
@@ -37,6 +114,13 @@ class InteractiveReport:
37
114
  if len(self.entries) == 0:
38
115
  raise ValueError("Invalid number of entries, must be > 0")
39
116
 
117
+ # Validate and store context if provided
118
+ if context is not None:
119
+ self._validate_context(context)
120
+ self.context = context
121
+ else:
122
+ self.context = None
123
+
40
124
  # Determine grid dimensions by downloading first entry
41
125
  # to understand the structure (number of columns)
42
126
  first_entry = self.entries[0]
@@ -54,12 +138,55 @@ class InteractiveReport:
54
138
 
55
139
  nrows = len(self.entries)
56
140
 
141
+ # Prepare report context for the widget
142
+ context_dict = None
143
+ if self.context is not None:
144
+ context_dict = self.context.to_dict()
145
+
146
+ # Compute outlier indices if we have outputs
147
+ if self.context.outputs and len(self.context.outputs) >= 2:
148
+ outlier_indices = _detect_outliers(
149
+ [re._metadata for re in self.entries], self.context.outputs
150
+ )
151
+ if outlier_indices is not None:
152
+ context_dict["outlier_indices"] = outlier_indices
153
+
57
154
  # Create widget with metadata but without data
58
- self.widget = lcj.EnsembleWidget([re._metadata for re in self.entries], nrows, ncols)
155
+ self.widget = lcj.EnsembleWidget(
156
+ [re._metadata for re in self.entries], nrows, ncols, report_context=context_dict
157
+ )
59
158
 
60
159
  # Set the callback for lazy loading row data
61
160
  self.widget.set_row_data_callback(self._load_row_data)
62
161
 
162
def _validate_context(self, context: ReportContext) -> None:
    """
    Check that every input and output key declared by *context* is present
    in the first report entry's metadata.

    Raises:
    -------
    ValueError
        If any inputs or outputs are missing from the first entry's metadata.
    """
    available_keys = set(self.entries[0]._metadata.keys())

    missing_inputs = [key for key in context.inputs if key not in available_keys]
    missing_outputs = [key for key in context.outputs if key not in available_keys]

    # All keys accounted for: nothing to report.
    if not missing_inputs and not missing_outputs:
        return

    error_parts = []
    if missing_inputs:
        error_parts.append(f"Missing inputs: {missing_inputs}")
    if missing_outputs:
        error_parts.append(f"Missing outputs: {missing_outputs}")
    raise ValueError(f"ReportContext validation failed. {', '.join(error_parts)}")
189
+
63
190
  def _load_row_data(self, row: int) -> None:
64
191
  """
65
192
  Load and send data for a specific row to the widget.
@@ -79,11 +206,29 @@ class InteractiveReport:
79
206
  image_and_label = extract.download_images()
80
207
  # Each image gets its own column
81
208
  for il in image_and_label:
82
- self.widget.set_cell_data(row, col, il[0].getvalue(), "jpg")
209
+ # il is a tuple of (BytesIO, label)
210
+ # Use camera label for the name, fallback to "image" if empty
211
+ camera_label = il[1]
212
+ name = camera_label if camera_label else "image"
213
+ # For description: prefer extract.description, then camera label, then fallback message
214
+ description = (
215
+ extract.description
216
+ if extract.description
217
+ else camera_label if camera_label else "no label or description provided"
218
+ )
219
+ self.widget.set_cell_data(
220
+ row,
221
+ col,
222
+ il[0].getvalue(),
223
+ "jpg",
224
+ name=name,
225
+ description=description,
226
+ )
83
227
  col += 1
84
228
  else:
85
229
  plot_data = extract.download_data()
86
- data = plot_data[0][0]
230
+ data = plot_data[0][0] # The CSV data (rows)
231
+ plot_label = plot_data[0][1] # The label from the extract
87
232
  all_axis_labels = data[0]
88
233
 
89
234
  axis_data = []
@@ -91,14 +236,25 @@ class InteractiveReport:
91
236
  axis_values = [row[axis_idx] for row in data[1:]]
92
237
  axis_data.append(axis_values)
93
238
 
239
+ # For plots: use extract.name, then plot_label, then "plot" as fallback
240
+ # For description: use extract.description, fallback to message if empty
241
+ name = extract.name if extract.name else (plot_label if plot_label else "plot")
242
+ description = (
243
+ extract.description
244
+ if extract.description
245
+ else "no label or description provided"
246
+ )
247
+
94
248
  self.widget.set_cell_scatter_plot(
95
249
  row,
96
250
  col,
97
- f"Row #{row} - Multi-axis Plot",
251
+ name, # Use the same name for the plot title
98
252
  all_axis_labels,
99
253
  axis_data,
100
- plot_name=f"plot-{row}",
254
+ plot_name=name,
101
255
  plot_mode="markers",
256
+ name=name,
257
+ description=description,
102
258
  )
103
259
  col += 1
104
260
 
@@ -1,3 +1,4 @@
1
+ import csv as csv_module
1
2
  import json
2
3
  import os
3
4
  from typing import TYPE_CHECKING
@@ -17,6 +18,79 @@ if TYPE_CHECKING:
17
18
  from .interactive_report import InteractiveReport
18
19
 
19
20
 
21
class ReportContext:
    """
    Context for interactive reports that defines input and output metadata keys.

    Inputs name the metadata keys (e.g. geometric and flow conditions) that
    were varied when generating the data, and outputs name the quantities
    extracted from the simulations. For the report context to be valid we
    require that both the inputs and outputs are non-empty.

    Attributes:
    -----------
    inputs : list[str]
        List of metadata keys (column names) that represent inputs to the report.
    outputs : list[str]
        List of metadata keys (column names) that represent outputs from the report.
    """

    def __init__(self, inputs: list[str], outputs: list[str]) -> None:
        """Create a ReportContext.

        Raises:
        -------
        ValueError
            If inputs or outputs is not a non-empty list. (The class contract
            requires non-empty lists; previously only ``from_dict`` enforced
            this, so direct construction could create invalid contexts.)
        """
        if not isinstance(inputs, list):
            raise ValueError(
                f"ReportContext: 'inputs' must be a list, got {type(inputs).__name__}"
            )
        if not isinstance(outputs, list):
            raise ValueError(
                f"ReportContext: 'outputs' must be a list, got {type(outputs).__name__}"
            )
        if len(inputs) == 0:
            raise ValueError("ReportContext: 'inputs' must be non-empty")
        if len(outputs) == 0:
            raise ValueError("ReportContext: 'outputs' must be non-empty")
        self.inputs = inputs
        self.outputs = outputs

    def to_dict(self) -> dict:
        """Convert ReportContext to a dictionary for serialization."""
        return {
            "inputs": self.inputs,
            "outputs": self.outputs,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "ReportContext":
        """Create a ReportContext from a dictionary.

        Parameters:
        -----------
        data : dict
            Dictionary containing 'inputs' and 'outputs' keys.

        Raises:
        -------
        ValueError
            If 'inputs' or 'outputs' keys are missing from the data, are not
            lists, or are empty.
        """
        if "inputs" not in data:
            raise ValueError("ReportContext.from_dict: missing required key 'inputs'")
        if "outputs" not in data:
            raise ValueError("ReportContext.from_dict: missing required key 'outputs'")

        inputs = data["inputs"]
        outputs = data["outputs"]

        if not isinstance(inputs, list):
            raise ValueError(
                f"ReportContext.from_dict: 'inputs' must be a list, got {type(inputs).__name__}"
            )
        if not isinstance(outputs, list):
            raise ValueError(
                f"ReportContext.from_dict: 'outputs' must be a list, got {type(outputs).__name__}"
            )

        if len(inputs) == 0:
            raise ValueError("ReportContext.from_dict: 'inputs' must be non-empty")
        if len(outputs) == 0:
            raise ValueError("ReportContext.from_dict: 'outputs' must be non-empty")

        return cls(inputs=inputs, outputs=outputs)
85
+
86
+
87
def load_report_context_from_json(filepath: str) -> ReportContext:
    """Load a ReportContext object from a JSON file at the given file path."""
    with open(filepath, "r") as handle:
        payload = json.load(handle)
    return ReportContext.from_dict(payload)
92
+
93
+
20
94
  # TODO Will/Matt: this could be something like what we store in the DB
21
95
  # A report can contain a list of report entries that reference post proc.
22
96
  # extracts + styling info for how they should be displayed
@@ -86,7 +160,7 @@ class ReportEntry:
86
160
  if res.HasField("line_data")
87
161
  else RenderOutput(_InternalToken())
88
162
  )
89
- extract._set_data(eid, self._project_id, eid, eid, status)
163
+ extract._set_data(eid, self._project_id, "", "", status)
90
164
  self._extracts.append(extract)
91
165
 
92
166
 
@@ -250,3 +324,41 @@ def load_report_from_json(filepath: str) -> "Report":
250
324
  with open(filepath, "r") as f:
251
325
  data = json.load(f)
252
326
  return Report.from_dict(data)
327
+
328
+
329
def load_report_from_csv(filepath: str) -> "Report":
    """Load a Report object from a CSV file at the given file path.

    Each row in the CSV corresponds to a ReportEntry. Each column is converted
    to metadata. No extracts are created when loading from CSV.

    Parameters
    ----------
    filepath : str
        Path to the CSV file to load.

    Returns
    -------
    Report
        A Report object with entries populated from the CSV rows.
    """

    def _coerce(raw: str) -> str | float:
        # Numeric-looking cells become floats; everything else stays a string.
        try:
            return float(raw)
        except (ValueError, TypeError):
            return raw

    report_entries = []
    with open(filepath, "r") as handle:
        for record in csv_module.DictReader(handle):
            meta: dict[str, str | float] = {key: _coerce(cell) for key, cell in record.items()}
            # The project id is only needed to fetch extracts; CSV imports carry
            # none, so a placeholder id is sufficient.
            report_entries.append(
                ReportEntry(project_id="p-placeholder", extract_ids=[], metadata=meta)
            )

    return Report(report_entries)
@@ -38,6 +38,7 @@ from .filters import (
38
38
  Filter,
39
39
  PlaneClip,
40
40
  Slice,
41
+ MultiSlice,
41
42
  SurfaceStreamlines,
42
43
  SurfaceLIC,
43
44
  Threshold,
@@ -1282,6 +1283,8 @@ def _spec_to_scene(spec: vis_pb2.ExtractSpec) -> Scene:
1282
1283
  pfilter = PlaneClip("")
1283
1284
  elif typ == "slice":
1284
1285
  pfilter = Slice("")
1286
+ elif typ == "multi_slice":
1287
+ pfilter = MultiSlice("")
1285
1288
  elif typ == "streamlines":
1286
1289
  s_typ = filter.streamlines.WhichOneof("seed_type")
1287
1290
  if s_typ == "surface":
@@ -3,7 +3,7 @@ from __future__ import annotations
3
3
 
4
4
  import logging
5
5
  from os import PathLike
6
- from typing import TYPE_CHECKING, Iterable, Iterator, Optional
6
+ from typing import TYPE_CHECKING, Iterable, Iterator, Optional, Sequence
7
7
  from uuid import uuid4
8
8
 
9
9
  from luminarycloud._helpers import util
@@ -69,7 +69,7 @@ class VolumeSelection:
69
69
  >>> s.subtract(b)
70
70
  """
71
71
 
72
- def __init__(self, geometry: "Geometry", volumes: list[Volume | int]):
72
+ def __init__(self, geometry: "Geometry", volumes: Sequence[Volume | int]):
73
73
  self.__geometry = geometry
74
74
  self.__volume_ids = set()
75
75
  for v in volumes:
@@ -583,6 +583,7 @@ class VolumeSelection:
583
583
  self,
584
584
  vector: Vector3Like,
585
585
  quantity: int,
586
+ symmetric: bool = False,
586
587
  *,
587
588
  feature_name: str = "Linear Pattern",
588
589
  ) -> None:
@@ -595,6 +596,8 @@ class VolumeSelection:
595
596
  The vector to repeat the selected volumes along.
596
597
  quantity : int
597
598
  The number of times to repeat the selected volumes.
599
+ symmetric : bool, default False
600
+ Whether the pattern is symmetric.
598
601
  feature_name : str
599
602
  The name of the feature.
600
603
  """
@@ -608,6 +611,7 @@ class VolumeSelection:
608
611
  vector=_to_vector3_ad_proto(vector),
609
612
  ),
610
613
  quantity=quantity,
614
+ symmetric=symmetric,
611
615
  ),
612
616
  ),
613
617
  )
@@ -618,6 +622,7 @@ class VolumeSelection:
618
622
  vector: Vector3Like,
619
623
  magnitude: float,
620
624
  quantity: int,
625
+ symmetric: bool = False,
621
626
  *,
622
627
  feature_name: str = "Linear Pattern",
623
628
  ) -> None:
@@ -633,12 +638,15 @@ class VolumeSelection:
633
638
  The magnitude of the vector.
634
639
  quantity : int
635
640
  The number of times to repeat the selected volumes.
641
+ symmetric : bool, default False
642
+ Whether the pattern is symmetric.
636
643
  feature_name : str
637
644
  The name of the feature.
638
645
  """
639
646
  return self.linear_pattern(
640
647
  vector=[vector[i] * magnitude for i in range(3)],
641
648
  quantity=quantity,
649
+ symmetric=symmetric,
642
650
  feature_name=feature_name,
643
651
  )
644
652
 
@@ -719,17 +727,17 @@ class VolumeSelection:
719
727
 
720
728
  params = gpb.Create()
721
729
  if isinstance(shape, Sphere):
722
- params.sphere.CopyFrom(shape._to_proto()) # type: ignore
730
+ params.sphere.CopyFrom(shape._to_proto())
723
731
  elif isinstance(shape, Cube):
724
- params.box.CopyFrom(shape._to_proto()) # type: ignore
732
+ params.box.CopyFrom(shape._to_proto())
725
733
  elif isinstance(shape, Cylinder):
726
- params.cylinder.CopyFrom(shape._to_proto()) # type: ignore
734
+ params.cylinder.CopyFrom(shape._to_proto())
727
735
  elif isinstance(shape, Torus):
728
- params.torus.CopyFrom(shape._to_proto()) # type: ignore
736
+ params.torus.CopyFrom(shape._to_proto())
729
737
  elif isinstance(shape, Cone):
730
- params.cone.CopyFrom(shape._to_proto()) # type: ignore
738
+ params.cone.CopyFrom(shape._to_proto())
731
739
  elif isinstance(shape, HalfSphere):
732
- params.half_sphere.CopyFrom(shape._to_proto()) # type: ignore
740
+ params.half_sphere.CopyFrom(shape._to_proto())
733
741
  else:
734
742
  raise TypeError(f"Unsupported shape type: {type(shape)}")
735
743
  self.__create_feature(