geometallurgy-0.4.13-py3-none-any.whl → geometallurgy-0.4.15-py3-none-any.whl

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the package versions as they appear in their public registries.
Files changed (48)
  1. elphick/geomet/__init__.py +11 -11
  2. elphick/geomet/base.py +1133 -1133
  3. elphick/geomet/block_model.py +319 -319
  4. elphick/geomet/config/__init__.py +1 -1
  5. elphick/geomet/config/config_read.py +39 -39
  6. elphick/geomet/config/flowsheet_example_partition.yaml +31 -31
  7. elphick/geomet/config/flowsheet_example_simple.yaml +25 -25
  8. elphick/geomet/config/mc_config.yml +35 -35
  9. elphick/geomet/data/downloader.py +39 -39
  10. elphick/geomet/data/register.csv +12 -12
  11. elphick/geomet/datasets/__init__.py +2 -2
  12. elphick/geomet/datasets/datasets.py +47 -47
  13. elphick/geomet/datasets/downloader.py +40 -40
  14. elphick/geomet/datasets/register.csv +12 -12
  15. elphick/geomet/datasets/sample_data.py +196 -196
  16. elphick/geomet/extras.py +35 -35
  17. elphick/geomet/flowsheet/__init__.py +1 -1
  18. elphick/geomet/flowsheet/flowsheet.py +1216 -1216
  19. elphick/geomet/flowsheet/loader.py +99 -99
  20. elphick/geomet/flowsheet/operation.py +256 -256
  21. elphick/geomet/flowsheet/stream.py +39 -39
  22. elphick/geomet/interval_sample.py +641 -641
  23. elphick/geomet/io.py +379 -379
  24. elphick/geomet/plot.py +147 -147
  25. elphick/geomet/sample.py +28 -28
  26. elphick/geomet/utils/amenability.py +49 -49
  27. elphick/geomet/utils/block_model_converter.py +93 -93
  28. elphick/geomet/utils/components.py +136 -136
  29. elphick/geomet/utils/data.py +49 -49
  30. elphick/geomet/utils/estimates.py +108 -108
  31. elphick/geomet/utils/interp.py +193 -193
  32. elphick/geomet/utils/interp2.py +134 -134
  33. elphick/geomet/utils/layout.py +72 -72
  34. elphick/geomet/utils/moisture.py +61 -61
  35. elphick/geomet/utils/pandas.py +378 -378
  36. elphick/geomet/utils/parallel.py +29 -29
  37. elphick/geomet/utils/partition.py +63 -63
  38. elphick/geomet/utils/size.py +51 -51
  39. elphick/geomet/utils/timer.py +80 -80
  40. elphick/geomet/utils/viz.py +56 -56
  41. elphick/geomet/validate.py.hide +176 -176
  42. {geometallurgy-0.4.13.dist-info → geometallurgy-0.4.15.dist-info}/LICENSE +21 -21
  43. {geometallurgy-0.4.13.dist-info → geometallurgy-0.4.15.dist-info}/METADATA +2 -3
  44. geometallurgy-0.4.15.dist-info/RECORD +48 -0
  45. {geometallurgy-0.4.13.dist-info → geometallurgy-0.4.15.dist-info}/WHEEL +1 -1
  46. elphick/geomet/utils/output.html +0 -617
  47. geometallurgy-0.4.13.dist-info/RECORD +0 -49
  48. {geometallurgy-0.4.13.dist-info → geometallurgy-0.4.15.dist-info}/entry_points.txt +0 -0
elphick/geomet/plot.py CHANGED
@@ -1,147 +1,147 @@
- from typing import Optional, List, Union, Dict, Tuple
-
- import pandas as pd
- import plotly.graph_objects as go
- import plotly.express as px
-
- from elphick.geomet.utils.size import mean_size
- from elphick.geomet.utils.viz import plot_parallel
-
-
- def parallel_plot(data: pd.DataFrame,
-                   color: Optional[str] = None,
-                   vars_include: Optional[List[str]] = None,
-                   vars_exclude: Optional[List[str]] = None,
-                   title: Optional[str] = None,
-                   include_dims: Optional[Union[bool, List[str]]] = True,
-                   plot_interval_edges: bool = False) -> go.Figure:
-     """Create an interactive parallel plot
-
-     Useful to explore multidimensional data like mass-composition data
-
-     Args:
-         data: The DataFrame to plot
-         color: Optional color variable
-         vars_include: Optional list of variables to include in the plot
-         vars_exclude: Optional list of variables to exclude from the plot
-         title: Optional plot title
-         include_dims: Optional boolean or list of dimensions to include in the plot. True will show all dims.
-         plot_interval_edges: If True, interval edges will be plotted instead of interval mid-points
-
-     Returns:
-         plotly Figure
-     """
-     df: pd.DataFrame = data.copy()
-     if vars_include is not None:
-         missing_vars = set(vars_include).difference(set(df.columns))
-         if len(missing_vars) > 0:
-             raise KeyError(f'vars_include contains variables not found in the data: {missing_vars}')
-         df = df[vars_include]
-     if vars_exclude:
-         df = df[[col for col in df.columns if col not in vars_exclude]]
-
-     if include_dims is True:
-         df.reset_index(inplace=True)
-     elif isinstance(include_dims, List):
-         for d in include_dims:
-             df.reset_index(d, inplace=True)
-
-     interval_cols: Dict[str, int] = {col: i for i, col in enumerate(df.columns) if df[col].dtype == 'interval'}
-
-     for col, pos in interval_cols.items():
-         if plot_interval_edges:
-             df.insert(loc=pos + 1, column=f'{col}_left', value=df[col].array.left)
-             df.insert(loc=pos + 2, column=f'{col}_right', value=df[col].array.right)
-             df.drop(columns=col, inplace=True)
-         else:
-             # workaround for https://github.com/Elphick/mass-composition/issues/1
-             if col == 'size':
-                 df[col] = mean_size(pd.arrays.IntervalArray(df[col]))
-             else:
-                 df[col] = df[col].array.mid
-
-     fig = plot_parallel(data=df, color=color, title=title)
-     return fig
-
-
- def comparison_plot(data: pd.DataFrame,
-                     x: str, y: str,
-                     facet_col_wrap: int = 3,
-                     color: Optional[str] = None,
-                     trendline: bool = False,
-                     trendline_kwargs: Optional[Dict] = None) -> go.Figure:
-     """Comparison plot with multiple x-y scatter subplots
-
-     Args:
-         data: DataFrame, in tidy (tall) format, with columns for x and y
-         x: The x column
-         y: The y column
-         facet_col_wrap: The number of subplots per row before wrapping
-         color: The optional variable to color by. If None, color will be by Node
-         trendline: If True, add trendlines
-         trendline_kwargs: Allows customising the trendline: ref: https://plotly.com/python/linear-fits/.
-             Note: axis scaling across components can be affected if using {'trendline_scope': 'overall'}.
-
-     Returns:
-         plotly Figure
-     """
-     if trendline:
-         if trendline_kwargs is None:
-             trendline_kwargs = {'trendline': 'ols'}
-         else:
-             if 'trendline' not in trendline_kwargs:
-                 trendline_kwargs['trendline'] = "ols"
-     else:
-         trendline_kwargs = {'trendline': None}
-
-     data['residual'] = data[x] - data[y]
-     fig = px.scatter(data, x=x, y=y, color=color,
-                      facet_col='variable', facet_col_wrap=facet_col_wrap,
-                      hover_data=['residual'],
-                      **trendline_kwargs)
-
-     # add y=x based on data per subplot
-     variable_order = list(data['variable'].unique())
-     d_subplots = subplot_index_by_title(fig, variable_order)
-
-     for k, v in d_subplots.items():
-         tmp_df = data.query('variable==@k')
-         limits = [min([tmp_df[x].min(), tmp_df[y].min()]),
-                   max([tmp_df[x].max(), tmp_df[y].max()])]
-
-         equal_trace = go.Scatter(x=limits, y=limits,
-                                  line_color="gray", name="y=x", mode='lines', legendgroup='y=x', showlegend=False)
-         fig.add_trace(equal_trace, row=v[0], col=v[1], exclude_empty_subplots=True)
-         sp = fig.get_subplot(v[0], v[1])
-         fig.update_xaxes(scaleanchor=sp.xaxis.anchor, scaleratio=1, row=v[0], col=v[1])
-
-     fig.update_traces(selector=-1, showlegend=True)
-     fig.for_each_yaxis(lambda _y: _y.update(showticklabels=True, matches=None))
-     fig.for_each_xaxis(lambda _x: _x.update(showticklabels=True, matches=None))
-
-     return fig
-
-
- def subplot_index_by_title(fig, variable_order: List[str]) -> Dict[str, Tuple[int, int]]:
-     """Map of subplot index by title
-
-     Assumes plotly is consistent between axes numbering and annotation order.
-
-     Args:
-         fig: The figure including subplots with unique titles
-         variable_order: The variables in order, top-left to bottom-right
-
-     Returns:
-         Dict keyed by title with a tuple of subplot positions
-     """
-
-     d_subplots: Dict = {}
-     i = 0
-     for r in range(len(fig._grid_ref), 0, -1):
-         for c in range(1, len(fig._grid_ref[0]) + 1, 1):
-             if i < len(variable_order):
-                 d_subplots[variable_order[i]] = (r, c)
-                 i += 1
-
-     return d_subplots
+ from typing import Optional, List, Union, Dict, Tuple
+
+ import pandas as pd
+ import plotly.graph_objects as go
+ import plotly.express as px
+
+ from elphick.geomet.utils.size import mean_size
+ from elphick.geomet.utils.viz import plot_parallel
+
+
+ def parallel_plot(data: pd.DataFrame,
+                   color: Optional[str] = None,
+                   vars_include: Optional[List[str]] = None,
+                   vars_exclude: Optional[List[str]] = None,
+                   title: Optional[str] = None,
+                   include_dims: Optional[Union[bool, List[str]]] = True,
+                   plot_interval_edges: bool = False) -> go.Figure:
+     """Create an interactive parallel plot
+
+     Useful to explore multidimensional data like mass-composition data
+
+     Args:
+         data: The DataFrame to plot
+         color: Optional color variable
+         vars_include: Optional list of variables to include in the plot
+         vars_exclude: Optional list of variables to exclude from the plot
+         title: Optional plot title
+         include_dims: Optional boolean or list of dimensions to include in the plot. True will show all dims.
+         plot_interval_edges: If True, interval edges will be plotted instead of interval mid-points
+
+     Returns:
+         plotly Figure
+     """
+     df: pd.DataFrame = data.copy()
+     if vars_include is not None:
+         missing_vars = set(vars_include).difference(set(df.columns))
+         if len(missing_vars) > 0:
+             raise KeyError(f'vars_include contains variables not found in the data: {missing_vars}')
+         df = df[vars_include]
+     if vars_exclude:
+         df = df[[col for col in df.columns if col not in vars_exclude]]
+
+     if include_dims is True:
+         df.reset_index(inplace=True)
+     elif isinstance(include_dims, List):
+         for d in include_dims:
+             df.reset_index(d, inplace=True)
+
+     interval_cols: Dict[str, int] = {col: i for i, col in enumerate(df.columns) if df[col].dtype == 'interval'}
+
+     for col, pos in interval_cols.items():
+         if plot_interval_edges:
+             df.insert(loc=pos + 1, column=f'{col}_left', value=df[col].array.left)
+             df.insert(loc=pos + 2, column=f'{col}_right', value=df[col].array.right)
+             df.drop(columns=col, inplace=True)
+         else:
+             # workaround for https://github.com/Elphick/mass-composition/issues/1
+             if col == 'size':
+                 df[col] = mean_size(pd.arrays.IntervalArray(df[col]))
+             else:
+                 df[col] = df[col].array.mid
+
+     fig = plot_parallel(data=df, color=color, title=title)
+     return fig
+
+
+ def comparison_plot(data: pd.DataFrame,
+                     x: str, y: str,
+                     facet_col_wrap: int = 3,
+                     color: Optional[str] = None,
+                     trendline: bool = False,
+                     trendline_kwargs: Optional[Dict] = None) -> go.Figure:
+     """Comparison plot with multiple x-y scatter subplots
+
+     Args:
+         data: DataFrame, in tidy (tall) format, with columns for x and y
+         x: The x column
+         y: The y column
+         facet_col_wrap: The number of subplots per row before wrapping
+         color: The optional variable to color by. If None, color will be by Node
+         trendline: If True, add trendlines
+         trendline_kwargs: Allows customising the trendline: ref: https://plotly.com/python/linear-fits/.
+             Note: axis scaling across components can be affected if using {'trendline_scope': 'overall'}.
+
+     Returns:
+         plotly Figure
+     """
+     if trendline:
+         if trendline_kwargs is None:
+             trendline_kwargs = {'trendline': 'ols'}
+         else:
+             if 'trendline' not in trendline_kwargs:
+                 trendline_kwargs['trendline'] = "ols"
+     else:
+         trendline_kwargs = {'trendline': None}
+
+     data['residual'] = data[x] - data[y]
+     fig = px.scatter(data, x=x, y=y, color=color,
+                      facet_col='variable', facet_col_wrap=facet_col_wrap,
+                      hover_data=['residual'],
+                      **trendline_kwargs)
+
+     # add y=x based on data per subplot
+     variable_order = list(data['variable'].unique())
+     d_subplots = subplot_index_by_title(fig, variable_order)
+
+     for k, v in d_subplots.items():
+         tmp_df = data.query('variable==@k')
+         limits = [min([tmp_df[x].min(), tmp_df[y].min()]),
+                   max([tmp_df[x].max(), tmp_df[y].max()])]
+
+         equal_trace = go.Scatter(x=limits, y=limits,
+                                  line_color="gray", name="y=x", mode='lines', legendgroup='y=x', showlegend=False)
+         fig.add_trace(equal_trace, row=v[0], col=v[1], exclude_empty_subplots=True)
+         sp = fig.get_subplot(v[0], v[1])
+         fig.update_xaxes(scaleanchor=sp.xaxis.anchor, scaleratio=1, row=v[0], col=v[1])
+
+     fig.update_traces(selector=-1, showlegend=True)
+     fig.for_each_yaxis(lambda _y: _y.update(showticklabels=True, matches=None))
+     fig.for_each_xaxis(lambda _x: _x.update(showticklabels=True, matches=None))
+
+     return fig
+
+
+ def subplot_index_by_title(fig, variable_order: List[str]) -> Dict[str, Tuple[int, int]]:
+     """Map of subplot index by title
+
+     Assumes plotly is consistent between axes numbering and annotation order.
+
+     Args:
+         fig: The figure including subplots with unique titles
+         variable_order: The variables in order, top-left to bottom-right
+
+     Returns:
+         Dict keyed by title with a tuple of subplot positions
+     """
+
+     d_subplots: Dict = {}
+     i = 0
+     for r in range(len(fig._grid_ref), 0, -1):
+         for c in range(1, len(fig._grid_ref[0]) + 1, 1):
+             if i < len(variable_order):
+                 d_subplots[variable_order[i]] = (r, c)
+                 i += 1
+
+     return d_subplots
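
A minimal usage sketch for comparison_plot as defined above. The tidy-format DataFrame with a 'variable' facet column follows from the facet_col='variable' call in the function body; the column names and values below are illustrative assumptions, and trendline=True requires statsmodels for the OLS fit.

import numpy as np
import pandas as pd

from elphick.geomet.plot import comparison_plot

rng = np.random.default_rng(42)
measured = rng.uniform(50.0, 65.0, size=30)
df = pd.DataFrame({
    'variable': ['Fe'] * 30,                               # one facet per variable
    'measured': measured,
    'estimated': measured + rng.normal(0.0, 0.5, size=30)  # hypothetical estimates
})

fig = comparison_plot(df, x='measured', y='estimated', trendline=True)
fig.show()

Each facet gets its own y=x reference line scaled to that variable's data range, which is what the subplot_index_by_title helper above supports.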
elphick/geomet/sample.py CHANGED
@@ -1,28 +1,28 @@
- import copy
- from pathlib import Path
- from typing import Optional, Literal
-
- import pandas as pd
-
- from elphick.geomet.base import MassComposition
-
-
- class Sample(MassComposition):
-     def __init__(self,
-                  data: Optional[pd.DataFrame] = None,
-                  name: Optional[str] = None,
-                  moisture_in_scope: bool = True,
-                  mass_wet_var: Optional[str] = None,
-                  mass_dry_var: Optional[str] = None,
-                  moisture_var: Optional[str] = None,
-                  component_vars: Optional[list[str]] = None,
-                  composition_units: Literal['%', 'ppm', 'ppb'] = '%',
-                  components_as_symbols: bool = True,
-                  ranges: Optional[dict[str, list]] = None,
-                  config_file: Optional[Path] = None):
-         super().__init__(data=data, name=name, moisture_in_scope=moisture_in_scope,
-                          mass_wet_var=mass_wet_var, mass_dry_var=mass_dry_var,
-                          moisture_var=moisture_var, component_vars=component_vars,
-                          composition_units=composition_units, components_as_symbols=components_as_symbols,
-                          ranges=ranges, config_file=config_file)
-
+ import copy
+ from pathlib import Path
+ from typing import Optional, Literal
+
+ import pandas as pd
+
+ from elphick.geomet.base import MassComposition
+
+
+ class Sample(MassComposition):
+     def __init__(self,
+                  data: Optional[pd.DataFrame] = None,
+                  name: Optional[str] = None,
+                  moisture_in_scope: bool = True,
+                  mass_wet_var: Optional[str] = None,
+                  mass_dry_var: Optional[str] = None,
+                  moisture_var: Optional[str] = None,
+                  component_vars: Optional[list[str]] = None,
+                  composition_units: Literal['%', 'ppm', 'ppb'] = '%',
+                  components_as_symbols: bool = True,
+                  ranges: Optional[dict[str, list]] = None,
+                  config_file: Optional[Path] = None):
+         super().__init__(data=data, name=name, moisture_in_scope=moisture_in_scope,
+                          mass_wet_var=mass_wet_var, mass_dry_var=mass_dry_var,
+                          moisture_var=moisture_var, component_vars=component_vars,
+                          composition_units=composition_units, components_as_symbols=components_as_symbols,
+                          ranges=ranges, config_file=config_file)
+
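
A minimal construction sketch for the Sample class above, importing from elphick.geomet.sample per this diff. The DataFrame column names are hypothetical; mass_wet_var and mass_dry_var map them onto the wet/dry mass roles expected by MassComposition.

import pandas as pd

from elphick.geomet.sample import Sample

# hypothetical assay table; wet and dry mass columns use non-default names
df = pd.DataFrame({
    'wet_mass': [100.0, 90.0, 110.0],
    'mass_dry': [90.0, 80.0, 100.0],
    'Fe': [57.0, 59.0, 61.0]  # composition in '%', the default composition_units
})

sample = Sample(data=df, name='demo',
                mass_wet_var='wet_mass', mass_dry_var='mass_dry')
print(sample.data)  # assumes the data accessor provided by MassComposition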
elphick/geomet/utils/amenability.py CHANGED
@@ -1,49 +1,49 @@
- from typing import List
- import pandas as pd
- from itertools import tee
-
-
- def amenability_index(df_recovery: pd.DataFrame,
-                       col_target: str,
-                       col_mass_recovery: str) -> pd.Series:
-     """Calculate the Amenability Index
-
-     Implementation of the Amenability Index as presented in the paper titled APPLICATIONS OF INDIVIDUAL PARTICLE
-     PYKNOMETRY by G. Elphick and Dr. T.F. Mason at the DMS POWDERS’ 10th FERROSILICON CONFERENCE.
-
-     The Amenability Index for a particular gangue analyte is the complement of its recovery relative to the target
-     analyte across the full sample. It is process / operating-point independent, hence it characterises the ore,
-     not the process.
-
-     Args:
-         df_recovery: DataFrame containing the ideal incremental recovery of a fractionated sample.
-         col_target: The column name of the target analyte
-         col_mass_recovery: The column name of the mass recovery (yield) column
-
-     Returns:
-         A pd.Series containing the Amenability Indices for the gangue analytes
-     """
-     cols: List[str] = [col for col in df_recovery.columns if col not in [col_mass_recovery]]
-     area_target = area_trapezoid(xs=df_recovery[col_mass_recovery], ys=df_recovery[col_target])
-     results: List = []
-     for analyte in cols:
-         area = area_trapezoid(xs=df_recovery[col_mass_recovery], ys=df_recovery[analyte])
-         results.append(1 - (area / area_target))
-     return pd.Series(results, index=cols, name='amenability_index')
-
-
- def pairwise(iterable):
-     """s -> (s0, s1), (s1, s2), (s2, s3), ...
-     For use on py39; itertools.pairwise is available from py310.
-     """
-     a, b = tee(iterable)
-     next(b, None)
-     return zip(a, b)
-
-
- def area_trapezoid(xs, ys):
-     area = 0
-     for (ax, ay), (bx, by) in pairwise(zip(xs, ys)):
-         h = bx - ax
-         area += h * (ay + by) / 2
-     return area
+ from typing import List
+ import pandas as pd
+ from itertools import tee
+
+
+ def amenability_index(df_recovery: pd.DataFrame,
+                       col_target: str,
+                       col_mass_recovery: str) -> pd.Series:
+     """Calculate the Amenability Index
+
+     Implementation of the Amenability Index as presented in the paper titled APPLICATIONS OF INDIVIDUAL PARTICLE
+     PYKNOMETRY by G. Elphick and Dr. T.F. Mason at the DMS POWDERS’ 10th FERROSILICON CONFERENCE.
+
+     The Amenability Index for a particular gangue analyte is the complement of its recovery relative to the target
+     analyte across the full sample. It is process / operating-point independent, hence it characterises the ore,
+     not the process.
+
+     Args:
+         df_recovery: DataFrame containing the ideal incremental recovery of a fractionated sample.
+         col_target: The column name of the target analyte
+         col_mass_recovery: The column name of the mass recovery (yield) column
+
+     Returns:
+         A pd.Series containing the Amenability Indices for the gangue analytes
+     """
+     cols: List[str] = [col for col in df_recovery.columns if col not in [col_mass_recovery]]
+     area_target = area_trapezoid(xs=df_recovery[col_mass_recovery], ys=df_recovery[col_target])
+     results: List = []
+     for analyte in cols:
+         area = area_trapezoid(xs=df_recovery[col_mass_recovery], ys=df_recovery[analyte])
+         results.append(1 - (area / area_target))
+     return pd.Series(results, index=cols, name='amenability_index')
+
+
+ def pairwise(iterable):
+     """s -> (s0, s1), (s1, s2), (s2, s3), ...
+     For use on py39; itertools.pairwise is available from py310.
+     """
+     a, b = tee(iterable)
+     next(b, None)
+     return zip(a, b)
+
+
+ def area_trapezoid(xs, ys):
+     area = 0
+     for (ax, ay), (bx, by) in pairwise(zip(xs, ys)):
+         h = bx - ax
+         area += h * (ay + by) / 2
+     return area
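
A worked sketch of amenability_index on a toy incremental-recovery table (all numbers below are illustrative assumptions). area_trapezoid integrates each recovery curve against mass recovery with the trapezoid rule, area ≈ Σ (x_{i+1} - x_i) · (y_i + y_{i+1}) / 2, so with cumulative fractions from 0 to 1 the result can be checked by hand.

import pandas as pd

from elphick.geomet.utils.amenability import amenability_index, area_trapezoid

# cumulative recovery fractions by increasing mass recovery (illustrative)
df = pd.DataFrame({
    'mass_recovery': [0.0, 0.25, 0.50, 0.75, 1.0],
    'Fe': [0.0, 0.40, 0.70, 0.90, 1.0],    # target analyte: recovers faster than mass
    'SiO2': [0.0, 0.10, 0.30, 0.60, 1.0],  # gangue analyte: recovers slower than mass
})

area_fe = area_trapezoid(xs=df['mass_recovery'], ys=df['Fe'])  # 0.625
ai = amenability_index(df, col_target='Fe', col_mass_recovery='mass_recovery')
print(ai.to_dict())  # {'Fe': 0.0, 'SiO2': 1 - 0.375 / 0.625} -> SiO2: 0.4

Note that the target analyte is scored against itself and so always returns 0, while a gangue analyte approaches 1 as its recovery curve falls further below the target's.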
elphick/geomet/utils/block_model_converter.py CHANGED
@@ -1,93 +1,93 @@
- """
- Methods for converting volumetric data objects
- REF: omfvista.volume - copied to facilitate loading selected columns/dataarrays
- """
- from collections import defaultdict
- from typing import TYPE_CHECKING, Optional
-
- import numpy as np
- import pyvista
-
- if TYPE_CHECKING:
-     from omf import VolumeElement
-
- from omfvista.utilities import check_orientation
-
-
- def get_volume_shape(vol):
-     """Returns the shape of a gridded volume"""
-     return (len(vol.tensor_u), len(vol.tensor_v), len(vol.tensor_w))
-
-
- def volume_grid_geom_to_vtk(volgridgeom, origin=(0.0, 0.0, 0.0)):
-     """Convert the 3D gridded volume to a :class:`pyvista.StructuredGrid`
-     (or a :class:`pyvista.RectilinearGrid` when appropriate) object containing
-     the 3D volume.
-
-     Args:
-         volgridgeom (:class:`omf.volume.VolumeGridGeometry`): the grid geometry
-             to convert
-     """
-     volgridgeom._validate_mesh()
-
-     ox, oy, oz = volgridgeom.origin
-
-     # Make coordinates along each axis
-     x = ox + np.cumsum(volgridgeom.tensor_u)
-     x = np.insert(x, 0, ox)
-     y = oy + np.cumsum(volgridgeom.tensor_v)
-     y = np.insert(y, 0, oy)
-     z = oz + np.cumsum(volgridgeom.tensor_w)
-     z = np.insert(z, 0, oz)
-
-     # If axis orientations are standard then use a vtkRectilinearGrid
-     if check_orientation(volgridgeom.axis_u, volgridgeom.axis_v, volgridgeom.axis_w):
-         return pyvista.RectilinearGrid(x + origin[0], y + origin[1], z + origin[2])
-
-     # Otherwise use a vtkStructuredGrid
-     # Build out all nodes in the mesh
-     xx, yy, zz = np.meshgrid(x, y, z, indexing="ij")
-     points = np.c_[xx.ravel("F"), yy.ravel("F"), zz.ravel("F")]
-
-     # Rotate the points based on the axis orientations
-     rotation_mtx = np.array([volgridgeom.axis_u, volgridgeom.axis_v, volgridgeom.axis_w])
-     points = points.dot(rotation_mtx)
-
-     output = pyvista.StructuredGrid()
-     output.points = points
-     output.dimensions = len(x), len(y), len(z)
-     output.points += np.array(origin)
-     return output
-
-
- def volume_to_vtk(volelement: 'VolumeElement',
-                   origin=(0.0, 0.0, 0.0),
-                   columns: Optional[list[str]] = None):
-     """Convert the volume element to a VTK data object.
-
-     Args:
-         volelement (:class:`omf.volume.VolumeElement`): The volume element to convert
-         origin: tuple(float), optional
-         columns: list[str], optional - Columns to load from the data arrays
-
-     """
-     output = volume_grid_geom_to_vtk(volelement.geometry, origin=origin)
-     shp = get_volume_shape(volelement.geometry)
-     # Add data to output
-     if columns is None:
-         for data in volelement.data:
-             arr = data.array.array
-             arr = np.reshape(arr, shp).flatten(order="F")
-             output[data.name] = arr
-     else:
-         available_cols: defaultdict[str, int] = defaultdict(None, {d.name: i for i, d in enumerate(volelement.data)})
-         for col in columns:
-             col_index = available_cols.get(col)
-             if col_index is None:
-                 raise ValueError(f"Column '{col}' not found in the volume element '{volelement.name}':"
-                                  f" Available columns: {list(available_cols.keys())}")
-             data = volelement.data[col_index]
-             arr = data.array.array
-             arr = np.reshape(arr, shp).flatten(order="F")
-             output[data.name] = arr
-     return output
+ """
+ Methods for converting volumetric data objects
+ REF: omfvista.volume - copied to facilitate loading selected columns/dataarrays
+ """
+ from collections import defaultdict
+ from typing import TYPE_CHECKING, Optional
+
+ import numpy as np
+ import pyvista
+
+ if TYPE_CHECKING:
+     from omf import VolumeElement
+
+ from omfvista.utilities import check_orientation
+
+
+ def get_volume_shape(vol):
+     """Returns the shape of a gridded volume"""
+     return (len(vol.tensor_u), len(vol.tensor_v), len(vol.tensor_w))
+
+
+ def volume_grid_geom_to_vtk(volgridgeom, origin=(0.0, 0.0, 0.0)):
+     """Convert the 3D gridded volume to a :class:`pyvista.StructuredGrid`
+     (or a :class:`pyvista.RectilinearGrid` when appropriate) object containing
+     the 3D volume.
+
+     Args:
+         volgridgeom (:class:`omf.volume.VolumeGridGeometry`): the grid geometry
+             to convert
+     """
+     volgridgeom._validate_mesh()
+
+     ox, oy, oz = volgridgeom.origin
+
+     # Make coordinates along each axis
+     x = ox + np.cumsum(volgridgeom.tensor_u)
+     x = np.insert(x, 0, ox)
+     y = oy + np.cumsum(volgridgeom.tensor_v)
+     y = np.insert(y, 0, oy)
+     z = oz + np.cumsum(volgridgeom.tensor_w)
+     z = np.insert(z, 0, oz)
+
+     # If axis orientations are standard then use a vtkRectilinearGrid
+     if check_orientation(volgridgeom.axis_u, volgridgeom.axis_v, volgridgeom.axis_w):
+         return pyvista.RectilinearGrid(x + origin[0], y + origin[1], z + origin[2])
+
+     # Otherwise use a vtkStructuredGrid
+     # Build out all nodes in the mesh
+     xx, yy, zz = np.meshgrid(x, y, z, indexing="ij")
+     points = np.c_[xx.ravel("F"), yy.ravel("F"), zz.ravel("F")]
+
+     # Rotate the points based on the axis orientations
+     rotation_mtx = np.array([volgridgeom.axis_u, volgridgeom.axis_v, volgridgeom.axis_w])
+     points = points.dot(rotation_mtx)
+
+     output = pyvista.StructuredGrid()
+     output.points = points
+     output.dimensions = len(x), len(y), len(z)
+     output.points += np.array(origin)
+     return output
+
+
+ def volume_to_vtk(volelement: 'VolumeElement',
+                   origin=(0.0, 0.0, 0.0),
+                   columns: Optional[list[str]] = None):
+     """Convert the volume element to a VTK data object.
+
+     Args:
+         volelement (:class:`omf.volume.VolumeElement`): The volume element to convert
+         origin: tuple(float), optional
+         columns: list[str], optional - Columns to load from the data arrays
+
+     """
+     output = volume_grid_geom_to_vtk(volelement.geometry, origin=origin)
+     shp = get_volume_shape(volelement.geometry)
+     # Add data to output
+     if columns is None:
+         for data in volelement.data:
+             arr = data.array.array
+             arr = np.reshape(arr, shp).flatten(order="F")
+             output[data.name] = arr
+     else:
+         available_cols: defaultdict[str, int] = defaultdict(None, {d.name: i for i, d in enumerate(volelement.data)})
+         for col in columns:
+             col_index = available_cols.get(col)
+             if col_index is None:
+                 raise ValueError(f"Column '{col}' not found in the volume element '{volelement.name}':"
+                                  f" Available columns: {list(available_cols.keys())}")
+             data = volelement.data[col_index]
+             arr = data.array.array
+             arr = np.reshape(arr, shp).flatten(order="F")
+             output[data.name] = arr
+     return output
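
A usage sketch for volume_to_vtk, assuming an OMF 1.x project file whose elements include a VolumeElement; the file name 'block_model.omf', the element name 'Block Model', and the column names 'Cu' and 'Fe' are all assumptions for illustration.

import omf

from elphick.geomet.utils.block_model_converter import volume_to_vtk

# read an OMF 1.x project and select the volume element by name (names assumed)
project = omf.OMFReader('block_model.omf').get_project()
volume = next(e for e in project.elements if e.name == 'Block Model')

# load only the selected data arrays; an unknown name raises ValueError
# listing the available columns, per volume_to_vtk above
mesh = volume_to_vtk(volume, columns=['Cu', 'Fe'])
print(mesh.array_names)

Loading a subset of columns is the point of this copy of omfvista.volume: for large block models it avoids materialising every data array on the VTK mesh.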