geometallurgy-0.4.11-py3-none-any.whl → geometallurgy-0.4.13-py3-none-any.whl

This diff compares the contents of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in the public registry.
Files changed (49)
  1. elphick/geomet/__init__.py +11 -11
  2. elphick/geomet/base.py +1133 -1133
  3. elphick/geomet/block_model.py +319 -358
  4. elphick/geomet/config/__init__.py +1 -1
  5. elphick/geomet/config/config_read.py +39 -39
  6. elphick/geomet/config/flowsheet_example_partition.yaml +31 -31
  7. elphick/geomet/config/flowsheet_example_simple.yaml +25 -25
  8. elphick/geomet/config/mc_config.yml +35 -35
  9. elphick/geomet/data/downloader.py +39 -39
  10. elphick/geomet/data/register.csv +12 -12
  11. elphick/geomet/datasets/__init__.py +2 -2
  12. elphick/geomet/datasets/datasets.py +47 -47
  13. elphick/geomet/datasets/downloader.py +40 -40
  14. elphick/geomet/datasets/register.csv +12 -12
  15. elphick/geomet/datasets/sample_data.py +196 -196
  16. elphick/geomet/extras.py +35 -35
  17. elphick/geomet/flowsheet/__init__.py +1 -1
  18. elphick/geomet/flowsheet/flowsheet.py +1216 -1193
  19. elphick/geomet/flowsheet/loader.py +99 -99
  20. elphick/geomet/flowsheet/operation.py +256 -256
  21. elphick/geomet/flowsheet/stream.py +39 -38
  22. elphick/geomet/interval_sample.py +641 -641
  23. elphick/geomet/io.py +379 -379
  24. elphick/geomet/plot.py +147 -147
  25. elphick/geomet/sample.py +28 -28
  26. elphick/geomet/utils/amenability.py +49 -49
  27. elphick/geomet/utils/block_model_converter.py +93 -93
  28. elphick/geomet/utils/components.py +136 -136
  29. elphick/geomet/utils/data.py +49 -49
  30. elphick/geomet/utils/estimates.py +108 -108
  31. elphick/geomet/utils/interp.py +193 -193
  32. elphick/geomet/utils/interp2.py +134 -134
  33. elphick/geomet/utils/layout.py +72 -72
  34. elphick/geomet/utils/moisture.py +61 -61
  35. elphick/geomet/utils/output.html +617 -0
  36. elphick/geomet/utils/pandas.py +378 -378
  37. elphick/geomet/utils/parallel.py +29 -29
  38. elphick/geomet/utils/partition.py +63 -63
  39. elphick/geomet/utils/size.py +51 -51
  40. elphick/geomet/utils/timer.py +80 -80
  41. elphick/geomet/utils/viz.py +56 -56
  42. elphick/geomet/validate.py.hide +176 -176
  43. {geometallurgy-0.4.11.dist-info → geometallurgy-0.4.13.dist-info}/LICENSE +21 -21
  44. {geometallurgy-0.4.11.dist-info → geometallurgy-0.4.13.dist-info}/METADATA +7 -5
  45. geometallurgy-0.4.13.dist-info/RECORD +49 -0
  46. {geometallurgy-0.4.11.dist-info → geometallurgy-0.4.13.dist-info}/WHEEL +1 -1
  47. elphick/geomet/utils/sampling.py +0 -5
  48. geometallurgy-0.4.11.dist-info/RECORD +0 -49
  49. {geometallurgy-0.4.11.dist-info → geometallurgy-0.4.13.dist-info}/entry_points.txt +0 -0
elphick/geomet/datasets/sample_data.py CHANGED
@@ -1,196 +1,196 @@
(the 196 removed lines and 196 added lines are identical, so the file content is shown once)
"""
To provide sample data
"""
import random
from functools import partial
from pathlib import Path
from typing import Optional, Iterable, List

import numpy as np
import pandas as pd

from elphick.geomet import Sample, IntervalSample
from elphick.geomet.flowsheet import Flowsheet
from elphick.geomet.utils.components import is_compositional
from elphick.geomet.datasets import load_size_by_assay, load_iron_ore_sample_a072391, load_size_distribution, \
    load_a072391_met
from elphick.geomet.utils.partition import napier_munn, perfect


def sample_data(include_wet_mass: bool = True, include_dry_mass: bool = True,
                include_moisture: bool = False) -> pd.DataFrame:
    """Creates synthetic data for testing

    Args:
        include_wet_mass: If True, wet mass is included.
        include_dry_mass: If True, dry mass is included.
        include_moisture: If True, moisture (H2O) is included.

    Returns:

    """

    # mass_wet: pd.Series = pd.Series([100, 90, 110], name='wet_mass')
    # mass_dry: pd.Series = pd.Series([90, 80, 100], name='dry_mass')
    mass_wet: pd.Series = pd.Series([100., 90., 110.], name='wet_mass')
    mass_dry: pd.Series = pd.Series([90., 80., 90.], name='mass_dry')
    chem: pd.DataFrame = pd.DataFrame.from_dict({'FE': [57., 59., 61.],
                                                 'SIO2': [5.2, 3.1, 2.2],
                                                 'al2o3': [3.0, 1.7, 0.9],
                                                 'LOI': [5.0, 4.0, 3.0]})
    attrs: pd.Series = pd.Series(['grp_1', 'grp_1', 'grp_2'], name='group')

    mass: pd.DataFrame = pd.concat([mass_wet, mass_dry], axis='columns')
    if include_wet_mass is True and mass_dry is False:
        mass = mass_wet
    elif include_dry_mass is False and mass_dry is True:
        mass = mass_dry
    elif include_dry_mass is False and mass_dry is False:
        raise AssertionError('Arguments provided result in no mass column')

    if include_moisture is True:
        moisture: pd.DataFrame = (mass_wet - mass_dry) / mass_wet * 100
        moisture.name = 'H2O'
        res: pd.DataFrame = pd.concat([mass, moisture, chem, attrs], axis='columns')
    else:
        res: pd.DataFrame = pd.concat([mass, chem, attrs], axis='columns')

    res.index.name = 'index'

    return res


def dh_intervals(n: int = 5,
                 n_dh: int = 2,
                 analytes: Optional[Iterable[str]] = ('Fe', 'Al2O3')) -> pd.DataFrame:
    """Down-samples The drillhole data for testing

    Args:
        n: Number of samples
        n_dh: The number of drill-holes included
        analytes: the analytes to include
    Returns:

    """

    df_data: pd.DataFrame = load_iron_ore_sample_a072391()
    # df_data: pd.DataFrame = pd.read_csv('../sample_data/iron_ore_sample_data.csv', index_col='index')

    drillholes: List[str] = []
    for i in range(0, n_dh):
        drillholes.append(random.choice(list(df_data['DHID'].unique())))

    df_data = df_data.query('DHID in @drillholes').groupby('DHID').sample(5)

    cols_to_drop = [col for col in is_compositional(df_data.columns) if (col not in analytes) and (col != 'H2O')]
    df_data.drop(columns=cols_to_drop, inplace=True)

    df_data.index.name = 'index'

    return df_data


def size_by_assay() -> pd.DataFrame:
    """ Sample Size x Assay dataset
    """

    df_data: pd.DataFrame = load_size_by_assay()

    # df_data: pd.DataFrame = pd.DataFrame(data=[size_retained, size_passing, mass_pct, fe, sio2, al2o3],
    #                                      index=['size_retained', 'size_passing', 'mass_pct', 'Fe', 'SiO2', 'Al2O3']).T

    # # convert the sizes from micron to mm
    # df_data[['size_retained', 'size_passing']] = df_data[['size_retained', 'size_passing']] / 1000.0

    df_data.set_index(['size_retained', 'size_passing'], inplace=True)

    # ensure we meet the input column name requirements
    df_data.rename(columns={'mass_pct': 'mass_dry'}, inplace=True)

    return df_data


def size_by_assay_2() -> pd.DataFrame:
    """ 3 x Sample Size x Assay dataset (balanced)
    """
    mc_size: IntervalSample = IntervalSample(size_by_assay(), name='feed', moisture_in_scope=False)
    partition = partial(napier_munn, d50=0.150, ep=0.1, dim='size')
    mc_coarse, mc_fine = mc_size.split_by_partition(partition_definition=partition, name_1='coarse', name_2='fine')
    fs: Flowsheet = Flowsheet().from_streams([mc_size, mc_coarse, mc_fine])
    return fs.to_dataframe()


def size_by_assay_3() -> pd.DataFrame:
    """ 3 x Sample Size x Assay dataset (unbalanced)
    """
    mc_size: IntervalSample = IntervalSample(size_by_assay(), name='feed')
    partition = partial(napier_munn, d50=0.150, ep=0.1, dim='size')
    mc_coarse, mc_fine = mc_size.split_by_partition(partition_definition=partition, name_1='coarse', name_2='fine')
    # add error to the coarse stream to create an imbalance
    df_coarse_2 = mc_coarse.data.to_dataframe().apply(lambda x: np.random.normal(loc=x, scale=np.std(x)))
    mc_coarse_2: Sample = Sample(data=df_coarse_2, name='coarse')
    mc_coarse_2 = mc_coarse_2.set_parent_node(mc_size)
    fs_ub: Flowsheet = Flowsheet().from_streams([mc_size, mc_coarse_2, mc_fine])
    return fs_ub.to_dataframe()


def size_distribution() -> pd.DataFrame:
    return load_size_distribution()


def iron_ore_sample_data() -> pd.DataFrame:
    return load_iron_ore_sample_a072391().set_index('index')


def iron_ore_met_sample_data() -> pd.DataFrame:
    df_met: pd.DataFrame = load_a072391_met()
    df_met.dropna(subset=['Dry Weight Lump (kg)'], inplace=True)
    df_met['Dry Weight Lump (kg)'] = df_met['Dry Weight Lump (kg)'].apply(lambda x: x.replace('..', '.')).astype(
        'float64')
    df_met['Fe'] = df_met['Fe'].replace('MISSING', np.nan).astype('float64')
    df_met.dropna(subset=['Fe', 'Bulk_Hole_No', 'Dry Weight Fines (kg)'], inplace=True)
    df_met.columns = [col.replace('LOITotal', 'LOI') for col in df_met.columns]
    df_met.columns = [
        col.strip().lower().replace(' ', '_').replace('(', '').replace(')', '').replace('%', 'pct').replace('__', '_')
        for
        col in df_met.columns]

    # clean up some values and types
    df_met = df_met.replace('-', np.nan).replace('#VALUE!', np.nan)
    head_cols: List[str] = [col for col in df_met.columns if 'head' in col]
    df_met[head_cols] = df_met[head_cols].astype('float64')
    df_met['bulk_hole_no'] = df_met['bulk_hole_no'].astype('category')
    df_met['sample_number'] = df_met['sample_number'].astype('int64')
    df_met.set_index('sample_number', inplace=True)

    # moves suffixes to prefix
    df_met = df_met.pipe(_move_suffix_to_prefix, '_head')
    df_met = df_met.pipe(_move_suffix_to_prefix, '_lump')
    return df_met


def demo_size_network() -> Flowsheet:
    mc_size: Sample = Sample(size_by_assay(), name='size sample')
    partition = partial(perfect, d50=0.150, dim='size')
    mc_coarse, mc_fine = mc_size.split_by_partition(partition_definition=partition)
    mc_coarse.name = 'coarse'
    mc_fine.name = 'fine'
    fs: Flowsheet = Flowsheet().from_streams([mc_size, mc_coarse, mc_fine])
    return fs


def _move_suffix_to_prefix(df, suffix):
    suffix_length = len(suffix)
    for col in df.columns:
        if col.endswith(suffix):
            new_col = suffix[1:] + '_' + col[:-suffix_length]  # Remove the suffix and prepend it to the start
            df.rename(columns={col: new_col}, inplace=True)
    return df


if __name__ == '__main__':
    df1: pd.DataFrame = size_by_assay()
    df2: pd.DataFrame = size_by_assay_2()
    df3: pd.DataFrame = size_by_assay_3()
    df4: pd.DataFrame = iron_ore_met_sample_data()
    print('done')
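The hunk above carries the dataset helpers unchanged, but it documents the sample-data API that the package's own demos build on. A minimal usage sketch, assuming the module path elphick.geomet.datasets.sample_data implied by the file list and using only the calls already visible in the hunk (IntervalSample, split_by_partition, Flowsheet.from_streams):

    from functools import partial

    from elphick.geomet import IntervalSample
    from elphick.geomet.flowsheet import Flowsheet
    from elphick.geomet.datasets.sample_data import size_by_assay
    from elphick.geomet.utils.partition import napier_munn

    # bundled size-by-assay table, indexed by size_retained / size_passing
    feed = IntervalSample(size_by_assay(), name='feed', moisture_in_scope=False)

    # split the feed on the size dimension using a Napier-Munn partition curve
    part = partial(napier_munn, d50=0.150, ep=0.1, dim='size')
    coarse, fine = feed.split_by_partition(partition_definition=part, name_1='coarse', name_2='fine')

    # assemble a flowsheet and flatten it back to a single DataFrame
    fs = Flowsheet().from_streams([feed, coarse, fine])
    df = fs.to_dataframe()

The same pattern with the perfect partition in place of napier_munn reproduces demo_size_network() above.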
elphick/geomet/extras.py CHANGED
@@ -1,35 +1,35 @@
- _blockmodel_imports = None
-
-
- # Define the Extras class to encapsulate the imported modules
- class BlockmodelExtras:
-     def __init__(self, omf, omfvista, pv):
-         self.omf = omf
-         self.omfvista = omfvista
-         self.pv = pv
-
-
- def import_blockmodel_packages():
-     """Helper method to safely import (only once) the blockmodel packages."""
-     global _blockmodel_imports
-
-     # Optional imports
-     try:
-         import omf
-         import omfvista
-         import pyvista as pv
-         from pyvista import CellType
-     except ImportError as e:
-         raise ImportError("Optional packages omfpandas or omfvista is not installed."
-                           "Please install it to use this feature.") from e
-
-     if _blockmodel_imports is None:
-         try:
-             import omf
-             import omfvista
-             import pyvista as pv
-             _blockmodel_imports = (omf, omfvista, pv)
-         except ImportError:
-             raise ImportError("Failed to import blockmodel related packages. "
-                               "Consider executing: 'poetry install --extras blockmodel'")
-     return _blockmodel_imports
+ _blockmodel_imports = None
+
+
+ # Define the Extras class to encapsulate the imported modules
+ class BlockmodelExtras:
+     def __init__(self, omfpandas, omfvista, pv):
+         self.omfpandas = omfpandas
+         self.omfvista = omfvista
+         self.pv = pv
+
+
+ def import_blockmodel_packages():
+     """Helper method to safely import (only once) the blockmodel packages."""
+     global _blockmodel_imports
+
+     # Optional imports
+     try:
+         import omfpandas
+         import omfvista
+         import pyvista as pv
+         from pyvista import CellType
+     except ImportError as e:
+         raise ImportError("Optional packages omfpandas or omfvista is not installed."
+                           "Please install it to use this feature.") from e
+
+     if _blockmodel_imports is None:
+         try:
+             import omfpandas
+             import omfvista
+             import pyvista as pv
+             _blockmodel_imports = (omfpandas, omfvista, pv)
+         except ImportError:
+             raise ImportError("Failed to import blockmodel related packages. "
+                               "Consider executing: 'poetry install --extras blockmodel'")
+     return _blockmodel_imports
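The substantive change in extras.py is the swap of the lazily imported optional dependency from omf to omfpandas, so the cached tuple now carries omfpandas. A minimal sketch of guarding that optional dependency from calling code, assuming the helper is invoked directly as defined above:

    from elphick.geomet.extras import import_blockmodel_packages

    try:
        # in 0.4.13 this returns (omfpandas, omfvista, pv); 0.4.11 returned (omf, omfvista, pv)
        omfpandas, omfvista, pv = import_blockmodel_packages()
    except ImportError:
        # optional extra not installed; see: poetry install --extras blockmodel
        omfpandas = omfvista = pv = None

The helper stores the modules in the module-level _blockmodel_imports, so the tuple is only assembled once per process.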
elphick/geomet/flowsheet/__init__.py CHANGED
@@ -1 +1 @@
- from .flowsheet import Flowsheet
+ from .flowsheet import Flowsheet