geometallurgy 0.4.13__py3-none-any.whl → 0.4.15__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (48)
  1. elphick/geomet/__init__.py +11 -11
  2. elphick/geomet/base.py +1133 -1133
  3. elphick/geomet/block_model.py +319 -319
  4. elphick/geomet/config/__init__.py +1 -1
  5. elphick/geomet/config/config_read.py +39 -39
  6. elphick/geomet/config/flowsheet_example_partition.yaml +31 -31
  7. elphick/geomet/config/flowsheet_example_simple.yaml +25 -25
  8. elphick/geomet/config/mc_config.yml +35 -35
  9. elphick/geomet/data/downloader.py +39 -39
  10. elphick/geomet/data/register.csv +12 -12
  11. elphick/geomet/datasets/__init__.py +2 -2
  12. elphick/geomet/datasets/datasets.py +47 -47
  13. elphick/geomet/datasets/downloader.py +40 -40
  14. elphick/geomet/datasets/register.csv +12 -12
  15. elphick/geomet/datasets/sample_data.py +196 -196
  16. elphick/geomet/extras.py +35 -35
  17. elphick/geomet/flowsheet/__init__.py +1 -1
  18. elphick/geomet/flowsheet/flowsheet.py +1216 -1216
  19. elphick/geomet/flowsheet/loader.py +99 -99
  20. elphick/geomet/flowsheet/operation.py +256 -256
  21. elphick/geomet/flowsheet/stream.py +39 -39
  22. elphick/geomet/interval_sample.py +641 -641
  23. elphick/geomet/io.py +379 -379
  24. elphick/geomet/plot.py +147 -147
  25. elphick/geomet/sample.py +28 -28
  26. elphick/geomet/utils/amenability.py +49 -49
  27. elphick/geomet/utils/block_model_converter.py +93 -93
  28. elphick/geomet/utils/components.py +136 -136
  29. elphick/geomet/utils/data.py +49 -49
  30. elphick/geomet/utils/estimates.py +108 -108
  31. elphick/geomet/utils/interp.py +193 -193
  32. elphick/geomet/utils/interp2.py +134 -134
  33. elphick/geomet/utils/layout.py +72 -72
  34. elphick/geomet/utils/moisture.py +61 -61
  35. elphick/geomet/utils/pandas.py +378 -378
  36. elphick/geomet/utils/parallel.py +29 -29
  37. elphick/geomet/utils/partition.py +63 -63
  38. elphick/geomet/utils/size.py +51 -51
  39. elphick/geomet/utils/timer.py +80 -80
  40. elphick/geomet/utils/viz.py +56 -56
  41. elphick/geomet/validate.py.hide +176 -176
  42. {geometallurgy-0.4.13.dist-info → geometallurgy-0.4.15.dist-info}/LICENSE +21 -21
  43. {geometallurgy-0.4.13.dist-info → geometallurgy-0.4.15.dist-info}/METADATA +2 -3
  44. geometallurgy-0.4.15.dist-info/RECORD +48 -0
  45. {geometallurgy-0.4.13.dist-info → geometallurgy-0.4.15.dist-info}/WHEEL +1 -1
  46. elphick/geomet/utils/output.html +0 -617
  47. geometallurgy-0.4.13.dist-info/RECORD +0 -49
  48. {geometallurgy-0.4.13.dist-info → geometallurgy-0.4.15.dist-info}/entry_points.txt +0 -0
@@ -1,176 +1,176 @@
1
- """
2
- Classes to support validation of block model files.
3
- """
4
-
5
- import logging
6
- import tempfile
7
- from abc import ABC, abstractmethod
8
- from concurrent.futures import ThreadPoolExecutor
9
- from concurrent.futures import as_completed
10
- from pathlib import Path
11
- from typing import Optional
12
-
13
- import pandas as pd
14
-
15
- from elphick.geomet.readers import ParquetFileReader, OMFFileReader
16
- from elphick.geomet.utils.components import is_compositional
17
-
18
-
19
- #
20
- # class FileValidator(ABC):
21
- # def __init__(self, file_path: Path, schema_path: Optional[Path] = None,
22
- # lazy_validation: bool = True,
23
- # negative_to_nan_threshold: float = 0):
24
- # if not file_path.exists():
25
- # raise ValueError(f"File does not exist: {file_path}")
26
- # self._logger = logging.getLogger(self.__class__.__name__)
27
- # self.file_path = file_path
28
- # self.schema_path = schema_path
29
- # self.schema: DataFrameSchema = DataFrameSchema({}) if schema_path is None else pandera.io.from_yaml(schema_path)
30
- # self.lazy_validation = lazy_validation
31
- # self.negative_to_nan_threshold = negative_to_nan_threshold
32
- #
33
- # self.report: Optional[dict] = None
34
- #
35
- # @abstractmethod
36
- # def validate(self):
37
- # pass
38
- #
39
- # def create_schema_file(self, schema_output_path: Path):
40
- # """
41
- # Create an inferred schema file from the file being validated
42
- # Args:
43
- # schema_output_path: The output path for the schema file
44
- #
45
- # Returns:
46
- #
47
- # """
48
- #
49
- # df = self.read_column()
50
- #
51
- # with open(schema_output_path, 'w') as f:
52
- # yaml.dump(self.schema.to_yaml(), f)
53
-
54
-
55
- class BaseProcessor(ABC):
56
- """
57
- To support columnar processing of large datasets, the BaseProcessor class provides a framework for processing
58
- data by column. The process method will process the data by column if a file_path is provided, or the entire
59
- dataset if data is provided.
60
- """
61
-
62
- def __init__(self, file_path: Optional[Path] = None, data: Optional[pd.DataFrame] = None, **kwargs):
63
- self.logger = logging.getLogger(self.__class__.__name__)
64
- if file_path is None and data is None:
65
- raise ValueError("Either file_path or data must be provided.")
66
- self.file_path = file_path
67
- self.data = data
68
- self.temp_files = []
69
-
70
- if self.file_path.suffix == '.parquet':
71
- self.reader: ParquetFileReader = ParquetFileReader(self.file_path)
72
- elif self.file_path.suffix == '.omf':
73
- self.reader: OMFFileReader = OMFFileReader(self.file_path, **kwargs)
74
- else:
75
- raise ValueError(f"Unsupported file format: {self.file_path.suffix}")
76
-
77
- @property
78
- def composition_variables(self) -> list[str]:
79
- """
80
- Detect columns that contain composition data
81
-
82
- Returns:
83
- A list of column names that contain composition data
84
- """
85
- res = None
86
- if self.reader.variables_in_file:
87
- res = list(is_compositional(self.reader.variables_in_file, strict=False).keys())
88
- return res
89
-
90
- def process(self, num_workers: Optional[int] = 1, **kwargs):
91
- if self.data is None:
92
- with ThreadPoolExecutor(max_workers=num_workers, thread_name_prefix='geomet-processor') as executor:
93
- futures = {executor.submit(self._process_variable, variable, **kwargs): variable for variable in
94
- self.reader.variables_in_file}
95
- results = {}
96
- for future in as_completed(futures):
97
- variable = futures[future]
98
- try:
99
- results[variable] = future.result()
100
- except Exception as exc:
101
- print(f'{variable} generated an exception: {exc}')
102
- else:
103
- results = self._process_data()
104
- return results
105
-
106
- @abstractmethod
107
- def _process_variable(self, column, **kwargs):
108
- pass
109
-
110
- @abstractmethod
111
- def _process_data(self):
112
- pass
113
-
114
-
115
- class PreProcessor(BaseProcessor):
116
- def __init__(self, file_path: Optional[Path] = None, data: Optional[pd.DataFrame] = None, **kwargs):
117
- """
118
- Preprocess data before validation.
119
- For large datasets where memory may be constrained, file_path will provide processing by columns.
120
- If data is provided, the entire dataset already in memory will be processed.
121
- Args:
122
- file_path: The optional path to the file to be preprocessed.
123
- data: The optional DataFrame to be preprocessed.
124
- """
125
-
126
- super().__init__(file_path, data, **kwargs)
127
-
128
- def process(self, negative_to_nan_threshold: Optional[float] = -1,
129
- not_detected_assays_threshold: Optional[float] = 0.5,
130
- max_workers=1):
131
- super().process(max_workers=max_workers, negative_to_nan_threshold=negative_to_nan_threshold,
132
- not_detected_assays_threshold=not_detected_assays_threshold)
133
-
134
- def _process_variable(self, column, **kwargs):
135
- data = pd.read_parquet(self.file_path, columns=[column])
136
- processed_data = self._process_data(data)
137
- temp_file = tempfile.NamedTemporaryFile(delete=False)
138
- processed_data.to_parquet(temp_file.name)
139
- self.temp_files.append(temp_file)
140
-
141
- def _process_data(self) -> pd.DataFrame:
142
- # Preprocessing logic here
143
- return data
144
-
145
-
146
- class Validator(BaseProcessor):
147
- def __init__(self, file_path: Optional[Path] = None, data: Optional[pd.DataFrame] = None, **kwargs):
148
- """
149
- Validate the data using a pandera schema.
150
- For large datasets where memory may be constrained file_path will provide processing by columns.
151
- If data is provided, the entire dataset already in memory will be processed.
152
- Args:
153
- file_path: The optional path to the file to be preprocessed.
154
- data: The optional DataFrame to be preprocessed.
155
- """
156
- super().__init__(file_path, data, **kwargs)
157
-
158
- def process(self):
159
- if self.data is None:
160
- columns = get_parquet_columns(self.file_path)
161
- with ThreadPoolExecutor() as executor:
162
- for column in columns:
163
- executor.submit(self._process_variable, column)
164
- else:
165
- self._process_data()
166
-
167
- def _process_variable(self, column):
168
- data = pd.read_parquet(self.file_path, columns=[column])
169
- processed_data = self._process_data(data)
170
- temp_file = tempfile.NamedTemporaryFile(delete=False)
171
- processed_data.to_parquet(temp_file.name)
172
- self.temp_files.append(temp_file)
173
-
174
- def _process_data(self, data):
175
- # Validation logic here
176
- return data
1
+ """
2
+ Classes to support validation of block model files.
3
+ """
4
+
5
+ import logging
6
+ import tempfile
7
+ from abc import ABC, abstractmethod
8
+ from concurrent.futures import ThreadPoolExecutor
9
+ from concurrent.futures import as_completed
10
+ from pathlib import Path
11
+ from typing import Optional
12
+
13
+ import pandas as pd
14
+
15
+ from elphick.geomet.readers import ParquetFileReader, OMFFileReader
16
+ from elphick.geomet.utils.components import is_compositional
17
+
18
+
19
+ #
20
+ # class FileValidator(ABC):
21
+ # def __init__(self, file_path: Path, schema_path: Optional[Path] = None,
22
+ # lazy_validation: bool = True,
23
+ # negative_to_nan_threshold: float = 0):
24
+ # if not file_path.exists():
25
+ # raise ValueError(f"File does not exist: {file_path}")
26
+ # self._logger = logging.getLogger(self.__class__.__name__)
27
+ # self.file_path = file_path
28
+ # self.schema_path = schema_path
29
+ # self.schema: DataFrameSchema = DataFrameSchema({}) if schema_path is None else pandera.io.from_yaml(schema_path)
30
+ # self.lazy_validation = lazy_validation
31
+ # self.negative_to_nan_threshold = negative_to_nan_threshold
32
+ #
33
+ # self.report: Optional[dict] = None
34
+ #
35
+ # @abstractmethod
36
+ # def validate(self):
37
+ # pass
38
+ #
39
+ # def create_schema_file(self, schema_output_path: Path):
40
+ # """
41
+ # Create an inferred schema file from the file being validated
42
+ # Args:
43
+ # schema_output_path: The output path for the schema file
44
+ #
45
+ # Returns:
46
+ #
47
+ # """
48
+ #
49
+ # df = self.read_column()
50
+ #
51
+ # with open(schema_output_path, 'w') as f:
52
+ # yaml.dump(self.schema.to_yaml(), f)
53
+
54
+
55
+ class BaseProcessor(ABC):
56
+ """
57
+ To support columnar processing of large datasets, the BaseProcessor class provides a framework for processing
58
+ data by column. The process method will process the data by column if a file_path is provided, or the entire
59
+ dataset if data is provided.
60
+ """
61
+
62
+ def __init__(self, file_path: Optional[Path] = None, data: Optional[pd.DataFrame] = None, **kwargs):
63
+ self.logger = logging.getLogger(self.__class__.__name__)
64
+ if file_path is None and data is None:
65
+ raise ValueError("Either file_path or data must be provided.")
66
+ self.file_path = file_path
67
+ self.data = data
68
+ self.temp_files = []
69
+
70
+ if self.file_path.suffix == '.parquet':
71
+ self.reader: ParquetFileReader = ParquetFileReader(self.file_path)
72
+ elif self.file_path.suffix == '.omf':
73
+ self.reader: OMFFileReader = OMFFileReader(self.file_path, **kwargs)
74
+ else:
75
+ raise ValueError(f"Unsupported file format: {self.file_path.suffix}")
76
+
77
+ @property
78
+ def composition_variables(self) -> list[str]:
79
+ """
80
+ Detect columns that contain composition data
81
+
82
+ Returns:
83
+ A list of column names that contain composition data
84
+ """
85
+ res = None
86
+ if self.reader.variables_in_file:
87
+ res = list(is_compositional(self.reader.variables_in_file, strict=False).keys())
88
+ return res
89
+
90
+ def process(self, num_workers: Optional[int] = 1, **kwargs):
91
+ if self.data is None:
92
+ with ThreadPoolExecutor(max_workers=num_workers, thread_name_prefix='geomet-processor') as executor:
93
+ futures = {executor.submit(self._process_variable, variable, **kwargs): variable for variable in
94
+ self.reader.variables_in_file}
95
+ results = {}
96
+ for future in as_completed(futures):
97
+ variable = futures[future]
98
+ try:
99
+ results[variable] = future.result()
100
+ except Exception as exc:
101
+ print(f'{variable} generated an exception: {exc}')
102
+ else:
103
+ results = self._process_data()
104
+ return results
105
+
106
+ @abstractmethod
107
+ def _process_variable(self, column, **kwargs):
108
+ pass
109
+
110
+ @abstractmethod
111
+ def _process_data(self):
112
+ pass
113
+
114
+
115
+ class PreProcessor(BaseProcessor):
116
+ def __init__(self, file_path: Optional[Path] = None, data: Optional[pd.DataFrame] = None, **kwargs):
117
+ """
118
+ Preprocess data before validation.
119
+ For large datasets where memory may be constrained, file_path will provide processing by columns.
120
+ If data is provided, the entire dataset already in memory will be processed.
121
+ Args:
122
+ file_path: The optional path to the file to be preprocessed.
123
+ data: The optional DataFrame to be preprocessed.
124
+ """
125
+
126
+ super().__init__(file_path, data, **kwargs)
127
+
128
+ def process(self, negative_to_nan_threshold: Optional[float] = -1,
129
+ not_detected_assays_threshold: Optional[float] = 0.5,
130
+ max_workers=1):
131
+ super().process(max_workers=max_workers, negative_to_nan_threshold=negative_to_nan_threshold,
132
+ not_detected_assays_threshold=not_detected_assays_threshold)
133
+
134
+ def _process_variable(self, column, **kwargs):
135
+ data = pd.read_parquet(self.file_path, columns=[column])
136
+ processed_data = self._process_data(data)
137
+ temp_file = tempfile.NamedTemporaryFile(delete=False)
138
+ processed_data.to_parquet(temp_file.name)
139
+ self.temp_files.append(temp_file)
140
+
141
+ def _process_data(self) -> pd.DataFrame:
142
+ # Preprocessing logic here
143
+ return data
144
+
145
+
146
+ class Validator(BaseProcessor):
147
+ def __init__(self, file_path: Optional[Path] = None, data: Optional[pd.DataFrame] = None, **kwargs):
148
+ """
149
+ Validate the data using a pandera schema.
150
+ For large datasets where memory may be constrained file_path will provide processing by columns.
151
+ If data is provided, the entire dataset already in memory will be processed.
152
+ Args:
153
+ file_path: The optional path to the file to be preprocessed.
154
+ data: The optional DataFrame to be preprocessed.
155
+ """
156
+ super().__init__(file_path, data, **kwargs)
157
+
158
+ def process(self):
159
+ if self.data is None:
160
+ columns = get_parquet_columns(self.file_path)
161
+ with ThreadPoolExecutor() as executor:
162
+ for column in columns:
163
+ executor.submit(self._process_variable, column)
164
+ else:
165
+ self._process_data()
166
+
167
+ def _process_variable(self, column):
168
+ data = pd.read_parquet(self.file_path, columns=[column])
169
+ processed_data = self._process_data(data)
170
+ temp_file = tempfile.NamedTemporaryFile(delete=False)
171
+ processed_data.to_parquet(temp_file.name)
172
+ self.temp_files.append(temp_file)
173
+
174
+ def _process_data(self, data):
175
+ # Validation logic here
176
+ return data
@@ -1,21 +1,21 @@
1
- MIT License
2
-
3
- Copyright (c) 2024 Greg Elphick
4
-
5
- Permission is hereby granted, free of charge, to any person obtaining a copy
6
- of this software and associated documentation files (the "Software"), to deal
7
- in the Software without restriction, including without limitation the rights
8
- to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
- copies of the Software, and to permit persons to whom the Software is
10
- furnished to do so, subject to the following conditions:
11
-
12
- The above copyright notice and this permission notice shall be included in all
13
- copies or substantial portions of the Software.
14
-
15
- THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
- IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
- FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
- AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
- LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
- OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
- SOFTWARE.
1
+ MIT License
2
+
3
+ Copyright (c) 2024 Greg Elphick
4
+
5
+ Permission is hereby granted, free of charge, to any person obtaining a copy
6
+ of this software and associated documentation files (the "Software"), to deal
7
+ in the Software without restriction, including without limitation the rights
8
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9
+ copies of the Software, and to permit persons to whom the Software is
10
+ furnished to do so, subject to the following conditions:
11
+
12
+ The above copyright notice and this permission notice shall be included in all
13
+ copies or substantial portions of the Software.
14
+
15
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21
+ SOFTWARE.
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: geometallurgy
3
- Version: 0.4.13
3
+ Version: 0.4.15
4
4
  Summary: Tools for the geometallurgist
5
5
  Home-page: https://github.com/elphick/geometallurgy
6
6
  Author: Greg
@@ -9,13 +9,12 @@ Requires-Python: >=3.10,<3.13
9
9
  Classifier: Programming Language :: Python :: 3
10
10
  Classifier: Programming Language :: Python :: 3.10
11
11
  Classifier: Programming Language :: Python :: 3.11
12
- Classifier: Programming Language :: Python :: 3.12
13
12
  Provides-Extra: all
14
13
  Provides-Extra: blockmodel
15
14
  Provides-Extra: spatial
16
15
  Provides-Extra: validation
17
16
  Requires-Dist: folium (>=0.16.0,<0.17.0) ; extra == "spatial"
18
- Requires-Dist: omfpandas (>=0.8.1,<0.9.0) ; extra == "blockmodel"
17
+ Requires-Dist: omfpandas (>=0.8.1) ; extra == "blockmodel"
19
18
  Requires-Dist: omfvista (>=0.3.0) ; extra == "blockmodel"
20
19
  Requires-Dist: pandas (>=1.0)
21
20
  Requires-Dist: pandera[io] (>=0.19.3,<0.21.0) ; extra == "validation"
@@ -0,0 +1,48 @@
1
+ elphick/geomet/__init__.py,sha256=gcaArz-agLsm_Tf9KNvmGznw4Jml2QTjj_CxKUC1Ejg,274
2
+ elphick/geomet/base.py,sha256=vp0C7DEAAUDGhTdQ8_Hz9WmqZRjNGn7m5CqEGy3L_98,50603
3
+ elphick/geomet/block_model.py,sha256=E-krPZfjQVIjTgxdDgp0RbvDTJJLFJbUMELE6dh7Jlk,12937
4
+ elphick/geomet/config/__init__.py,sha256=F94hbxN3KzSaljbElIGVhdEwX0FKmHxST4jJ7rNohxY,35
5
+ elphick/geomet/config/config_read.py,sha256=frRwfRwUXpgxwMNCiBVFUw1-yPbBHs3h2KjmzXImvxY,1396
6
+ elphick/geomet/config/flowsheet_example_partition.yaml,sha256=85vrhOotQHhaKkYN-0QQA7ed03EMRkyaKZc-GtXMKro,716
7
+ elphick/geomet/config/flowsheet_example_simple.yaml,sha256=u4sM2gkyyHXnOlqW3TvwBjRdl4x34zGeZS7YGNmdfC4,428
8
+ elphick/geomet/config/mc_config.yml,sha256=6eUGCJlU5Aw9M5Rn6Xuezf8_bShxwxZ1g5XB8P5iLWc,913
9
+ elphick/geomet/data/downloader.py,sha256=x_107mTNCaG9YwEO54mBx7l3KaqOavCi93uZjPdC5GU,1621
10
+ elphick/geomet/data/register.csv,sha256=-N3F6L0097C-I79axINi_ewFAxiqbT_SOSW3-XtPkI4,3046
11
+ elphick/geomet/datasets/__init__.py,sha256=7CX648YFMv39FchI1_oEF28zF52v4gX0lnqomPwkcvU,59
12
+ elphick/geomet/datasets/datasets.py,sha256=RUqQWXZTWEA3R4S5RRdjwlosQZFy2PaMX8x329eP9mo,1882
13
+ elphick/geomet/datasets/downloader.py,sha256=JXHQfwQYbe1X-tIfajx4kGbqkcWh0U2k5R03ur2J6E4,1622
14
+ elphick/geomet/datasets/register.csv,sha256=-N3F6L0097C-I79axINi_ewFAxiqbT_SOSW3-XtPkI4,3046
15
+ elphick/geomet/datasets/sample_data.py,sha256=jt5DWxdMmPbZGDuon2s8Q2wlX3cEegB0dSmRKF4pz4I,7684
16
+ elphick/geomet/extras.py,sha256=UMkAYp72X-30ckaZ-2ENffUUZ7k7JBTdtgsVdgxxYAk,1164
17
+ elphick/geomet/flowsheet/__init__.py,sha256=-lxSLPZNQfiLXKZ2qqS5XbbhrZA2ABi3ppx0LaHnNEI,33
18
+ elphick/geomet/flowsheet/flowsheet.py,sha256=__kgowBIyWfvXcdPWCFihoEUdOqTj7KszSbKGF1AkBo,52032
19
+ elphick/geomet/flowsheet/loader.py,sha256=8nd9Vqbg1de35iuoc4mdRFxrUsIBZed0ivXIAu80jBk,4756
20
+ elphick/geomet/flowsheet/operation.py,sha256=f8k0-Gr_Uy2SlEp8bwAaG4yeBa3DU0HoPn9wyWhYipE,9720
21
+ elphick/geomet/flowsheet/stream.py,sha256=NOXcYeZLSmOSoSRFc7M36Jc8c1ARgjiCvtRuixYfuqA,1370
22
+ elphick/geomet/interval_sample.py,sha256=fhcWBTA01TqvCBsJv7dzWZHRBpw_4W2Ahawks5SPj28,31320
23
+ elphick/geomet/io.py,sha256=tZsX_getGxL07dPlF3Ozyzvt2tFHE5OdgPM5pc5xL68,15709
24
+ elphick/geomet/plot.py,sha256=e9uz8L3QZ23CW4OYm78NhdZl01i0DxHfC4r1kigz7Ss,5732
25
+ elphick/geomet/profile.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
26
+ elphick/geomet/sample.py,sha256=cbkqkSbCu7IU09IOlEp_Wfx0-tYT6hfaOqUy30gMctM,1223
27
+ elphick/geomet/utils/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
28
+ elphick/geomet/utils/amenability.py,sha256=wZ4eIAmcJRfWUp0ZwzfVPUaHEe0g9N2LBaoNv1fZ61E,1874
29
+ elphick/geomet/utils/block_model_converter.py,sha256=REYbS4cu-8z0IzCpuU9ISlPnAvxs9nyX4Bm4yo9mBC8,3364
30
+ elphick/geomet/utils/components.py,sha256=oDR8w7aFKRP38u98yfHp-8MtOaG-c0YcGtwNjgrhGWA,3546
31
+ elphick/geomet/utils/data.py,sha256=AnQ3JXEt2M-T5doGljM_fvdX1CvGbMr6wwjxqcw0fjs,1983
32
+ elphick/geomet/utils/estimates.py,sha256=-x6KDQb-04IrxN8yO38Fx93F6_SGG67koagCtYBtW3c,4892
33
+ elphick/geomet/utils/interp.py,sha256=9lb2sEFfAWYzFECybLPD4nF0S85Xo37nNkxU1DG__A4,10679
34
+ elphick/geomet/utils/interp2.py,sha256=ybuQBNTQOdVzmVYOhQDx2LkGKpl8yxgbQPz_hwS8ClQ,6633
35
+ elphick/geomet/utils/layout.py,sha256=-c1EF-G0qGRQbLrrTS-LsbII-lnvw71y97iUBLd02do,2080
36
+ elphick/geomet/utils/moisture.py,sha256=t9WMwADyz-QAMW-cdah1tIlzTDrhooSoKOPdIlVQHvU,2192
37
+ elphick/geomet/utils/pandas.py,sha256=6sKl3WUjXLR7qFmqBzuCjnfCoUsLRapwZk2nO5BfzYI,17397
38
+ elphick/geomet/utils/parallel.py,sha256=l38JBTkCmdqKHQkS8njoA-sBN9XQGkhF59XtAhWShgs,842
39
+ elphick/geomet/utils/partition.py,sha256=U0jFpvdvZJVdutfB6RzUzKfO9NWCGtBkeySx-QbP-l4,1534
40
+ elphick/geomet/utils/size.py,sha256=EmV_sv2bOImQN3s7TWCniU_y83HNJEPtZH7fMMkYTcc,2272
41
+ elphick/geomet/utils/timer.py,sha256=8WNKLFcINRsZ3IsKtOIZ77YbKtqczyOOTEWY9h9Uxxw,3112
42
+ elphick/geomet/utils/viz.py,sha256=M0CnfDXBHtYb8aak1Sfz6XLvRSmkzX3ybIDllEmDR8A,1718
43
+ elphick/geomet/validate.py.hide,sha256=qAWJlgq0jp19UakVV0dEU_AsqV_JctUn1QTHn8cCRw0,6738
44
+ geometallurgy-0.4.15.dist-info/LICENSE,sha256=GrSVdcGtNbGvAYC_tIjLHBrIVPyg-Ksfe7ZGr087yCI,1069
45
+ geometallurgy-0.4.15.dist-info/METADATA,sha256=YW9b1XuvYaxff2WQK1NcydFmPXSQf2VTNgxn0sav2Iw,4469
46
+ geometallurgy-0.4.15.dist-info/WHEEL,sha256=WGfLGfLX43Ei_YORXSnT54hxFygu34kMpcQdmgmEwCQ,88
47
+ geometallurgy-0.4.15.dist-info/entry_points.txt,sha256=aQI-8kmaba_c9ZGOFkJgWl0MWBke5BQLNyPSVcbS7EU,58
48
+ geometallurgy-0.4.15.dist-info/RECORD,,
@@ -1,4 +1,4 @@
1
1
  Wheel-Version: 1.0
2
- Generator: poetry-core 1.9.1
2
+ Generator: poetry-core 1.6.0
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any