geometallurgy 0.4.8__tar.gz → 0.4.9__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (46)
  1. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/PKG-INFO +1 -1
  2. geometallurgy-0.4.9/elphick/geomet/config/flowsheet_example_partition.yaml +32 -0
  3. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/flowsheet/flowsheet.py +62 -14
  4. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/flowsheet/operation.py +47 -4
  5. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/flowsheet/stream.py +7 -0
  6. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/interval_sample.py +31 -15
  7. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/pandas.py +2 -2
  8. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/partition.py +18 -0
  9. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/pyproject.toml +1 -1
  10. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/LICENSE +0 -0
  11. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/README.md +0 -0
  12. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/__init__.py +0 -0
  13. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/base.py +0 -0
  14. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/block_model.py +0 -0
  15. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/config/__init__.py +0 -0
  16. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/config/config_read.py +0 -0
  17. /geometallurgy-0.4.8/elphick/geomet/config/flowsheet_example.yaml → /geometallurgy-0.4.9/elphick/geomet/config/flowsheet_example_simple.yaml +0 -0
  18. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/config/mc_config.yml +0 -0
  19. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/data/downloader.py +0 -0
  20. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/data/register.csv +0 -0
  21. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/datasets/__init__.py +0 -0
  22. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/datasets/datasets.py +0 -0
  23. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/datasets/downloader.py +0 -0
  24. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/datasets/register.csv +0 -0
  25. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/datasets/sample_data.py +0 -0
  26. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/extras.py +0 -0
  27. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/flowsheet/__init__.py +0 -0
  28. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/flowsheet/loader.py +0 -0
  29. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/io.py +0 -0
  30. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/plot.py +0 -0
  31. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/profile.py +0 -0
  32. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/sample.py +0 -0
  33. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/__init__.py +0 -0
  34. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/amenability.py +0 -0
  35. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/block_model_converter.py +0 -0
  36. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/components.py +0 -0
  37. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/data.py +0 -0
  38. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/interp.py +0 -0
  39. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/layout.py +0 -0
  40. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/moisture.py +0 -0
  41. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/parallel.py +0 -0
  42. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/sampling.py +0 -0
  43. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/size.py +0 -0
  44. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/timer.py +0 -0
  45. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/utils/viz.py +0 -0
  46. {geometallurgy-0.4.8 → geometallurgy-0.4.9}/elphick/geomet/validate.py.hide +0 -0
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.1
2
2
  Name: geometallurgy
3
- Version: 0.4.8
3
+ Version: 0.4.9
4
4
  Summary: Tools for the geometallurgist
5
5
  Home-page: https://github.com/elphick/geometallurgy
6
6
  Author: Greg
@@ -0,0 +1,32 @@
1
+ FLOWSHEET:
2
+ flowsheet:
3
+ name: Flowsheet
4
+ streams: # graph edges
5
+ Feed:
6
+ name: Feed
7
+ node_in: feed
8
+ node_out: screen
9
+ Coarse:
10
+ name: Coarse
11
+ node_in: screen
12
+ node_out: lump
13
+ Fine:
14
+ name: Fine
15
+ node_in: screen
16
+ node_out: fines
17
+ operations: # graph nodes
18
+ feed:
19
+ name: feed
20
+ screen:
21
+ name: screen
22
+ type: PartitionOperation
23
+ partition:
24
+ module: elphick.geomet.utils.partition
25
+ function: napier_munn_size_1mm
26
+ args: null # e.g. d50, ep if not defined in the (partial) function
27
+ output_stream: Lump
28
+ complement_stream: Fines
29
+ lump:
30
+ name: lump
31
+ fines:
32
+ name: fines
@@ -19,14 +19,14 @@ from plotly.subplots import make_subplots
19
19
  from elphick.geomet import Sample
20
20
  from elphick.geomet.base import MC
21
21
  from elphick.geomet.config.config_read import get_column_config
22
- from elphick.geomet.flowsheet.operation import NodeType, OP
22
+ from elphick.geomet.flowsheet.operation import NodeType, OP, PartitionOperation, Operation
23
23
  from elphick.geomet.plot import parallel_plot, comparison_plot
24
24
  from elphick.geomet.utils.layout import digraph_linear_layout
25
25
  from elphick.geomet.flowsheet.loader import streams_from_dataframe
26
26
  from elphick.geomet.utils.sampling import random_int
27
27
 
28
- if TYPE_CHECKING:
29
- from elphick.geomet.flowsheet.stream import Stream
28
+ # if TYPE_CHECKING:
29
+ from elphick.geomet.flowsheet.stream import Stream
30
30
 
31
31
  # generic type variable, used for type hinting that play nicely with subclasses
32
32
  FS = TypeVar('FS', bound='Flowsheet')
@@ -112,7 +112,7 @@ class Flowsheet:
112
112
  return cls().from_objects(objects=streams, name=name)
113
113
 
114
114
  @classmethod
115
- def from_dict(cls, config: dict) -> FS:
115
+ def from_dict_old(cls, config: dict) -> FS:
116
116
  """Create a flowsheet from a dictionary
117
117
 
118
118
  Args:
@@ -139,7 +139,17 @@ class Flowsheet:
139
139
  graph.add_edges_from(bunch_of_edges)
140
140
  operation_objects: dict = {}
141
141
  for node in graph.nodes:
142
- operation_objects[node] = Operation(name=node)
142
+ # create the correct type of node object
143
+ if node in flowsheet_config['operations']:
144
+ operation_type = flowsheet_config['operations'][node].get('type', 'Operation')
145
+ if operation_type == 'PartitionOperation':
146
+ # get the output stream names from the graph
147
+ output_stream_names = [d['name'] for u, v, d in graph.out_edges(node, data=True)]
148
+ node_config = flowsheet_config['operations'][node]
149
+ node_config['output_stream_names'] = output_stream_names
150
+ operation_objects[node] = PartitionOperation.from_dict(node_config)
151
+ else:
152
+ operation_objects[node] = Operation.from_dict(flowsheet_config['operations'][node])
143
153
  nx.set_node_attributes(graph, operation_objects, 'mc')
144
154
 
145
155
  graph = nx.convert_node_labels_to_integers(graph)
@@ -149,6 +159,26 @@ class Flowsheet:
149
159
 
150
160
  return obj
151
161
 
162
+ @classmethod
163
+ def from_dict(cls, config: dict) -> FS:
164
+ flowsheet = cls()
165
+
166
+ # Process streams
167
+ for stream_name, stream_data in config['FLOWSHEET']['streams'].items():
168
+ stream = Stream.from_dict(stream_data)
169
+ flowsheet.add_stream(stream)
170
+
171
+ # Process operations
172
+ for operation_name, operation_data in config['FLOWSHEET']['operations'].items():
173
+ operation_type = operation_data.get('type', 'Operation')
174
+ if operation_type == 'PartitionOperation':
175
+ operation = PartitionOperation.from_dict(operation_data)
176
+ else:
177
+ operation = Operation.from_dict(operation_data)
178
+ flowsheet.add_operation(operation)
179
+
180
+ return flowsheet
181
+
152
182
  @classmethod
153
183
  def from_yaml(cls, file_path: Path) -> FS:
154
184
  """Create a flowsheet from yaml
@@ -162,7 +192,7 @@ class Flowsheet:
162
192
  with open(file_path, 'r') as file:
163
193
  config = yaml.safe_load(file)
164
194
 
165
- return cls.from_dict(config)
195
+ return cls.from_dict_old(config)
166
196
 
167
197
  @classmethod
168
198
  def from_json(cls, file_path: Path) -> FS:
@@ -179,6 +209,14 @@ class Flowsheet:
179
209
 
180
210
  return cls.from_dict(config)
181
211
 
212
+ def add_stream(self, stream: 'Stream'):
213
+ """Add a stream to the flowsheet."""
214
+ self.graph.add_edge(stream.nodes[0], stream.nodes[1], mc=stream, name=stream.name)
215
+
216
+ def add_operation(self, operation: 'Operation'):
217
+ """Add an operation to the flowsheet."""
218
+ self.graph.add_node(operation.name, mc=operation)
219
+
182
220
  def copy_without_stream_data(self):
183
221
  """Copy without stream data"""
184
222
  new_flowsheet = Flowsheet(name=self.name)
@@ -218,13 +256,24 @@ class Flowsheet:
218
256
  edge_data['mc'].name = edge_data['name']
219
257
 
220
258
  if self.graph.nodes[node]['mc'].has_empty_output:
221
- mc: MC = self.graph.nodes[node]['mc'].solve()
222
- # copy the solved object to the empty output edges
223
- for successor in self.graph.successors(node):
224
- edge_data = self.graph.get_edge_data(node, successor)
225
- if edge_data and edge_data['mc'] is None:
226
- edge_data['mc'] = mc
227
- edge_data['mc'].name = edge_data['name']
259
+ # There are two cases to be managed, 1. a single output missing,
260
+ # 2. a partition operation that returns two outputs
261
+ if isinstance(self.graph.nodes[node]['mc'], PartitionOperation):
262
+ mc1, mc2 = self.graph.nodes[node]['mc'].solve()
263
+ # copy the solved object to the empty output edges
264
+ for successor in self.graph.successors(node):
265
+ edge_data = self.graph.get_edge_data(node, successor)
266
+ if edge_data and edge_data['mc'] is None:
267
+ edge_data['mc'] = mc1 if edge_data['name'] == 'preferred' else mc2
268
+ edge_data['mc'].name = edge_data['name']
269
+ else:
270
+ mc: MC = self.graph.nodes[node]['mc'].solve()
271
+ # copy the solved object to the empty output edges
272
+ for successor in self.graph.successors(node):
273
+ edge_data = self.graph.get_edge_data(node, successor)
274
+ if edge_data and edge_data['mc'] is None:
275
+ edge_data['mc'] = mc
276
+ edge_data['mc'].name = edge_data['name']
228
277
 
229
278
  missing_count: int = sum([1 for u, v, d in self.graph.edges(data=True) if d['mc'] is None])
230
279
 
@@ -1037,4 +1086,3 @@ class Flowsheet:
1037
1086
  mc: MC = self.get_edge_by_name(stream)
1038
1087
  mc.set_nodes([random_int(), random_int()])
1039
1088
  self._update_graph(mc)
1040
-
@@ -6,7 +6,11 @@ from typing import Optional, TypeVar
6
6
  import numpy as np
7
7
  import pandas as pd
8
8
 
9
+ from elphick.geomet import IntervalSample
9
10
  from elphick.geomet.base import MC
11
+ from elphick.geomet.flowsheet.stream import Stream
12
+ from elphick.geomet.utils.pandas import MeanIntervalIndex
13
+ from elphick.geomet.utils.partition import load_partition_function
10
14
 
11
15
  # generic type variable, used for type hinting that play nicely with subclasses
12
16
  OP = TypeVar('OP', bound='Operation')
@@ -180,6 +184,12 @@ class Operation:
180
184
  else:
181
185
  return candidates[0]
182
186
 
187
+ @classmethod
188
+ def from_dict(cls, config: dict) -> 'Operation':
189
+ name = config.get('name')
190
+
191
+ return cls(name=name)
192
+
183
193
 
184
194
  class Input(Operation):
185
195
  def __init__(self, name):
@@ -196,8 +206,41 @@ class Passthrough(Operation):
196
206
  super().__init__(name)
197
207
 
198
208
 
199
- class UnitOperation(Operation):
200
- def __init__(self, name, num_inputs, num_outputs):
209
+ class PartitionOperation(Operation):
210
+ """An operation that partitions the input stream into multiple output streams based on a partition function
211
+
212
+ The partition input is the mean of the fractions or the geomean if the fractions are in the size dimension
213
+ The partition function is typically a partial function so that the partition is defined for all arguments
214
+ other than the input mean fraction values in one or two dimensions. The argument names must match the
215
+ index names in the IntervalSample.
216
+
217
+ """
218
+
219
+ def __init__(self, name, partition=None):
201
220
  super().__init__(name)
202
- self.num_inputs = num_inputs
203
- self.num_outputs = num_outputs
221
+ self.partition = partition
222
+ self.partition_function = None
223
+ if self.partition and 'module' in self.partition and 'function' in self.partition:
224
+ self.partition_function = load_partition_function(self.partition['module'], self.partition['function'])
225
+
226
+ def solve(self) -> [MC, MC]:
227
+ if self.partition_function:
228
+ self.apply_partition()
229
+ # update the balance related attributes
230
+ self.check_balance()
231
+ return self.outputs
232
+
233
+ def apply_partition(self):
234
+ if len(self.inputs) != 1:
235
+ raise ValueError("PartitionOperation must have exactly one input")
236
+ for input_sample in self.inputs:
237
+ input_sample: IntervalSample
238
+ if input_sample is not None:
239
+ output, complement = input_sample.split_by_partition(self.partition_function)
240
+ self.outputs = [output, complement]
241
+
242
+ @classmethod
243
+ def from_dict(cls, config: dict) -> 'PartitionOperation':
244
+ name = config.get('name')
245
+ partition = config.get('partition')
246
+ return cls(name=name, partition=partition)
@@ -36,3 +36,10 @@ class Stream(MassComposition):
36
36
  # stream = cls(**filtered_kwargs)
37
37
  # stream.__class__ = type(obj.__class__.__name__, (obj.__class__, cls), {})
38
38
  # return stream
39
+
40
+ @classmethod
41
+ def from_dict(cls, config: dict) -> 'Stream':
42
+ name = config.get('name')
43
+ node_in = config.get('node_in')
44
+ node_out = config.get('node_out')
45
+ return cls(name=name).set_nodes([node_in, node_out])
@@ -1,4 +1,6 @@
1
1
  from __future__ import annotations
2
+
3
+ import functools
2
4
  from pathlib import Path
3
5
  from typing import Optional, Literal, Callable, Union, Iterable, TYPE_CHECKING
4
6
 
@@ -104,31 +106,45 @@ class IntervalSample(MassComposition):
104
106
  K = \\frac{{m_{preferred}}}{{m_{feed}}}
105
107
 
106
108
  :param partition_definition: A function that takes a data frame and returns a boolean series with a
107
- range [0, 1].
109
+ range [0, 1]. A 1D function must have an argument that matches the dimension of the interval index.
110
+ A 2D function must have two arguments that match the dimensions of the interval index.
108
111
  :param name_1: The name of the first sample.
109
112
  :param name_2: The name of the second sample.
110
113
  :return: A tuple of two IntervalSamples.
111
114
  """
112
115
  if not isinstance(partition_definition, Callable):
113
116
  raise TypeError("The definition is not a callable function")
114
- if 'dim' not in partition_definition.keywords.keys():
115
- raise NotImplementedError("The callable function passed does not have a dim")
116
-
117
- dim = partition_definition.keywords['dim']
118
- partition_definition.keywords.pop('dim')
119
117
 
120
- # get the mean of the intervals - the geomean if the interval is called size
121
- index = self.mass_data.index.get_level_values(dim)
122
- # check the index is an interval index
123
- if not isinstance(index, pd.IntervalIndex):
124
- raise ValueError(f"The index is not an IntervalIndex. The index is {type(index)}")
125
- index = MeanIntervalIndex(index)
126
- x = index.mean
118
+ # Check that the partition definition has the correct number of arguments and that the names match
119
+ if isinstance(self.mass_data.index, pd.MultiIndex):
120
+ interval_levels = [level for level in self.mass_data.index.levels if isinstance(level, pd.IntervalIndex)]
121
+ else:
122
+ interval_levels = [self.mass_data.index] if isinstance(self.mass_data.index, pd.IntervalIndex) else []
123
+
124
+ # Get the function from the partial object if necessary
125
+ partition_func = partition_definition.func if isinstance(partition_definition,
126
+ functools.partial) else partition_definition
127
+
128
+ # Check that the required argument names are present in the IntervalIndex levels
129
+ required_args = partition_func.__code__.co_varnames[:len(interval_levels)]
130
+ for arg, level in zip(required_args, interval_levels):
131
+ if arg != level.name:
132
+ raise ValueError(f"The partition definition argument name does not match the index name. "
133
+ f"Expected {level.name}, found {arg}")
134
+
135
+ fraction_means: dict = {}
136
+ # iterate the Index or MultiIndex
137
+ if isinstance(self.mass_data.index, pd.MultiIndex):
138
+ for idx in self.mass_data.index.levels[0]:
139
+ # get the mean of the fractions, by converting to a MeanIntervalIndex
140
+ fraction_means[idx] = MeanIntervalIndex(self.mass_data.index.get_loc_level(idx)).mean
141
+ else:
142
+ fraction_means[self.mass_data.index.name] = MeanIntervalIndex(self.mass_data.index).mean
127
143
 
128
144
  self.to_stream()
129
- self: Stream
145
+ self: 'Stream'
130
146
 
131
- pn: pd.Series = pd.Series(partition_definition(x), name='K', index=index)
147
+ pn: pd.Series = pd.Series(partition_definition(**fraction_means), name='K', index=self._mass_data.index)
132
148
  sample_1 = self.create_congruent_object(name=name_1).to_stream()
133
149
  sample_1.mass_data = self.mass_data.copy().multiply(pn, axis=0)
134
150
  sample_1.set_nodes([self.nodes[1], random_int()])
@@ -228,9 +228,9 @@ def calculate_partition(df_feed: pd.DataFrame,
228
228
 
229
229
  res: pd.DataFrame = df_preferred[[col_mass_dry]].div(df_feed[[col_mass_dry]]).rename(columns={col_mass_dry: 'K'})
230
230
  if df_preferred.index.name.lower() == 'size':
231
- res.insert(loc=0, column='da', value=mean_size(res.index))
231
+ res.insert(loc=0, column='size', value=mean_size(res.index))
232
232
  else:
233
- res.insert(loc=0, column='da', value=res.index.mid)
233
+ res.insert(loc=0, column=df_preferred.index.name.lower(), value=res.index.mid)
234
234
  return res
235
235
 
236
236
 
@@ -1,3 +1,6 @@
1
+ import importlib
2
+ from functools import partial
3
+
1
4
  import numpy as np
2
5
  import pandas as pd
3
6
 
@@ -33,6 +36,21 @@ def napier_munn(x: np.ndarray, d50: float, ep: float) -> np.ndarray:
33
36
  return pn
34
37
 
35
38
 
39
+ def napier_munn_size(size: np.ndarray, d50: float, ep: float) -> np.ndarray:
40
+ return napier_munn(size, d50, ep)
41
+
42
+
43
+ def napier_munn_density(density: np.ndarray, d50: float, ep: float) -> np.ndarray:
44
+ return napier_munn(density, d50, ep)
45
+
46
+
47
+ napier_munn_size_1mm = partial(napier_munn_size, d50=1.0, ep=0.1)
48
+
49
+
50
+ def load_partition_function(module_name, function_name):
51
+ module = importlib.import_module(module_name)
52
+ return getattr(module, function_name)
53
+
36
54
  # if __name__ == '__main__':
37
55
  # da = np.arange(0, 10)
38
56
  # PN = perfect(da, d50=6.3)
@@ -1,7 +1,7 @@
1
1
  [tool.poetry]
2
2
  name = "geometallurgy"
3
3
  packages = [{ include = "elphick/geomet" }]
4
- version = "0.4.8"
4
+ version = "0.4.9"
5
5
  description = "Tools for the geometallurgist"
6
6
  authors = ["Greg <11791585+elphick@users.noreply.github.com>"]
7
7
  repository = "https://github.com/elphick/geometallurgy"
File without changes
File without changes