aiphoria 0.0.1__py3-none-any.whl → 0.8.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (42)
  1. aiphoria/__init__.py +59 -0
  2. aiphoria/core/__init__.py +55 -0
  3. aiphoria/core/builder.py +305 -0
  4. aiphoria/core/datachecker.py +1808 -0
  5. aiphoria/core/dataprovider.py +806 -0
  6. aiphoria/core/datastructures.py +1686 -0
  7. aiphoria/core/datavisualizer.py +431 -0
  8. aiphoria/core/datavisualizer_data/LICENSE +21 -0
  9. aiphoria/core/datavisualizer_data/datavisualizer_plotly.html +5561 -0
  10. aiphoria/core/datavisualizer_data/pako.min.js +2 -0
  11. aiphoria/core/datavisualizer_data/plotly-3.0.0.min.js +3879 -0
  12. aiphoria/core/flowmodifiersolver.py +1754 -0
  13. aiphoria/core/flowsolver.py +1472 -0
  14. aiphoria/core/logger.py +113 -0
  15. aiphoria/core/network_graph.py +136 -0
  16. aiphoria/core/network_graph_data/ECHARTS_LICENSE +202 -0
  17. aiphoria/core/network_graph_data/echarts_min.js +45 -0
  18. aiphoria/core/network_graph_data/network_graph.html +76 -0
  19. aiphoria/core/network_graph_data/network_graph.js +1391 -0
  20. aiphoria/core/parameters.py +269 -0
  21. aiphoria/core/types.py +20 -0
  22. aiphoria/core/utils.py +362 -0
  23. aiphoria/core/visualizer_parameters.py +7 -0
  24. aiphoria/data/example_scenario.xlsx +0 -0
  25. aiphoria/example.py +66 -0
  26. aiphoria/lib/docs/dynamic_stock.py +124 -0
  27. aiphoria/lib/odym/modules/ODYM_Classes.py +362 -0
  28. aiphoria/lib/odym/modules/ODYM_Functions.py +1299 -0
  29. aiphoria/lib/odym/modules/__init__.py +1 -0
  30. aiphoria/lib/odym/modules/dynamic_stock_model.py +808 -0
  31. aiphoria/lib/odym/modules/test/DSM_test_known_results.py +762 -0
  32. aiphoria/lib/odym/modules/test/ODYM_Classes_test_known_results.py +107 -0
  33. aiphoria/lib/odym/modules/test/ODYM_Functions_test_known_results.py +136 -0
  34. aiphoria/lib/odym/modules/test/__init__.py +2 -0
  35. aiphoria/runner.py +678 -0
  36. aiphoria-0.8.0.dist-info/METADATA +119 -0
  37. aiphoria-0.8.0.dist-info/RECORD +40 -0
  38. {aiphoria-0.0.1.dist-info → aiphoria-0.8.0.dist-info}/WHEEL +1 -1
  39. aiphoria-0.8.0.dist-info/licenses/LICENSE +21 -0
  40. aiphoria-0.0.1.dist-info/METADATA +0 -5
  41. aiphoria-0.0.1.dist-info/RECORD +0 -5
  42. {aiphoria-0.0.1.dist-info → aiphoria-0.8.0.dist-info}/top_level.txt +0 -0
aiphoria/example.py ADDED
@@ -0,0 +1,66 @@
+ import os
+ import sys
+ import shutil
+ from importlib.resources import files
+ from typing import Union
+ from .runner import run_scenarios
+
+
+ def run_example(path_to_output_dir: Union[str, None] = None,
+                 remove_existing_output_dir: bool = False):
+     """
+     Run the example scenario and place the output in path_to_output_dir.
+     If no path_to_output_dir is provided, results are placed in the
+     user's home directory inside the directory "aiphoria_example".
+     An existing directory is not deleted and an error is shown if the
+     directory already exists.
+
+     Deleting an existing directory can be enabled by setting the
+     parameter remove_existing_output_dir to True.
+
+
+     Examples:
+         run_example()
+         run_example("C:\\results\\aiphoria_example")
+         run_example("~/results/aiphoria_example")
+
+     NOTE:
+     - Path to output directory MUST BE in absolute format
+       (e.g. "C:\\results\\aiphoria_example" on Windows)
+     - ~ is expanded to an absolute path automatically
+
+     :param path_to_output_dir: Absolute path to output directory
+     :param remove_existing_output_dir: If True then removes existing output directory (default = False)
+     """
+     output_dir_name = "aiphoria_example"
+     example_scenario_path = "data/example_scenario.xlsx"
+
+     if path_to_output_dir is None:
+         # Place results to user's home directory
+         path_to_output_dir = os.path.expanduser(os.path.join("~", output_dir_name))
+     path_to_output_dir = os.path.realpath(os.path.expanduser(path_to_output_dir))
+
+     if not os.path.isabs(path_to_output_dir):
+         sys.stderr.write("ERROR: Path to output directory is not in absolute format\n")
+         return False
+
+     if not remove_existing_output_dir and os.path.isdir(path_to_output_dir):
+         sys.stderr.write("Directory {} already exists\n".format(path_to_output_dir))
+         return False
+
+     # Target directory doesn't exist or is okay to remove
+     sys.stdout.write("Using output path = {}\n".format(path_to_output_dir))
+     shutil.rmtree(path_to_output_dir, ignore_errors=True)
+     os.makedirs(path_to_output_dir, exist_ok=True)
+
+     path_to_example_scenario = files("aiphoria").joinpath(example_scenario_path)
+     if not os.path.isfile(path_to_example_scenario):
+         sys.stderr.write("ERROR: Example scenario file {} not found, packaging issue\n".format(path_to_example_scenario))
+         return False
+
+     current_cwd = os.getcwd()
+     os.chdir(path_to_output_dir)
+     run_scenarios(path_to_example_scenario,
+                   path_to_output_dir=path_to_output_dir,
+                   remove_existing_output_dir=remove_existing_output_dir)
+     os.chdir(current_cwd)
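The added example.py exposes a single entry point, run_example, which resolves an absolute output directory and then runs the bundled data/example_scenario.xlsx through runner.run_scenarios. A minimal usage sketch; the output path below is illustrative and not part of the package:

# Hedged usage sketch for the packaged example (output path is illustrative).
from aiphoria.example import run_example

# Default: results go to ~/aiphoria_example; fails if the directory already exists.
run_example()

# Explicit absolute output directory, overwriting the results of a previous run.
run_example("/tmp/aiphoria_example_run", remove_existing_output_dir=True)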
aiphoria/lib/docs/dynamic_stock.py ADDED
@@ -0,0 +1,124 @@
+ # Load a local copy of the current ODYM branch:
+ import sys
+ import os
+ import numpy as np
+ import pandas as pd
+ import matplotlib.pyplot as plt
+ import pickle
+ import openpyxl
+ import pylab
+
+ # Specify path to dynamic stock model and to datafile, relative
+ MainPath = os.path.join('..', 'odym', 'modules')
+ DataPath = os.path.join('..', 'docs', 'files')
+ sys.path.insert(0, MainPath)
+
+ # add ODYM module directory to system path, absolute
+ sys.path.insert(0, os.path.join(os.getcwd(), '..', 'odym', 'modules'))
+ sys.path.insert(0, os.path.join(os.getcwd(), '..', 'docs', 'files'))
+
+ # Import ODYM files
+ import ODYM_Classes as msc  # import the ODYM class file
+ import ODYM_Functions as msf  # import the ODYM function file
+ import dynamic_stock_model as dsm  # import the dynamic stock model library
+
+ # Read available years from the file
+ lifetimes_filename = 'testi_product_data.xlsx'
+ lifetimes_datasheet_name = 'Average_Lifetime'
+
+ regions = ['Argentina', 'Brazil', 'Canada',
+            'Denmark', 'Ethiopia', 'France',
+            'Greece', 'Hungary', 'Indonesia']
+
+ # Minimum and maximum year, inclusive
+ use_automatic_year_detection = True
+ min_year = 1960
+ max_year = 2009
+
+ years = []
+ for year in range(min_year, max_year + 1):
+     years.append(year)
+
+ # Create dictionary of model classifications
+ ModelClassification = {}
+
+ # Classification for time labelled 'Time' must always be present, with Items containing a list of ordered integers
+ # representing years, months, or other discrete time intervals
+ ModelClassification['Time'] = msc.Classification(Name='Time', Dimension='Time', ID=1, Items=years)
+
+ # Classification for cohort is used to track age-cohorts in the stock.
+ ModelClassification['Cohort'] = msc.Classification(Name='Age-cohort', Dimension='Time', ID=2, Items=years)
+
+ # Classification for elements labelled 'Element' must always be present, with Items containing a list of the
+ # symbols of the elements covered.
+ ModelClassification['Element'] = msc.Classification(Name='Elements', Dimension='Element', ID=3, Items=['Fe'])
+
+ # Classification for regions is chosen to include the regions that are in the scope of this analysis.
+ ModelClassification['Region'] = msc.Classification(Name='Regions', Dimension='Region', ID=4, Items=regions)
+
+ # Get model time start, end, and duration:
+ Model_Time_Start = int(min(ModelClassification['Time'].Items))
+ Model_Time_End = int(max(ModelClassification['Time'].Items))
+ Model_Duration = Model_Time_End - Model_Time_Start
+
+ IndexTable = pd.DataFrame(
+     {'Aspect': ['Time', 'Age-cohort', 'Element', 'Region'],  # 'Time' and 'Element' must be present!
+      'Description': ['Model aspect "time"', 'Model aspect "age-cohort"', 'Model aspect "Element"',
+                      'Model aspect "Region where flow occurs"'],
+      'Dimension': ['Time', 'Time', 'Element', 'Region'],  # 'Time' and 'Element' are also dimensions
+      'Classification': [ModelClassification[Aspect] for Aspect in ['Time', 'Cohort', 'Element', 'Region']],
+      # Unique one letter (upper or lower case) indices to be used later for calculations.
+      'IndexLetter': ['t', 'c', 'e', 'r']})
+
+ # Default indexing of IndexTable, other indices are produced on the fly
+ IndexTable.set_index('Aspect', inplace=True)
+ print(IndexTable)
+
+ # Initialize MFA system
+ Dyn_MFA_System = msc.MFAsystem(Name='StockAccumulationSystem',
+                                Geogr_Scope='9SelectedRegions',
+                                Unit='kt',
+                                ProcessList=[],
+                                FlowDict={},
+                                StockDict={},
+                                ParameterDict={},
+                                Time_Start=Model_Time_Start,
+                                Time_End=Model_Time_End,
+                                IndexTable=IndexTable,
+                                Elements=IndexTable.loc['Element'].Classification.Items)
+
+ # LIFETIMES
+ lifetimes_workbook = openpyxl.load_workbook(os.path.join(DataPath, lifetimes_filename), data_only=True)
+ lifetimes_datasheet = lifetimes_workbook[lifetimes_datasheet_name]
+
+ # Use 1-based column numbers (as the column numbers are shown in the Excel file)
+ lifetimes_col_region_name = 1
+ lifetimes_col_lifetime_value = 2
+
+ lifetimes_for_regions = []
+ lifetimes_rows = lifetimes_datasheet.iter_rows()
+ lifetime_line_number = 0
+ for row in lifetimes_rows:
+     lifetime_line_number = lifetime_line_number + 1
+     if lifetime_line_number == 1:
+         # Ignore first line in file
+         continue
+
+     region_name = row[lifetimes_col_region_name - 1].value
+     lifetime_value = row[lifetimes_col_lifetime_value - 1].value
+
+     # Ignore lines that do not contain proper data
+     if (region_name is None) or (lifetime_value is None):
+         continue
+
+     lifetimes_for_regions.append([region_name, lifetime_value])
+
+ for entry in lifetimes_for_regions:
+     print(entry)
+
+ # Get lifetimes for the regions
+ # num_regions = 10
+ # for row in range(1, num_regions):
+ #     lifetimes_for_regions.append(lifetimes_datasheet.cell(row + 1, lifetimes_col_lifetime_value).value)
+ #
+ # print(lifetimes_for_regions)
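The lifetime-reading loop above skips the header row and drops rows with missing cells before collecting [region, lifetime] pairs. A self-contained sketch of the same openpyxl pattern against an in-memory workbook; the sample lifetimes are invented for illustration and are not from the package data:

# Self-contained sketch of the read-and-filter pattern on an in-memory workbook.
import openpyxl

wb = openpyxl.Workbook()
ws = wb.active
ws.append(["Region", "Average lifetime [yr]"])  # header row, skipped below
ws.append(["Argentina", 35])
ws.append(["Brazil", None])                     # incomplete row, filtered out
ws.append(["Canada", 40])

lifetimes_for_regions = []
for row_number, row in enumerate(ws.iter_rows(), start=1):
    if row_number == 1:
        continue  # skip the header row
    region_name, lifetime_value = row[0].value, row[1].value
    if region_name is None or lifetime_value is None:
        continue  # skip rows without proper data
    lifetimes_for_regions.append([region_name, lifetime_value])

print(lifetimes_for_regions)  # [['Argentina', 35], ['Canada', 40]]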
aiphoria/lib/odym/modules/ODYM_Classes.py ADDED
@@ -0,0 +1,362 @@
+ # -*- coding: utf-8 -*-
+ """
+ Created on Thu Mar 2 17:29:41 2017
+
+ @author: spauliuk
+ """
+
+ """
+ File ODYM_Classes
+ Check https://github.com/IndEcol/ODYM for latest version.
+
+ Contains class definitions for ODYM
+
+ standard abbreviation: msc (material-system-classes)
+
+ dependencies:
+     numpy >= 1.9
+     scipy >= 0.14
+
+ Repository for this class, documentation, and tutorials: https://github.com/IndEcol/ODYM
+
+ """
+ import os
+ import logging
+ import numpy as np
+ import pandas as pd
+ import xlrd, xlwt
+
+ ####################################
+ #     Define classes for ODYM      #
+ ####################################
+
+ def __version__():
+     return str('1.0')  # version number of this file
+
+
+ class Obj(object):
+     """
+     Class with the object definition for a data object (system, process, flow, ...) in ODYM
+     """
+     def __init__(self, Name=None, ID=None, UUID=None):
+         """ Basic initialisation of Obj."""
+         self.Name = Name  # object name
+         self.ID = ID  # object ID
+         self.UUID = UUID  # object UUID
+         self.Aspects = {'Time': 'Model time', 'Cohort': 'Age-cohort', 'OriginProcess': 'Process where flow originates', 'DestinationProcess': 'Destination process of flow', 'OriginRegion': 'Region where flow originates from', 'DestinationRegion': 'Region where flow is bound to', 'Good': 'Process, good, or commodity', 'Material': 'Material: ore, alloy, scrap type, ...', 'Element': 'Chemical element'}  # Define the aspects of the system variables
+         self.Dimensions = {'Time': 'Time', 'Process': 'Process', 'Region': 'Region', 'Good': 'Process, good, or commodity', 'Material': 'Material: ore, alloy, scrap type, ...', 'Element': 'Chemical element'}  # Define the dimensions of the system variables
+
+
+
+ class Classification(Obj):
+
+     """
+     Class for aspect classification
+     """
+
+     def __init__(self, Name=None, ID=None, UUID=None, Dimension=None, Items=None, IDs=None, AdditionalProporties={}):
+         """ Basic initialisation of an item list for alloys, materials, etc."""
+         Obj.__init__(self, Name=Name, ID=ID, UUID=UUID)  # Hand over parameters to parent class init
+         self.Dimension = Dimension  # Dimension of classification: Time, Region, process, material, goods, ...
+         self.Items = Items  # list with names of items
+         self.IDs = IDs  # list with IDs of items
+         self.AdditionalProps = AdditionalProporties  # Like population for regions, element composition for alloys, ...
+
+
+ class MFAsystem(Obj):
+
+     """
+     Class with the definition and methods for a system in ODYM
+     """
+
+     def __init__(self, Name, Time_Start, Time_End, Geogr_Scope, Unit, IndexTable, Elements, ProcessList=[], FlowDict={}, StockDict={}, ParameterDict={}, Graphical=None, ID=None, UUID=None):
+         """ Initialisation of MFAsystem."""
+         Obj.__init__(self, Name=Name, ID=ID, UUID=UUID)  # Hand over parameters to parent class init
+
+         self.Time_Start = Time_Start  # start time of model (year: int)
+         self.Time_End = Time_End  # end time of model (year: int)
+         self.Geogr_Scope = Geogr_Scope  # geographical boundary (string)
+         self.Elements = Elements  # list of chemical elements considered, indicated by atomic numbers
+         self.Unit = Unit  # flow and stock base unit, without 'per yr'
+
+         self.ProcessList = ProcessList  # list of processes, processes are referred to by their number
+         self.FlowDict = FlowDict  # Dictionary of flows, are indexed by tuples of process they are attached to (p1,p2)
+         self.StockDict = StockDict  # Dictionary of stocks, are indexed by process they are located at (p)
+         self.ParameterDict = ParameterDict  # Dictionary of parameters: lifetime, yield rates, etc.
+         self.IndexTable = IndexTable  # Dictionary of abbreviations for aspect-classification tuples
+
+         self.Graphical = Graphical  # Dictionary of graphical properties (size in pixel, background color, etc.)
+
+     @property
+     def Time_V(self):
+         """ Array of all model years"""
+         return np.arange(self.Time_Start, self.Time_End + 1, 1)
+
+     @property
+     def Time_L(self):
+         """ List of all model years"""
+         return np.arange(self.Time_Start, self.Time_End + 1, 1).tolist()
+
+     def IndexTableCheck(self):
+         """ Check whether chosen classifications fit to dimensions of index table."""
+         for indx in self.IndexTable.index:
+             if self.IndexTable.loc[indx]['Dimension'] != self.IndexTable.loc[indx]['Classification'].Dimension:
+                 raise ValueError('Dimension mismatch. Dimension of classification needs to fit to dimension of flow or parameter index. Found a mismatch for the following index: {foo}. Check your index table definition!'.format(foo=indx))
+         if 'Time' not in self.IndexTable.index:
+             raise ValueError(' "Time" aspect must be present in IndexTable. Please check your index table definition!')
+         if 'Element' not in self.IndexTable.index:
+             raise ValueError(' "Element" aspect must be present in IndexTable. Please check your index table definition!')
+         if len(self.IndexTable.loc['Element'].Classification.Items) == 0:
+             raise ValueError('Need at least one element in element list, please check your classification definition!')
+         if len(self.IndexTable.loc['Time'].Classification.Items) == 0:
+             raise ValueError('Need at least one element in Time list, please check your classification definition!')
+
+         return True
+
+     def Initialize_FlowValues(self):
+         """ This method will construct empty numpy arrays (zeros) for all flows where the value is None and where the indices are given."""
+         for key in self.FlowDict:
+             if self.FlowDict[key].Values is None:
+                 self.FlowDict[key].Values = np.zeros(tuple([len(self.IndexTable.set_index('IndexLetter').loc[x]['Classification'].Items) for x in self.FlowDict[key].Indices.split(',')]))
+         # Raw code, for development
+         # Indices = 't,Ro,a,e'
+         # IndList = Indices.split(',')
+         # Dimensions = [len(IndexTable.ix[x]['Classification'].Items) for x in IndList]
+         # Values = np.zeros(tuple(Dimensions))
+
+     def Initialize_StockValues(self):
+         """ This method will construct empty numpy arrays (zeros) for all stocks where the value is None and where the indices are given."""
+         for key in self.StockDict:
+             if self.StockDict[key].Values is None:
+                 self.StockDict[key].Values = np.zeros(tuple([len(self.IndexTable.set_index('IndexLetter').loc[x]['Classification'].Items) for x in self.StockDict[key].Indices.split(',')]))
+
+     def Initialize_ParameterValues(self):
+         """ This method will construct empty numpy arrays (zeros) for all parameters where the value is None and where the indices are given."""
+         for key in self.ParameterDict:
+             if self.ParameterDict[key].Values is None:
+                 self.ParameterDict[key].Values = np.zeros(tuple([len(self.IndexTable.set_index('IndexLetter').loc[x]['Classification'].Items) for x in self.ParameterDict[key].Indices.split(',')]))
+
+     def Consistency_Check(self):
+         """ Method that checks a readily defined system for consistency of dimensions, Value setting, etc. See detailed comments."""
+
+         # 1) Check dimension consistency in index table:
+         A = self.IndexTableCheck()
+
+         # 2) Check whether all process indices that the flows refer to are in the process list:
+         for key in self.FlowDict:
+             if self.FlowDict[key].P_Start > len(self.ProcessList) - 1:
+                 raise ValueError('Start process of flow {foo} not present. Check your flow definition!'.format(foo=key))
+             if self.FlowDict[key].P_End > len(self.ProcessList) - 1:
+                 raise ValueError('End process of flow {foo} not present. Check your flow definition!'.format(foo=key))
+
+         # 3) Check whether all flow value arrays match the index structure:
+         for key in self.FlowDict:
+             if tuple([len(self.IndexTable.set_index('IndexLetter').loc[x]['Classification'].Items) for x in self.FlowDict[key].Indices.split(',')]) != self.FlowDict[key].Values.shape:
+                 raise ValueError('Dimension mismatch. Dimension of flow value array does not fit to flow indices for flow {foo}. Check your flow and flow value definition!'.format(foo=key))
+
+         return A, True, True
+
+     def Flow_Sum_By_Element(self, FlowKey):
+         """
+         Reduce flow values to a Time x Elements matrix and return as t x e array.
+         We take the indices of each flow, e.g., 't,O,D,G,m,e', strip off the ',' to get 'tODGme',
+         add a '->' and the index letters for time and element (here, t and e),
+         and call the Einstein sum function np.einsum with the string 'tODGme->te',
+         and apply it to the flow values.
+         """
+         return np.einsum(self.FlowDict[FlowKey].Indices.replace(',', '') + '->' + self.IndexTable.loc['Time'].IndexLetter + self.IndexTable.loc['Element'].IndexLetter, self.FlowDict[FlowKey].Values)
+
+     def Stock_Sum_By_Element(self, StockKey):
+         """
+         Reduce stock values to a Time x Elements matrix and return as t x e array.
+         We take the indices of each stock, e.g., 't,c,G,m,e', strip off the ',' to get 'tcGme',
+         add a '->' and the index letters for time and element (here, t and e),
+         and call the Einstein sum function np.einsum with the string 'tcGme->te',
+         and apply it to the stock values.
+         """
+         return np.einsum(self.StockDict[StockKey].Indices.replace(',', '') + '->' + self.IndexTable.loc['Time'].IndexLetter + self.IndexTable.loc['Element'].IndexLetter, self.StockDict[StockKey].Values)
+
+     def MassBalance(self, Element=None):
+         """
+         Determines mass balance of MFAsystem.
+         We take the indices of each flow, e.g., 't,O,D,G,m,e', strip off the ',' to get 'tODGme',
+         add a '->' and the index letters for time and element (here, t and e),
+         and call the Einstein sum function np.einsum with the string 'tODGme->te',
+         and apply it to the flow values.
+         The resulting t x e sum is subtracted from the process the flow leaves and added to the destination process.
+         """
+         Bal = np.zeros((len(self.Time_L), len(self.ProcessList), len(self.Elements)))  # Balance array: years x process x element:
+         # process position 0 is the balance for the system boundary, the other positions are for the processes,
+         # element position 0 is the balance for the entire mass, the others are for the balance of the individual elements
+
+         for key in self.FlowDict:  # Add all flows to mass balance
+             Bal[:, self.FlowDict[key].P_Start, :] -= self.Flow_Sum_By_Element(key)  # Flow leaving a process
+             Bal[:, self.FlowDict[key].P_End, :] += self.Flow_Sum_By_Element(key)  # Flow entering a process
+
+         for key in self.StockDict:  # Add all stock changes to the mass balance
+             if self.StockDict[key].Type == 1:
+                 Bal[:, self.StockDict[key].P_Res, :] -= self.Stock_Sum_By_Element(key)  # 1: net stock change or addition to stock
+             elif self.StockDict[key].Type == 2:
+                 Bal[:, self.StockDict[key].P_Res, :] += self.Stock_Sum_By_Element(key)  # 2: removal/release from stock
+
+         # add stock changes to process with number 0 ('system boundary, environment of system')
+         for key in self.StockDict:
+             if self.StockDict[key].Type == 1:
+                 Bal[:, 0, :] += self.Stock_Sum_By_Element(key)  # 1: net stock change or addition to stock
+             elif self.StockDict[key].Type == 2:
+                 Bal[:, 0, :] -= self.Stock_Sum_By_Element(key)  # 2: removal/release from stock
+
+         return Bal
+
+     def Check_If_All_Chem_Elements_Are_present(self, FlowKey, AllElementsIndex):
+         """
+         This method is applicable to systems where the chemical element list contains both 0 ('all' chemical elements) and individual elements.
+         It checks whether the sum of the system variable over the other elements equals the entry for element 0.
+         This means that the breakdown of the system variable into individual elements has the same mass as the total for all elements.
+         AllElementsIndex is the position of element 0 in the element list; typically, it is also 0.
+         """
+         txe = self.Flow_Sum_By_Element(FlowKey)
+         txe_0 = txe[:, AllElementsIndex]
+         txe_o = np.delete(txe, AllElementsIndex, axis=1).sum(axis=1)
+         if np.allclose(txe_0, txe_o):
+             Check = True
+         else:
+             Check = False
+         return Check, txe_0, txe_o  # Check flag, time series for element 'all', time series for all 'other' elements.
+
+     def SankeyExport(self, Year, Path, Element):  # Export data for given year in excel format for the D3.js Circular Sankey method
+         """ Exports MFAsystem to xls Template for the Circular Sankey method."""
+
+         TimeIndex = Year - self.Time_Start
+
+         myfont = xlwt.Font()
+         myfont.bold = True
+         mystyle = xlwt.XFStyle()
+         mystyle.font = myfont
+
+         Result_workbook = xlwt.Workbook(encoding='ascii')
+         Result_worksheet = Result_workbook.add_sheet('Nodes')
+         Result_worksheet.write(0, 0, label='Name', style=mystyle)
+         Result_worksheet.write(0, 1, label='Color', style=mystyle)
+         Result_worksheet.write(0, 2, label='Orientation', style=mystyle)
+         Result_worksheet.write(0, 3, label='Width', style=mystyle)
+         Result_worksheet.write(0, 4, label='Height', style=mystyle)
+         Result_worksheet.write(0, 5, label='x_position', style=mystyle)
+         Result_worksheet.write(0, 6, label='y_position', style=mystyle)
+
+         for m in range(0, len(self.ProcessList)):
+             if self.ProcessList[m].Graphical is None:
+                 raise ValueError('Graphical properties of process number {foo} are not set. No export to Sankey possible, as the position of the process on the canvas etc. is not specified.'.format(foo=m))
+             Result_worksheet.write(m + 1, 0, label=self.ProcessList[m].Graphical['Name'])
+             Result_worksheet.write(m + 1, 1, label=self.ProcessList[m].Graphical['Color'])
+             Result_worksheet.write(m + 1, 2, label=self.ProcessList[m].Graphical['Angle'])
+             Result_worksheet.write(m + 1, 3, label=self.ProcessList[m].Graphical['Width'])
+             Result_worksheet.write(m + 1, 4, label=self.ProcessList[m].Graphical['Height'])
+             Result_worksheet.write(m + 1, 5, label=self.ProcessList[m].Graphical['xPos'])
+             Result_worksheet.write(m + 1, 6, label=self.ProcessList[m].Graphical['yPos'])
+
+         Result_worksheet = Result_workbook.add_sheet('Flows')
+         Result_worksheet.write(0, 0, label='StartNode', style=mystyle)
+         Result_worksheet.write(0, 1, label='EndNode', style=mystyle)
+         Result_worksheet.write(0, 2, label='Value', style=mystyle)
+         Result_worksheet.write(0, 3, label='Color', style=mystyle)
+
+         for key in self.FlowDict:
+             Result_worksheet.write(m + 1, 0, label=self.FlowDict[key].P_Start)
+             Result_worksheet.write(m + 1, 1, label=self.FlowDict[key].P_End)
+             Result_worksheet.write(m + 1, 2, label=float(self.Flow_Sum_By_Element(key)[TimeIndex, Element]))
+             Result_worksheet.write(m + 1, 3, label=self.FlowDict[key].Color)
+
+         Result_workbook.save(Path + self.Name + '_' + str(TimeIndex) + '_' + str(Element) + '_Sankey.xls')
+
+
+ class Process(Obj):
+
+     """
+     Class with the definition and methods for a process in ODYM
+     """
+
+     def __init__(self, Name=None, ID=None, UUID=None, Bipartite=None, Graphical=None, Extensions=None, Parameters=None):
+         """ Basic initialisation of a process."""
+         Obj.__init__(self, Name=Name, ID=ID, UUID=UUID)  # Hand over parameters to parent class init
+         self.Bipartite = Bipartite  # For bipartite system graphs, a string with value 't' or 'd' for transformation and distribution process indicates which group the process belongs to.
+         self.Extensions = Extensions  # Dictionary of
+         self.Graphical = Graphical  # Dictionary of graphical properties: xPos = None, yPos = None, Orientation = None, Color=None, Width = None, Height=None,
+
+     def add_extension(self, Time=None, Name=None, Value=None, Unit=None, Uncert=None):  # Extension flows that are not part of the system-wide mass balance!
+         if self.Extensions is None:
+             self.Extensions = []
+         self.Extensions.append(Flow(P_Start=self.ID, P_End=None, Time=Time, Name=Name, Unit=Unit, Value=Value, Uncert=Uncert))
+
+     def add_parameter(self, Name=None):
+         if self.Parameters is None:
+             self.Parameters = []
+         self.Parameters.append(Parameter(Value=None))
+
+ class Flow(Obj):  # Flow needs to at least have dimension time x element
+
+     """
+     Class with the definition and methods for a flow in ODYM
+     """
+
+     def __init__(self, Name=None, ID=None, UUID=None, P_Start=None, P_End=None, Indices=None, Values=None, Uncert=None, Unit=None, Color=None):
+         """ Basic initialisation of a flow."""
+         Obj.__init__(self, Name=Name, ID=ID, UUID=UUID)  # Hand over parameters to parent class init
+         self.P_Start = P_Start  # id of start process of flow (id: int)
+         self.P_End = P_End  # id of end process of flow (id: int)
+         self.Indices = Indices  # String with indices as defined in IndexTable, separated by ',': 't,c,p,s,e'
+
+         self.Values = Values  # flow values, np.array, multidimensional, unit is system-wide unit
+         self.Uncert = Uncert  # uncertainty of value in %
+         self.Unit = Unit  # Unit string
+
+         self.Color = Color  # color as string 'R,G,B', where each of R, G, B has a value of 0...255
+
+
+
+ class Stock(Obj):  # Stock needs to at least have dimension time x element
+
+     """
+     Class with the definition and methods for a stock in ODYM
+     """
+
+     def __init__(self, Name=None, ID=None, UUID=None, P_Res=None, Indices=None, Type=None, Values=None, Uncert=None, Unit=None, Color=None):
+         """ Basic initialisation of a stock."""
+         Obj.__init__(self, Name=Name, ID=ID, UUID=UUID)  # Hand over parameters to parent class init
+         self.P_Res = P_Res  # id of process where stock resides (id: int)
+         self.Indices = Indices  # String with indices as defined in IndexTable, separated by ',': 't,c,p,s,e'
+         self.Type = Type  # Type is an int value, indicating: 0: stock, 1: (net) stock change or addition to stock, 2: removal from stock
+
+         self.Values = Values  # stock values, np.array, multidimensional, unit is system-wide unit
+         self.Uncert = Uncert  # uncertainty of value in %
+         self.Unit = Unit  # Unit string
+
+         self.Color = Color  # color as string 'R,G,B', where each of R, G, B has a value of 0...255
+
+
+ class Parameter(Obj):
+
+     """
+     Class with the definition and methods for parameters
+     """
+
+     def __init__(self, Name=None, ID=None, UUID=None, P_Res=None, MetaData=None, Indices=None, Values=None, Uncert=None, Unit=None):
+         """ Basic initialisation of a parameter."""
+         Obj.__init__(self, Name=Name, ID=ID, UUID=UUID)  # Hand over parameters to parent class init
+         self.P_Res = P_Res  # id of process to which parameter is assigned (id: int)
+         self.Indices = Indices  # String with indices as defined in IndexTable, separated by ',': 't,c,p,s,e'
+         self.MetaData = MetaData  # Dictionary with additional metadata
+
+         self.Values = Values  # parameter values, np.array, multidimensional, unit is Unit
+         self.Uncert = Uncert  # uncertainty of value in %
+         self.Unit = Unit  # Unit of parameter values
+
+
+
+
+
+
+
+ #
+ #
+ #
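Flow_Sum_By_Element and Stock_Sum_By_Element build their np.einsum subscript string from the comma-separated Indices of the flow or stock, exactly as the docstrings describe; MassBalance then books the resulting time x element sums against the start and destination processes. A numpy-only sketch of that reduction, with an arbitrary 't,r,e' index string and array shape chosen purely for illustration:

# Numpy-only sketch of the einsum reduction used by Flow_Sum_By_Element:
# collapse a flow indexed 't,r,e' (time x region x element) to time x element.
import numpy as np

indices = 't,r,e'
time_letter, element_letter = 't', 'e'

values = np.arange(2 * 3 * 2).reshape(2, 3, 2)           # shape (t, r, e)
subscript = indices.replace(',', '') + '->' + time_letter + element_letter
t_x_e = np.einsum(subscript, values)                     # 'tre->te', sums over r

print(subscript)    # tre->te
print(t_x_e.shape)  # (2, 2)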