aiphoria 0.0.1__py3-none-any.whl → 0.8.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aiphoria/__init__.py +59 -0
- aiphoria/core/__init__.py +55 -0
- aiphoria/core/builder.py +305 -0
- aiphoria/core/datachecker.py +1808 -0
- aiphoria/core/dataprovider.py +806 -0
- aiphoria/core/datastructures.py +1686 -0
- aiphoria/core/datavisualizer.py +431 -0
- aiphoria/core/datavisualizer_data/LICENSE +21 -0
- aiphoria/core/datavisualizer_data/datavisualizer_plotly.html +5561 -0
- aiphoria/core/datavisualizer_data/pako.min.js +2 -0
- aiphoria/core/datavisualizer_data/plotly-3.0.0.min.js +3879 -0
- aiphoria/core/flowmodifiersolver.py +1754 -0
- aiphoria/core/flowsolver.py +1472 -0
- aiphoria/core/logger.py +113 -0
- aiphoria/core/network_graph.py +136 -0
- aiphoria/core/network_graph_data/ECHARTS_LICENSE +202 -0
- aiphoria/core/network_graph_data/echarts_min.js +45 -0
- aiphoria/core/network_graph_data/network_graph.html +76 -0
- aiphoria/core/network_graph_data/network_graph.js +1391 -0
- aiphoria/core/parameters.py +269 -0
- aiphoria/core/types.py +20 -0
- aiphoria/core/utils.py +362 -0
- aiphoria/core/visualizer_parameters.py +7 -0
- aiphoria/data/example_scenario.xlsx +0 -0
- aiphoria/example.py +66 -0
- aiphoria/lib/docs/dynamic_stock.py +124 -0
- aiphoria/lib/odym/modules/ODYM_Classes.py +362 -0
- aiphoria/lib/odym/modules/ODYM_Functions.py +1299 -0
- aiphoria/lib/odym/modules/__init__.py +1 -0
- aiphoria/lib/odym/modules/dynamic_stock_model.py +808 -0
- aiphoria/lib/odym/modules/test/DSM_test_known_results.py +762 -0
- aiphoria/lib/odym/modules/test/ODYM_Classes_test_known_results.py +107 -0
- aiphoria/lib/odym/modules/test/ODYM_Functions_test_known_results.py +136 -0
- aiphoria/lib/odym/modules/test/__init__.py +2 -0
- aiphoria/runner.py +678 -0
- aiphoria-0.8.0.dist-info/METADATA +119 -0
- aiphoria-0.8.0.dist-info/RECORD +40 -0
- {aiphoria-0.0.1.dist-info → aiphoria-0.8.0.dist-info}/WHEEL +1 -1
- aiphoria-0.8.0.dist-info/licenses/LICENSE +21 -0
- aiphoria-0.0.1.dist-info/METADATA +0 -5
- aiphoria-0.0.1.dist-info/RECORD +0 -5
- {aiphoria-0.0.1.dist-info → aiphoria-0.8.0.dist-info}/top_level.txt +0 -0
|
@@ -0,0 +1,1299 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
"""
|
|
3
|
+
Created on Thu Mar 2 17:33:00 2017
|
|
4
|
+
|
|
5
|
+
@author: spauliuk
|
|
6
|
+
"""
|
|
7
|
+
|
|
8
|
+
"""
|
|
9
|
+
File ODYM_Functions
|
|
10
|
+
Check https://github.com/IndEcol/ODYM for latest version.
|
|
11
|
+
|
|
12
|
+
Contains function definitions for ODYM
|
|
13
|
+
|
|
14
|
+
standard abbreviation: msf (material-system-functions)
|
|
15
|
+
|
|
16
|
+
dependencies:
|
|
17
|
+
numpy >= 1.9
|
|
18
|
+
scipy >= 0.14
|
|
19
|
+
|
|
20
|
+
Repository for this class, documentation, and tutorials: https://github.com/IndEcol/ODYM
|
|
21
|
+
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
import os
|
|
25
|
+
import logging
|
|
26
|
+
import numpy as np
|
|
27
|
+
#import pandas as pd
|
|
28
|
+
import xlrd
|
|
29
|
+
import openpyxl
|
|
30
|
+
import pypandoc
|
|
31
|
+
import ODYM_Classes as msc
|
|
32
|
+
|
|
33
|
+
####################################
|
|
34
|
+
# Define functions #
|
|
35
|
+
####################################
|
|
36
|
+
|
|
37
|
+
def __version__(): # return version of this file
|
|
38
|
+
return str('1.0')
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def function_logger(log_filename, log_pathname, file_level=logging.DEBUG, console_level=logging.WARNING):
    """
    Configure the model's logging and return a logger that other functions can
    use to write to the console and to the log file.

    :param log_filename: The filename for the logfile.
    :param log_pathname: The pathname for the logfile.
    :param file_level: Verbosity level for the logger's output file
        (logging.DEBUG by default; logging.INFO or logging.WARNING also work).
    :param console_level: Verbosity level for the logger's console output.
    :return: Tuple (logger, console handler, file handler). Markdown-friendly
        plain messages are written to the file; the console gets a prefixed format.
    """
    log_file = os.path.join(log_pathname, log_filename)

    logger = logging.getLogger()
    # Drop any previously installed handlers so repeated calls (e.g. in an
    # interactive shell) do not duplicate output.
    logger.handlers = []
    logger.setLevel(file_level)

    # Console handler: short, prefixed messages.
    console_log = logging.StreamHandler()
    console_log.setLevel(console_level)
    console_log.setFormatter(logging.Formatter('%(levelname)s (%(filename)s <%(funcName)s>): %(message)s'))
    logger.addHandler(console_log)

    # File handler: bare messages, one blank line between entries, file rewritten each run.
    file_log = logging.FileHandler(log_file, mode='w', encoding=None, delay=False)
    file_log.setLevel(file_level)
    file_log.setFormatter(logging.Formatter('%(message)s\n'))
    logger.addHandler(file_log)

    return logger, console_log, file_log
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def ensure_dir(f):  # Checks whether a given directory f exists, and creates it if not
    """
    Ensure that the directory containing path ``f`` exists.

    :param f: A file path; its dirname is created (including parents) if missing.
        Note: if ``f`` is itself a directory path without trailing separator,
        only its parent is created (os.path.dirname behavior, kept for
        backward compatibility).
    """
    d = os.path.dirname(f)
    # exist_ok avoids the check-then-create race of the previous
    # os.path.exists / os.makedirs pair (and a FileExistsError if another
    # process creates the directory in between).
    if d:
        os.makedirs(d, exist_ok=True)
|
|
89
|
+
|
|
90
|
+
|
|
91
|
+
|
|
92
|
+
def sort_index(mylist, direction):  # returns index that sorts a list, either ascending or descending
    """
    Return the index permutation that sorts ``mylist``.

    :param mylist: Sequence to be sorted (not modified).
    :param direction: 'ascending' or 'descending'; any other value yields None.
    :return: List of positions such that mylist[result[0]], mylist[result[1]], ...
        is sorted, or None for an unknown direction.
    """
    if direction == 'ascending':
        descending = False
    elif direction == 'descending':
        descending = True
    else:
        return None
    return sorted(range(len(mylist)), key=mylist.__getitem__, reverse=descending)
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def GroupingDict2Array(GroupingDict, ElementList):
    '''
    Convert a nested grouping dict {group: {element: value}} into a pair of
    (list of group keys, element-content array).

    The returned array has a fixed 100 rows (element slots, addressed by the
    position of each element in ElementList) and one column per group.
    '''
    GroupingList = list(GroupingDict.keys())
    ElementContentArray = np.zeros((100, len(GroupingList)))
    for PosCount, group in enumerate(GroupingList):
        for element, content in GroupingDict[group].items():
            ElementContentArray[ElementList.index(element), PosCount] = content
    return GroupingList, ElementContentArray
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def ListStringToListNumbers(ListStr):
    """
    Extract non-negative integers from a string that looks like a Python list
    literal and return them as a proper list.
    Example: ListStringToListNumbers('[1,2,3]') yields [1,2,3]
    """
    # Take the substring from the first '[' to the first ']' (inclusive),
    # turn both brackets into separators, and keep only pure digit tokens.
    bracketed = ListStr[ListStr.find('['):ListStr.find(']') + 1]
    tokens = bracketed.replace('[', ',').replace(']', ',').split(',')
    return [int(tok) for tok in tokens if tok.isdigit()]
|
|
127
|
+
|
|
128
|
+
|
|
129
|
+
|
|
130
|
+
def EvalItemSelectString(ItemSelectStr, IndexLength):
    '''
    Extract an index item selection from an ODYM datafile selector string.

    Supported forms:
    - 'All'/'ALL'/'all'              -> the string 'all' (select everything)
    - 'All except [..]'              -> list of all positions minus the listed ones
    - '[...]'                        -> the bracketed list returned as a string
    - '[a:b)'                        -> the range body 'a:b' as a string
    Anything else returns an error message string.
    '''
    if ItemSelectStr in ('All', 'ALL', 'all'):
        return 'all'  # Selects all from list
    if 'except' in ItemSelectStr:
        # type 'All except': build full range, then delete listed positions.
        removed = ItemSelectStr[ItemSelectStr.find('['):ItemSelectStr.find(']') + 1].replace('[', ',').replace(']', ',')
        RemoveList = [int(s) for s in removed.split(',') if s.isdigit()]
        return np.delete(np.arange(0, IndexLength), RemoveList).tolist()
    if ']' in ItemSelectStr:
        # type '[...]': return the bracketed list verbatim.
        return ItemSelectStr[ItemSelectStr.find('['):]
    if ')' in ItemSelectStr:
        # type '[a:b)': return the range body between the brackets.
        return ItemSelectStr[ItemSelectStr.find('[') + 1:-1]
    return 'ItemSelectString could not be detected.'
|
|
150
|
+
|
|
151
|
+
|
|
152
|
+
|
|
153
|
+
def MI_Tuple(value, Is):
    """
    Convert a flattened index position into a multi-index tuple.

    value: flattened index position; Is: number of values for each index dimension.
    Example: MI_Tuple(10, [3,4,2,6]) returns [0,0,1,4]
    MI_Tuple is the inverse of Tuple_MI.
    """
    # Repeated divmod by the dimension sizes, lowest dimension first,
    # yields the per-dimension digits in reverse order.
    digits = []
    remainder = value
    for size in reversed(Is):
        remainder, digit = divmod(remainder, size)
        digits.append(digit)
    return digits[::-1]
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
|
|
169
|
+
def Tuple_MI(Tuple, IdxLength):
    """
    Return the absolute (flattened) position of a multi-index, given the index
    tuple and the index hierarchy sizes.

    Example: Tuple_MI([2,7,3],[100,10,5]) = 138
    Tuple_MI is the inverse of MI_Tuple.

    :param Tuple: sequence of per-dimension index values.
    :param IdxLength: sequence of per-dimension sizes (same length as Tuple).
    :return: int flattened position (row-major order).
    """
    # The offset of dimension i is the product of all lower-dimension sizes.
    # Computed in pure Python: the previous implementation used np.cumproduct,
    # which was removed in NumPy 2.0.
    IdxPosOffset = []
    offset = 1
    for size in reversed(IdxLength):
        IdxPosOffset.append(offset)
        offset *= size
    IdxPosOffset.reverse()
    return sum(a * b for a, b in zip(Tuple, IdxPosOffset))
|
|
184
|
+
|
|
185
|
+
|
|
186
|
+
def TableWithFlowsToShares(Table, axis):
    """
    Convert a 2D table of flow values into shares along the given axis.

    axis=0: each entry is divided by its column sum; axis=1: by its row sum.
    Columns/rows that sum to zero yield zero shares instead of dividing by zero.
    Only makes sense if all table entries have the same sign, which is not
    checked by the function.

    :param Table: 2D array-like of flow values (any numeric dtype).
    :param axis: 0 for shares along columns, 1 for shares along rows.
    :return: float array of the same shape with values between 0 and 1.
    :raises ValueError: if axis is not 0 or 1 (previously this fell through to
        an unhandled NameError).
    """
    # Cast to float: an integer-dtype Table previously made
    # np.divide(..., out=np.zeros_like(Divisor)) fail with a casting error.
    Table = np.asarray(Table, dtype=float)
    if axis == 0:   # shares along columns
        Divisor = np.einsum('b,a->ab', Table.sum(axis=0), np.ones(Table.shape[0]))
    elif axis == 1: # shares along rows
        Divisor = np.einsum('a,b->ab', Table.sum(axis=1), np.ones(Table.shape[1]))
    else:
        raise ValueError('axis must be 0 or 1, got %r' % (axis,))
    # Reciprocal of the divisor, with zero-sum rows/columns mapped to 0 instead of inf:
    Divided = np.divide(1, Divisor, out=np.zeros_like(Divisor), where=Divisor != 0)
    return Table * Divided
|
|
204
|
+
|
|
205
|
+
|
|
206
|
+
def DetermineElementComposition_All_Oth(me):
    """
    Determine per-material element shares from an array of material flows.

    Rows are materials; columns are chemical elements, where column 0 is 'all'
    and the last column is 'other'. The 'all' column is always set to 1. Rows
    with no element data at all get a share of 1 in the 'other' column as well.
    """
    result = np.zeros(me.shape)
    result[:, 0] = 1  # 'all' column is always 1
    # Shares of each real element (columns 1..end) within each material row:
    Shares = TableWithFlowsToShares(me[:, 1:], 1)
    result[:, 1:] = Shares.copy()
    # Rows without any element information: attribute everything to 'other'.
    for row in range(me.shape[0]):
        if Shares[row, :].sum() == 0:
            result[row, -1] = 1
    return result
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
def ModelIndexPositions_FromData(Positions, RowPos, ColPos):
    """
    Locate a data point within the model index structure when reading data files.

    For each aspect, the row label (first len(RowPos) aspects) or column label
    (remaining aspects) is looked up in the corresponding Positions list. The
    search stops early — returning a shorter list — as soon as a label is not
    found, which signals that the data point is outside the model scope.
    """
    TargetPosition = []
    for aspect, items in enumerate(Positions):
        try:
            if aspect < len(RowPos):
                TargetPosition.append(items.index(RowPos[aspect]))
            else:
                TargetPosition.append(items.index(ColPos[aspect - len(RowPos)]))
        except:  # broad on purpose: missing label or short RowPos/ColPos both end the lookup
            break
    return TargetPosition
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
def ParseModelControl(Model_Configsheet, ScriptConfig):
    """ Parse the RECC and ODYM model control parameters from the ODYM config sheet. """
    # Both sections are laid out identically: a marker string in column 2,
    # then (two rows below) name/value pairs in columns 3 and 4 until a blank.
    for section in ('General Info', 'Software version selection'):
        row = 1
        while Model_Configsheet.cell(row, 2).value != section:
            row += 1
        row += 2  # first data row of the section
        while Model_Configsheet.cell(row, 4).value is not None:
            ScriptConfig[Model_Configsheet.cell(row, 3).value] = Model_Configsheet.cell(row, 4).value
            row += 1
    return ScriptConfig
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
def ParseClassificationFile_Main(Classsheet, Mylog):
    """ Parse the ODYM classification file, format version

    One classification per column, starting at column 2: name (row 1),
    dimension (row 2), ID (row 4), UUID (row 5), and the classification items
    from row 11 downwards until a blank cell.
    """
    MasterClassification = {}  # Dict of master classifications
    ci = 2  # first classification column
    while Classsheet.cell(1, ci).value is not None:
        ThisName = Classsheet.cell(1, ci).value
        ThisDim = Classsheet.cell(2, ci).value
        ThisID = Classsheet.cell(4, ci).value
        ThisUUID = Classsheet.cell(5, ci).value
        TheseItems = []
        ri = 11  # items start at row 11
        while Classsheet.cell(ri, ci).value is not None:
            TheseItems.append(Classsheet.cell(ri, ci).value)
            ri += 1
        MasterClassification[ThisName] = msc.Classification(Name = ThisName, Dimension = ThisDim, ID = ThisID, UUID = ThisUUID, Items = TheseItems)
        ci += 1

    return MasterClassification
|
|
288
|
+
|
|
289
|
+
|
|
290
|
+
def ParseConfigFile(Model_Configsheet,ScriptConfig,Mylog):
    """
    Standard routine to parse the ODYM model config file.

    Scans the config worksheet section by section — each section is located by
    a marker string in column 2 — and reads the data rows below it:
    'Index Table', 'Model Parameters', 'Process Group List',
    'Model flow control', and 'Model output control'.

    :param Model_Configsheet: worksheet with a .cell(row, col).value interface
        (openpyxl-style, 1-based indices).
    :param ScriptConfig: dict of script settings; extended in place with the
        flow/output control entries and also returned.
    :param Mylog: logger used for progress messages.
    :return: IT_* lists (index table columns), PL_* lists (parameter list
        columns), PrL_* lists (process list columns), and ScriptConfig.

    NOTE(review): each marker search loops forever if the marker string is
    missing from the sheet — confirm all five sections exist in every config.
    """
    ITix = 0

    # search for index table entry
    while True:
        if Model_Configsheet.cell(ITix+1, 2).value == 'Index Table':
            break
        else:
            ITix += 1

    IT_Aspects = []         # aspect (model dimension) names
    IT_Description = []     # free-text description per aspect
    IT_Dimension = []       # dimension (e.g. time, region) per aspect
    IT_Classification = []  # classification name per aspect
    IT_Selector = []        # item selection string per aspect
    IT_IndexLetter = []     # one-letter index symbol per aspect
    ITix += 2 # start on first data row
    while Model_Configsheet.cell(ITix+1,3).value is not None:
        IT_Aspects.append(Model_Configsheet.cell(ITix+1,3).value)
        IT_Description.append(Model_Configsheet.cell(ITix+1,4).value)
        IT_Dimension.append(Model_Configsheet.cell(ITix+1,5).value)
        IT_Classification.append(Model_Configsheet.cell(ITix+1,6).value)
        IT_Selector.append(Model_Configsheet.cell(ITix+1,7).value)
        IT_IndexLetter.append(Model_Configsheet.cell(ITix+1,8).value)
        ITix += 1

    Mylog.info('Read parameter list from model config sheet.')
    PLix = 0
    while True: # search for parameter list entry
        if Model_Configsheet.cell(PLix+1, 2).value == 'Model Parameters':
            break
        else:
            PLix += 1

    PL_Names = []
    PL_Description = []
    PL_Version = []
    PL_IndexStructure = []
    PL_IndexMatch = []
    PL_IndexLayer = []
    PLix += 2 # start on first data row
    while Model_Configsheet.cell(PLix+1,3).value is not None:
        PL_Names.append(Model_Configsheet.cell(PLix+1,3).value)
        PL_Description.append(Model_Configsheet.cell(PLix+1,4).value)
        PL_Version.append(Model_Configsheet.cell(PLix+1,5).value)
        PL_IndexStructure.append(Model_Configsheet.cell(PLix+1,6).value)
        PL_IndexMatch.append(Model_Configsheet.cell(PLix+1,7).value)
        PL_IndexLayer.append(ListStringToListNumbers(Model_Configsheet.cell(PLix+1,8).value)) # strip numbers out of list string
        PLix += 1

    Mylog.info('Read process list from model config sheet.')
    PrLix = 1

    # search for process list entry
    while Model_Configsheet.cell(PrLix, 2).value != 'Process Group List':
        PrLix += 1

    PrL_Number = []   # numeric process IDs
    PrL_Name = []
    PrL_Comment = []
    PrL_Type = []
    PrLix += 2 # start on first data row

    while True:
        if Model_Configsheet.cell(PrLix,3).value is None:
            break
        PrL_Number.append(int(Model_Configsheet.cell(PrLix,3).value))
        PrL_Name.append(Model_Configsheet.cell(PrLix,4).value)
        PrL_Type.append(Model_Configsheet.cell(PrLix,5).value)
        PrL_Comment.append(Model_Configsheet.cell(PrLix,6).value)
        PrLix += 1

    Mylog.info('Read model run control from model config sheet.')
    PrLix = 0

    # search for model flow control entry
    while True:
        if Model_Configsheet.cell(PrLix+1, 2).value == 'Model flow control':
            break
        else:
            PrLix += 1

    # start on first data row
    PrLix += 2
    while True:
        if Model_Configsheet.cell(PrLix+1, 3).value is not None:
            try:
                ScriptConfig[Model_Configsheet.cell(PrLix+1, 3).value] = Model_Configsheet.cell(PrLix+1,4).value
            except:
                # best-effort: unreadable entries are skipped without aborting the parse
                None
            PrLix += 1
        else:
            break

    Mylog.info('Read model output control from model config sheet.')
    PrLix = 0

    # search for model output control entry
    while True:
        if Model_Configsheet.cell(PrLix+1, 2).value == 'Model output control':
            break
        else:
            PrLix += 1

    # start on first data row
    PrLix += 2
    while True:
        if Model_Configsheet.cell(PrLix+1, 3).value is not None:
            try:
                ScriptConfig[Model_Configsheet.cell(PrLix+1, 3).value] = Model_Configsheet.cell(PrLix+1,4).value
            except:
                # best-effort: unreadable entries are skipped without aborting the parse
                None
            PrLix += 1
        else:
            break

    return IT_Aspects,IT_Description,IT_Dimension,IT_Classification,IT_Selector,IT_IndexLetter,PL_Names,PL_Description,PL_Version,PL_IndexStructure,PL_IndexMatch,PL_IndexLayer,PrL_Number,PrL_Name,PrL_Comment,PrL_Type,ScriptConfig
|
|
418
|
+
|
|
419
|
+
|
|
420
|
+
def ReadParameter(ParPath, ThisPar, ThisParIx, IndexMatch, ThisParLayerSel, MasterClassification,
                  IndexTable, IndexTable_ClassificationNames, ScriptConfig, Mylog):
    """
    This function reads a model parameter from the corresponding parameter file.

    The workbook at ParPath + '.xlsx' must contain a 'Cover' sheet with
    metadata rows followed by a 'Dataset_RecordType' row whose value is either
    'List' (one data point per row on sheet 'Values_Master') or 'Table'
    (a row-index x column-index value matrix on a layer-specific sheet).

    :param ParPath: path of the parameter workbook, without the '.xlsx' suffix.
    :param ThisPar: parameter name, used in log messages.
    :param ThisParIx: string of index letters for the parameter's aspects.
    :param IndexMatch: string representation of a list that matches model
        aspects to parameter file indices; evaluated with eval() below.
    :param ThisParLayerSel: selection of the value layer (Table format only).
    :param MasterClassification: master classification dict (not used directly here).
    :param IndexTable: model index table with 'IndexLetter', 'IndexSize', and
        'Classification' columns (pandas-style .set_index/.loc access).
    :param IndexTable_ClassificationNames: classification names of the model run.
    :param ScriptConfig: script config dict; 'Version of master classification'
        is checked against the file's classification version.
    :param Mylog: logger for info/error/critical messages.
    :return: (MetaData dict, Values numpy array indexed by model aspects).

    NOTE(review): Values is only assigned inside the 'List' and 'Table'
    branches; any other Dataset_RecordType makes the final return fail with a
    NameError — confirm all parameter files use one of these two types.
    """
    Parfile = xlrd.open_workbook(ParPath + '.xlsx')
    ParHeader = Parfile.sheet_by_name('Cover')

    # NOTE(review): eval() on spreadsheet content is unsafe for untrusted files.
    IM = eval(IndexMatch) # List that matches model aspects to parameter indices

    ri = 1 # row index
    MetaData = {}
    while True: # read cover sheet info
        ThisItem = ParHeader.cell_value(ri,0)
        if ThisItem != 'Dataset_RecordType':
            MetaData[ThisItem] = ParHeader.cell_value(ri,1)
            ri += 1
        else:
            break # terminate while loop when all meta information is read.
    # Now we are in the row of Dataset_RecordType

    # Check whether parameter file uses same classification:
    if 'ODYM_Classifications_Master_' + \
        ScriptConfig['Version of master classification'] != MetaData['Dataset_Classification_version_number']:
        Mylog.critical('CLASSIFICATION FILE FATAL ERROR: Classification file of parameter ' + ThisPar +
                       ' is not identical to the classification master file used for the current model run.')

    ### List version ###
    if ParHeader.cell_value(ri,1) == 'List':
        # Index names (row ri+1) and their meanings (row ri+2), one per column:
        IList = []
        IListMeaning = []
        ci = 1 # column index
        while True:
            if ParHeader.cell_value(ri +1,ci) != '':
                IList.append(ParHeader.cell_value(ri +1,ci))
                IListMeaning.append(ParHeader.cell_value(ri +2,ci))
                ci += 1
            else:
                break
        # Re-Order indices to fit model aspect order:
        IList = [IList[i] for i in IM]
        IListMeaning = [IListMeaning[i] for i in IM]

        # Value layer names (row ri+3) and comments (row ri+4):
        ValueList = []
        VIComment = []
        ci = 1 # column index
        while True:
            if ParHeader.cell_value(ri +4,ci) != '':
                ValueList.append(ParHeader.cell_value(ri +3,ci))
                VIComment.append(ParHeader.cell_value(ri +4,ci))
                ci += 1
            else:
                break

        # Check whether all indices are present in the index table of the model
        if set(IList).issubset(set(IndexTable_ClassificationNames)) is False:
            Mylog.error('CLASSIFICATION ERROR: Index list of data file for parameter ' + ThisPar +
                        ' contains indices that are not part of the current model run.')

        # Check how well items match between model and data, select items to import
        IndexSizesM = [] # List of dimension size for model
        for m in range(0,len(ThisParIx)):
            ThisDim = ThisParIx[m]
            # Check whether index is present in parameter file:
            ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
            if ThisDimClassificationName != IList[m]:
                Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
                            ThisDim + ' of parameter ' + ThisPar +
                            ' must be identical to the specified classification of the corresponding parameter dimension, which is ' + IList[m])
                break # Stop parsing parameter, will cause model to halt

            IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])

        # Read parameter values into array:
        Values = np.zeros((IndexSizesM))
        ValIns = np.zeros((IndexSizesM)) # Array to check how many values are actually loaded
        ValuesSheet = Parfile.sheet_by_name('Values_Master')
        ColOffset = len(IList)  # value column follows the index-label columns
        RowOffset = 1 # fixed for this format, different quantification layers (value, error, etc.) will be read later
        cx = 0
        while True:
            try:
                CV = ValuesSheet.cell_value(cx + RowOffset, ColOffset)
            except:
                break  # past the last data row (xlrd raises IndexError)
            TargetPosition = []
            for mx in range(0,len(IList)): # mx iterates over the aspects of the parameter
                CurrentItem = ValuesSheet.cell_value(cx + RowOffset, IM[mx])
                try:
                    TargetPosition.append(IndexTable.set_index('IndexLetter').loc[ThisParIx[mx]].Classification.Items.index(CurrentItem))
                except:
                    break # Current parameter value is not needed for model, outside scope for a certain aspect.
            if len(TargetPosition) == len(ThisParIx):
                Values[tuple(TargetPosition)] = CV
                ValIns[tuple(TargetPosition)] = 1
            cx += 1

        # NOTE(review): cx already equals the number of rows read here, so
        # str(cx+1) appears to overstate the count by one — confirm intent.
        Mylog.info('A total of ' + str(cx+1) + ' values was read from file for parameter ' + ThisPar + '.')
        Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar + ' were assigned.')

    ### Table version ###
    if ParHeader.cell_value(ri,1) == 'Table': # have 3 while loops, one for row indices, one for column indices, one for value layers

        # Row index names (row ri+1), sizes (row ri+2) and meanings (row ri+3):
        RIList = []
        RISize = []
        RIListMeaning = []
        ci = 1 # column index
        while True:
            if ParHeader.cell_value(ri +1,ci) != '':
                RIList.append(ParHeader.cell_value(ri +1,ci))
                # NOTE(review): size is always read from column 1, not ci —
                # presumably the row count is stated once; confirm file format.
                RISize.append(int(ParHeader.cell_value(ri +2,1)))
                RIListMeaning.append(ParHeader.cell_value(ri +3,ci))
                ci += 1
            else:
                break
        RISize = RISize[0]

        # Column index names (row ri+4), sizes (row ri+5) and meanings (row ri+6):
        CIList = []
        CISize = []
        CIListMeaning = []
        ci = 1 # column index
        while True:
            if ParHeader.cell_value(ri +4,ci) != '':
                CIList.append(ParHeader.cell_value(ri +4,ci))
                CISize.append(int(ParHeader.cell_value(ri +5,1)))
                CIListMeaning.append(ParHeader.cell_value(ri +6,ci))
                ci += 1
            else:
                break
        CISize = CISize[0]

        # Re-Order indices to fit model aspect order:
        ComIList = RIList + CIList
        ComIList = [ComIList[i] for i in IM]

        # Value layer names (row ri+7) and comments (row ri+8):
        ValueList = []
        VIComment = []
        ci = 1 # column index
        while True:
            if ParHeader.cell_value(ri +7,ci) != '':
                ValueList.append(ParHeader.cell_value(ri +7,ci))
                VIComment.append(ParHeader.cell_value(ri +8,ci))
                ci += 1
            else:
                break

        # Check whether all indices are present in the index table of the model
        if set(RIList).issubset(set(IndexTable_ClassificationNames)) is False:
            Mylog.error('CLASSIFICATION ERROR: Row index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')
        if set(CIList).issubset(set(IndexTable_ClassificationNames)) is False:
            Mylog.error('CLASSIFICATION ERROR: Column index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')

        # Determine index letters for RIList and CIList
        RIIndexLetter = []
        for m in range(0,len(RIList)):
            RIIndexLetter.append(ThisParIx[IM.index(m)])
        CIIndexLetter = []
        for m in range(0,len(CIList)):
            CIIndexLetter.append(ThisParIx[IM.index(m+len(RIList))])

        # Check how well items match between model and data, select items to import
        IndexSizesM = [] # List of dimension size for model
        for m in range(0,len(ThisParIx)):
            ThisDim = ThisParIx[m]
            ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
            if ThisDimClassificationName != ComIList[m]:
                Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
                            ThisDim + ' of parameter ' + ThisPar +
                            ' must be identical to the specified classification of the corresponding parameter dimension, which is ' +
                            ComIList[m])
                break # Stop parsing parameter, will cause model to halt

            IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])

        # Read parameter values into array:
        Values = np.zeros((IndexSizesM))
        ValIns = np.zeros((IndexSizesM)) # Array to check how many values are actually loaded
        ValuesSheet = Parfile.sheet_by_name(ValueList[ThisParLayerSel[0]])
        ColOffset = len(RIList)  # data columns start after the row-label columns
        RowOffset = len(CIList)  # data rows start after the column-label rows
        RowNos = RISize
        ColNos = CISize

        # Map each sheet row to its (aspect position, item position) pairs;
        # None marks a row label outside the model scope.
        TargetPos_R = []
        for m in range(0,RowNos):
            TP_RD = []
            for mc in range(0,len(RIList)):
                try:
                    # Integer labels (e.g. years) come back as floats from xlrd:
                    CurrentItem = int(ValuesSheet.cell_value(m + RowOffset, mc))
                except:
                    CurrentItem = ValuesSheet.cell_value(m + RowOffset, mc)
                try:
                    IX = ThisParIx.find(RIIndexLetter[mc])
                    TPIX = IndexTable.set_index('IndexLetter').loc[RIIndexLetter[mc]].Classification.Items.index(CurrentItem)
                    TP_RD.append((IX,TPIX))
                except:
                    TP_RD.append(None)
                    break
            TargetPos_R.append(TP_RD)

        # Same mapping for each sheet column:
        TargetPos_C = []
        for n in range(0,ColNos):
            TP_CD = []
            for mc in range(0,len(CIList)):
                try:
                    CurrentItem = int(ValuesSheet.cell_value(mc, n + ColOffset))
                except:
                    CurrentItem = ValuesSheet.cell_value(mc, n + ColOffset)
                try:
                    IX = ThisParIx.find(CIIndexLetter[mc])
                    TPIX = IndexTable.set_index('IndexLetter').loc[CIIndexLetter[mc]].Classification.Items.index(CurrentItem)
                    TP_CD.append((IX,TPIX))
                except:
                    TP_CD.append(None)
                    break
            TargetPos_C.append(TP_CD)

        # Copy each table cell into its model position; cells whose row or
        # column is outside the model scope are skipped via the except branch.
        for m in range(0,RowNos):
            for n in range(0,ColNos):
                TargetPosition = [0 for i in range(0,len(ComIList))]
                try:
                    for i in range(0,len(RIList)):
                        TargetPosition[TargetPos_R[m][i][0]] = TargetPos_R[m][i][1]
                    for i in range(0,len(CIList)):
                        TargetPosition[TargetPos_C[n][i][0]] = TargetPos_C[n][i][1]
                except:
                    TargetPosition = [0]  # sentinel: wrong length, cell is skipped below
                if len(TargetPosition) == len(ComIList):
                    Values[tuple(TargetPosition)] = ValuesSheet.cell_value(m + RowOffset, n + ColOffset)
                    ValIns[tuple(TargetPosition)] = 1

        Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar +
                   ' were assigned.')

    return MetaData, Values
|
|
656
|
+
|
|
657
|
+
|
|
658
|
+
|
|
659
|
+
def ReadParameterV2(ParPath, ThisPar, ThisParIx, IndexMatch, ThisParLayerSel, MasterClassification,
                    IndexTable, IndexTable_ClassificationNames, ScriptConfig, Mylog, ParseUncertainty):
    """
    Read a model parameter from its Excel parameter file (xlrd, 0-based cells).

    The 'Cover' sheet supplies metadata and declares the record type; the data
    are then parsed either from a 'Values_Master' sheet (record type LIST) or
    from one sheet per value layer (record type TABLE).

    :param ParPath: path of the parameter workbook, without the '.xlsx' suffix
    :param ThisPar: parameter name, used in log messages
    :param ThisParIx: string of index letters, one per model aspect of the parameter
    :param IndexMatch: string repr of a list mapping model aspect order to file index order
    :param ThisParLayerSel: list selecting the value layer (sheet) to read; TABLE format only
    :param MasterClassification: unused here, kept for interface compatibility
    :param ThisParLayerSel: list whose first entry selects the value layer sheet (TABLE only)
    :param IndexTable: model index table (pandas DataFrame with an 'IndexLetter' column)
    :param IndexTable_ClassificationNames: classification names known to the model run
    :param ScriptConfig: dict with entry 'Version of master classification'
    :param Mylog: logger for info/error/critical messages
    :param ParseUncertainty: if True, uncertainty info is parsed and returned as a third value
    :return: (MetaData, Values) or, if ParseUncertainty, (MetaData, Values, Uncertainty)
    """
    Parfile = xlrd.open_workbook(ParPath + '.xlsx')
    ParHeader = Parfile.sheet_by_name('Cover')

    # NOTE(review): eval on a config string -- assumed to come from a trusted model config file.
    IM = eval(IndexMatch)  # List that matches model aspects to parameter indices

    ri = 1  # row index on the cover sheet
    MetaData = {}
    while True:  # read cover sheet info
        ThisItem = ParHeader.cell_value(ri, 0)
        if (ThisItem != '[Empty on purpose]' and ThisItem != 'Dataset_RecordType'):
            MetaData[ThisItem] = ParHeader.cell_value(ri, 1)
            if ThisItem == 'Dataset_Unit':
                if ParHeader.cell_value(ri, 1) == 'GLOBAL':
                    MetaData['Unit_Global'] = ParHeader.cell_value(ri, 2)
                    MetaData['Unit_Global_Comment'] = ParHeader.cell_value(ri, 3)
            if ThisItem == 'Dataset_Uncertainty':
                # if LIST is specified, nothing happens here.
                if ParHeader.cell_value(ri, 1) == 'GLOBAL':
                    MetaData['Dataset_Uncertainty_Global'] = ParHeader.cell_value(ri, 2)
                if ParHeader.cell_value(ri, 1) == 'TABLE':
                    MetaData['Dataset_Uncertainty_Sheet'] = ParHeader.cell_value(ri, 2)
            if ThisItem == 'Dataset_Comment':
                if ParHeader.cell_value(ri, 1) == 'GLOBAL':
                    MetaData['Dataset_Comment_Global'] = ParHeader.cell_value(ri, 2)
            ri += 1
        else:
            break  # terminate while loop when all meta information is read.
    # Now we are in the row of Dataset_RecordType

    # Check whether parameter file uses same classification:
    if ScriptConfig['Version of master classification'] != MetaData['Dataset_Classification_version_number']:
        Mylog.critical('CLASSIFICATION FILE FATAL ERROR: Classification file of parameter ' + ThisPar +
                       ' is not identical to the classification master file used for the current model run.')

    # Continue parsing until line 'Dataset_RecordType' is found:
    while True:
        ThisItem = ParHeader.cell_value(ri, 0)
        if ThisItem == 'Dataset_RecordType':
            break
        else:
            ri += 1

    ### List version ###
    if ParHeader.cell_value(ri, 1) == 'LIST':
        IList = []
        IListMeaning = []
        RI_Start = ri + 2
        while True:
            if ParHeader.cell_value(RI_Start, 0) != '':
                IList.append(ParHeader.cell_value(RI_Start, 0))
                IListMeaning.append(ParHeader.cell_value(RI_Start, 1))
                RI_Start += 1
            else:
                break
        # Re-Order indices to fit model aspect order:
        IList = [IList[i] for i in IM]
        IListMeaning = [IListMeaning[i] for i in IM]

        ValueList = []
        VIComment = []
        RI_Start = ri + 2
        while True:
            if ParHeader.cell_value(RI_Start, 2) != '':
                ValueList.append(ParHeader.cell_value(RI_Start, 2))
                VIComment.append(ParHeader.cell_value(RI_Start, 3))
                RI_Start += 1
            else:
                break

        # Check whether all indices are present in the index table of the model
        if set(IList).issubset(set(IndexTable_ClassificationNames)) is False:
            Mylog.error('CLASSIFICATION ERROR: Index list of data file for parameter ' + ThisPar +
                        ' contains indices that are not part of the current model run.')

        # Check how well items match between model and data, select items to import
        IndexSizesM = []  # List of dimension size for model
        for m in range(0, len(ThisParIx)):
            ThisDim = ThisParIx[m]
            # Check whether index is present in parameter file:
            ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
            if ThisDimClassificationName != IList[m]:
                Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
                            ThisDim + ' of parameter ' + ThisPar +
                            ' must be identical to the specified classification of the corresponding parameter dimension, which is ' + IList[m])
                break  # Stop parsing parameter, will cause model to halt

            IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])

        # Read parameter values into array, uncertainty into list:
        Values = np.zeros((IndexSizesM))  # Array for parameter values
        # np.prod instead of np.product: np.product was removed in NumPy 2.0.
        Uncertainty = [None] * np.prod(IndexSizesM)  # parameter value uncertainties
        ValIns = np.zeros((IndexSizesM))  # Array to check how many values are actually loaded
        ValuesSheet = Parfile.sheet_by_name('Values_Master')
        ColOffset = len(IList)
        RowOffset = 1  # fixed for this format, different quantification layers (value, error, etc.) will be read later
        cx = 0
        while True:
            try:
                CV = ValuesSheet.cell_value(cx + RowOffset, ColOffset)
            except Exception:  # reached the end of the data rows
                break
            TargetPosition = []
            for mx in range(0, len(IList)):  # mx iterates over the aspects of the parameter
                CurrentItem = ValuesSheet.cell_value(cx + RowOffset, IM[mx])
                try:
                    TargetPosition.append(IndexTable.set_index('IndexLetter').loc[ThisParIx[mx]].Classification.Items.index(CurrentItem))
                except Exception:
                    break  # Current parameter value is not needed for model, outside scope for a certain aspect.
            if len(TargetPosition) == len(ThisParIx):
                Values[tuple(TargetPosition)] = CV
                ValIns[tuple(TargetPosition)] = 1
                Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = ValuesSheet.cell_value(cx + RowOffset, ColOffset + 3)
            cx += 1

        Mylog.info('A total of ' + str(cx) + ' values was read from file for parameter ' + ThisPar + '.')
        Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar + ' were assigned.')

    ### Table version ###
    if ParHeader.cell_value(ri, 1) == 'TABLE':  # have 3 while loops, one for row indices, one for column indices, one for value layers
        ColNos = int(ParHeader.cell_value(ri, 5))  # Number of columns in dataset
        RowNos = int(ParHeader.cell_value(ri, 3))  # Number of rows in dataset

        RI = ri + 2  # row where indices start
        RIList = []
        RIListMeaning = []
        while True:
            if ParHeader.cell_value(RI, 0) != '':
                RIList.append(ParHeader.cell_value(RI, 0))
                RIListMeaning.append(ParHeader.cell_value(RI, 1))
                RI += 1
            else:
                break

        RI = ri + 2  # row where indices start
        CIList = []
        CIListMeaning = []
        while True:
            if ParHeader.cell_value(RI, 2) != '':
                CIList.append(ParHeader.cell_value(RI, 2))
                CIListMeaning.append(ParHeader.cell_value(RI, 3))
                RI += 1
            else:
                break

        # Re-Order indices to fit model aspect order:
        ComIList = RIList + CIList  # List of all indices, both rows and columns
        ComIList = [ComIList[i] for i in IM]

        RI = ri + 2  # row where indices start
        ValueList = []
        VIComment = []
        while True:
            if ParHeader.cell_value(RI, 4) != '':
                ValueList.append(ParHeader.cell_value(RI, 4))
                VIComment.append(ParHeader.cell_value(RI, 5))
                RI += 1
            else:
                break

        # Check whether all indices are present in the index table of the model
        if set(RIList).issubset(set(IndexTable_ClassificationNames)) is False:
            Mylog.error('CLASSIFICATION ERROR: Row index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')
        if set(CIList).issubset(set(IndexTable_ClassificationNames)) is False:
            Mylog.error('CLASSIFICATION ERROR: Column index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')

        # Determine index letters for RIList and CIList
        RIIndexLetter = []
        for m in range(0, len(RIList)):
            RIIndexLetter.append(ThisParIx[IM.index(m)])
        CIIndexLetter = []
        for m in range(0, len(CIList)):
            CIIndexLetter.append(ThisParIx[IM.index(m + len(RIList))])

        # Check how well items match between model and data, select items to import
        IndexSizesM = []  # List of dimension size for model
        for m in range(0, len(ThisParIx)):
            ThisDim = ThisParIx[m]
            ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
            if ThisDimClassificationName != ComIList[m]:
                Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
                            ThisDim + ' of parameter ' + ThisPar +
                            ' must be identical to the specified classification of the corresponding parameter dimension, which is ' +
                            ComIList[m])
                break  # Stop parsing parameter, will cause model to halt

            IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])

        # Read parameter values into array:
        Values = np.zeros((IndexSizesM))  # Array for parameter values
        # np.prod instead of np.product: np.product was removed in NumPy 2.0.
        Uncertainty = [None] * np.prod(IndexSizesM)  # parameter value uncertainties
        ValIns = np.zeros((IndexSizesM))  # Array to check how many values are actually loaded, contains 0 or 1.
        ValuesSheet = Parfile.sheet_by_name(ValueList[ThisParLayerSel[0]])
        if ParseUncertainty == True:
            if 'Dataset_Uncertainty_Sheet' in MetaData:
                UncertSheet = Parfile.sheet_by_name(MetaData['Dataset_Uncertainty_Sheet'])
    ColOffset = len(RIList)
    RowOffset = len(CIList)
    cx = 0

    TargetPos_R = []  # Determine all row target positions in data array
    for m in range(0, RowNos):
        TP_RD = []
        for mc in range(0, len(RIList)):
            try:
                CurrentItem = int(ValuesSheet.cell_value(m + RowOffset, mc))  # in case items come as int, e.g., years
            except Exception:
                CurrentItem = ValuesSheet.cell_value(m + RowOffset, mc)
            try:
                IX = ThisParIx.find(RIIndexLetter[mc])
                TPIX = IndexTable.set_index('IndexLetter').loc[RIIndexLetter[mc]].Classification.Items.index(CurrentItem)
                TP_RD.append((IX, TPIX))
            except Exception:  # item is outside model scope: mark this row as unmatched
                TP_RD.append(None)
                break
        TargetPos_R.append(TP_RD)

    TargetPos_C = []  # Determine all col target positions in data array
    for n in range(0, ColNos):
        TP_CD = []
        for mc in range(0, len(CIList)):
            try:
                CurrentItem = int(ValuesSheet.cell_value(mc, n + ColOffset))
            except Exception:
                CurrentItem = ValuesSheet.cell_value(mc, n + ColOffset)
            try:
                IX = ThisParIx.find(CIIndexLetter[mc])
                TPIX = IndexTable.set_index('IndexLetter').loc[CIIndexLetter[mc]].Classification.Items.index(CurrentItem)
                TP_CD.append((IX, TPIX))
            except Exception:  # item is outside model scope: mark this column as unmatched
                TP_CD.append(None)
                break
        TargetPos_C.append(TP_CD)

    for m in range(0, RowNos):  # Read values from excel template
        for n in range(0, ColNos):
            TargetPosition = [0 for i in range(0, len(ComIList))]
            try:
                for i in range(0, len(RIList)):
                    TargetPosition[TargetPos_R[m][i][0]] = TargetPos_R[m][i][1]
                for i in range(0, len(CIList)):
                    TargetPosition[TargetPos_C[n][i][0]] = TargetPos_C[n][i][1]
            except Exception:  # an unmatched (None) entry: flag cell as out of scope
                TargetPosition = [0]
            if len(TargetPosition) == len(ComIList):  # Read value if TargetPosition Tuple has same length as indexList
                Values[tuple(TargetPosition)] = ValuesSheet.cell_value(m + RowOffset, n + ColOffset)
                ValIns[tuple(TargetPosition)] = 1
                # Add uncertainty
                if ParseUncertainty == True:
                    if 'Dataset_Uncertainty_Global' in MetaData:
                        Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = MetaData['Dataset_Uncertainty_Global']
                    if 'Dataset_Uncertainty_Sheet' in MetaData:
                        Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = UncertSheet.cell_value(m + RowOffset, n + ColOffset)
                cx += 1

    Mylog.info('A total of ' + str(cx) + ' values was read from file for parameter ' + ThisPar + '.')
    Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar +
               ' were assigned.')
    if ParseUncertainty == True:
        return MetaData, Values, Uncertainty
    else:
        return MetaData, Values
|
|
928
|
+
|
|
929
|
+
def ReadParameterXLSX(ParPath, ThisPar, ThisParIx, IndexMatch, ThisParLayerSel, MasterClassification,
                      IndexTable, IndexTable_ClassificationNames, ScriptConfig, Mylog, ParseUncertainty):
    """
    Read a model parameter from its Excel parameter file using openpyxl (1-based cells).

    Mirrors ReadParameterV2 but for .xlsx workbooks via openpyxl: the 'Cover'
    sheet supplies metadata and the record type; data are parsed from the
    'Values_Master' sheet (record type LIST) or from one sheet per value
    layer (record type TABLE).

    :param ParPath: path of the parameter workbook, without the '.xlsx' suffix
    :param ThisPar: parameter name, used in log messages
    :param ThisParIx: string of index letters, one per model aspect of the parameter
    :param IndexMatch: string repr of a list mapping model aspect order to file index order
    :param ThisParLayerSel: list whose first entry selects the value layer sheet (TABLE only)
    :param MasterClassification: unused here, kept for interface compatibility
    :param IndexTable: model index table (pandas DataFrame with an 'IndexLetter' column)
    :param IndexTable_ClassificationNames: classification names known to the model run
    :param ScriptConfig: dict with entry 'Version of master classification'
    :param Mylog: logger for info/error/critical messages
    :param ParseUncertainty: if True, uncertainty info is parsed and returned as a third value
    :return: (MetaData, Values) or, if ParseUncertainty, (MetaData, Values, Uncertainty)
    """
    Parfile = openpyxl.load_workbook(ParPath + '.xlsx', data_only=True)
    ParHeader = Parfile['Cover']

    # NOTE(review): eval on a config string -- assumed to come from a trusted model config file.
    IM = eval(IndexMatch)  # List that matches model aspects to parameter indices

    ri = 2  # row index (openpyxl cells are 1-based)
    MetaData = {}
    while True:  # read cover sheet info
        ThisItem = ParHeader.cell(ri, 1).value
        if (ThisItem != '[Empty on purpose]' and ThisItem != 'Dataset_RecordType'):
            MetaData[ThisItem] = ParHeader.cell(ri, 2).value
            if ThisItem == 'Dataset_Unit':
                if ParHeader.cell(ri, 2).value == 'GLOBAL':
                    MetaData['Unit_Global'] = ParHeader.cell(ri, 3).value
                    MetaData['Unit_Global_Comment'] = ParHeader.cell(ri, 4).value
            if ThisItem == 'Dataset_Uncertainty':
                # if LIST is specified, nothing happens here.
                if ParHeader.cell(ri, 2).value == 'GLOBAL':
                    MetaData['Dataset_Uncertainty_Global'] = ParHeader.cell(ri, 3).value
                if ParHeader.cell(ri, 2).value == 'TABLE':
                    MetaData['Dataset_Uncertainty_Sheet'] = ParHeader.cell(ri, 3).value
            if ThisItem == 'Dataset_Comment':
                if ParHeader.cell(ri, 2).value == 'GLOBAL':
                    MetaData['Dataset_Comment_Global'] = ParHeader.cell(ri, 3).value
            ri += 1
        else:
            break  # terminate while loop when all meta information is read.
    # Now we are in the row of Dataset_RecordType

    # Check whether parameter file uses same classification:
    if ScriptConfig['Version of master classification'] != MetaData['Dataset_Classification_version_number']:
        Mylog.critical('CLASSIFICATION FILE FATAL ERROR: Classification file of parameter ' + ThisPar +
                       ' is not identical to the classification master file used for the current model run.')

    # Continue parsing until line 'Dataset_RecordType' is found:
    while True:
        ThisItem = ParHeader.cell(ri, 1).value
        if ThisItem == 'Dataset_RecordType':
            # (a leftover debug print of the record type was removed here)
            break
        else:
            ri += 1

    ### List version ###
    if ParHeader.cell(ri, 2).value == 'LIST':
        IList = []
        IListMeaning = []
        RI_Start = ri + 2
        while ParHeader.cell(RI_Start, 1).value is not None:
            IList.append(ParHeader.cell(RI_Start, 1).value)
            IListMeaning.append(ParHeader.cell(RI_Start, 2).value)
            RI_Start += 1
        # Re-Order indices to fit model aspect order:
        IList = [IList[i] for i in IM]
        IListMeaning = [IListMeaning[i] for i in IM]

        ValueList = []
        VIComment = []
        RI_Start = ri + 2
        while ParHeader.cell(RI_Start, 3).value is not None:
            ValueList.append(ParHeader.cell(RI_Start, 3).value)
            VIComment.append(ParHeader.cell(RI_Start, 4).value)
            RI_Start += 1

        # Check whether all indices are present in the index table of the model
        if set(IList).issubset(set(IndexTable_ClassificationNames)) is False:
            Mylog.error('CLASSIFICATION ERROR: Index list of data file for parameter ' + ThisPar +
                        ' contains indices that are not part of the current model run.')

        # Check how well items match between model and data, select items to import
        IndexSizesM = []  # List of dimension size for model
        for m in range(0, len(ThisParIx)):
            ThisDim = ThisParIx[m]
            # Check whether index is present in parameter file:
            ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
            if ThisDimClassificationName != IList[m]:
                Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
                            ThisDim + ' of parameter ' + ThisPar +
                            ' must be identical to the specified classification of the corresponding parameter dimension, which is ' + IList[m])
                break  # Stop parsing parameter, will cause model to halt

            IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])
        # Read parameter values into array, uncertainty into list:
        Values = np.zeros((IndexSizesM))  # Array for parameter values
        # np.prod instead of np.product: np.product was removed in NumPy 2.0.
        Uncertainty = [None] * np.prod(IndexSizesM)  # parameter value uncertainties
        ValIns = np.zeros((IndexSizesM))  # Array to check how many values are actually loaded
        ValuesSheet = Parfile['Values_Master']
        ColOffset = len(IList)
        RowOffset = 1  # fixed for this format, different quantification layers (value, error, etc.) will be read later
        cx = 0
        while True:
            if ValuesSheet.cell(cx + RowOffset + 1, ColOffset + 1).value is not None:
                CV = ValuesSheet.cell(cx + RowOffset + 1, ColOffset + 1).value
            else:
                break
            TargetPosition = []
            for mx in range(0, len(IList)):  # mx iterates over the aspects of the parameter
                CurrentItem = ValuesSheet.cell(cx + RowOffset + 1, IM[mx] + 1).value

                try:
                    TargetPosition.append(IndexTable.set_index('IndexLetter').loc[ThisParIx[mx]].Classification.Items.index(CurrentItem))
                except Exception:
                    break  # Current parameter value is not needed for model, outside scope for a certain aspect.
            if len(TargetPosition) == len(ThisParIx):
                Values[tuple(TargetPosition)] = CV
                ValIns[tuple(TargetPosition)] = 1
                Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = ValuesSheet.cell(cx + RowOffset + 1, ColOffset + 4).value
            cx += 1

        Mylog.info('A total of ' + str(cx) + ' values was read from file for parameter ' + ThisPar + '.')
        Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar + ' were assigned.')

    ### Table version ###
    if ParHeader.cell(ri, 2).value == 'TABLE':  # have 3 while loops, one for row indices, one for column indices, one for value layers
        ColNos = int(ParHeader.cell(ri, 6).value)  # Number of columns in dataset
        RowNos = int(ParHeader.cell(ri, 4).value)  # Number of rows in dataset

        RI = ri + 2  # row where indices start
        RIList = []
        RIListMeaning = []
        while ParHeader.cell(RI, 1).value is not None:
            RIList.append(ParHeader.cell(RI, 1).value)
            RIListMeaning.append(ParHeader.cell(RI, 2).value)
            RI += 1

        RI = ri + 2  # row where indices start
        CIList = []
        CIListMeaning = []
        while ParHeader.cell(RI, 3).value is not None:
            CIList.append(ParHeader.cell(RI, 3).value)
            CIListMeaning.append(ParHeader.cell(RI, 4).value)
            RI += 1

        # Re-Order indices to fit model aspect order:
        ComIList = RIList + CIList  # List of all indices, both rows and columns
        ComIList = [ComIList[i] for i in IM]

        RI = ri + 2  # row where indices start
        ValueList = []
        VIComment = []
        while ParHeader.cell(RI, 5).value is not None:
            ValueList.append(ParHeader.cell(RI, 5).value)
            VIComment.append(ParHeader.cell(RI, 6).value)
            RI += 1

        # Check whether all indices are present in the index table of the model
        if set(RIList).issubset(set(IndexTable_ClassificationNames)) is False:
            Mylog.error('CLASSIFICATION ERROR: Row index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')
        if set(CIList).issubset(set(IndexTable_ClassificationNames)) is False:
            Mylog.error('CLASSIFICATION ERROR: Column index list of data file for parameter ' + ThisPar + ' contains indices that are not part of the current model run.')

        # Determine index letters for RIList and CIList
        RIIndexLetter = []
        for m in range(0, len(RIList)):
            RIIndexLetter.append(ThisParIx[IM.index(m)])
        CIIndexLetter = []
        for m in range(0, len(CIList)):
            CIIndexLetter.append(ThisParIx[IM.index(m + len(RIList))])

        # Check how well items match between model and data, select items to import
        IndexSizesM = []  # List of dimension size for model
        for m in range(0, len(ThisParIx)):
            ThisDim = ThisParIx[m]
            ThisDimClassificationName = IndexTable.set_index('IndexLetter').loc[ThisDim].Classification.Name
            if ThisDimClassificationName != ComIList[m]:
                Mylog.error('CLASSIFICATION ERROR: Classification ' + ThisDimClassificationName + ' for aspect ' +
                            ThisDim + ' of parameter ' + ThisPar +
                            ' must be identical to the specified classification of the corresponding parameter dimension, which is ' +
                            ComIList[m])
                break  # Stop parsing parameter, will cause model to halt

            IndexSizesM.append(IndexTable.set_index('IndexLetter').loc[ThisDim]['IndexSize'])

        # Read parameter values into array:
        Values = np.zeros((IndexSizesM))  # Array for parameter values
        # np.prod instead of np.product: np.product was removed in NumPy 2.0.
        Uncertainty = [None] * np.prod(IndexSizesM)  # parameter value uncertainties
        ValIns = np.zeros((IndexSizesM))  # Array to check how many values are actually loaded, contains 0 or 1.
        ValuesSheet = Parfile[ValueList[ThisParLayerSel[0]]]
        if ParseUncertainty == True:
            if 'Dataset_Uncertainty_Sheet' in MetaData:
                UncertSheet = Parfile[MetaData['Dataset_Uncertainty_Sheet']]
        ColOffset = len(RIList)
        RowOffset = len(CIList)
        cx = 0

        TargetPos_R = []  # Determine all row target positions in data array
        for m in range(0, RowNos):
            TP_RD = []
            for mc in range(0, len(RIList)):
                try:
                    CurrentItem = int(ValuesSheet.cell(m + RowOffset + 1, mc + 1).value)  # in case items come as int, e.g., years
                except Exception:
                    CurrentItem = ValuesSheet.cell(m + RowOffset + 1, mc + 1).value
                try:
                    IX = ThisParIx.find(RIIndexLetter[mc])
                    TPIX = IndexTable.set_index('IndexLetter').loc[RIIndexLetter[mc]].Classification.Items.index(CurrentItem)
                    TP_RD.append((IX, TPIX))
                except Exception:  # item is outside model scope: mark this row as unmatched
                    TP_RD.append(None)
                    break
            TargetPos_R.append(TP_RD)

        TargetPos_C = []  # Determine all col target positions in data array
        for n in range(0, ColNos):
            TP_CD = []
            for mc in range(0, len(CIList)):
                try:
                    CurrentItem = int(ValuesSheet.cell(mc + 1, n + ColOffset + 1).value)
                except Exception:
                    CurrentItem = ValuesSheet.cell(mc + 1, n + ColOffset + 1).value
                try:
                    IX = ThisParIx.find(CIIndexLetter[mc])
                    TPIX = IndexTable.set_index('IndexLetter').loc[CIIndexLetter[mc]].Classification.Items.index(CurrentItem)
                    TP_CD.append((IX, TPIX))
                except Exception:  # item is outside model scope: mark this column as unmatched
                    TP_CD.append(None)
                    break
            TargetPos_C.append(TP_CD)

        for m in range(0, RowNos):  # Read values from excel template
            for n in range(0, ColNos):
                TargetPosition = [0 for i in range(0, len(ComIList))]
                try:
                    for i in range(0, len(RIList)):
                        TargetPosition[TargetPos_R[m][i][0]] = TargetPos_R[m][i][1]
                    for i in range(0, len(CIList)):
                        TargetPosition[TargetPos_C[n][i][0]] = TargetPos_C[n][i][1]
                except Exception:  # an unmatched (None) entry: flag cell as out of scope
                    TargetPosition = [0]
                if len(TargetPosition) == len(ComIList):  # Read value if TargetPosition Tuple has same length as indexList
                    Values[tuple(TargetPosition)] = ValuesSheet.cell(m + RowOffset + 1, n + ColOffset + 1).value
                    ValIns[tuple(TargetPosition)] = 1
                    # Add uncertainty
                    if ParseUncertainty == True:
                        if 'Dataset_Uncertainty_Global' in MetaData:
                            Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = MetaData['Dataset_Uncertainty_Global']
                        if 'Dataset_Uncertainty_Sheet' in MetaData:
                            # BUGFIX: UncertSheet is an openpyxl worksheet, which has .cell(...).value,
                            # not xlrd's .cell_value(...) -- the original raised AttributeError here.
                            Uncertainty[Tuple_MI(TargetPosition, IndexSizesM)] = UncertSheet.cell(m + RowOffset + 1, n + ColOffset + 1).value
                    cx += 1

        Mylog.info('A total of ' + str(cx) + ' values was read from file for parameter ' + ThisPar + '.')
        Mylog.info(str(ValIns.sum()) + ' of ' + str(np.prod(IndexSizesM)) + ' values for parameter ' + ThisPar +
                   ' were assigned.')
    if ParseUncertainty == True:
        return MetaData, Values, Uncertainty
    else:
        return MetaData, Values
|
|
1193
|
+
|
|
1194
|
+
|
|
1195
|
+
def ExcelSheetFill(Workbook, Sheetname, values, topcornerlabel=None,
                   rowlabels=None, collabels=None, Style=None,
                   rowselect=None, colselect=None):
    """
    Write a 2D value array into a new sheet of an xlwt Workbook.

    Optionally writes a top-corner label, row labels (column 0), and column
    labels (row 0). ``rowselect``/``colselect`` are 0/1 masks over the rows
    and columns of ``values``; only entries flagged 1 are exported, packed
    contiguously. ``Style`` is applied to labels only, never to values.
    """
    sheet = Workbook.add_sheet(Sheetname)

    def _write_label(r, c, text):
        # Labels honour the optional cell style; value cells do not.
        if Style is None:
            sheet.write(r, c, label=text)
        else:
            sheet.write(r, c, label=text, style=Style)

    if topcornerlabel is not None:
        _write_label(0, 0, topcornerlabel)

    # Default selection masks include every row / column.
    if rowselect is None:
        rowselect = np.ones((values.shape[0]))
    if colselect is None:
        colselect = np.ones((values.shape[1]))

    if rowlabels is not None:
        next_row = 1
        for idx, text in enumerate(rowlabels):
            if rowselect[idx] == 1:
                _write_label(next_row, 0, text)
                next_row += 1

    if collabels is not None:
        next_col = 1
        for idx, text in enumerate(collabels):
            if colselect[idx] == 1:
                _write_label(0, next_col, text)
                next_col += 1

    # Export the selected values, packed into consecutive cells from (1, 1).
    out_row = 1
    for m in range(values.shape[0]):
        if rowselect[m] != 1:
            continue
        out_col = 1
        for n in range(values.shape[1]):
            if colselect[n] == 1:
                sheet.write(out_row, out_col, label=values[m, n])
                out_col += 1
        out_row += 1
|
|
1236
|
+
|
|
1237
|
+
def ExcelExportAdd_tAB(Sheet,Data,rowoffset,coloffset,IName,UName,RName,FName,REName,ALabels,BLabels):
    """
    Export a 3D array with aspects time, A, and B to a given excel sheet (xlwt/xlrd style).

    Same as xlsxExportAdd_tAB but for xls sheets written cell-by-cell via Sheet.write.
    One output row is produced per (A, B) combination: columns 0-4 hold IName
    (indicator), UName (unit), RName (region), FName (figure where data are used)
    and REName (resource efficiency scenario), columns 5-6 hold the A and B labels,
    and the time series starts at coloffset.

    Meant to be called repeatedly: the incoming rowoffset is advanced by one per
    written row and the new offset is returned for the next call.
    """
    for a_pos, a_label in enumerate(ALabels):
        for b_pos, b_label in enumerate(BLabels):
            header = (IName, UName, RName, FName, REName, a_label, b_label)
            for col, text in enumerate(header):
                Sheet.write(rowoffset, col, label=text)
            for t_pos in range(Data.shape[0]):
                Sheet.write(rowoffset, coloffset + t_pos, label=Data[t_pos, a_pos, b_pos])
            rowoffset += 1

    return rowoffset
|
|
1261
|
+
|
|
1262
|
+
def xlsxExportAdd_tAB(Sheet,Data,rowoffset,coloffset,IName,UName,RName,FName,REName,ALabels,BLabels):
    """
    Export a 3D array with aspects time, A, and B to a given excel sheet.

    Counterpart of ExcelExportAdd_tAB, but for xlsx files written with
    openpyxl's 1-based ``Sheet.cell(row=..., column=...).value`` API.
    The t dimension is exported along one row; the A and B dimensions become
    one row per (A, B) combination. Each row starts with IName (indicator),
    UName (unit), RName (region), FName (figure where data are used),
    REName (resource efficiency scenario), then the A and B labels, and from
    coloffset onwards the time series values.
    Meant to be called repeatedly: the incremented rowoffset is returned so
    the next call can continue below the rows written here.
    """
    for a_pos in range(len(ALabels)):
        for b_pos in range(len(BLabels)):
            # Fixed metadata in columns 1-7 for this (A, B) combination:
            header = [IName, UName, RName, FName, REName, ALabels[a_pos], BLabels[b_pos]]
            for col_num, entry in enumerate(header, start=1):
                Sheet.cell(row=rowoffset, column=col_num).value = entry
            # Time series values; +1 converts the 0-based offset to
            # openpyxl's 1-based column numbering.
            for t in range(Data.shape[0]):
                Sheet.cell(row=rowoffset, column=coloffset + t + 1).value = Data[t, a_pos, b_pos]
            rowoffset += 1

    return rowoffset
|
|
1286
|
+
|
|
1287
|
+
def convert_log(file, file_format='html'):
    """
    Converts the log file to a given file format via pypandoc.

    The output file is written next to the input file, with the extension
    replaced by the target format.

    :param file: The filename and path of the source log file
    :param file_format: The desired output format (default: 'html')
    :raises AssertionError: if pypandoc returns non-empty output, which means
        the converted document was not written to the output file as expected.
    """
    output_filename = os.path.splitext(file)[0] + '.' + file_format
    output = pypandoc.convert_file(file, file_format, outputfile=output_filename)
    # Explicit raise instead of a bare `assert`: asserts are silently
    # stripped when Python runs with optimizations (-O), which would let a
    # failed conversion pass unnoticed. AssertionError is kept so any
    # existing callers catching it still work.
    if output != "":
        raise AssertionError(
            "pypandoc.convert_file returned unexpected non-empty output: %r" % (output,)
        )
|
|
1297
|
+
|
|
1298
|
+
# The End
|
|
1299
|
+
|