gimu 0.4.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gimu/__init__.py +0 -0
- gimu/config.py +82 -0
- gimu/easy_date.py +209 -0
- gimu/geo_common.py +222 -0
- gimu/gmf/__init__.py +0 -0
- gimu/gmf/data.py +118 -0
- gimu/gmf/modifier.py +129 -0
- gimu/project_cli.py +56 -0
- gimu/save2incon.py +30 -0
- gimu/t2listingh5.py +375 -0
- gimu/waiwera_copy.py +43 -0
- gimu/waiwera_listing.py +587 -0
- gimu-0.4.0.dist-info/METADATA +86 -0
- gimu-0.4.0.dist-info/RECORD +17 -0
- gimu-0.4.0.dist-info/WHEEL +4 -0
- gimu-0.4.0.dist-info/entry_points.txt +4 -0
- gimu-0.4.0.dist-info/licenses/LICENSE.txt +9 -0
gimu/gmf/modifier.py
ADDED
|
@@ -0,0 +1,129 @@
|
|
|
1
|
+
""" handle GMF's modifier files that update model inputs with meta- parameters
|
|
2
|
+
|
|
3
|
+
- reads rocktype JSON and update model input
|
|
4
|
+
- reads upflow JSON and update model input
|
|
5
|
+
"""
|
|
6
|
+
|
|
7
|
+
import json
|
|
8
|
+
import os
|
|
9
|
+
import sys
|
|
10
|
+
|
|
11
|
+
def compute_rocktypes(rocktype_json, warn=True):
    """ compute the derived rocktype properties and update in place

    Rocktypes without a 'base' entry are base rocktypes and are left
    untouched.  Derived rocktypes carry a 'base' entry plus a 'modifier'
    dict, and get their 'permeability' and 'porosity' recomputed from the
    base rocktype's values.

    if warn, will raise exception when rules are not followed:
        - along modifier >= 1.0
        - across modifier <= 1.0
    otherwise rule violations are only printed as warnings.

    Example entry in rocktype_json:
    - base rocktype
        "A0000": {
            "permeability": [1e-11, 1e-11, 1e-12],
            "porosity": [0.25]
        },

    - fault modified rocktype ('fault direction' selects which horizontal
      axis the 'along' modifier applies to; the other gets 'across')
        "AA000": {
            "base": ["A0000"],
            "fault direction": [1],
            "modifier": {
                "along": [1], "across": [1], "vertical": [1], "porosity": [1]
            },
            "permeability": [1e-11, 1e-11, 1e-12],
            "porosity": [0.25]
        },

    - fault intersection rocktype (per-axis k1/k2/k3 modifiers)
        "SVU11": {
            "base": ["S0011"],
            "modifier": {
                "k1": [2.5], "k2": [0.4], "k3": [5], "porosity": [1]
            },
            "permeability": [...],
            "porosity": [0.1]
        }
    """
    def report(msg):
        # shared warn-or-raise behaviour for rule violations
        if warn:
            raise Exception(msg)
        print("Warning: " + msg)

    for rt_name, rt in rocktype_json.items():
        if 'base' not in rt:
            # base rocktype: nothing to derive
            continue
        mod = rt['modifier']
        if 'fault direction' in rt:
            fault_dir = rt['fault direction'][0]
            # all modifier values go through float() so JSON strings work too
            along = float(mod['along'][0])
            across = float(mod['across'][0])
            vertical = float(mod['vertical'][0])
            if along < 1.0:
                report(f"Rocktype {rt_name} has fault modifier 'along' < 1.0")
            if across > 1.0:
                report(f"Rocktype {rt_name} has fault modifier 'across' > 1.0")
            if fault_dir == 1:
                k_modifier = [along, across, vertical]
            elif fault_dir == 2:
                k_modifier = [across, along, vertical]
            else:
                raise Exception(f"Fault direction {fault_dir} not 1 or 2 in rocktype {rt_name}")
        else:
            k_modifier = [
                float(mod['k1'][0]),
                float(mod['k2'][0]),
                float(mod['k3'][0]),
            ]
        # update permeability/porosity in place from the base rocktype
        base_name = rt['base'][0]
        if base_name in rocktype_json:
            base = rocktype_json[base_name]
            rt['permeability'] = [k * m for k, m in zip(base['permeability'], k_modifier)]
            # was not float()-converted before, unlike the permeability modifiers
            rt['porosity'][0] = base['porosity'][0] * float(mod['porosity'][0])
        else:
            msg = f"Base rocktype {base_name} not found for rocktype {rt_name}"
            if warn:
                raise Exception(msg)
            print("Warning: " + msg)
            rt['error'] = msg
|
|
100
|
+
|
|
101
|
+
|
|
102
|
+
def update_waiwera_model_rocktypes(wjson, rtjson, skip_missing=True):
    """ Update the rock types of a Waiwera model (wjson) in place.

    rtjson is loaded from the GMF modifier _rocktypes.json; permeability and
    porosity of each matching rock type are overwritten.  Missing rock types
    are skipped with a message, or raise if skip_missing is False.
    """
    for rock in wjson['rock']['types']:
        name = rock['name']
        if name in rtjson:
            entry = rtjson[name]
            rock['permeability'] = entry['permeability']
            rock['porosity'] = entry['porosity'][0]
        elif skip_missing:
            print(f"Rocktype {name} not found in _rocktypes.json... skipped")
        else:
            raise Exception(f"Rocktype {name} not found in _rocktypes.json")
|
|
115
|
+
|
|
116
|
+
def update_aut2_model_rocktypes(dat, rtjson, skip_missing=True):
    """ Update the rock types of an AUTOUGH2 model (PyTOUGH t2data) in place.

    rtjson is loaded from the GMF modifier _rocktypes.json; permeability and
    porosity of each matching rocktype are overwritten.  Missing rocktypes
    are skipped with a message, or raise if skip_missing is False.
    """
    for rocktype in dat.grid.rocktypelist:
        name = rocktype.name
        if name not in rtjson:
            if not skip_missing:
                raise Exception(f"Rocktype {name} not found in _rocktypes.json")
            print(f"Rocktype {name} not found in _rocktypes.json... skipped")
            continue
        entry = rtjson[name]
        rocktype.permeability = entry['permeability']
        rocktype.porosity = entry['porosity'][0]
|
|
129
|
+
|
gimu/project_cli.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
1
|
+
import importlib.metadata
|
|
2
|
+
from pathlib import Path
|
|
3
|
+
from pprint import pprint
|
|
4
|
+
import sys
|
|
5
|
+
|
|
6
|
+
try:
|
|
7
|
+
import tomllib # Python ≥3.11
|
|
8
|
+
except ImportError:
|
|
9
|
+
import tomli as tomllib # Python <3.11
|
|
10
|
+
|
|
11
|
+
def get_distribution():
    """ Return the installed distribution owning this package, or None.

    The top-level package name is taken from __package__; None is returned
    when running outside a package or when no matching distribution is
    installed.
    """
    if not __package__:
        return None
    top_level = __package__.partition('.')[0]
    try:
        return importlib.metadata.distribution(top_level)
    except importlib.metadata.PackageNotFoundError:
        return None
|
|
19
|
+
|
|
20
|
+
def get_project_meta():
    """ return pyproject.toml [project], or None if no pyproject.toml found

    Walks up from this file's directory looking for a pyproject.toml —
    useful when running from a source checkout rather than an installed
    package.
    """
    current_file = Path(__file__).resolve()
    # .parents already starts at the containing directory, so there is no
    # need to prepend current_file.parent (the old code checked the first
    # directory twice)
    for parent in current_file.parents:
        pyproject = parent / "pyproject.toml"
        if pyproject.exists():
            with open(pyproject, "rb") as f:
                data = tomllib.load(f)
            return data.get("project", {})
    return None
|
|
30
|
+
|
|
31
|
+
def show_help():
    """ print package name, version, description and available commands

    Metadata and entry points are read from the installed distribution; if
    the package is not installed (e.g. running straight from a source tree)
    a short notice is printed instead of crashing on the missing metadata.
    """
    dist = get_distribution()
    if dist is None:
        # previously this fell through and raised AttributeError on None
        print("Package metadata not found; is the package installed?")
        return
    name = dist.metadata['Name']
    version = dist.metadata['Version']
    description = dist.metadata['Summary']
    commands = [ep.name for ep in dist.entry_points]

    msg = '\n'.join([
        "",
        f" {name} - {description}",
        "",
        f" Version: {version}",
        "",
        " Available commands:",
    ] + [" " + cmd for cmd in sorted(commands)] + [
        "",
        " e.g.:",
        " save2incon model_1.save model_2.incon",
        "",
    ])
    print(msg)
|
|
56
|
+
|
gimu/save2incon.py
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
1
|
+
""" use: save2incon a.save b.incon [-reset_kcyc] """
|
|
2
|
+
|
|
3
|
+
from sys import *
|
|
4
|
+
from t2incons import *
|
|
5
|
+
|
|
6
|
+
def main():
    """ Convert a save file into an initial conditions file.

    use: save2incon a.save b.incon [-reset_kcyc] [-reset_porosity]

    -reset_kcyc      set the kcyc/iter timing counters back to 1
    -reset_porosity  clear the porosity values (inc.porosity = None)
    """
    # both positional arguments (a.save and b.incon) are required, so argv
    # needs at least 3 entries; the old check (len(argv) < 2) let a single
    # argument through and crashed with IndexError on argv[2]
    if len(argv) < 3:
        print('use: save2incon a.save b.incon [-reset_kcyc] [-reset_porosity]')
        exit(1)

    read_from = argv[1]
    save_to = argv[2]
    opts = argv[3:]  # slicing past the end just yields [], no need to guard

    inc = t2incon(read_from)

    for opt in opts:
        if opt == '-reset_kcyc':
            inc.timing['kcyc'] = 1
            inc.timing['iter'] = 1
        if opt == '-reset_porosity':
            inc.porosity = None

    inc.write(save_to)
|
|
28
|
+
|
|
29
|
+
# allow running this module directly as a script
if __name__ == '__main__':
    main()
|
gimu/t2listingh5.py
ADDED
|
@@ -0,0 +1,375 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Copyright 2013, 2014 University of Auckland.
|
|
3
|
+
|
|
4
|
+
This file is part of TIM (Tim Isn't Mulgraph).
|
|
5
|
+
|
|
6
|
+
TIM is free software: you can redistribute it and/or modify
|
|
7
|
+
it under the terms of the GNU General Public License as published by
|
|
8
|
+
the Free Software Foundation, either version 3 of the License, or
|
|
9
|
+
(at your option) any later version.
|
|
10
|
+
|
|
11
|
+
TIM is distributed in the hope that it will be useful,
|
|
12
|
+
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
13
|
+
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
14
|
+
GNU General Public License for more details.
|
|
15
|
+
|
|
16
|
+
You should have received a copy of the GNU General Public License
|
|
17
|
+
along with TIM. If not, see <http://www.gnu.org/licenses/>.
|
|
18
|
+
"""
|
|
19
|
+
|
|
20
|
+
"""
|
|
21
|
+
Wrap AUTOUGH2's hdf5 output as t2listing
|
|
22
|
+
"""
|
|
23
|
+
|
|
24
|
+
import h5py
|
|
25
|
+
import numpy as np
|
|
26
|
+
|
|
27
|
+
from t2listing import *
|
|
28
|
+
# from t2listing import listingtable
|
|
29
|
+
from mulgrids import fix_blockname, unfix_blockname
|
|
30
|
+
|
|
31
|
+
from pprint import pprint as pp
|
|
32
|
+
import unittest
|
|
33
|
+
|
|
34
|
+
class h5table(listingtable):
    """ Class emulating the listingtable class in PyTOUGH.

    Class for table in listing file, with values addressable by index
    (0-based) or row name, and column name: e.g. table[i] returns the ith
    row (as a dictionary), table[rowname] returns the row with the
    specified name, and table[colname] returns the column with the
    specified name.

    Values are read directly from the open HDF5 dataset (addressed as
    [time index, row index, column index]) rather than copied into memory.

    !!! IMPORTANT !!!
    .index needs to be set whenever listing object changed time index
    """
    def __init__(self, cols, rows, h5_table,
                 num_keys=1, allow_reverse_keys=False,
                 index=0):
        """ cols and rows are the column and row (key) names; h5_table is
        the table dataset within the h5 file; index is the initial time
        index.  (The old docstring described row_format/row_line parameters
        that this class does not have.)
        """
        self.column_name = cols
        self.row_name = rows
        self.num_keys = num_keys
        self.allow_reverse_keys = allow_reverse_keys
        self._col = dict([(c, i) for i, c in enumerate(cols)])
        self._row = dict([(r, i) for i, r in enumerate(rows)])
        self._h5_table = h5_table
        self._index = index  # time index

    def __repr__(self):
        # h5 table lst._h5['element'][time index, eleme index, field index]
        return repr(self.column_name) + '\n' + repr(self._h5_table[self._index, :, :])

    def __getitem__(self, key):
        """ int key -> row dict; column name -> value array; row name ->
        row dict; reversed key (values negated) for two-key tables when
        allow_reverse_keys is set.  Returns None for an unknown key. """
        if isinstance(key, int):
            return dict(zip(['key'] + self.column_name, [self.row_name[key]] +
                            list(self._h5_table[self._index, key, :])))
        else:
            if key in self.column_name:
                return self._h5_table[self._index, :, self._col[key]]
            elif key in self.row_name:
                rowindex = self._row[key]
                return dict(zip(['key'] + self.column_name,
                                [self.row_name[rowindex]] +
                                list(self._h5_table[self._index, rowindex, :])))
            elif len(key) > 1 and self.allow_reverse_keys:
                revkey = key[::-1]  # try reversed key for multi-key tables
                if revkey in self.row_name:
                    rowindex = self._row[revkey]
                    return dict(zip(['key'] + self.column_name,
                                    [self.row_name[rowindex][::-1]] +
                                    list(-self._h5_table[self._index, rowindex, :])))
                else:
                    return None

    def __add__(self, other):
        """ Adding h5-backed tables is not supported (the listingtable
        implementation relied on an in-memory _data array this class does
        not keep); the unreachable code after the raise was removed. """
        raise NotImplementedError

    def __sub__(self, other):
        """ Subtracting h5-backed tables is not supported; see __add__. """
        raise NotImplementedError
|
|
113
|
+
|
|
114
|
+
|
|
115
|
+
class t2listingh5(object):
    """ Wrap AUTOUGH2's HDF5 output file with a t2listing-like interface.

    Result tables ('element', 'connection', 'generation') are exposed both
    via self._table and as attributes (self.element etc.); setting .index
    (or .time) selects which set of full results the tables address.
    """
    def __init__(self, filename):
        """ Open the HDF5 file read-only and set up the tables. """
        self._table = {}
        self._h5 = h5py.File(filename, 'r')
        self.filename = filename
        self.setup()
        self.simulator = 'AUTOUGH2_H5'

    def setup(self):
        """ Read the times dataset and build an h5table for each table
        present in the file. """
        self.fulltimes = self._h5['fulltimes']['TIME']
        self.num_fulltimes = len(self.fulltimes)
        self._index = 0  # this is the internal one
        ### element table
        if 'element' in self._h5:
            cols = [str(x.decode("utf-8")) for x in self._h5['element_fields']]
            blocks = [fix_blockname(str(x.decode("utf-8"))) for x in self._h5['element_names']]
            table = h5table(cols, blocks, self._h5['element'], num_keys=1)
            self._table['element'] = table
        ### connection table
        if 'connection' in self._h5:
            cols = [str(x.decode("utf-8")) for x in self._h5['connection_fields'][:]]
            b1 = [fix_blockname(str(x.decode("utf-8"))) for x in self._h5['connection_names1'][:]]
            b2 = [fix_blockname(str(x.decode("utf-8"))) for x in self._h5['connection_names2'][:]]
            table = h5table(cols, [(x, y) for x, y in zip(b1, b2)],
                            self._h5['connection'], num_keys=2,
                            allow_reverse_keys=True)
            self._table['connection'] = table
        ### generation table
        if 'generation' in self._h5:
            cols = [str(x.decode("utf-8")) for x in self._h5['generation_fields'][:]]
            blocks = [fix_blockname(str(x.decode("utf-8"))) for x in self._h5['generation_eleme'][:]]
            geners = [fix_blockname(str(x.decode("utf-8"))) for x in self._h5['generation_names'][:]]
            table = h5table(cols, [(b, g) for b, g in zip(blocks, geners)],
                            self._h5['generation'], num_keys=1)
            self._table['generation'] = table
        # makes tables in self._table accessible as attributes
        for key, table in self._table.items():
            setattr(self, key, table)
        # get the first table ready (the property setter runs read_tables)
        self.index = 0

    def history(self, selection, short=False, start_datetime=None):
        """ Return (times, values) series for each (table, key, field)
        tuple in selection; a single tuple may be passed instead of a list.

        short and start_datetime are accepted for t2listing compatibility
        but not used at the moment.
        """
        if isinstance(selection, tuple):
            selection = [selection]
        results = []
        for tbl, b, cname in selection:
            table_name, bi, fieldi = self.selection_index(tbl, b, cname)
            ### important to convert cell index: h5 fancy indexing needs a
            # non-negative row index.  Convert using the row count of the
            # table actually being read (the old code always used the
            # element count, which is wrong for connection/generation).
            if bi < 0:
                bi += self._h5[table_name].shape[1]
            ys = self._h5[table_name][:, bi, fieldi]
            results.append((self.fulltimes, ys))
        if len(results) == 1: results = results[0]
        return results

    def selection_index(self, tbl, b, field):
        """ Resolve a history selection (table letter 'e'/'c'/'g', key,
        field name) into (h5 table name, row index, field index). """
        dname = {
            'e': 'element',
            'c': 'connection',
            'g': 'generation',
        }
        def eleme_index(b):
            # element key: block name (str) or row index (int).
            # (A Python-2 `unicode` branch was removed here: on Python 3 it
            # raised NameError instead of the intended Exception.)
            if isinstance(b, str):
                bi = self.block_name_index[b]
            elif isinstance(b, int):
                bi = b
            else:
                raise Exception('.history() block must be an int or str: %s (%s)' % (str(b), str(type(b))))
            return bi
        def conne_index(b):
            # connection key: (block1, block2) tuple or row index (int)
            if isinstance(b, tuple):
                bi = self.connection_name_index[(str(b[0]), str(b[1]))]
            elif isinstance(b, int):
                bi = b
            else:
                raise Exception('.history() conne must be an int or (str,str): %s (%s)' % (str(b), str(type(b))))
            return bi
        def gener_index(b):
            # generation key: (block name, gener name) tuple or row index (int)
            if isinstance(b, tuple):
                bi = self.generation_name_index[(str(b[0]), str(b[1]))]
            elif isinstance(b, int):
                bi = b
            else:
                raise Exception('.history() gener must be an int or (str,str): %s (%s)' % (str(b), str(type(b))))
            return bi
        iname = {
            'e': eleme_index,
            'c': conne_index,
            'g': gener_index,
        }
        # lazily build the (table letter, field name) -> column index map
        if not hasattr(self, 'field_index'):
            self.field_index = {}
            for n, nn in dname.items():
                for i, ff in enumerate(self._h5[nn + '_fields']):
                    self.field_index[(n, ff.decode("utf-8"))] = i
        return dname[tbl], iname[tbl](b), self.field_index[(tbl, field)]

    @property
    def block_name_index(self):
        # lazily built dict: fixed block name -> element table row index
        if not hasattr(self, '_block_name_index'):
            self._block_name_index = {
                fix_blockname(str(e.decode("utf-8"))): i
                for i, e in enumerate(self._h5['element_names'])}
        return self._block_name_index

    @property
    def connection_name_index(self):
        # lazily built dict: (block1, block2) -> connection table row index
        if not hasattr(self, '_connection_name_index'):
            a = self._h5['connection_names1']
            b = self._h5['connection_names2']
            self._connection_name_index = {
                (fix_blockname(str(x[0].decode("utf-8"))),
                 fix_blockname(str(x[1].decode("utf-8")))): i
                for i, x in enumerate(zip(a, b))}
        return self._connection_name_index

    @property
    def generation_name_index(self):
        # lazily built dict: (block name, gener name) -> generation row index
        if not hasattr(self, '_generation_name_index'):
            a = self._h5['generation_eleme']
            b = self._h5['generation_names']
            self._generation_name_index = {
                (fix_blockname(str(x[0].decode("utf-8"))),
                 fix_blockname(str(x[1].decode("utf-8")))): i
                for i, x in enumerate(zip(a, b))}
        return self._generation_name_index

    def read_tables(self):
        """ Point each table at the current time index; the data itself
        stays in the h5 file (no copying needed). """
        if 'element' in self.table_names:
            self.element._index = self.index
        if 'connection' in self.table_names:
            self.connection._index = self.index
        if 'generation' in self.table_names:
            self.generation._index = self.index

    def get_index(self): return self._index
    def set_index(self, i):
        self._index = i
        if self._index < 0: self._index += self.num_fulltimes
        self.read_tables()
    index = property(get_index, set_index)

    def first(self): self.index = 0
    def last(self): self.index = -1
    def next(self):
        """Find and read next set of results; returns false if at end of listing"""
        more = self.index < self.num_fulltimes - 1
        if more: self.index += 1
        return more
    def prev(self):
        """Find and read previous set of results; returns false if at start of listing"""
        more = self.index > 0
        if more: self.index -= 1
        return more

    def get_table_names(self):
        return sorted(self._table.keys())
    table_names = property(get_table_names)

    def get_time(self): return self.fulltimes[self.index]
    def set_time(self, t):
        """ Move to the result set whose time is closest to t, clamped to
        the available time range. """
        if t < self.fulltimes[0]: self.index = 0
        elif t > self.fulltimes[-1]: self.index = -1
        else:
            dt = np.abs(self.fulltimes - t)
            self.index = np.argmin(dt)
    time = property(get_time, set_time)
|
|
294
|
+
|
|
295
|
+
|
|
296
|
+
class test_fivespot(unittest.TestCase):
    """ Compare the h5-backed listing against a reference text listing of
    the same fivespot run (requires fivespot.h5 and expected.listing in the
    working directory). """
    def setUp(self):
        self.lst_h = t2listingh5('fivespot.h5')
        self.lst_t = t2listing('expected.listing')

    def test_match_tables(self):
        # check row and column names
        def check_names(tbl):
            tbl_h = getattr(self.lst_h, tbl)
            tbl_t = getattr(self.lst_t, tbl)
            self.assertEqual(tbl_h.row_name, tbl_t.row_name)
            # only require the text-listing name to be contained in the h5
            # field name (the h5 names can be longer)
            for i, field in enumerate(tbl_h.column_name):
                match = tbl_t.column_name[i] in field
                self.assertEqual(match, True, '%s: column name mismatch' % tbl)
        for tbl in ['element', 'connection', 'generation']:
            check_names(tbl)
        # check table values, after change index also
        def check_tables():
            rtol = 1.0e-5  # roughly 4~5 significant digits from text listing file
            for field in ['Temperature', 'Pressure', 'Vapour saturation']:
                np.testing.assert_allclose(self.lst_h.element[field],
                                           self.lst_t.element[field], rtol=rtol)
            for field in ['Mass flow', 'Enthalpy', 'Heat flow']:
                np.testing.assert_allclose(self.lst_h.connection[field],
                                           self.lst_t.connection[field], rtol=rtol)
            for field in ['Generation rate', 'Enthalpy']:
                np.testing.assert_allclose(self.lst_h.generation[field],
                                           self.lst_t.generation[field], rtol=rtol)
        check_tables()
        self.lst_h.last(); self.lst_t.last()
        check_tables()
        self.lst_h.first(); self.lst_t.first()
        check_tables()

        # check table with element index
        def check_table_by_index(i):
            row_h = self.lst_h.element[i]
            row_t = self.lst_t.element[i]
            for k in row_h.keys():
                if k == 'key':
                    self.assertEqual(row_h[k], row_t[k])
                else:
                    np.testing.assert_approx_equal(row_h[k], row_t[k], significant=5)
        for i in range(len(self.lst_h.element.row_name)):
            check_table_by_index(i)

        # check table with element name
        def check_table_by_name(b):
            row_h = self.lst_h.element[b]
            row_t = self.lst_t.element[b]
            for k in row_h.keys():
                if k == 'key':
                    self.assertEqual(row_h[k], row_t[k])
                else:
                    np.testing.assert_approx_equal(row_h[k], row_t[k], significant=5)
        for b in self.lst_h.element.row_name:
            # fixed: this loop previously called check_table_by_index(b),
            # so the by-name helper was never exercised
            check_table_by_name(b)

    def test_match_history(self):
        self.assertEqual(self.lst_h.num_fulltimes, self.lst_t.num_fulltimes)
        np.testing.assert_allclose(self.lst_h.fulltimes, self.lst_t.fulltimes)
        rtol = 1.0e-5
        # seems all field names are identical with fivespot's EOS
        for sel in [('e', 'AA106', 'Pressure'),
                    ('e', 'AA 66', 'Temperature'),
                    ('c', ('AA 66', 'AA 67'), 'Mass flow'),
                    ('g', ('AA 11', 'PRO 1'), 'Generation rate'),
                    ('g', ('AA 11', 'PRO 1'), 'Enthalpy'),
                    ]:
            xs_h, ys_h = self.lst_h.history(sel)
            xs_t, ys_t = self.lst_t.history(sel)
            np.testing.assert_allclose(xs_h, xs_t, rtol=rtol)
            np.testing.assert_allclose(ys_h, ys_t, rtol=rtol)
|
|
372
|
+
|
|
373
|
+
|
|
374
|
+
# run the unit tests when executed directly as a script
if __name__ == '__main__':
    unittest.main(verbosity=2)
|
gimu/waiwera_copy.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
1
|
+
|
|
2
|
+
import json
|
|
3
|
+
import sys
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
|
|
6
|
+
def main():
    """
    Duplicates a JSON file, adding a postfix to the filename and updating
    an internal value (the model's ['output']['filename'] h5 path gets the
    same postfix).
    """
    if len(sys.argv) != 3:
        print("Usage: make_waiwera_copy <input.json> <postfix>")
        sys.exit(1)

    src = Path(sys.argv[1])
    postfix = sys.argv[2]

    # refuse anything that is not an existing .json file
    if not (src.exists() and src.suffix == '.json'):
        print(f"Error: Input file '{src}' does not exist or is not a .json file.")
        sys.exit(1)

    # destination sits next to the source: stem + postfix + extension
    dest = src.with_name(src.stem + postfix + src.suffix)

    data = json.loads(src.read_text())

    # point the copied model's output at a matching postfixed h5 file
    if "output" in data and "filename" in data["output"]:
        h5_path = Path(data["output"]["filename"])
        data["output"]["filename"] = h5_path.stem + postfix + h5_path.suffix
    else:
        print("Warning: ['output']['filename'] key not found in the JSON file. No change made to content.")

    with open(dest, 'w') as f:
        json.dump(data, f, indent=2)

    print(f"Successfully created '{dest}'")
|
|
41
|
+
|
|
42
|
+
# allow running this module directly as a script
if __name__ == "__main__":
    main()
|