res2df 1.3.6__py3-none-any.whl → 1.3.8__py3-none-any.whl
- res2df/__init__.py +2 -3
- res2df/common.py +79 -75
- res2df/compdat.py +27 -32
- res2df/csv2res.py +5 -9
- res2df/equil.py +24 -29
- res2df/faults.py +2 -7
- res2df/fipreports.py +10 -14
- res2df/grid.py +61 -68
- res2df/gruptree.py +33 -35
- res2df/inferdims.py +6 -9
- res2df/nnc.py +9 -13
- res2df/opmkeywords/__init__.py +0 -0
- res2df/parameters.py +12 -12
- res2df/pillars.py +24 -31
- res2df/pvt.py +29 -34
- res2df/res2csv.py +10 -15
- res2df/res2csvlogger.py +1 -3
- res2df/resdatafiles.py +8 -8
- res2df/rft.py +36 -42
- res2df/satfunc.py +22 -28
- res2df/summary.py +57 -60
- res2df/trans.py +16 -38
- res2df/version.py +16 -3
- res2df/vfp/__init__.py +1 -1
- res2df/vfp/_vfp.py +28 -33
- res2df/vfp/_vfpcommon.py +18 -19
- res2df/vfp/_vfpdefs.py +2 -3
- res2df/vfp/_vfpinj.py +23 -58
- res2df/vfp/_vfpprod.py +28 -64
- res2df/wcon.py +4 -11
- res2df/wellcompletiondata.py +26 -26
- res2df/wellconnstatus.py +4 -5
- {res2df-1.3.6.dist-info → res2df-1.3.8.dist-info}/METADATA +4 -2
- {res2df-1.3.6.dist-info → res2df-1.3.8.dist-info}/RECORD +38 -37
- {res2df-1.3.6.dist-info → res2df-1.3.8.dist-info}/WHEEL +0 -0
- {res2df-1.3.6.dist-info → res2df-1.3.8.dist-info}/entry_points.txt +0 -0
- {res2df-1.3.6.dist-info → res2df-1.3.8.dist-info}/licenses/LICENSE +0 -0
- {res2df-1.3.6.dist-info → res2df-1.3.8.dist-info}/top_level.txt +0 -0
res2df/__init__.py
CHANGED
@@ -1,11 +1,10 @@
 import importlib
-from typing import List
 
 from .__version__ import __version__ as __version__
 from .res2csvlogger import getLogger_res2csv as getLogger_res2csv
 from .resdatafiles import ResdataFiles as ResdataFiles
 
-SUBMODULES: List[str] = [
+SUBMODULES: list[str] = [
     "compdat",
     "equil",
     "faults",
@@ -26,5 +25,5 @@ SUBMODULES: List[str] = [
 ]
 
 
-for submodule in SUBMODULES:
+for submodule in [*SUBMODULES, "res2csv", "csv2res"]:
     importlib.import_module("res2df." + submodule)
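The widened loop also imports the res2csv and csv2res entry-point modules eagerly, so a single import of the package now binds every submodule as an attribute. A minimal sketch of the effect for callers, assuming res2df 1.3.8 is installed:

import res2df

# Each eagerly imported submodule is available without a separate import:
print(res2df.compdat)
print(res2df.equil)
print(res2df.res2csv)  # newly included in the import loop in 1.3.8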
res2df/common.py
CHANGED
@@ -11,21 +11,24 @@ import shlex
 import signal
 import sys
 from collections import defaultdict
+from importlib import resources
 from pathlib import Path
-from typing import Any, Dict, List, Optional, Set, Union
+from typing import Any, cast
 
 import dateutil.parser
 import numpy as np
 import pandas as pd
-import pyarrow
+import pyarrow as pa
+from pyarrow import (
+    feather,  # necessary as this module is not loaded unless explicitly imported
+)
 
 try:
-
-    import opm.io.deck  # lgtm [py/import-and-import-from]
+    import opm.io.deck
 
     # This import is seemingly not used, but necessary for some attributes
     # to be included in DeckItem objects.
-    from opm.io.deck import DeckKeyword  # noqa
+    from opm.io.deck import DeckKeyword  # noqa: F401
 except ImportError:
     # Allow parts of res2df to work without OPM:
     pass
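The inline comment in the hunk above explains the import change: `import pyarrow` alone does not load the `feather` submodule, so `pa.feather` would not be usable. A small standalone sketch of the pattern the new import enables (hypothetical example data and filename):

import pyarrow as pa
from pyarrow import feather  # without this, pa.feather is not populated

table = pa.table({"WELL": ["OP_1"], "KH": [1234.5]})  # hypothetical data
feather.write_feather(table, dest="example.feather")  # same call used later in common.py
print(feather.read_table("example.feather").column_names)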
@@ -35,7 +38,7 @@ from .constants import MAGIC_STDOUT
 
 # Parse named JSON files, this exposes a dict of dictionary describing the contents
 # of supported keyword data
-OPMKEYWORDS: Dict[str, dict] = {}
+OPMKEYWORDS: dict[str, dict] = {}
 for keyw in [
     "BRANPROP",
     "COMPDAT",
@@ -82,14 +85,14 @@ for keyw in [
     "WSEGVALV",
 ]:
     OPMKEYWORDS[keyw] = json.loads(
-        (
+        (resources.files(__package__) / "opmkeywords" / keyw).read_text()
     )
 
 
 SVG_COLOR_NAMES = [
     color.lower()
     for color in (
-        (
+        (resources.files(__package__) / "svg_color_keyword_names.txt")
         .read_text(encoding="utf-8")
         .splitlines()
     )
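Both hunks above switch bundled-data loading to importlib.resources.files, which resolves package data through the import system instead of assuming a filesystem layout. A standalone sketch of the same pattern, reading one of the bundled keyword schemas (assumes an installed res2df):

import json
from importlib import resources

# resources.files() returns a Traversable; "/" descends into the package data.
text = (resources.files("res2df") / "opmkeywords" / "COMPDAT").read_text()
schema = json.loads(text)
print(type(schema))  # a dict describing the COMPDAT keyword items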
@@ -115,11 +118,11 @@ logger: logging.Logger = logging.getLogger(__name__)
 
 
 def write_dframe_stdout_file(
-    dframe:
+    dframe: pd.DataFrame | pa.Table,
     output: str,
     index: bool = False,
-    caller_logger:
-    logstr:
+    caller_logger: logging.Logger | None = None,
+    logstr: str | None = None,
 ) -> None:
     """Write a dataframe to either stdout or a file
 
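This signature rewrite follows a pattern running through the whole release: typing.Union/Optional/List/Dict annotations are replaced with built-in generics and "|" unions (PEP 585/604), which need newer Pythons at runtime (3.10 for "|" unions, unless from __future__ import annotations is in effect). Schematically, with hypothetical function names:

from typing import Dict, List, Optional, Union

# 1.3.6 style:
def old(renamer: Optional[Dict[str, Union[str, List[str]]]] = None) -> None: ...

# 1.3.8 style, as seen in the hunk above:
def new(renamer: dict[str, str | list[str]] | None = None) -> None: ...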
@@ -144,13 +147,13 @@ def write_dframe_stdout_file(
     if caller_logger and isinstance(dframe, pd.DataFrame) and dframe.empty:
         caller_logger.warning("Empty dataframe being written to disk")
     if caller_logger and not logstr:
-        caller_logger.info("Writing to file %s",
+        caller_logger.info("Writing to file %s", output)
     elif caller_logger and logstr:
         caller_logger.info(logstr)
     if isinstance(dframe, pd.DataFrame):
         dframe.to_csv(output, index=index)
     else:
-
+        feather.write_feather(dframe, dest=output)
 
 
 def write_inc_stdout_file(string: str, outputfilename: str) -> None:
@@ -170,13 +173,13 @@ def parse_month(rdmonth: str) -> int:
     return MONTH2NUM[rdmonth]
 
 
-def datetime_to_ecldate(timestamp:
+def datetime_to_ecldate(timestamp: str | datetime.datetime | datetime.date) -> str:
     """Convert a Python timestamp or date to the Eclipse DATE format"""
     if isinstance(timestamp, str):
         if list(map(len, timestamp.split(" ")[0].split("-"))) != [4, 2, 2]:
             # Need this as dateutil.parser.isoparse() is not in Python 3.6.
             raise ValueError("Use ISO-format for dates")
-        timestamp = dateutil.parser.parse(timestamp)
+        timestamp = dateutil.parser.parse(timestamp)
     if not isinstance(timestamp, (datetime.datetime, datetime.date)):
         raise TypeError("Require string or datetime")
     string = f"{timestamp.day} '{NUM2MONTH[timestamp.month]}' {timestamp.year}"
@@ -188,9 +191,9 @@ def datetime_to_ecldate(timestamp: Union[str, datetime.datetime, datetime.date])
 def keyworddata_to_df(
     deck,
     keyword: str,
-    renamer:
-    recordcountername:
-    emptyrecordcountername:
+    renamer: dict[str, str | list[str]] | None = None,
+    recordcountername: str | None = None,
+    emptyrecordcountername: str | None = None,
 ) -> pd.DataFrame:
     """Extract data associated to a keyword into tabular form.
 
@@ -212,8 +215,8 @@ def keyworddata_to_df(
         row based on how many empty records is encountered. For PVTO f.ex,
         this gives the PVTNUM indexing.
     """
-
-
+    dict_records: list[dict[str, Any]] = []
+    df_records: list[pd.DataFrame] = []
     record_counter = 1
     emptyrecord_counter = 1
     for deckrecord in deck[keyword]:
@@ -235,41 +238,45 @@ def keyworddata_to_df(
         if "DATA" in recdict and isinstance(recdict["DATA"], list):
             assert renamer is not None
             # If DATA is sometimes used for something else in the jsons, redo this.
-
+            renamed_data = renamer.get("DATA", [])
+            if isinstance(renamed_data, str):
+                renamed_data = [renamed_data]
+            data_dim = len(renamed_data)  # The renamers must be in sync with json!
             data_chunks = int(len(recdict["DATA"]) / data_dim)
             try:
                 data_reshaped = np.reshape(recdict["DATA"], (data_chunks, data_dim))
             except ValueError as err:
                 raise ValueError(
-
-
-                    "Either your keyword is wrong, or your data is wrong"
-                )
+                    f"Wrong number count for keyword {keyword}. \n"
+                    "Either your keyword is wrong, or your data is wrong"
                 ) from err
-            data_df = pd.DataFrame(columns=
+            data_df = pd.DataFrame(columns=renamed_data, data=data_reshaped)
             # Assign the remaining items from the parsed dict to the dataframe:
             for key, value in recdict.items():
                 if key != "DATA":
                     data_df[key] = value
-
-            record_counter += 1
+            df_records.append(data_df)
         else:
-
-
-            if
-
-
-
-
+            dict_records.append(recdict)
+        record_counter += 1
+    if df_records and dict_records:
+        dict_df = pd.DataFrame(data=dict_records)
+        return pd.concat([*df_records, dict_df]).reset_index(drop=True)
+    elif df_records:  # trust that this is all one type?
+        return pd.concat(df_records).reset_index(drop=True)
+    elif dict_records:  # records contain lists.
+        return pd.DataFrame(data=dict_records).reset_index(drop=True)
+    else:
+        return pd.DataFrame()
 
 
 def parse_opmio_deckrecord(
-    record: "opm.
+    record: "opm.opmcommon_python.DeckRecord",
     keyword: str,
     itemlistname: str = "items",
-    recordindex:
-    renamer:
-) ->
+    recordindex: int | None = None,
+    renamer: dict[str, str] | dict[str, str | list[str]] | None = None,
+) -> dict[str, Any]:
     """
     Parse an opm.io.DeckRecord belonging to a certain keyword
 
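The rewritten body of keyworddata_to_df collects per-record DataFrames and plain dict records in two lists and merges them once at the end, instead of growing output record by record. A toy illustration of that accumulate-then-concat strategy, using hypothetical data rather than real deck records:

import pandas as pd

df_records = [pd.DataFrame({"PVTNUM": [1, 1], "VOLUMEFACTOR": [1.1, 1.2]})]
dict_records = [{"PVTNUM": 2, "VOLUMEFACTOR": 1.3}]

# Mirror the merge logic at the end of the function:
if df_records and dict_records:
    dict_df = pd.DataFrame(data=dict_records)
    result = pd.concat([*df_records, dict_df]).reset_index(drop=True)
    print(result)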
@@ -289,7 +296,7 @@ def parse_opmio_deckrecord(
     if keyword not in OPMKEYWORDS:
         raise ValueError(f"Keyword {keyword} not supported by common.py")
 
-    rec_dict:
+    rec_dict: dict[str, Any] = {}
 
     if recordindex is None:  # Beware, 0 is different from None here.
         itemlist = OPMKEYWORDS[keyword][itemlistname]
@@ -329,14 +336,13 @@ def parse_opmio_deckrecord(
             # OPM DeckItem. A better solution has not yet
             # been found in the OPM API. See also
             # https://github.com/OPM/opm-common/issues/2598
-            # pylint: disable=protected-access
             if record[item_idx].__defaulted(idx):
                 rec_dict[item_name][idx] = np.nan
         else:
             rec_dict[item_name] = jsonitem.get("default", None)
 
     if renamer:
-        renamed_dict:
+        renamed_dict: dict[str, Any] = {}
         for key, value in rec_dict.items():
             if key in renamer and not isinstance(renamer[key], list):
                 renamed_dict[renamer[key]] = value  # type: ignore
@@ -354,7 +360,7 @@ def parse_opmio_date_rec(record: "opm.io.DeckRecord") -> datetime.date:
     return datetime.date(year=year, month=parse_month(month), day=day)
 
 
-def parse_opmio_tstep_rec(record: "opm.io.DeckRecord") ->
+def parse_opmio_tstep_rec(record: "opm.io.DeckRecord") -> list[float | int]:
     """Parse a record with TSTEP data
 
     Return:
@@ -396,13 +402,13 @@ def merge_zones(
         return df
     zone_df = pd.DataFrame.from_dict(zonedict, orient="index", columns=[zoneheader])
     zone_df.index.name = "K"
-    zone_df.reset_index(
+    zone_df = zone_df.reset_index()
 
     df[zoneheader] = df[kname].map(defaultdict(lambda: None, zonedict))
     return df
 
 
-def comment_formatter(multiline:
+def comment_formatter(multiline: str | None, prefix: str = "-- ") -> str:
     """Prepends comment characters to every line in input
 
     If nothing is supplied, an empty string is returned.
@@ -422,11 +428,11 @@ def comment_formatter(multiline: Optional[str], prefix: str = "-- ") -> str:
 
 
 def handle_wanted_keywords(
-    wanted:
+    wanted: list[str] | None,
     deck: "opm.io.Deck",
-    supported:
+    supported: list[str],
     modulename: str = "",
-) ->
+) -> list[str]:
     """Handle three list of keywords, wanted, available and supported
 
     Args:
@@ -442,12 +448,12 @@ def handle_wanted_keywords(
         keywords = supported
     else:
         # Warn if some keywords are unsupported:
-        not_supported:
+        not_supported: set[str] = set(wanted) - set(supported)
         if not_supported:
             logger.warning(
                 "Requested keyword(s) not supported by res2df.%s: %s",
                 modulename,
-
+                not_supported,
             )
         # Reduce to only supported keywords:
         keywords = list(set(wanted) - set(not_supported))
@@ -455,9 +461,7 @@ def handle_wanted_keywords(
     keywords_in_deck = [keyword for keyword in keywords if keyword in deck]
     not_in_deck = set(keywords) - set(keywords_in_deck)
     if not_in_deck:
-        logger.warning(
-            "Requested keyword(s) not present in deck: %s", str(not_in_deck)
-        )
+        logger.warning("Requested keyword(s) not present in deck: %s", not_in_deck)
     # Reduce again to only present keywords, but without warning:
     keywords = [keyword for keyword in keywords if keyword in deck]
 
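Several hunks in this release collapse multi-line logger calls and drop explicit str() wrapping of the arguments. With %-style logging the formatting is deferred until a record is actually emitted, so the wrapping was redundant. A short sketch, with hypothetical example values:

import logging

logger = logging.getLogger("res2df.common")
not_in_deck = {"FAULTS", "EQUIL"}  # hypothetical example values

# The logging module stringifies the argument lazily, only if the record is emitted:
logger.warning("Requested keyword(s) not present in deck: %s", not_in_deck)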
@@ -506,11 +510,11 @@ def fill_reverse_parser(
 
 def df2res(
     dataframe: pd.DataFrame,
-    keywords:
-    comments:
-    supported:
-    consecutive:
-    filename:
+    keywords: str | list[str] | list[str | None] | None = None,
+    comments: dict[str, str] | None = None,
+    supported: list[str] | None = None,
+    consecutive: str | None = None,
+    filename: str | None = None,
 ) -> str:
     """Generate resdata :term:`include file` content from dataframes in res2df format.
 
@@ -552,7 +556,7 @@ def df2res(
         logger.critical(
             "%s inconsistent in input dataframe, got the values %s",
             consecutive,
-
+            dataframe[consecutive].unique(),
         )
         raise ValueError
 
@@ -572,18 +576,18 @@ def df2res(
     # Warn if some keywords are unsupported:
     assert keywords is not None
     assert supported is not None
-    not_supported:
+    not_supported: set[str | None] = set(keywords) - set(supported)
     if not_supported:
         logger.warning(
             "Requested keyword(s) not supported by %s: %s",
             calling_module.__name__,  # type: ignore
-
+            not_supported,
         )
     # Warn if some requested keywords are not in frame:
     not_in_frame = set(keywords) - keywords_in_frame
     if not_in_frame:
         logger.warning(
-            "Requested keyword(s) not present in dataframe: %s",
+            "Requested keyword(s) not present in dataframe: %s", not_in_frame
         )
     keywords = [
         keyword
@@ -626,8 +630,8 @@ def df2res(
 def generic_deck_table(
     dframe: pd.DataFrame,
     keyword: str,
-    comment:
-    renamer:
+    comment: str | None = None,
+    renamer: dict[str, str] | None = None,
     drop_trailing_columns: bool = True,
 ) -> str:
     """Construct string contents of a :term:`.DATA file` table.
@@ -666,7 +670,7 @@ def generic_deck_table(
     # sorting from that:
     if renamer is not None:
         inv_renamer = {value: key for key, value in renamer.items()}
-        dframe.rename(inv_renamer, axis="columns"
+        dframe = dframe.rename(inv_renamer, axis="columns")
 
     keyword_col_headers = [item["name"] for item in OPMKEYWORDS[keyword]["items"]]
 
@@ -680,7 +684,7 @@ def generic_deck_table(
     if rightmost_column == -1:
         # No relevant data in the dataframe
         return string
-    relevant_columns = keyword_col_headers[0 : rightmost_column + 1]
+    relevant_columns = keyword_col_headers[0 : rightmost_column + 1]
     for colname in relevant_columns:
         # Add those that are missing, as Eclipse defaults
         if colname not in dframe:
@@ -731,7 +735,7 @@ def generic_deck_table(
 
     # Now rename again to have prettier column names:
     if renamer is not None:
-        dframe.rename(renamer, axis="columns"
+        dframe = dframe.rename(renamer, axis="columns")
     # Add a final column with the end-slash, invisible header:
     dframe[" "] = "/"
     tablestring = dframe.to_string(header=True, index=False)
@@ -827,13 +831,13 @@ def stack_on_colnames(
     dframe.columns = pd.MultiIndex.from_tuples(
         tuplecolumns, names=["dummy", stackcolname]
     )
-    dframe = dframe.stack(future_stack=True)
+    dframe = cast(pd.DataFrame, dframe.stack(future_stack=True))
     staticcols = [col[0] for col in tuplecolumns if len(col) == 1]
     dframe[staticcols] = dframe[staticcols].ffill()
-    dframe.reset_index(
+    dframe = dframe.reset_index()
     # Drop rows stemming from the NaNs in the second tuple-element for
     # static columns:
-    dframe.dropna(axis="index", subset=["DATE"]
+    dframe = dframe.dropna(axis="index", subset=["DATE"])
     del dframe["level_0"]
    dframe.index.name = ""
     return dframe
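generic_deck_table and stack_on_colnames show another release-wide pattern: in-place DataFrame mutation is replaced by re-assigning the returned frame, a style that is safer as pandas moves toward copy-on-write semantics. In miniature, with a hypothetical frame:

import pandas as pd

dframe = pd.DataFrame({"DATE": ["2020-01-01", None], "FOPT": [1.0, 2.0]})

# 1.3.8 style: every transforming call re-assigns its result;
# a bare dframe.dropna(...) without assignment would be a silent no-op.
dframe = dframe.dropna(axis="index", subset=["DATE"])
dframe = dframe.reset_index(drop=True)
print(dframe)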
@@ -851,7 +855,7 @@ def is_color(input_string: str) -> bool:
     return bool(re.match(regex, input_string))
 
 
-def parse_lyrfile(filename: str) ->
+def parse_lyrfile(filename: str) -> list[dict[str, Any]] | None:
     """Return a list of dicts representation of the lyr file.
 
     The lyr file contains data of the following format,
@@ -885,7 +889,7 @@ def parse_lyrfile(filename: str) -> Optional[List[Dict[str, Any]]]:
     Returns:
         A list of dictionaries representing the information in the lyr file.
 
-    """
+    """
 
     zonelines = Path(filename).read_text(encoding="utf-8").splitlines()
 
@@ -893,11 +897,11 @@ def parse_lyrfile(filename: str) -> Optional[List[Dict[str, Any]]]:
     zonelines = [line.split("--")[0].strip() for line in zonelines]
     zonelines = [line for line in zonelines if line and not line.startswith("#")]
 
-    lyrlist:
+    lyrlist: list[dict[str, Any]] = []
     for line in zonelines:
         try:
             linesplit = shlex.split(line)
-            zonedict:
+            zonedict: dict[str, Any] = {"name": linesplit[0]}
             zone_color = linesplit.pop(-1) if is_color(linesplit[-1]) else None
             if zone_color is not None:
                 zonedict["color"] = zone_color
@@ -923,7 +927,7 @@ def parse_lyrfile(filename: str) -> Optional[List[Dict[str, Any]]]:
     return lyrlist
 
 
-def convert_lyrlist_to_zonemap(lyrlist:
+def convert_lyrlist_to_zonemap(lyrlist: list[dict[str, Any]]) -> dict[int, str]:
     """Returns a layer to zone map as a dictionary
 
     Args:
res2df/compdat.py
CHANGED
@@ -14,13 +14,11 @@ import argparse
 import contextlib
 import datetime
 import logging
-from typing import Dict, List, Optional, Union
 
 import numpy as np
 import pandas as pd
 
 with contextlib.suppress(ImportError):
-    # pylint: disable=unused-import
     import opm.io.deck
 
 from .common import (
@@ -43,7 +41,7 @@ documentation ever so slightly different when naming the data.
 For COMPDAT dataframe columnnames, we prefer the RMS terms due to the
 one very long one, and mixed-case in opm
 """
-COMPDAT_RENAMER: Dict[str, str] = {
+COMPDAT_RENAMER: dict[str, str] = {
     "WELL": "WELL",
     "I": "I",
     "J": "J",
@@ -61,7 +59,7 @@ COMPDAT_RENAMER: Dict[str, str] = {
 }
 
 # Workaround an inconsistency in JSON-files for OPM-common < 2021.04:
-WSEG_RENAMER: Dict[str, str] = {
+WSEG_RENAMER: dict[str, str] = {
     "SEG1": "SEGMENT1",
     "SEG2": "SEGMENT2",
 }
@@ -69,9 +67,9 @@ WSEG_RENAMER: Dict[str, str] = {
 
 def deck2dfs(
     deck: "opm.io.Deck",
-    start_date:
+    start_date: str | datetime.date | None = None,
     unroll: bool = True,
-) ->
+) -> dict[str, pd.DataFrame]:
     """Loop through the :term:`deck` and pick up information found
 
     The loop over the :term:`deck` is a state machine, as it has to pick up dates and
@@ -103,7 +101,7 @@ def deck2dfs(
         if kword.name in ["DATES", "START"]:
             for rec in kword:
                 date = parse_opmio_date_rec(rec)
-                logger.info("Parsing at date %s",
+                logger.info("Parsing at date %s", date)
         elif kword.name == "TSTEP":
             if not date:
                 logger.critical("Can't use TSTEP when there is no start_date")
@@ -114,9 +112,7 @@ def deck2dfs(
             days = sum(steplist)
             assert isinstance(date, datetime.date)
             date += datetime.timedelta(days=days)
-            logger.info(
-                "Advancing %s days to %s through TSTEP", str(days), str(date)
-            )
+            logger.info("Advancing %s days to %s through TSTEP", days, date)
         elif kword.name == "WELSPECS":
             # Information from WELSPECS are to be used in case
             # 0 or 1* is used in the I or J column in COMPDAT
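The TSTEP branch advances the running date by the sum of the step list. A self-contained sketch of that arithmetic, with hypothetical step values:

import datetime

date = datetime.date(2020, 1, 1)
steplist = [10.0, 20.0, 30.0]  # hypothetical TSTEP record, in days

days = sum(steplist)
date += datetime.timedelta(days=days)
print(date)  # 2020-03-01, sixty days later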
@@ -238,7 +234,6 @@ def deck2dfs(
 
     if unroll and not compdat_df.empty:
         compdat_df = unrolldf(compdat_df, "K1", "K2")
-
     if not welopen_df.empty:
         compdat_df = applywelopen(
             compdat_df,
@@ -263,16 +258,16 @@ def deck2dfs(
         wsegaicd_df = unrolldf(wsegaicd_df, "SEGMENT1", "SEGMENT2")
 
     if "KEYWORD_IDX" in compdat_df.columns:
-        compdat_df.drop(["KEYWORD_IDX"], axis=1
+        compdat_df = compdat_df.drop(["KEYWORD_IDX"], axis=1)
 
     if "KEYWORD_IDX" in wsegsicd_df.columns:
-        wsegsicd_df.drop(["KEYWORD_IDX"], axis=1
+        wsegsicd_df = wsegsicd_df.drop(["KEYWORD_IDX"], axis=1)
 
     if "KEYWORD_IDX" in wsegaicd_df.columns:
-        wsegaicd_df.drop(["KEYWORD_IDX"], axis=1
+        wsegaicd_df = wsegaicd_df.drop(["KEYWORD_IDX"], axis=1)
 
     if "KEYWORD_IDX" in wsegvalv_df.columns:
-        wsegvalv_df.drop(["KEYWORD_IDX"], axis=1
+        wsegvalv_df = wsegvalv_df.drop(["KEYWORD_IDX"], axis=1)
 
     return {
         "COMPDAT": compdat_df,
@@ -310,7 +305,7 @@ def expand_welopen_defaults(
     to this functions.
     """
 
-    def is_default(value:
+    def is_default(value: int | None) -> bool:
         if value is None or np.isnan(value):
             return True
         return value <= 0
@@ -330,7 +325,7 @@ def expand_welopen_defaults(
 
         # Any compdat entry with DATE==None are kept as they
         # are assumed to have an earlier date than any dates defined
-        compdat_filtered = compdat_df[compdat_df["DATE"].
+        compdat_filtered = compdat_df[compdat_df["DATE"].isna()]
 
         # If the welopen entry DATE!=None we filter on compdat entries
         # <= this date
@@ -564,7 +559,7 @@ def expand_wlist(wlist_df: pd.DataFrame) -> pd.DataFrame:
     # of dictionaries, which accumulates all WLIST directives. Every time the date
     # changes, the current state is outputted as it was valid for the previous date.
 
-    currentstate:
+    currentstate: dict[str, str] = {}
 
     if wlist_df.empty:
         return wlist_df
@@ -615,7 +610,7 @@ def expand_wlist(wlist_df: pd.DataFrame) -> pd.DataFrame:
             and wlist_record["NAME"] not in currentstate
         ):
             raise ValueError(
-                f"WLIST ADD/DEL only works on existing well lists: {
+                f"WLIST ADD/DEL only works on existing well lists: {wlist_record!s}"
             )
         if wlist_record["ACTION"] == "ADD":
             currentstate[wlist_record["NAME"]] = " ".join(
@@ -757,7 +752,7 @@ def expand_complump_in_welopen_df(
             exp_welopens.append(cell_row)
 
     dframe = pd.DataFrame(exp_welopens)
-    return dframe.astype(object).where(pd.
+    return dframe.astype(object).where(pd.notna(dframe), None)
 
 
 def expand_wlist_in_welopen_df(
@@ -783,7 +778,7 @@ def expand_wlist_in_welopen_df(
                 .split()
             ):
                 wellrow = row.copy()
-                wellrow
+                wellrow["WELL"] = well
                 exp_welopens.append(wellrow)
         else:
             raise ValueError(f"Well list {wlistname} not defined at {row['DATE']}")
@@ -791,14 +786,14 @@ def expand_wlist_in_welopen_df(
         # Explicit wellname was used, no expansion to happen:
         exp_welopens.append(row)
     dframe = pd.DataFrame(exp_welopens)
-    return dframe.astype(object).where(pd.
+    return dframe.astype(object).where(pd.notna(dframe), None)
 
 
 def applywelopen(
     compdat_df: pd.DataFrame,
     welopen_df: pd.DataFrame,
-    wlist_df:
-    complump_df:
+    wlist_df: pd.DataFrame | None = None,
+    complump_df: pd.DataFrame | None = None,
 ) -> pd.DataFrame:
     """Apply WELOPEN actions to the COMPDAT dataframe.
 
@@ -850,9 +845,11 @@ def applywelopen(
             "The WLIST dataframe must be expanded through expand_wlist()"
         )
 
-    welopen_df = welopen_df.astype(object).where(pd.
-
-
+    welopen_df = welopen_df.astype(object).where(pd.notna(welopen_df), None)
+    if wlist_df is not None:
+        welopen_df = expand_wlist_in_welopen_df(welopen_df, wlist_df)
+    if complump_df is not None:
+        welopen_df = expand_complump_in_welopen_df(welopen_df, complump_df)
 
     for _, row in welopen_df.iterrows():
         acts_on_well = False
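Three functions in compdat.py now normalize missing values with the same one-liner before WELOPEN processing. A minimal sketch of what astype(object).where(pd.notna(...), None) does, on a hypothetical frame:

import pandas as pd

welopen_df = pd.DataFrame({"WELL": ["OP_1", None], "I": [1, float("nan")]})

# Cast to object and replace every NaN/NaT/None marker with a uniform Python None:
welopen_df = welopen_df.astype(object).where(pd.notna(welopen_df), None)
print(welopen_df.iloc[1].tolist())  # [None, None]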
@@ -956,9 +953,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
 
 def compdat_main(args):
     """Entry-point for module, for command line utility"""
-    logger = getLogger_res2csv(
-        __name__, vars(args)
-    )
+    logger = getLogger_res2csv(__name__, vars(args))
     resdatafiles = ResdataFiles(args.DATAFILE)
     compdat_df = df(resdatafiles, initvectors=args.initvectors)
     write_dframe_stdout_file(compdat_df, args.output, index=False, caller_logger=logger)
@@ -966,8 +961,8 @@ def compdat_main(args):
 
 def df(
     resdatafiles: ResdataFiles,
-    initvectors:
-    zonemap:
+    initvectors: list[str] | None = None,
+    zonemap: dict[int, str] | None = None,
 ) -> pd.DataFrame:
     """Main function for Python API users
 
res2df/csv2res.py
CHANGED
@@ -5,7 +5,6 @@ for selected keywords
 """
 
 import argparse
-import sys
 
 from .__version__ import __version__
 from .equil import equil_reverse_main
@@ -35,14 +34,11 @@ def get_parser() -> argparse.ArgumentParser:
         version=f"%(prog)s {__version__}",
     )
 
-
-
-
-
-
-        )
-    else:
-        subparsers = parser.add_subparsers(parser_class=argparse.ArgumentParser)
+    subparsers = parser.add_subparsers(  # type: ignore
+        required=True,
+        dest="subcommand",
+        parser_class=argparse.ArgumentParser,
+    )
 
     summary_parser = subparsers.add_parser(
         "summary",