res2df-1.3.7-py3-none-any.whl → res2df-1.3.9-py3-none-any.whl

This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents exactly as they appear in the public registry.
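Most hunks below are a mechanical typing cleanup: the `typing.Dict`/`List`/`Optional`/`Tuple`/`Type`/`Union` aliases are dropped in favour of builtin generics and PEP 604 `X | Y` unions. A minimal before/after sketch of the pattern (the function and parameter names are illustrative, not from the package; the bare `X | None` form needs Python 3.10+ unless annotations are quoted or postponed via `from __future__ import annotations`):

    # Before: typing-module generics, as removed throughout this diff
    from typing import Dict, List, Optional, Union

    def renamer_old(mapping: Dict[str, Union[str, List[str]]],
                    key: Optional[str] = None) -> List[str]: ...

    # After: builtin generics and PEP 604 unions, as introduced here
    def renamer_new(mapping: dict[str, str | list[str]],
                    key: str | None = None) -> list[str]: ...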
res2df/equil.py CHANGED
@@ -5,8 +5,8 @@ Extract EQUIL from a :term:`.DATA file` as Pandas DataFrame
  import argparse
  import contextlib
  import logging
+ from collections.abc import Container
  from pathlib import Path
- from typing import Dict, List, Optional, Union

  import pandas as pd

@@ -25,14 +25,13 @@ from .res2csvlogger import getLogger_res2csv
  from .resdatafiles import ResdataFiles

  with contextlib.suppress(ImportError):
-     # pylint: disable=unused-import
      import opm.io


  logger = logging.getLogger(__name__)

- SUPPORTED_KEYWORDS: List[str] = ["EQUIL", "PBVD", "PDVD", "RSVD", "RVVD"]
- RENAMERS: Dict[str, Dict[str, Union[str, List[str]]]] = {}
+ SUPPORTED_KEYWORDS: list[str] = ["EQUIL", "PBVD", "PDVD", "RSVD", "RVVD"]
+ RENAMERS: dict[str, dict[str, str | list[str]]] = {}
  RENAMERS["PBVD"] = {"DATA": ["Z", "PB"]}
  RENAMERS["PDVD"] = {"DATA": ["Z", "PD"]}
  RENAMERS["RSVD"] = {"DATA": ["Z", "RS"]}
@@ -80,9 +79,9 @@ RENAMERS["oil-gas"] = {


  def df(
-     deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"],
-     keywords: Optional[List[str]] = None,
-     ntequl: Optional[int] = None,
+     deck: "str | ResdataFiles | opm.opmcommon_python.Deck",
+     keywords: list[str] | None = None,
+     ntequl: int | None = None,
  ) -> pd.DataFrame:
      """Extract EQUIL related keyword data, EQUIL, RSVD, RVVD
      PBVD and PDVD.
@@ -138,7 +137,7 @@ def df(


  def rsvd_fromdeck(
-     deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None
+     deck: "str | opm.opmcommon_python.Deck", ntequl: int | None = None
  ) -> pd.DataFrame:
      """Extract RSVD data from a :term:`deck`

@@ -155,7 +154,7 @@ def rsvd_fromdeck(


  def rvvd_fromdeck(
-     deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None
+     deck: "str | opm.opmcommon_python.Deck", ntequl: int | None = None
  ) -> pd.DataFrame:
      """Extract RVVD data from a :term:`deck`

@@ -172,7 +171,7 @@ def rvvd_fromdeck(


  def pbvd_fromdeck(
-     deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None
+     deck: "str | opm.opmcommon_python.Deck", ntequl: int | None = None
  ) -> pd.DataFrame:
      """Extract PBVD data from a :term:`deck`

@@ -189,7 +188,7 @@ def pbvd_fromdeck(


  def pdvd_fromdeck(
-     deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None
+     deck: "str | opm.opmcommon_python.Deck", ntequl: int | None = None
  ) -> pd.DataFrame:
      """Extract PDVD data from a :term:`deck`

@@ -205,7 +204,7 @@ def pdvd_fromdeck(
      )


- def phases_from_deck(deck: Union[str, "opm.libopmcommon_python.Deck"]) -> str:
+ def phases_from_deck(deck: "str | opm.opmcommon_python.Deck") -> str:
      """Determined the set of phases from a :term:`deck`, as
      a string with values "oil-water-gas", "gas-water", "oil-water",
      or "oil-gas"
@@ -227,7 +226,7 @@ def phases_from_deck(deck: Union[str, "opm.libopmcommon_python.Deck"]) -> str:
      return ""


- def phases_from_columns(columns: List[str]) -> str:
+ def phases_from_columns(columns: Container[str]) -> str:
      """Determine the set of phases available in an
      equil dataframe, based on which columns are there.
      Returns "oil-water-gas", "gas-water", "oil-water",
@@ -253,7 +252,7 @@ def phases_from_columns(columns: List[str]) -> str:


  def equil_fromdeck(
-     deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None
+     deck: "str | opm.opmcommon_python.Deck", ntequl: int | None = None
  ) -> pd.DataFrame:
      """Extract EQUIL data from a :term:`deck`

@@ -321,9 +320,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar

  def equil_main(args) -> None:
      """Read from disk and write CSV back to disk"""
-     logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
-         __name__, vars(args)
-     )
+     logger = getLogger_res2csv(__name__, vars(args))
      resdatafiles = ResdataFiles(args.DATAFILE)
      if resdatafiles:
          deck = resdatafiles.get_deck()
@@ -355,9 +352,7 @@ def equil_reverse_main(args) -> None:
      """Entry-point for module, for command line utility
      for CSV to reservoir simulator :term:`include files <include file>`
      """
-     logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
-         __name__, vars(args)
-     )
+     logger = getLogger_res2csv(__name__, vars(args))
      equil_df = pd.read_csv(args.csvfile)
      logger.info("Parsed %s", args.csvfile)
      inc_string = df2res(equil_df, keywords=args.keywords)
@@ -366,10 +361,10 @@ def equil_reverse_main(args) -> None:

  def df2res(
      equil_df: pd.DataFrame,
-     keywords: Optional[List[str]] = None,
-     comments: Optional[Dict[str, str]] = None,
+     keywords: list[str] | None = None,
+     comments: dict[str, str] | None = None,
      withphases: bool = False,
-     filename: Optional[str] = None,
+     filename: str | None = None,
  ) -> str:
      """Generate string contents of :term:`include files <include file>`
      from dataframes with solution (EQUIL, RSVD++) data.
@@ -404,7 +399,7 @@ def df2res(
      return string


- def df2res_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_equil(dframe: pd.DataFrame, comment: str | None = None) -> str:
      """Create string with :term:`include file` contents for EQUIL keyword

      Args:
@@ -435,7 +430,7 @@ def df2res_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
      )


- def df2res_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_rsvd(dframe: pd.DataFrame, comment: str | None = None) -> str:
      """Create string with :term:`include file` contents for RSVD keyword

      This data consists of one table (rs as a function
@@ -448,7 +443,7 @@ def df2res_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
      return _df2res_equilfuncs("RSVD", dframe, comment)


- def df2res_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_rvvd(dframe: pd.DataFrame, comment: str | None = None) -> str:
      """Create string with :term:`include file` contents for RVVD keyword

      This data consists of one table (rv as a function
@@ -461,7 +456,7 @@ def df2res_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
      return _df2res_equilfuncs("RVVD", dframe, comment)


- def df2res_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_pbvd(dframe: pd.DataFrame, comment: str | None = None) -> str:
      """Create string with :term:`include file` contents for PBVD keyword

      Bubble-point versus depth
@@ -476,7 +471,7 @@ def df2res_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
      return _df2res_equilfuncs("PBVD", dframe, comment)


- def df2res_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None):
+ def df2res_pdvd(dframe: pd.DataFrame, comment: str | None = None):
      """Create string with :term:`include file` contents for PDVD keyword.

      Dew-point versus depth.
@@ -492,7 +487,7 @@ def df2res_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None):


  def _df2res_equilfuncs(
-     keyword: str, dframe: pd.DataFrame, comment: Optional[str] = None
+     keyword: str, dframe: pd.DataFrame, comment: str | None = None
  ) -> str:
      """Internal function to be used by df2res_<keyword>() functions"""
      if dframe.empty:
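Beyond the union-syntax changes, `phases_from_columns` above widens its parameter from `List[str]` to `collections.abc.Container[str]`, the minimal protocol for `in` membership tests. A short sketch of why that is the right bound (the `has_oil` helper and the use of these column names are illustrative, not from the package):

    import pandas as pd
    from collections.abc import Container

    def has_oil(columns: Container[str]) -> bool:
        # Container[str] only promises __contains__, so any object
        # supporting `in` is accepted, not just lists:
        return "OWC" in columns

    has_oil(["OWC", "GOC"])                          # list
    has_oil({"OWC", "GOC"})                          # set
    has_oil(pd.DataFrame(columns=["OWC"]).columns)   # pandas Index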
res2df/faults.py CHANGED
@@ -8,7 +8,6 @@ a DataFrame
  import argparse
  import contextlib
  import logging
- from typing import Union

  import pandas as pd

@@ -18,8 +17,6 @@ from .resdatafiles import ResdataFiles

  with contextlib.suppress(ImportError):
      # Needed for mypy
-
-     # pylint: disable=unused-import
      import opm.io


@@ -30,7 +27,7 @@ COLUMNS = ["NAME", "I", "J", "K", "FACE"]
  ALLOWED_FACES = ["X", "Y", "Z", "I", "J", "K", "X-", "Y-", "Z-", "I-", "J-", "K-"]


- def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame:
+ def df(deck: "ResdataFiles | opm.opmcommon_python.Deck") -> pd.DataFrame:
      """Produce a dataframe of fault data from a :term:`deck`

      All data for the keyword FAULTS will be returned.
@@ -85,9 +82,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:

  def faults_main(args) -> None:
      """Read from disk and write CSV back to disk"""
-     logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
-         __name__, vars(args)
-     )
+     logger = getLogger_res2csv(__name__, vars(args))
      resdatafiles = ResdataFiles(args.DATAFILE)
      if resdatafiles:
          deck = resdatafiles.get_deck()
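Both equil.py and faults.py guard the `opm.io` import with `contextlib.suppress(ImportError)`; only the now-redundant pylint pragmas were removed, the pattern itself is unchanged. A self-contained sketch of the pattern (the availability guard at the end is illustrative, not from the package):

    import contextlib

    # If opm is not installed, the ImportError is swallowed and the
    # name `opm` is simply never bound in this module:
    with contextlib.suppress(ImportError):
        import opm.io

    # Callers must therefore check availability before using it:
    HAVE_OPM = "opm" in globals()
    print("opm.io available:", HAVE_OPM)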
res2df/fipreports.py CHANGED
@@ -1,11 +1,9 @@
- # pylint: disable=c0301
  """Extract FIP region reports from PRT file"""

  import argparse
  import datetime
  import logging
  import re
- from typing import List, Optional, Union

  import numpy as np
  import pandas as pd
@@ -16,7 +14,7 @@ from .resdatafiles import ResdataFiles

  logger = logging.getLogger(__name__)

- REGION_REPORT_COLUMNS: List[str] = [
+ REGION_REPORT_COLUMNS: list[str] = [
      "DATE",
      "FIPNAME",
      "REGION",
@@ -50,7 +48,7 @@ def report_block_lineparser(line: str) -> tuple:
          return ()

      colonsections = line.split(":")
-     to_index: Optional[int]
+     to_index: int | None
      if "OUTFLOW TO REGION" in line:
          to_index = int(colonsections[1].split()[3])
          row_name = "OUTFLOW TO REGION"
@@ -59,9 +57,9 @@ def report_block_lineparser(line: str) -> tuple:
          row_name = " ".join(colonsections[1].strip().upper().split())

      # Oil section:
-     liquid_oil: Optional[float] = None
-     vapour_oil: Optional[float] = None
-     total_oil: Optional[float] = None
+     liquid_oil: float | None = None
+     vapour_oil: float | None = None
+     total_oil: float | None = None
      if len(colonsections[2].split()) == 3:
          (liquid_oil, vapour_oil, total_oil) = map(
              float_or_nan, colonsections[2].split()
@@ -98,7 +96,7 @@ def report_block_lineparser(line: str) -> tuple:
      )


- def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFrame:
+ def df(prtfile: str | ResdataFiles, fipname: str = "FIPNUM") -> pd.DataFrame:
      """
      Parses a PRT file from and finds FIPXXXX REGION REPORT blocks and
      organizes those numbers into a dataframe
@@ -153,13 +151,13 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra
              )
              if newdate != date:
                  date = newdate
-                 logger.debug("Found date: %s", str(date))
+                 logger.debug("Found date: %s", date)
              continue
          matchedreportblock = re.match(reportblockmatcher, line)
          if matchedreportblock:
              in_report_block = True
              region_index = int(matchedreportblock.group(1))
-             logger.debug(" Region report for region %s", str(region_index))
+             logger.debug(" Region report for region %s", region_index)
              continue
          if line.startswith(" ============================"):
              in_report_block = False
@@ -187,7 +185,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra
              line = "".join(linechars)

              records.append(
-                 [date, fipname, region_index] + list(report_block_lineparser(line))
+                 [date, fipname, region_index, *list(report_block_lineparser(line))]
              )
      return pd.DataFrame(data=records, columns=REGION_REPORT_COLUMNS)

@@ -211,9 +209,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:

  def fipreports_main(args) -> None:
      """Command line API"""
-     logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
-         __name__, vars(args)
-     )
+     logger = getLogger_res2csv(__name__, vars(args))
      if args.PRTFILE.endswith(".PRT"):
          prtfile = args.PRTFILE
      else:
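The recurring `logger.debug("... %s", str(x))` → `logger.debug("... %s", x)` edits in this file (and in grid.py below) rely on the logging module formatting its arguments lazily: `%s` applies `str()` itself, and only when the record is actually emitted. The `records.append([date, fipname, region_index, *list(...)])` change above is the equivalent iterable-unpacking spelling of the earlier list concatenation. A small demonstration of the logging point (values are illustrative):

    import logging

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    region_index = 3  # illustrative value

    # Eager: str() runs here even though DEBUG records are discarded.
    logger.debug("Region report for region %s", str(region_index))

    # Lazy: the argument is passed as-is; %s-formatting (which calls
    # str() anyway) happens only if the record passes the level check.
    logger.debug("Region report for region %s", region_index)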
res2df/grid.py CHANGED
@@ -16,13 +16,11 @@ import fnmatch
  import logging
  import textwrap
  from pathlib import Path
- from typing import Dict, List, Optional, Tuple, Type, Union

  import dateutil.parser
  import numpy as np
  import pandas as pd
- import pyarrow
- import pyarrow.feather
+ import pyarrow as pa
  import resfo
  from resdata.resfile import ResdataFile

@@ -39,13 +37,13 @@ from .resdatafiles import ResdataFiles
  logger = logging.getLogger(__name__)


- def get_available_rst_dates(resdatafiles: ResdataFiles) -> List[datetime.date]:
+ def get_available_rst_dates(resdatafiles: ResdataFiles) -> list[datetime.date]:
      """Return a list of datetime objects for the available dates in the RST file"""
      report_indices = ResdataFile.file_report_list(resdatafiles.get_rstfilename())
      logger.info(
          "Restart report indices (count %s): %s",
-         str(len(report_indices)),
-         str(report_indices),
+         len(report_indices),
+         report_indices,
      )
      return [
          resdatafiles.get_rstfile().iget_restart_sim_time(index).date()
@@ -55,8 +53,8 @@ def get_available_rst_dates(resdatafiles: ResdataFiles) -> List[datetime.date]:

  def dates2rstindices(
      resdatafiles: ResdataFiles,
-     dates: Optional[Union[str, datetime.date, List[datetime.date]]],
- ) -> Tuple[List[int], List[datetime.date], List[str]]:
+     dates: str | datetime.date | list[datetime.date] | None,
+ ) -> tuple[list[int], list[datetime.date], list[str]]:
      """Return the restart index/indices for a given datetime or list of datetimes

      dates: datetime.date or list of datetime.date, must
@@ -110,22 +108,22 @@ def dates2rstindices(
          chosendates = [x for x in dates if x in availabledates]
          if not chosendates:
              raise ValueError("None of the requested dates were found")
-         if len(chosendates) < len(availabledates):
+         if len(chosendates) < len(dates):
              logger.warning("Not all dates found in UNRST\n")
      else:
          raise ValueError("date " + str(dates) + " not understood")

      logger.info(
          "Available dates (count %s) in RST: %s",
-         str(len(availabledates)),
-         str([x.isoformat() for x in availabledates]),
+         len(availabledates),
+         [x.isoformat() for x in availabledates],
      )
      rstindices = [availabledates.index(x) for x in chosendates]
      isostrings = [x.isoformat() for x in chosendates]
      return (rstindices, chosendates, isostrings)


- def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table:
+ def _df2pyarrow(dframe: pd.DataFrame) -> pa.Table:
      """Construct a pyarrow table from dataframe with
      grid information

@@ -133,25 +131,25 @@ def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table:

      32-bit types will be used for integers and floats (todo)
      """
-     field_list: List[pyarrow.Field] = []
+     field_list: list[pa.Field] = []
      for colname in dframe.columns:
          if pd.api.types.is_integer_dtype(dframe.dtypes[colname]):
-             dtype = pyarrow.int32()
+             dtype = pa.int32()
          elif pd.api.types.is_string_dtype(dframe.dtypes[colname]):
              # Parameters are potentially merged into the dataframe.
-             dtype = pyarrow.string()
+             dtype = pa.string()
          else:
-             dtype = pyarrow.float32()
-         field_list.append(pyarrow.field(colname, dtype))
+             dtype = pa.float32()
+         field_list.append(pa.field(colname, dtype))

-     schema = pyarrow.schema(field_list)
-     return pyarrow.Table.from_pandas(dframe, schema=schema, preserve_index=False)
+     schema = pa.schema(field_list)
+     return pa.Table.from_pandas(dframe, schema=schema, preserve_index=False)


  def rst2df(
      resdatafiles: ResdataFiles,
-     date: Union[str, datetime.date, List[datetime.date]],
-     vectors: Optional[Union[str, List[str]]] = None,
+     date: str | datetime.date | list[datetime.date],
+     vectors: str | list[str] | None = None,
      dateinheaders: bool = False,
      stackdates: bool = False,
  ) -> pd.DataFrame:
@@ -193,12 +191,12 @@ def rst2df(
      # data for:
      (rstindices, chosendates, isodates) = dates2rstindices(resdatafiles, date)

-     logger.info("Extracting restart information at dates %s", str(isodates))
+     logger.info("Extracting restart information at dates %s", isodates)

      # Determine the available restart vectors, we only include
      # those with correct length, meaning that they are defined
      # for all active cells:
-     activecells = resdatafiles.get_egrid().getNumActive()
+     activecells = resdatafiles.get_egrid().get_num_active()
      rstvectors = []
      for vec in resfo.lazy_read(resdatafiles.get_rstfilename()):
          keyword_name = vec.read_keyword().strip()
@@ -218,22 +216,20 @@ def rst2df(
          # Filter the rst vectors once more, all of them
          # might not be available at all timesteps:
          present_rstvectors = []
-         for vec in rstvectors:
+         for rst_vec in rstvectors:
              try:
-                 if resdatafiles.get_rstfile().iget_named_kw(vec, rstindex):
-                     present_rstvectors.append(vec)
+                 if resdatafiles.get_rstfile().iget_named_kw(rst_vec, rstindex):
+                     present_rstvectors.append(rst_vec)
              except IndexError:
                  pass
          logger.info(
              "Present restart vectors at index %s: %s",
-             str(rstindex),
-             str(present_rstvectors),
+             rstindex,
+             present_rstvectors,
          )
          if not present_rstvectors:
              if vectorswasdefaulted:
-                 logger.warning(
-                     "No restart vectors available at index %s", str(rstindex)
-                 )
+                 logger.warning("No restart vectors available at index %s", rstindex)
              continue

          # Make the dataframe
@@ -262,7 +258,7 @@ def rst2df(
          # Tag the column names if requested, or if multiple rst indices
          # are asked for
          datestr = chosendates[rstindices.index(rstindex)].isoformat()
-         if dateinheaders or len(rstindices) > 1 and not stackdates:
+         if dateinheaders or (len(rstindices) > 1 and not stackdates):
              rst_df.columns = [colname + "@" + datestr for colname in rst_df.columns]

          # resdata emits a number around -1.0000000200408773e+20 which
@@ -270,7 +266,7 @@ def rst2df(
          rst_df = rst_df.where(rst_df > -1e20 + 1e13)  # some trial and error

          # Remove columns that are all NaN:
-         rst_df.dropna(axis="columns", how="all", inplace=True)
+         rst_df = rst_df.dropna(axis="columns", how="all")

          rst_df.index.name = "active"

@@ -282,13 +278,14 @@ def rst2df(
      if not stackdates:
          return pd.concat(rst_dfs.values(), axis=1).reset_index()

-     rststack = pd.concat(rst_dfs, sort=False).reset_index()
-     rststack.rename(columns={"level_0": "DATE"}, inplace=True)
+     rststack = (
+         pd.concat(rst_dfs, sort=False).reset_index().rename(columns={"level_0": "DATE"})
+     )
      return rststack


  def gridgeometry2df(
-     resdatafiles: ResdataFiles, zonemap: Optional[Dict[int, str]] = None
+     resdatafiles: ResdataFiles, zonemap: dict[int, str] | None = None
  ) -> pd.DataFrame:
      """Produce a Pandas Dataframe with grid geometry

@@ -314,9 +311,9 @@ def gridgeometry2df(
      if not egrid_file or not grid:
          raise ValueError("No EGRID file supplied")

-     logger.info("Extracting grid geometry from %s", str(egrid_file))
+     logger.info("Extracting grid geometry from %s", egrid_file)
      index_frame = grid.export_index(active_only=True)
-     ijk = index_frame.values[:, 0:3] + 1  # ijk from resdata.grid is off by one
+     ijk = index_frame.to_numpy()[:, 0:3] + 1  # ijk from resdata.grid is off by one

      xyz = grid.export_position(index_frame)
      vol = grid.export_volume(index_frame)
@@ -368,8 +365,8 @@ def gridgeometry2df(
  def merge_initvectors(
      resdatafiles: ResdataFiles,
      dframe: pd.DataFrame,
-     initvectors: List[str],
-     ijknames: Optional[List[str]] = None,
+     initvectors: list[str],
+     ijknames: list[str] | None = None,
  ) -> pd.DataFrame:
      """Merge in INIT vectors to a dataframe by I, J, K.

@@ -406,13 +403,13 @@ def merge_initvectors(
          initvectors = [initvectors]
      assert isinstance(initvectors, list)

-     logger.info("Merging INIT data %s into dataframe", str(initvectors))
-     ijkinit = df(resdatafiles, vectors=initvectors)[["I", "J", "K"] + initvectors]
-     return pd.merge(dframe, ijkinit, left_on=ijknames, right_on=["I", "J", "K"])
+     logger.info("Merging INIT data %s into dataframe", initvectors)
+     ijkinit = df(resdatafiles, vectors=initvectors)[["I", "J", "K", *initvectors]]
+     return dframe.merge(ijkinit, left_on=ijknames, right_on=["I", "J", "K"])


  def init2df(
-     resdatafiles: ResdataFiles, vectors: Optional[Union[str, List[str]]] = None
+     resdatafiles: ResdataFiles, vectors: str | list[str] | None = None
  ) -> pd.DataFrame:
      """Extract information from INIT file with cell data

@@ -440,7 +437,7 @@ def init2df(
      for vec in init.headers:
          if vec[0] == "PORV" and any(fnmatch.fnmatch("PORV", key) for key in vectors):
              include_porv = True
-         elif vec[1] == egrid.getNumActive() and any(
+         elif vec[1] == egrid.get_num_active() and any(
              fnmatch.fnmatch(vec[0], key) for key in vectors
          ):
              usevectors.append(vec[0])
@@ -459,7 +456,7 @@ def init2df(
          init_df = init_df.where(init_df > -1e20 + 1e13)  # some trial and error

          # Remove columns that are all NaN:
-         init_df.dropna(axis="columns", how="all", inplace=True)
+         init_df = init_df.dropna(axis="columns", how="all")

      else:
          init_df = pd.DataFrame()  # empty
@@ -469,21 +466,21 @@ def init2df(
          porv_numpy = init.iget_named_kw("PORV", 0).numpyView()
          glob_idxs = [
              egrid.get_global_index(active_index=ix)
-             for ix in range(egrid.getNumActive())
+             for ix in range(egrid.get_num_active())
          ]
          init_df["PORV"] = porv_numpy[glob_idxs]
-     logger.info("Extracted %s from INIT file", str(init_df.columns.values))
+     logger.info("Extracted %s from INIT file", init_df.columns.to_numpy())
      return init_df


  def df(
      resdatafiles: ResdataFiles,
-     vectors: Union[str, List[str]] = "*",
+     vectors: str | list[str] = "*",
      dropconstants: bool = False,
-     rstdates: Optional[Union[str, datetime.date, List[datetime.date]]] = None,
+     rstdates: str | datetime.date | list[datetime.date] | None = None,
      dateinheaders: bool = False,
      stackdates: bool = False,
-     zonemap: Optional[Dict[int, str]] = None,
+     zonemap: dict[int, str] | None = None,
  ):
      """Produce a dataframe with grid information

@@ -591,7 +588,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:


  def drop_constant_columns(
-     dframe: pd.DataFrame, alwayskeep: Optional[Union[str, List[str]]] = None
+     dframe: pd.DataFrame, alwayskeep: str | list[str] | None = None
  ) -> pd.DataFrame:
      """Drop/delete constant columns from a dataframe.

@@ -618,16 +615,16 @@ def drop_constant_columns(
          if len(dframe[col].unique()) == 1:
              columnstodelete.append(col)
      if columnstodelete:
-         logging.info("Deleting constant columns %s", str(columnstodelete))
+         logging.info("Deleting constant columns %s", columnstodelete)
      return dframe.drop(columnstodelete, axis=1)


  def df2res(
      grid_df: pd.DataFrame,
-     keywords: Union[str, List[str]],
-     resdatafiles: Optional[ResdataFiles] = None,
-     dtype: Optional[Type] = None,
-     filename: Optional[str] = None,
+     keywords: str | list[str],
+     resdatafiles: ResdataFiles | None = None,
+     dtype: type | None = None,
+     filename: str | None = None,
      nocomments: bool = False,
  ) -> str:
      """
@@ -680,24 +677,24 @@ def df2res(
      active_cells = None
      if resdatafiles is not None and resdatafiles.get_egrid() is not None:
          global_size = resdatafiles.get_egrid().get_global_size()
-         active_cells = resdatafiles.get_egrid().getNumActive()
+         active_cells = resdatafiles.get_egrid().get_num_active()

      if "GLOBAL_INDEX" not in grid_df:
          logger.warning(
-             ("Global index not found in grid dataframe. Assumes all cells are active")
+             "Global index not found in grid dataframe. Assumes all cells are active"
          )
          # Drop NaN rows for columns to be used (triggered by stacked
          # dates and no global index, unlikely)
          # Also copy dataframe to avoid side-effects on incoming data.
          grid_df = grid_df.dropna(
-             axis="rows", subset=[keyword for keyword in keywords if keyword in grid_df]
+             axis="index", subset=[keyword for keyword in keywords if keyword in grid_df]
          )
          grid_df["GLOBAL_INDEX"] = grid_df.index

      if global_size is None:
          global_size = int(grid_df["GLOBAL_INDEX"].max() + 1)
          active_cells = len(grid_df[grid_df.index >= 0])
-         logger.warning("Global grid size estimated to %s", str(global_size))
+         logger.warning("Global grid size estimated to %s", global_size)

      res2df_header = (
          "Output file printed by "
@@ -716,14 +713,14 @@ def df2res(
      # If we have NaNs in the dataframe, we will be more careful (costs memory)
      if grid_df.isna().any().any():
          grid_df = grid_df.dropna(
-             axis="rows", subset=[keyword for keyword in keywords if keyword in grid_df]
+             axis="index", subset=[keyword for keyword in keywords if keyword in grid_df]
          )

      for keyword in keywords:
          if keyword not in grid_df.columns:
              raise ValueError(f"Keyword {keyword} not found in grid dataframe")
          vector = np.zeros(global_size)
-         vector[grid_df["GLOBAL_INDEX"].astype(int).values] = grid_df[keyword]
+         vector[grid_df["GLOBAL_INDEX"].astype(int).to_numpy()] = grid_df[keyword]
          if dtype is int:
              vector = vector.astype(int)
          if dtype is float:
@@ -764,9 +761,7 @@ def df2res(

  def grid_main(args) -> None:
      """This is the command line API"""
-     logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
-         __name__, vars(args)
-     )
+     logger = getLogger_res2csv(__name__, vars(args))
      resdatafiles = ResdataFiles(args.DATAFILE)
      grid_df = df(
          resdatafiles,
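A pattern worth noting across grid.py: every `inplace=True` call (`dropna`, `rename`) is replaced by rebinding the returned copy, and `.values` gives way to `.to_numpy()`, in line with current pandas guidance. A minimal sketch of the equivalence (the frame and its contents are illustrative):

    import numpy as np
    import pandas as pd

    frame = pd.DataFrame({"A": [1.0, np.nan], "B": [np.nan, np.nan]})

    # Old style, mutating in place:
    #   frame.dropna(axis="columns", how="all", inplace=True)
    # New style, rebinding the returned copy:
    frame = frame.dropna(axis="columns", how="all")

    # .to_numpy() is the recommended replacement for .values:
    print(frame.to_numpy())  # [[ 1.] [nan]]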