res2df 1.3.6 → 1.3.8 (py3-none-any.whl)

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
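Most of the changes below are a typing cleanup: `typing.Union`, `Optional`, `Dict`, `List` and `Tuple` annotations are rewritten to the PEP 604/585 forms (`X | None`, `dict[...]`, `list[...]`, `tuple[...]`). A minimal sketch of the pattern, using a hypothetical function that is not part of res2df:

```python
from __future__ import annotations  # lets the new syntax run on older Pythons too

import datetime

# Before (typing module generics):
#   def lookup(dates: Optional[List[datetime.date]]) -> Dict[str, int]: ...

# After (builtin generics and the | union operator):
def lookup(dates: list[datetime.date] | None) -> dict[str, int]:
    """Map ISO date strings to their year (toy example)."""
    if dates is None:
        return {}
    return {d.isoformat(): d.year for d in dates}
```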
res2df/gruptree.py CHANGED
@@ -7,7 +7,7 @@ import datetime
  import logging
  import sys
  import warnings
- from typing import Any, Dict, List, Optional, Tuple, Union
+ from typing import Any
 
  import numpy as np
  import pandas as pd
@@ -15,8 +15,6 @@ import treelib
 
  with contextlib.suppress(ImportError):
  # Needed for mypy
-
- # pylint: disable=unused-import
  import opm.io
 
  from .common import (
@@ -33,8 +31,8 @@ logger = logging.getLogger(__name__)
 
 
  def df(
- deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"],
- startdate: Optional[datetime.date] = None,
+ deck: "ResdataFiles | opm.opmcommon_python.Deck",
+ startdate: datetime.date | None = None,
  welspecs: bool = True,
  ) -> pd.DataFrame:
  """Extract all group information from a :term:`deck`
@@ -64,7 +62,7 @@ def df(
  information is found in :term:`deck`.
  """
 
- date: Optional[datetime.date]
+ date: datetime.date | None
  date = startdate if startdate is not None else None
 
  if isinstance(deck, ResdataFiles):
@@ -76,16 +74,16 @@ def df(
  # In order for the GRUPTREE/BRANPROP keywords to accumulate, we
  # store the edges as dictionaries indexed by the edge
  # (which is a tuple of child and parent).
- currentedges: Dict[str, Dict[Tuple[str, str], Dict[str, Any]]] = {
+ currentedges: dict[str, dict[tuple[str, str], dict[str, Any]]] = {
  "GRUPTREE": {},
  "BRANPROP": {},
  }
  # Same approach for the welspecs keywords
- wellspecsedges: Dict[Tuple[str, str], str] = {}
+ wellspecsedges: dict[tuple[str, str], str] = {}
  # Node properties from GRUPNET/NODEPROP is stored in a dataframe
  # Note that it's not allowed to mix GRUPNET and NODEPROP in eclipse
  # so the datframe will only contain columns from one of them
- nodedata: Dict[str, pd.DataFrame] = {
+ nodedata: dict[str, pd.DataFrame] = {
  "GRUPNET": pd.DataFrame(),
  "NODEPROP": pd.DataFrame(),
  }
@@ -115,7 +113,7 @@ def df(
  if kword.name in ["DATES", "START"]:
  for rec in kword:
  date = parse_opmio_date_rec(rec)
- logger.debug("Parsing at date %s", str(date))
+ logger.debug("Parsing at date %s", date)
  elif kword.name == "TSTEP":
  assert date is not None
  for rec in kword:
@@ -123,9 +121,7 @@ def df(
  # Assuming not LAB units, then the unit is days.
  days = sum(steplist)
  date += datetime.timedelta(days=days)
- logger.info(
- "Advancing %s days to %s through TSTEP", str(days), str(date)
- )
+ logger.info("Advancing %s days to %s through TSTEP", days, date)
  if kword.name in ["GRUPTREE", "BRANPROP"]:
  found_keywords[kword.name] = True
  renamer = (
@@ -137,13 +133,13 @@ def df(
  edge_dict = parse_opmio_deckrecord(edgerec, kword.name, renamer=renamer)
  child = edge_dict.pop("CHILD_GROUP")
  parent = edge_dict.pop("PARENT_GROUP")
- currentedges[kword.name][(child, parent)] = edge_dict
+ currentedges[kword.name][child, parent] = edge_dict
 
  if kword.name == "WELSPECS" and welspecs:
  found_keywords["WELSPECS"] = True
  for wellrec in kword:
  wspc_dict = parse_opmio_deckrecord(wellrec, "WELSPECS")
- wellspecsedges[(wspc_dict["WELL"], wspc_dict["GROUP"])] = "WELSPECS"
+ wellspecsedges[wspc_dict["WELL"], wspc_dict["GROUP"]] = "WELSPECS"
 
  if kword.name in ["GRUPNET", "NODEPROP"]:
  found_keywords[kword.name] = True
@@ -189,12 +185,12 @@ def df(
 
 
  def _write_edgerecords(
- currentedges: Dict[str, Dict[Tuple[str, str], Dict[str, Any]]],
- nodedata: Dict[str, pd.DataFrame],
- wellspecsedges: Dict[Tuple[str, str], str],
+ currentedges: dict[str, dict[tuple[str, str], dict[str, Any]]],
+ nodedata: dict[str, pd.DataFrame],
+ wellspecsedges: dict[tuple[str, str], str],
  found_keywords: dict,
- date: Optional[datetime.date],
- ) -> List[dict]:
+ date: datetime.date | None,
+ ) -> list[dict]:
  """Writes a new GRUPTREE tree if there are new instances of
  GRUPTREE, GRUPNET or WELSPECS and writes a new BRANPROP tree
  if there are new instances of BRANPROP, NODEPROP or WELSPECS.
@@ -219,12 +215,12 @@ def _write_edgerecords(
 
 
  def _merge_edges_and_nodeinfo(
- currentedges: Dict[Tuple[str, str], Dict[str, Any]],
+ currentedges: dict[tuple[str, str], dict[str, Any]],
  nodedata_df: pd.DataFrame,
- wellspecsedges: Dict[Tuple[str, str], str],
- date: Optional[datetime.date],
+ wellspecsedges: dict[tuple[str, str], str],
+ date: datetime.date | None,
  treetype: str,
- ) -> List[dict]:
+ ) -> list[dict[str, Any]]:
  """Merge a list of edges with information from the nodedata dataframe.
 
  Edges where there is no parent (root nodes) are identified and added
@@ -241,7 +237,7 @@ def _merge_edges_and_nodeinfo(
  Returns:
  List of dictionaries (that can be made into a dataframe)
  """
- edgerecords = []
+ edgerecords: list[dict[str, Any]] = []
  childs = set()
  parents = set()
  # Write GRUPTREE/BRANPROP edges
@@ -253,7 +249,9 @@ def _merge_edges_and_nodeinfo(
  rec_dict.update(edge_dict)
  # Add node data
  if child in nodedata_df.index:
- rec_dict.update(nodedata_df.loc[child])
+ rec_dict.update(
+ {str(k): v for k, v in nodedata_df.loc[child].to_dict().items()}
+ )
  edgerecords.append(rec_dict)
 
  # Write WELSPECS edges
@@ -280,16 +278,18 @@ def _merge_edges_and_nodeinfo(
  parents |= {"FIELD"}
 
  roots = parents - childs
- rootrecords = []
+ rootrecords: list[dict[str, Any]] = []
  for root in roots:
  rec_dict = {"DATE": date, "CHILD": root, "KEYWORD": treetype}
  if root in nodedata_df.index:
- rec_dict.update(nodedata_df.loc[root])
+ rec_dict.update(
+ {str(k): v for k, v in nodedata_df.loc[root].to_dict().items()}
+ )
  rootrecords.append(rec_dict)
  return rootrecords + edgerecords
 
 
- def edge_dataframe2dict(dframe: pd.DataFrame) -> List[dict]:
+ def edge_dataframe2dict(dframe: pd.DataFrame) -> list[dict]:
  """Convert list of edges in a dataframe into a
  nested dictionary (tree).
 
@@ -321,7 +321,7 @@ def edge_dataframe2dict(dframe: pd.DataFrame) -> List[dict]:
  subtrees: dict = collections.defaultdict(dict)
  edges = [] # List of tuples
  for _, row in dframe.iterrows():
- if not pd.isnull(row["PARENT"]):
+ if not pd.isna(row["PARENT"]):
  edges.append((row["CHILD"], row["PARENT"]))
  for child, parent in edges:
  subtrees[parent][child] = subtrees[child]
@@ -332,7 +332,7 @@ def edge_dataframe2dict(dframe: pd.DataFrame) -> List[dict]:
 
 
  def _add_to_tree_from_dict(
- nested_dict: dict, name: str, tree: treelib.Tree, parent: Optional[str] = None
+ nested_dict: dict, name: str, tree: treelib.Tree, parent: str | None = None
  ) -> None:
  assert isinstance(nested_dict, dict)
  tree.create_node(name, name, parent=parent)
@@ -361,7 +361,7 @@ def tree_from_dict(nested_dict: dict) -> treelib.Tree | str:
  "The dict given to tree_from_dict() must have "
  "exactly one top level key, representing a single tree."
  )
- root_name = list(nested_dict.keys())[0]
+ root_name = next(iter(nested_dict.keys()))
  tree = treelib.Tree()
  _add_to_tree_from_dict(nested_dict[root_name], root_name, tree)
  return tree
@@ -459,9 +459,7 @@ def prettyprint(dframe: pd.DataFrame) -> str:
 
  def gruptree_main(args) -> None:
  """Entry-point for module, for command line utility."""
- logger = getLogger_res2csv( # pylint: disable=redefined-outer-name
- __name__, vars(args)
- )
+ logger = getLogger_res2csv(__name__, vars(args))
  if not args.output and not args.prettyprint:
  print("Nothing to do. Set --output or --prettyprint")
  sys.exit(0)
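A recurring edit above (and in the files below) removes `str(...)` around arguments passed to the logger: with %-style logging calls, the conversion happens lazily when the record is actually formatted, so the wrapper is redundant. A small sketch, assuming a plain standard-library logger:

```python
import datetime
import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

date = datetime.date(2000, 1, 1)

# Eager: str() runs even if DEBUG logging is disabled
logger.debug("Parsing at date %s", str(date))

# Deferred: %s applies str() only when the record is emitted
logger.debug("Parsing at date %s", date)
```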
res2df/inferdims.py CHANGED
@@ -5,7 +5,6 @@ reservoir simulator decks (typically single include-files)
 
  import contextlib
  import logging
- from typing import Dict, Optional, Union
 
  with contextlib.suppress(ImportError):
  import opm.io
@@ -16,7 +15,7 @@ from .resdatafiles import ResdataFiles
  logger = logging.getLogger(__name__)
 
  # Constants to use for pointing to positions in the xxxDIMS keyword
- DIMS_POS: Dict[str, int] = {"NTPVT": 1, "NTSFUN": 0, "NTEQUL": 0}
+ DIMS_POS: dict[str, int] = {"NTPVT": 1, "NTSFUN": 0, "NTEQUL": 0}
 
 
  def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int:
@@ -138,9 +137,9 @@ def inject_dimcount(
  def inject_xxxdims_ntxxx(
  xxxdims: str,
  ntxxx_name: str,
- deck: Union[str, "opm.libopmcommon_python.Deck"],
- ntxxx_value: Optional[int] = None,
- ) -> "opm.libopmcommon_python.Deck":
+ deck: "str | opm.opmcommon_python.Deck",
+ ntxxx_value: int | None = None,
+ ) -> "opm.opmcommon_python.Deck":
  """Ensures TABDIMS/EQLDIMS is present in a :term:`deck`.
 
  If ntxxx_value=None and ntxxx_name not in the :term:`deck`, ntxxx_name will
@@ -169,9 +168,7 @@ def inject_xxxdims_ntxxx(
  return deck
 
  if xxxdims in deck and ntxxx_value is not None:
- logger.warning(
- "Ignoring %s argument, it is already in the deck", str(ntxxx_name)
- )
+ logger.warning("Ignoring %s argument, it is already in the deck", ntxxx_name)
  return deck
 
  if not isinstance(deck, str):
@@ -182,7 +179,7 @@ def inject_xxxdims_ntxxx(
  # Estimate if ntxxx_value is not provided:
  if ntxxx_value is None:
  ntxxx_estimate = guess_dim(deck, xxxdims, DIMS_POS[ntxxx_name])
- logger.warning("Estimated %s=%s", ntxxx_name, str(ntxxx_estimate))
+ logger.warning("Estimated %s=%s", ntxxx_name, ntxxx_estimate)
  else:
  ntxxx_estimate = ntxxx_value
 
res2df/nnc.py CHANGED
@@ -8,7 +8,6 @@ import datetime
  import logging
  import os
  from pathlib import Path
- from typing import Optional
 
  import pandas as pd
 
@@ -60,7 +59,7 @@ def df(
  # Grid indices for first cell in cell pairs, into a vertical
  # vector. The indices are "global" in resdata terms, and are
  # 1-based (FORTRAN). Convert to zero-based before sending to get_ijk()
- nnc1 = egrid_file["NNC1"][0].numpy_view().reshape(-1, 1)
+ nnc1 = egrid_file["NNC1"][0].numpy_view()
  logger.info(
  "NNC1: len: %d, min: %d, max: %d (global indices)",
  len(nnc1),
@@ -76,7 +75,8 @@ def df(
  nnc1_df[idx_cols1] += 1
 
  # Grid indices for second cell in cell pairs
- nnc2 = egrid_file["NNC2"][0].numpy_view().reshape(-1, 1)
+ nnc2 = egrid_file["NNC2"][0].numpy_view()
+
  logger.info(
  "NNC2: len: %d, min: %d, max: %d (global indices)",
  len(nnc2),
@@ -91,7 +91,7 @@ def df(
  nnc2_df[idx_cols2] += 1
 
  # Obtain transmissibility value, corresponding to the cell pairs above.
- tran = init_file["TRANNNC"][0].numpy_view().reshape(-1, 1)
+ tran = init_file["TRANNNC"][0].numpy_view()
  logger.info(
  "TRANNNC: len: %d, min: %f, max: %f, mean=%f",
  len(tran),
@@ -123,15 +123,13 @@ def add_nnc_coords(nncdf: pd.DataFrame, resdatafiles: ResdataFiles) -> pd.DataFr
  Incoming dataframe augmented with the columns X, Y and Z.
  """
  gridgeometry = gridgeometry2df(resdatafiles)
- gnncdf = pd.merge(
- nncdf,
+ gnncdf = nncdf.merge(
  gridgeometry,
  how="left",
  left_on=["I1", "J1", "K1"],
  right_on=["I", "J", "K"],
  )
- gnncdf = pd.merge(
- gnncdf,
+ gnncdf = gnncdf.merge(
  gridgeometry,
  how="left",
  left_on=["I2", "J2", "K2"],
@@ -146,7 +144,7 @@ def add_nnc_coords(nncdf: pd.DataFrame, resdatafiles: ResdataFiles) -> pd.DataFr
  gnncdf["Z"] = gnncdf[["Z", "Z_2"]].mean(axis=1)
 
  # Let go of the temporary columns we have in gnncdf
- return gnncdf[list(nncdf.columns) + ["X", "Y", "Z"]]
+ return gnncdf[[*list(nncdf.columns), "X", "Y", "Z"]]
 
 
  def filter_vertical(nncdf: pd.DataFrame) -> pd.DataFrame:
@@ -206,7 +204,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
 
 
  def df2res_editnnc(
- nnc_df: pd.DataFrame, filename: Optional[str] = None, nocomments: bool = False
+ nnc_df: pd.DataFrame, filename: str | None = None, nocomments: bool = False
  ) -> str:
  """Write an EDITNNC keyword
 
@@ -284,9 +282,7 @@ def df2res_editnnc(
 
  def nnc_main(args) -> None:
  """Command line access point from main() or from res2csv via subparser"""
- logger = getLogger_res2csv( # pylint: disable=redefined-outer-name
- __name__, vars(args)
- )
+ logger = getLogger_res2csv(__name__, vars(args))
  resdatafiles = ResdataFiles(args.DATAFILE)
  nncdf = df(resdatafiles, coords=args.coords, pillars=args.pillars)
  write_dframe_stdout_file(
File without changes
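In nnc.py (and in pillars.py further down) the module-level `pd.merge(left, right, ...)` calls are replaced by the equivalent `left.merge(right, ...)` method; keyword arguments such as `how`, `left_on` and `right_on` behave identically. A minimal sketch on made-up toy frames:

```python
import pandas as pd

nncdf = pd.DataFrame({"I1": [1, 2], "J1": [1, 1]})
gridgeometry = pd.DataFrame({"I": [1, 2], "J": [1, 1], "X": [0.5, 1.5]})

# Module-level function ...
a = pd.merge(nncdf, gridgeometry, how="left", left_on=["I1", "J1"], right_on=["I", "J"])

# ... and the equivalent DataFrame method
b = nncdf.merge(gridgeometry, how="left", left_on=["I1", "J1"], right_on=["I", "J"])

assert a.equals(b)
```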
res2df/parameters.py CHANGED
@@ -5,7 +5,7 @@ import json
  import logging
  import warnings
  from pathlib import Path
- from typing import Any, Dict, List, Union
+ from typing import Any
 
  import pandas as pd
  import yaml
@@ -16,8 +16,8 @@ logger = logging.getLogger(__name__)
 
 
  def find_parameter_files(
- deckpath: Union[ResdataFiles, str, Path], filebase: str = "parameters"
- ) -> List[Path]:
+ deckpath: ResdataFiles | str | Path, filebase: str = "parameters"
+ ) -> list[Path]:
  """Locate a default prioritized list of files to try to read as key-value
 
  File extensions .yml, .json and .txt are recognized and will be found in
@@ -41,13 +41,13 @@ def find_parameter_files(
  eclbasepath = Path(deckpath).parent.absolute()
  else:
  raise TypeError
- files_to_lookfor: List[str] = [
+ files_to_lookfor: list[str] = [
  filebase + ".json",
  filebase + ".yml",
  filebase + ".txt",
  filebase,
  ]
- paths_to_check: List[Path] = [Path("."), Path(".."), Path("..") / Path("..")]
+ paths_to_check: list[Path] = [Path("."), Path(".."), Path("..") / Path("..")]
  foundfiles = []
  for path in paths_to_check:
  for fname in files_to_lookfor:
@@ -57,7 +57,7 @@ def find_parameter_files(
  return foundfiles
 
 
- def load_parameterstxt(filename: Union[str, Path]) -> Dict[str, Any]:
+ def load_parameterstxt(filename: str | Path) -> dict[str, Any]:
  """Read parameters.txt into a dictionary
 
  Lines starting with a hash will be ignored.
@@ -87,8 +87,8 @@ def load_parameterstxt(filename: Union[str, Path]) -> Dict[str, Any]:
 
 
  def load_all(
- filenames: Union[List[str], List[Path]], warnduplicates: bool = True
- ) -> Dict[str, Any]:
+ filenames: list[str] | list[Path], warnduplicates: bool = True
+ ) -> dict[str, Any]:
  """Reads a list of parameter filenames
 
  Dictionaries for all files will be merged into one.
@@ -101,19 +101,19 @@ def load_all(
  filenames: Order matters.
  warnduplicates: If True (default), overlapping keys will be warned.
  """
- keyvalues: Dict[str, Any] = {}
+ keyvalues: dict[str, Any] = {}
  for fname in filenames:
  new_params = load(fname)
  if warnduplicates and keyvalues:
  duplicates = set(keyvalues.keys()).intersection(set(new_params.keys()))
  if duplicates:
- logger.debug("Duplicates keys %s", str(duplicates))
+ logger.debug("Duplicates keys %s", duplicates)
  new_params.update(keyvalues)
  keyvalues = new_params
  return keyvalues
 
 
- def load(filename: Union[str, Path]) -> Dict[str, Any]:
+ def load(filename: str | Path) -> dict[str, Any]:
  """Read a parameter file as txt, yaml or json
 
  Returns:
@@ -168,7 +168,7 @@ def load(filename: Union[str, Path]) -> Dict[str, Any]:
 
  if not params_dict:
  logger.warning("%s could not be parsed as yaml, json or txt", filename)
- logger.warning("%s%s%s", str(yaml_error), str(json_error), str(txt_error))
+ logger.warning("%s%s%s", yaml_error, json_error, txt_error)
  raise ValueError(f"Could not parse {filename}")
 
  # Filter to values that are NOT dict's. We can have dict as value when
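The pillars.py hunks below also drop `inplace=True` in favour of chaining `rename` and `reset_index`, which return new frames instead of mutating in place. A small sketch of the two equivalent styles on a toy frame (column names are made up):

```python
import pandas as pd

zmax = pd.DataFrame({"Z": [1000.0, 1010.0]})

# Mutating style
owc_inplace = zmax.copy()
owc_inplace.rename(columns={"Z": "OWC@2000-01-01"}, inplace=True)
owc_inplace.reset_index(inplace=True)

# Chained style: each call returns a new DataFrame
owc_chained = zmax.rename(columns={"Z": "OWC@2000-01-01"}).reset_index()

assert owc_inplace.equals(owc_chained)
```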
res2df/pillars.py CHANGED
@@ -3,7 +3,6 @@
  import argparse
  import datetime
  import logging
- from typing import Dict, List, Optional, Union
 
  import dateutil.parser
  import pandas as pd
@@ -16,7 +15,7 @@ from .resdatafiles import ResdataFiles
 
  logger: logging.Logger = logging.getLogger(__name__)
 
- AGGREGATORS: Dict[str, str] = {
+ AGGREGATORS: dict[str, str] = {
  "VOLUME": "sum",
  "PORV": "sum",
  "PERMX": "mean",
@@ -38,8 +37,8 @@ AGGREGATORS: Dict[str, str] = {
 
  def df(
  resdatafiles: ResdataFiles,
- region: Optional[str] = None,
- rstdates: Optional[Union[str, datetime.date, List[datetime.date]]] = None,
+ region: str | None = None,
+ rstdates: str | datetime.date | list[datetime.date] | None = None,
  soilcutoff: float = 0.2,
  sgascutoff: float = 0.7,
  swatcutoff: float = 0.7,
@@ -135,16 +134,14 @@ def df(
  datestr=datestr,
  )
  if not contacts.empty:
- grouped = pd.merge(grouped, contacts, how="left")
+ grouped = grouped.merge(contacts, how="left")
 
  if stackdates:
  return stack_on_colnames(grouped, sep="@", stackcolname="DATE", inplace=True)
  return grouped
 
 
- def compute_volumes(
- grid_df: pd.DataFrame, datestr: Optional[str] = None
- ) -> pd.DataFrame:
+ def compute_volumes(grid_df: pd.DataFrame, datestr: str | None = None) -> pd.DataFrame:
  """Compute "dynamic" volumes, volumes for data coming from the
  UNRST file (SWAT+SGAS)
 
@@ -194,11 +191,11 @@ def compute_volumes(
 
  def compute_pillar_contacts(
  grid_df: pd.DataFrame,
- region: Optional[str] = None,
+ region: str | None = None,
  soilcutoff: float = 0.2,
  sgascutoff: float = 0.7,
  swatcutoff: float = 0.7,
- datestr: Optional[str] = None,
+ datestr: str | None = None,
  ) -> pd.DataFrame:
  """Compute contacts pr. pillar in a grid dataframe.
 
@@ -268,21 +265,21 @@ def compute_pillar_contacts(
  .reset_index()
  )
  if soilcutoff and "SOIL" + atdatestr in grid_df:
- logger.info(
- "Calculating oil-water-contacts based on SOILcutoff %s", str(soilcutoff)
- )
+ logger.info("Calculating oil-water-contacts based on SOILcutoff %s", soilcutoff)
  owc = (
- grid_df[grid_df["SOIL" + atdatestr] > soilcutoff]
- .groupby(groupbies)
- .agg({"Z": "max"})
+ (
+ grid_df[grid_df["SOIL" + atdatestr] > soilcutoff]
+ .groupby(groupbies)
+ .agg({"Z": "max"})
+ )
+ .rename(columns={"Z": "OWC" + atdatestr})
+ .reset_index()
  )
- owc.rename(columns={"Z": "OWC" + atdatestr}, inplace=True)
- owc.reset_index(inplace=True)
  # Filter the owc frame to only those pillars that also has water:
- owc = pd.merge(waterpillars, owc, how="inner").drop("Z", axis="columns")
+ owc = waterpillars.merge(owc, how="inner").drop("Z", axis="columns")
 
  if sgascutoff and "SGAS" + atdatestr in grid_df:
- logger.info("Calculating gas-contacts based on gas cutoff %s", str(sgascutoff))
+ logger.info("Calculating gas-contacts based on gas cutoff %s", sgascutoff)
  if "SOIL" + atdatestr in grid_df and "SGAS" + atdatestr in grid_df:
  # Pillars to be used for GOC computation
  gocpillars = (
@@ -298,8 +295,7 @@ def compute_pillar_contacts(
  ]
  .groupby(groupbies)
  .agg({"Z": "max"})
- )
- goc.rename(columns={"Z": "GOC" + atdatestr}, inplace=True)
+ ).rename(columns={"Z": "GOC" + atdatestr})
  else:
  # Two-phase gas-water: GWC computation
  gocpillars = waterpillars # In case of gas-water
@@ -307,11 +303,10 @@ def compute_pillar_contacts(
  grid_df[grid_df["SGAS" + atdatestr] > sgascutoff]
  .groupby(groupbies)
  .agg({"Z": "max"})
- )
- goc.rename(columns={"Z": "GWC" + atdatestr}, inplace=True)
- goc.reset_index(inplace=True)
+ ).rename(columns={"Z": "GWC" + atdatestr})
+ goc = goc.reset_index()
  # Filter the goc frame to only those with oil or water:
- goc = pd.merge(gocpillars, goc, how="inner").drop("Z", axis="columns")
+ goc = gocpillars.merge(goc, how="inner").drop("Z", axis="columns")
 
  # We need to avoid merging with potentially empty DataFrames
  if owc.empty and goc.empty:
@@ -320,7 +315,7 @@ def compute_pillar_contacts(
  return owc
  if owc.empty and not goc.empty:
  return goc
- return pd.merge(owc, goc)
+ return owc.merge(goc)
 
 
  def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
@@ -413,9 +408,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
 
  def pillars_main(args) -> None:
  """This is the command line API"""
- logger = getLogger_res2csv( # pylint: disable=redefined-outer-name
- __name__, vars(args)
- )
+ logger = getLogger_res2csv(__name__, vars(args))
 
  resdatafiles = ResdataFiles(args.DATAFILE)
  dframe = df(
@@ -430,7 +423,7 @@ def pillars_main(args) -> None:
  groupbies = []
  aggregators = {
  key: AGGREGATORS[key.split("@")[0]]
- for key in dframe
+ for key in dframe.columns
  if key.split("@")[0] in AGGREGATORS
  }
  if args.region and args.group: