res2df-1.3.7-py3-none-any.whl → res2df-1.3.8-py3-none-any.whl

This diff compares the contents of two publicly released package versions as they appear in their public registry. It is provided for informational purposes only.
res2df/satfunc.py CHANGED
@@ -17,12 +17,10 @@ import argparse
  import contextlib
  import logging
  from pathlib import Path
- from typing import Dict, List, Optional, Union

  import pandas as pd

  with contextlib.suppress(ImportError):
- # pylint: disable=unused-import
  import opm.io

  from .common import (
@@ -40,7 +38,7 @@ from .resdatafiles import ResdataFiles

  logger: logging.Logger = logging.getLogger(__name__)

- SUPPORTED_KEYWORDS: List[str] = [
+ SUPPORTED_KEYWORDS: list[str] = [
  "SWOF",
  "SGOF",
  "SWFN",
@@ -54,7 +52,7 @@ SUPPORTED_KEYWORDS: List[str] = [
  # RENAMERS are a dictionary of dictionaries, referring to
  # how we should rename deck record items, from the JSON
  # files in opm.common and into Dataframe column names.
- RENAMERS: Dict[str, Dict[str, Union[List[str], str]]] = {}
+ RENAMERS: dict[str, dict[str, list[str] | str]] = {}
  RENAMERS["SGFN"] = {"DATA": ["SG", "KRG", "PCOG"]}
  RENAMERS["SGOF"] = {"DATA": ["SG", "KRG", "KROG", "PCOG"]}
  RENAMERS["SGWFN"] = {"DATA": ["SG", "KRG", "KRW", "PCGW"]}
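
Editor's note: the annotation hunks in this file (and the signature changes further down) replace `typing.Dict`, `List`, `Optional` and `Union` with built-in generics (PEP 585, Python 3.9+) and `|` unions (PEP 604, Python 3.10+). A minimal sketch of the before/after pattern, using hypothetical names rather than the module's real ones:

```python
# Old spelling via the typing module:
from typing import Dict, List, Optional, Union

OLD_RENAMERS: Dict[str, Union[List[str], str]] = {}


def old_lookup(key: Optional[str] = None) -> List[str]:
    return []


# New spelling with built-in generics and PEP 604 unions (Python 3.10+):
NEW_RENAMERS: dict[str, list[str] | str] = {}


def new_lookup(key: str | None = None) -> list[str]:
    return []
```

Since annotations on module-level variables are evaluated at import time, writing them this way presupposes a Python 3.10+ runtime unless postponed evaluation of annotations (PEP 563) is enabled.
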
@@ -66,9 +64,9 @@ RENAMERS["SWOF"] = {"DATA": ["SW", "KRW", "KROW", "PCOW"]}


  def df(
- deck: Union[str, "opm.libopmcommon_python.Deck"],
- keywords: Optional[List[str]] = None,
- ntsfun: Optional[int] = None,
+ deck: "str | ResdataFiles | opm.opmcommon_python.Deck",
+ keywords: list[str] | None = None,
+ ntsfun: int | None = None,
  ) -> pd.DataFrame:
  """Extract the data in the saturation function keywords as a Pandas
  DataFrames.
@@ -124,7 +122,7 @@ def df(
  # SUPPORTED_KEYWORDS (mainly to get WaterOil before GasOil)
  # We do that by converting to a Categorical series:
  dframe["KEYWORD"] = pd.Categorical(dframe["KEYWORD"], SUPPORTED_KEYWORDS)
- dframe.sort_values(["SATNUM", "KEYWORD"], inplace=True)
+ dframe = dframe.sort_values(["SATNUM", "KEYWORD"])
  dframe["KEYWORD"] = dframe["KEYWORD"].astype(str)
  logger.info(
  "Extracted keywords %s for %i SATNUMs",
@@ -147,14 +145,14 @@ def interpolate_defaults(dframe: pd.DataFrame) -> pd.DataFrame:
  assert len(sat_cols) == 1, (
  f"Could not determine a single saturation column in {dframe.columns}"
  )
- sat_col = list(sat_cols)[0]
+ sat_col = next(iter(sat_cols))

  if dframe[sat_col].isna().any():
  raise ValueError("nan in saturation column is not allowed")

  filled_frames = []
  for _, subframe in dframe.groupby("SATNUM"):
- subframe.set_index(sat_col, inplace=True)
+ subframe = subframe.set_index(sat_col)
  numeric_columns = subframe.select_dtypes(include=["float", "int"]).columns
  subframe[numeric_columns] = subframe[numeric_columns].interpolate(
  method="index", limit_area="inside"
@@ -201,9 +199,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar

  def satfunc_main(args) -> None:
  """Entry-point for module, for command line utility"""
- logger = getLogger_res2csv( # pylint: disable=redefined-outer-name
- __name__, vars(args)
- )
+ logger = getLogger_res2csv(__name__, vars(args))
  resdatafiles = ResdataFiles(args.DATAFILE)
  if resdatafiles:
  deck = resdatafiles.get_deck()
@@ -234,9 +230,7 @@ def satfunc_main(args) -> None:

  def satfunc_reverse_main(args) -> None:
  """For command line utility for CSV to resdata"""
- logger = getLogger_res2csv( # pylint: disable=redefined-outer-name
- __name__, vars(args)
- )
+ logger = getLogger_res2csv(__name__, vars(args))
  satfunc_df = pd.read_csv(args.csvfile)
  logger.info("Parsed %s", args.csvfile)
  inc_string = df2res(satfunc_df, keywords=args.keywords)
@@ -245,9 +239,9 @@ def satfunc_reverse_main(args) -> None:

  def df2res(
  satfunc_df: pd.DataFrame,
- keywords: Optional[List[str]] = None,
- comments: Optional[Dict[str, str]] = None,
- filename: Optional[str] = None,
+ keywords: list[str] | None = None,
+ comments: dict[str, str] | None = None,
+ filename: str | None = None,
  ) -> str:
  """Generate resdata :term:`include file` content from dataframes with
  saturation functions (SWOF, SGOF, ...)
@@ -279,7 +273,7 @@ def df2res(
  return string


- def df2res_swof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_swof(dframe: pd.DataFrame, comment: str | None = None) -> str:
  """Create string with :term:`include file` contents for SWOF. Used by df2res().

  Args:
@@ -289,7 +283,7 @@ def df2res_swof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
  return _df2res_satfuncs("SWOF", dframe, comment)


- def df2res_sgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_sgof(dframe: pd.DataFrame, comment: str | None = None) -> str:
  """Create string with :term:`include file` contents for SGOF. Used by df2res().

  Args:
@@ -299,7 +293,7 @@ def df2res_sgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
  return _df2res_satfuncs("SGOF", dframe, comment)


- def df2res_sgfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_sgfn(dframe: pd.DataFrame, comment: str | None = None) -> str:
  """Create string with :term:`include file` contents for SGFN. Used by df2res().

  Args:
@@ -309,7 +303,7 @@ def df2res_sgfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
  return _df2res_satfuncs("SGFN", dframe, comment)


- def df2res_sgwfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_sgwfn(dframe: pd.DataFrame, comment: str | None = None) -> str:
  """Create string with :term:`include file` contents for SGWFN. Used by df2res().

  Args:
@@ -319,7 +313,7 @@ def df2res_sgwfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
  return _df2res_satfuncs("SGWFN", dframe, comment)


- def df2res_swfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_swfn(dframe: pd.DataFrame, comment: str | None = None) -> str:
  """Create string with :term:`include file` contents for SWFN. Used by df2res().

  Args:
@@ -329,7 +323,7 @@ def df2res_swfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
  return _df2res_satfuncs("SWFN", dframe, comment)


- def df2res_slgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_slgof(dframe: pd.DataFrame, comment: str | None = None) -> str:
  """Create string with :term:`include file` contents for SLGOF. Used by df2res().

  Args:
@@ -339,7 +333,7 @@ def df2res_slgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
  return _df2res_satfuncs("SLGOF", dframe, comment)


- def df2res_sof2(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_sof2(dframe: pd.DataFrame, comment: str | None = None) -> str:
  """Create string with :term:`include file` contents for SOF2. Used by df2res().

  Args:
@@ -349,7 +343,7 @@ def df2res_sof2(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
  return _df2res_satfuncs("SOF2", dframe, comment)


- def df2res_sof3(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:
+ def df2res_sof3(dframe: pd.DataFrame, comment: str | None = None) -> str:
  """Create string with :term:`include file` contents for SOF3. Used by df2res().

  Args:
@@ -360,7 +354,7 @@ def df2res_sof3(dframe: pd.DataFrame, comment: Optional[str] = None) -> str:


  def _df2res_satfuncs(
- keyword: str, dframe: pd.DataFrame, comment: Optional[str] = None
+ keyword: str, dframe: pd.DataFrame, comment: str | None = None
  ) -> str:
  if dframe.empty:
  return "-- No data!\n"
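
Editor's note: several hunks in this file, and more in summary.py and trans.py below, drop `inplace=True` and rebind the result of `sort_values`, `set_index`, `rename` and `drop` instead. A minimal sketch of the pattern, with made-up data rather than the module's real frames:

```python
import pandas as pd

# Hypothetical saturation-function-like frame, for illustration only.
dframe = pd.DataFrame(
    {"SATNUM": [2, 1], "KEYWORD": ["SGOF", "SWOF"], "SW": [0.3, 0.1]}
)

# Instead of dframe.sort_values(..., inplace=True):
dframe = dframe.sort_values(["SATNUM", "KEYWORD"])
# Instead of dframe.set_index(..., inplace=True):
dframe = dframe.set_index("SATNUM")
print(dframe)
```

Reassigning the returned frame is the style generally recommended by the pandas documentation, and it avoids mutating objects that may be views (for example the per-group frames yielded by `groupby`), which matters under copy-on-write semantics.
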
res2df/summary.py CHANGED
@@ -6,14 +6,14 @@ import argparse
  import datetime as dt
  import logging
  import os
+ from collections.abc import Iterable
  from pathlib import Path
- from typing import Any, Dict, List, Optional, Union
+ from typing import Any

  import dateutil
  import numpy as np
  import pandas as pd
- import pyarrow
- import pyarrow.feather
+ import pyarrow as pa
  from resdata.summary import Summary

  from .common import write_dframe_stdout_file
@@ -27,7 +27,7 @@ logger: logging.Logger = logging.getLogger(__name__)
  FREQ_RAW: str = "raw"
  FREQ_FIRST: str = "first"
  FREQ_LAST: str = "last"
- PD_FREQ_MNEMONICS: Dict[str, str] = {
+ PD_FREQ_MNEMONICS: dict[str, str] = {
  "daily": "D",
  "weekly": "W-MON",
  "monthly": "MS",
@@ -38,10 +38,12 @@ PD_FREQ_MNEMONICS: Dict[str, str] = {
  """Mapping from res2df custom offset strings to Pandas DateOffset strings.
  See
  https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects
- """ # noqa
+ """


- def date_range(start_date: dt.date, end_date: dt.date, freq: str) -> List[dt.datetime]:
+ def date_range(
+ start_date: dt.date, end_date: dt.date, freq: str
+ ) -> Iterable[dt.datetime]:
  """Wrapper for pandas.date_range to allow for extra res2df specific mnemonics
  'yearly', 'daily', 'weekly', mapped over to pandas DateOffsets.

@@ -62,7 +64,7 @@ def date_range(start_date: dt.date, end_date: dt.date, freq: str) -> List[dt.dat
  return _fallback_date_range(start_date, end_date, freq)


- def _ensure_date_or_none(some_date: Optional[Union[str, dt.date]]) -> Optional[dt.date]:
+ def _ensure_date_or_none(some_date: str | dt.date | None) -> dt.date | None:
  """Ensures an object is either a date or None

  Args:
@@ -83,11 +85,11 @@ def _ensure_date_or_none(some_date: Optional[Union[str, dt.date]]) -> Optional[d


  def _crop_datelist(
- summarydates: List[dt.datetime],
- freq: Union[dt.date, dt.datetime, str],
- start_date: Optional[dt.date] = None,
- end_date: Optional[dt.date] = None,
- ) -> Union[List[dt.date], List[dt.datetime]]:
+ summarydates: list[dt.datetime],
+ freq: dt.date | dt.datetime | str,
+ start_date: dt.date | None = None,
+ end_date: dt.date | None = None,
+ ) -> list[dt.date] | list[dt.datetime]:
  """Helper function for resample_smry_dates, taking care of
  the special cases where the list of dates should not be resampled, but
  only cropped or returned as is.
@@ -102,7 +104,7 @@ def _crop_datelist(
  Returns:
  list of datetimes.
  """
- datetimes: Union[List[dt.date], List[dt.datetime]] = [] # type: ignore
+ datetimes: list[dt.date] | list[dt.datetime] = [] # type: ignore
  if freq == FREQ_RAW:
  datetimes = summarydates
  datetimes.sort()
@@ -110,7 +112,7 @@
  # Convert to datetime (at 00:00:00)
  start_date = dt.datetime.combine(start_date, dt.datetime.min.time())
  datetimes = [x for x in datetimes if x > start_date]
- datetimes = [start_date] + datetimes
+ datetimes = [start_date, *datetimes]
  if end_date:
  end_date = dt.datetime.combine(end_date, dt.datetime.min.time())
  datetimes = [x for x in datetimes if x < end_date]
@@ -162,7 +164,7 @@ def _fallback_date_roll(rollme: dt.datetime, direction: str, freq: str) -> dt.da
  )


- def _fallback_date_range(start: dt.date, end: dt.date, freq: str) -> List[dt.datetime]:
+ def _fallback_date_range(start: dt.date, end: dt.date, freq: str) -> list[dt.datetime]:
  """Fallback routine for generating date ranges beyond Pandas datetime64[ns]
  year-2262 limit.

@@ -192,12 +194,12 @@ def _fallback_date_range(start: dt.date, end: dt.date, freq: str) -> List[dt.dat


  def resample_smry_dates(
- summarydates: List[dt.datetime],
+ summarydates: list[dt.datetime],
  freq: str = FREQ_RAW,
  normalize: bool = True,
- start_date: Optional[Union[str, dt.date]] = None,
- end_date: Optional[Union[str, dt.date]] = None,
- ) -> Union[List[dt.date], List[dt.datetime]]:
+ start_date: str | dt.date | None = None,
+ end_date: str | dt.date | None = None,
+ ) -> list[dt.date] | list[dt.datetime]:
  """
  Resample (optionally) a list of date(time)s to a new datelist according to options.

@@ -252,12 +254,12 @@ def resample_smry_dates(
  # For yearly frequency it will return [1997-01-01, 2021-01-01].
  offset = pd.tseries.frequencies.to_offset(PD_FREQ_MNEMONICS.get(freq, freq))
  try:
- start_normalized = offset.rollback(start_smry.date()).date()
+ start_normalized = offset.rollback(start_smry).date()
  except pd.errors.OutOfBoundsDatetime:
  # Pandas only supports datetime up to year 2262
  start_normalized = _fallback_date_roll(start_smry, "back", freq).date()
  try:
- end_normalized = offset.rollforward(end_smry.date()).date()
+ end_normalized = offset.rollforward(end_smry).date()
  except pd.errors.OutOfBoundsDatetime:
  # Pandas only supports datetime up to year 2262
  end_normalized = _fallback_date_roll(end_smry, "forward", freq).date()
@@ -283,7 +285,7 @@
  # fit on frequency boundary. Force include these if
  # supplied as user arguments.
  if start_date and start_date not in dates:
- dates = [start_date] + dates
+ dates = [start_date, *dates]
  if end_date and end_date not in dates:
  dates += [end_date]
  return dates
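
Editor's note: the `except pd.errors.OutOfBoundsDatetime` branches above exist because pandas' nanosecond timestamps stop at April 2262, which is why `resample_smry_dates` falls back to `_fallback_date_roll` and `_fallback_date_range` for later dates. A small illustration of that limit, using `pd.to_datetime` rather than the offset rollback from the diff:

```python
import pandas as pd

# The datetime64[ns] range ends in April 2262:
print(pd.Timestamp.max)  # 2262-04-11 23:47:16.854775807

try:
    pd.to_datetime("2400-01-01")
except pd.errors.OutOfBoundsDatetime:
    print("outside the nanosecond range; res2df rolls such dates manually")
```
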
@@ -291,16 +293,15 @@

  def df(
  resdatafiles: ResdataFiles,
- time_index: Optional[str] = None,
- column_keys: Optional[Union[List[str], str]] = None,
- start_date: Optional[Union[str, dt.date]] = None,
- end_date: Optional[Union[str, dt.date]] = None,
+ time_index: str | None = None,
+ column_keys: list[str] | str | None = None,
+ start_date: str | dt.date | None = None,
+ end_date: str | dt.date | None = None,
  include_restart: bool = False,
  params: bool = False,
- paramfile: Optional[str] = None,
+ paramfile: str | None = None,
  datetime: bool = False, # A very poor choice of argument name [pylint]
  ):
- # pylint: disable=too-many-arguments
  """
  Extract data from UNSMRY as Pandas dataframes.

@@ -356,7 +357,7 @@ def df(
  logger.warning("Error reading summary instance, returning empty dataframe")
  return pd.DataFrame()

- time_index_arg: Optional[Union[List[dt.date], List[dt.datetime]]]
+ time_index_arg: list[dt.date] | list[dt.datetime] | None
  if isinstance(time_index, str) and time_index == "raw":
  time_index_arg = resample_smry_dates(
  summary.dates,
@@ -483,7 +484,7 @@ def _ensure_unique_datetime_index(dframe: pd.DataFrame) -> pd.DataFrame:
  return dframe


- def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table:
+ def _df2pyarrow(dframe: pd.DataFrame) -> pa.Table:
  """Construct a Pyarrow table from a dataframe, conserving metadata.

  All integer columns will have datatype int32, all floats will have float32
@@ -497,11 +498,11 @@ def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table:
  This index is always named DATE in the pyarrow table.
  """

- field_list: List[pyarrow.Field] = []
- field_list.append(pyarrow.field("DATE", pyarrow.timestamp("ms")))
+ field_list: list[pa.Field] = []
+ field_list.append(pa.field("DATE", pa.timestamp("ms")))
  column_arrays = [dframe.index.to_numpy().astype("datetime64[ms]")]

- dframe_values = dframe.values.transpose()
+ dframe_values = dframe.to_numpy().transpose()
  for col_idx, colname in enumerate(dframe.columns):
  if "meta" in dframe.attrs and colname in dframe.attrs["meta"]:
  # Boolean objects in the metadata dictionary must be converted to bytes:
@@ -512,24 +513,24 @@ def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table:
  else:
  field_metadata = {}
  if pd.api.types.is_integer_dtype(dframe.dtypes[colname]):
- dtype = pyarrow.int32()
+ dtype = pa.int32()
  elif pd.api.types.is_string_dtype(dframe.dtypes[colname]):
  # Parameters are potentially merged into the dataframe.
- dtype = pyarrow.string()
+ dtype = pa.string()
  else:
- dtype = pyarrow.float32()
- field_list.append(pyarrow.field(colname, dtype, metadata=field_metadata))
+ dtype = pa.float32()
+ field_list.append(pa.field(colname, dtype, metadata=field_metadata))
  column_arrays.append(dframe_values[col_idx])

- schema = pyarrow.schema(field_list)
+ schema = pa.schema(field_list)

- return pyarrow.table(column_arrays, schema=schema)
+ return pa.table(column_arrays, schema=schema)


  def _merge_params(
  dframe: pd.DataFrame,
- paramfile: Optional[Union[str, Path]] = None,
- resdatafiles: Optional[Union[str, ResdataFiles]] = None,
+ paramfile: str | Path | None = None,
+ resdatafiles: str | ResdataFiles | None = None,
  ) -> pd.DataFrame:
  """Locate parameters in a <key> <value> file and add to the dataframe.

@@ -539,7 +540,7 @@ def _merge_params(

  if paramfile is None and resdatafiles is not None:
  param_files = find_parameter_files(resdatafiles)
- logger.info("Loading parameters from files: %s", str(param_files))
+ logger.info("Loading parameters from files: %s", param_files)
  param_dict = load_all(param_files)
  elif (
  paramfile is not None
@@ -547,10 +548,10 @@
  and not Path(paramfile).is_absolute()
  ):
  param_files = find_parameter_files(resdatafiles, filebase=str(paramfile))
- logger.info("Loading parameters from files: %s", str(param_files))
+ logger.info("Loading parameters from files: %s", param_files)
  param_dict = load_all(param_files)
  elif paramfile is not None and Path(paramfile).is_absolute():
- logger.info("Loading parameters from file: %s", str(paramfile))
+ logger.info("Loading parameters from file: %s", paramfile)
  param_dict = load(paramfile)
  else:
  raise ValueError("Not able to locate parameters.txt")
@@ -564,7 +565,7 @@
  return dframe


- def smry_meta(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]:
+ def smry_meta(resdatafiles: ResdataFiles) -> dict[str, dict[str, Any]]:
  """Provide metadata for summary data vectors.

  A dictionary indexed by summary vector name is returned, and each
@@ -584,7 +585,7 @@ def smry_meta(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]:
  else:
  summary = resdatafiles.get_summary()

- meta: Dict[str, Dict[str, Any]] = {}
+ meta: dict[str, dict[str, Any]] = {}
  for col in summary:
  meta[col] = {}
  meta[col]["unit"] = summary.unit(col)
@@ -618,7 +619,7 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
  dframe = dframe.copy()
  if "DATE" in dframe.columns:
  # Infer datatype (Pandas cannot answer it) based on the first element:
- if isinstance(dframe["DATE"].values[0], str):
+ if isinstance(dframe["DATE"].to_numpy()[0], str):
  # Do not use pd.Series.apply() here, Pandas would try to convert it to
  # datetime64[ns] which is limited at year 2262.
  dframe["DATE"] = pd.Series(
@@ -629,7 +630,7 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
  dtype="object",
  index=dframe.index,
  )
- if isinstance(dframe["DATE"].values[0], dt.date):
+ if isinstance(dframe["DATE"].to_numpy()[0], dt.date):
  dframe["DATE"] = pd.Series(
  [
  dt.datetime.combine(dateobj, dt.datetime.min.time())
@@ -639,20 +640,20 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
  index=dframe.index,
  )

- dframe.set_index("DATE", inplace=True)
+ dframe = dframe.set_index("DATE")
  if not isinstance(
- dframe.index.values[0], (dt.datetime, np.datetime64, pd.Timestamp)
+ dframe.index.to_numpy()[0], (dt.datetime, np.datetime64, pd.Timestamp)
  ):
  raise ValueError(
  "dataframe must have a datetime index, got "
- f"{dframe.index.values[0]} of type {type(dframe.index.values[0])}"
+ f"{dframe.index.to_numpy()[0]} of type {type(dframe.index.to_numpy()[0])}"
  )
- dframe.sort_index(axis=0, inplace=True)
+ dframe = dframe.sort_index(axis=0)

  # This column will appear if dataframes are naively written to CSV
  # files and read back in again.
  if "Unnamed: 0" in dframe:
- dframe.drop("Unnamed: 0", axis="columns", inplace=True)
+ dframe = dframe.drop("Unnamed: 0", axis="columns")

  block_columns = [
  col for col in dframe.columns if (col.startswith("B") or col.startswith("LB"))
@@ -661,7 +662,7 @@ def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame:
  dframe = dframe.drop(columns=block_columns)
  logger.warning(
  "Dropped columns with block data, not supported: %s",
- str({colname.partition(":")[0] + ":*" for colname in block_columns}),
+ {colname.partition(":")[0] + ":*" for colname in block_columns},
  )

  return dframe
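
Editor's note: the `_fix_dframe_for_resdata` hunks above also swap the `.values` attribute for the `.to_numpy()` method on Series and indexes, and pass objects straight to the lazy `%s` logging calls instead of pre-wrapping them in `str()`. For NumPy-backed columns `.values` and `.to_numpy()` return the same array; `.to_numpy()` is simply the accessor the pandas documentation recommends. A trivial sketch with made-up data:

```python
import pandas as pd

frame = pd.DataFrame({"DATE": ["2020-01-01"], "FOPT": [1234.5]})

# Equivalent for a NumPy-backed column; .to_numpy() is the explicit spelling.
assert frame["DATE"].values[0] == frame["DATE"].to_numpy()[0]
print(type(frame["DATE"].to_numpy()[0]))  # <class 'str'>
```
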
@@ -795,9 +796,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar

  def summary_main(args) -> None:
  """Read summary data from disk and write CSV back to disk"""
- logger = getLogger_res2csv( # pylint: disable=redefined-outer-name
- __name__, vars(args)
- )
+ logger = getLogger_res2csv(__name__, vars(args))
  eclbase = (
  args.DATAFILE.replace(".DATA", "").replace(".UNSMRY", "").replace(".SMSPEC", "")
  )
@@ -827,9 +826,7 @@ def summary_main(args) -> None:

  def summary_reverse_main(args) -> None:
  """Entry point for usage with "csv2res summary" on the command line"""
- logger = getLogger_res2csv( # pylint: disable=redefined-outer-name
- __name__, vars(args)
- )
+ logger = getLogger_res2csv(__name__, vars(args))

  summary_df = pd.read_csv(args.csvfile)
  logger.info("Parsed %s", args.csvfile)
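
Editor's note: summary.py now imports pyarrow under the conventional `pa` alias and builds its Arrow output through explicit `pa.field`/`pa.schema`/`pa.table` calls, as shown in the `_df2pyarrow` hunks above. A standalone sketch of that pattern with hypothetical column names, omitting the per-field metadata handling of the real function:

```python
import pandas as pd
import pyarrow as pa

# Made-up summary-like data; only the table-building pattern matters here.
dframe = pd.DataFrame(
    {"FOPT": [0.0, 1234.5], "FOPR": [0.0, 41.2]},
    index=pd.to_datetime(["2020-01-01", "2020-02-01"]),
)

fields = [pa.field("DATE", pa.timestamp("ms"))]
columns = [dframe.index.to_numpy().astype("datetime64[ms]")]
for colname in dframe.columns:
    fields.append(pa.field(colname, pa.float32()))
    columns.append(dframe[colname].to_numpy().astype("float32"))

table = pa.table(columns, schema=pa.schema(fields))
print(table.schema)
```
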
res2df/trans.py CHANGED
@@ -5,8 +5,8 @@ Extract transmissibility information from output files as Dataframes.

  import argparse
  import logging
- from typing import List, Optional, Union

+ import networkx
  import pandas as pd

  from .common import write_dframe_stdout_file
@@ -15,19 +15,12 @@ from .nnc import df as create_nnc_df
  from .res2csvlogger import getLogger_res2csv
  from .resdatafiles import ResdataFiles

- try:
- import networkx
-
- HAVE_NETWORKX = True
- except ImportError:
- HAVE_NETWORKX = False
-
  logger = logging.getLogger(__name__)


  def df(
  resdatafiles: ResdataFiles,
- vectors: Optional[Union[str, List[str]]] = None,
+ vectors: str | list[str] | None = None,
  boundaryfilter: bool = False,
  group: bool = False,
  coords: bool = False,
@@ -105,15 +98,13 @@ def df(
  existing_vectors = [vec for vec in vectors if vec in grid_df.columns]
  if len(existing_vectors) < len(vectors):
  logger.warning(
- "Vectors %s not found, skipping", str(set(vectors) - set(existing_vectors))
+ "Vectors %s not found, skipping", set(vectors) - set(existing_vectors)
  )
  vectors = existing_vectors
  logger.info("Building transmissibility dataframe")
  if not onlykdir:
  tranx = pd.DataFrame(grid_df[grid_df["TRANX"] > 0][["I", "J", "K", "TRANX"]])
- tranx.rename(
- columns={"I": "I1", "J": "J1", "K": "K1", "TRANX": "TRAN"}, inplace=True
- )
+ tranx = tranx.rename(columns={"I": "I1", "J": "J1", "K": "K1", "TRANX": "TRAN"})
  tranx["I2"] = tranx["I1"] + 1
  tranx["J2"] = tranx["J1"]
  tranx["K2"] = tranx["K1"]
@@ -123,9 +114,7 @@

  if not onlykdir:
  trany = pd.DataFrame(grid_df[grid_df["TRANY"] > 0][["I", "J", "K", "TRANY"]])
- trany.rename(
- columns={"I": "I1", "J": "J1", "K": "K1", "TRANY": "TRAN"}, inplace=True
- )
+ trany = trany.rename(columns={"I": "I1", "J": "J1", "K": "K1", "TRANY": "TRAN"})
  trany["I2"] = trany["I1"]
  trany["J2"] = trany["J1"] + 1
  trany["K2"] = trany["K1"]
@@ -135,9 +124,7 @@

  if not onlyijdir:
  tranz = pd.DataFrame(grid_df[grid_df["TRANZ"] > 0][["I", "J", "K", "TRANZ"]])
- tranz.rename(
- columns={"I": "I1", "J": "J1", "K": "K1", "TRANZ": "TRAN"}, inplace=True
- )
+ tranz = tranz.rename(columns={"I": "I1", "J": "J1", "K": "K1", "TRANZ": "TRAN"})
  tranz["I2"] = tranz["I1"]
  tranz["J2"] = tranz["J1"]
  tranz["K2"] = tranz["K1"] + 1
@@ -164,18 +151,16 @@ def df(
  vectorscoords.append("Z")

  if vectorscoords:
- logger.info("Adding vectors %s", str(vectorscoords))
+ logger.info("Adding vectors %s", vectorscoords)
  grid_df = grid_df.reset_index()
- trans_df = pd.merge(
- trans_df,
- grid_df[["I", "J", "K"] + vectorscoords],
+ trans_df = trans_df.merge(
+ grid_df[["I", "J", "K", *vectorscoords]],
  left_on=["I1", "J1", "K1"],
  right_on=["I", "J", "K"],
  )
  trans_df = trans_df.drop(["I", "J", "K"], axis=1)
- trans_df = pd.merge(
- trans_df,
- grid_df[["I", "J", "K"] + vectorscoords],
+ trans_df = trans_df.merge(
+ grid_df[["I", "J", "K", *vectorscoords]],
  left_on=["I2", "J2", "K2"],
  right_on=["I", "J", "K"],
  suffixes=("1", "2"),
@@ -203,7 +188,7 @@ def df(
  if group:
  assert len(vectors) == 1 # This is checked above
  assert boundaryfilter
- logger.info("Grouping transmissiblity over %s interfaces", str(vectors[0]))
+ logger.info("Grouping transmissiblity over %s interfaces", vectors[0])
  vec1 = vectors[0] + "1"
  vec2 = vectors[0] + "2"
  pairname = vectors[0] + "PAIR"
@@ -236,19 +221,14 @@ def df(
  return trans_df


- def make_nx_graph(
- resdatafiles: ResdataFiles, region: str = "FIPNUM"
- ) -> "networkx.Graph":
+ def make_nx_graph(resdatafiles: ResdataFiles, region: str = "FIPNUM") -> networkx.Graph:
  """Construct a networkx graph for the transmissibilities."""
- if not HAVE_NETWORKX:
- logger.error("Please install networkx for this function to work")
- return None
  trans_df = df(resdatafiles, vectors=[region], coords=True, group=True)
  reg1 = region + "1"
  reg2 = region + "2"
- graph = networkx.Graph()
+ graph: networkx.Graph = networkx.Graph()
  graph.add_weighted_edges_from(
- [tuple(row) for row in trans_df[[reg1, reg2, "TRAN"]].values]
+ [tuple(row) for row in trans_df[[reg1, reg2, "TRAN"]].to_numpy()]
  )
  return graph

@@ -306,9 +286,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:

  def trans_main(args):
  """This is the command line API"""
- logger = getLogger_res2csv( # pylint: disable=redefined-outer-name
- __name__, vars(args)
- )
+ logger = getLogger_res2csv(__name__, vars(args))
  resdatafiles = ResdataFiles(args.DATAFILE)
  trans_df = df(
  resdatafiles,
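
Editor's note: trans.py now imports networkx unconditionally at module level; the `HAVE_NETWORKX` guard is gone, so `make_nx_graph` no longer returns `None` when the dependency is missing, and importing the module requires networkx to be installed. A minimal standalone sketch of the `add_weighted_edges_from` pattern used there, with made-up region pairs and transmissibilities:

```python
import networkx
import pandas as pd

# Hypothetical region-to-region transmissibilities, not real simulator output.
trans_df = pd.DataFrame(
    {"FIPNUM1": [1, 1, 2], "FIPNUM2": [2, 3, 3], "TRAN": [150.0, 2.5, 30.0]}
)

graph: networkx.Graph = networkx.Graph()
graph.add_weighted_edges_from(
    [tuple(row) for row in trans_df[["FIPNUM1", "FIPNUM2", "TRAN"]].to_numpy()]
)
print(graph.edges(data=True))  # each edge carries a "weight" attribute
```
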
res2df/version.py CHANGED
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
  commit_id: COMMIT_ID
  __commit_id__: COMMIT_ID

- __version__ = version = '1.3.7'
- __version_tuple__ = version_tuple = (1, 3, 7)
+ __version__ = version = '1.3.8'
+ __version_tuple__ = version_tuple = (1, 3, 8)

  __commit_id__ = commit_id = None
res2df/vfp/__init__.py CHANGED
@@ -2,7 +2,7 @@
  keywords in Eclipse.
  """

- from ._vfp import ( # noqa F:401
+ from ._vfp import ( # noqa: F401
  basic_data,
  basic_data2df,
  basic_data2pyarrow,