res2df 1.3.7.tar.gz → 1.3.8.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {res2df-1.3.7 → res2df-1.3.8}/.github/workflows/codecov.yml +3 -3
- {res2df-1.3.7 → res2df-1.3.8}/.github/workflows/publish.yml +1 -1
- {res2df-1.3.7 → res2df-1.3.8}/.github/workflows/res2df.yml +2 -2
- {res2df-1.3.7 → res2df-1.3.8}/.github/workflows/style.yml +2 -2
- {res2df-1.3.7 → res2df-1.3.8}/.github/workflows/typing.yml +2 -2
- {res2df-1.3.7 → res2df-1.3.8}/.pre-commit-config.yaml +2 -2
- {res2df-1.3.7/src/res2df.egg-info → res2df-1.3.8}/PKG-INFO +4 -2
- {res2df-1.3.7 → res2df-1.3.8}/docs/conf.py +1 -1
- {res2df-1.3.7 → res2df-1.3.8}/pyproject.toml +19 -6
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/__init__.py +2 -3
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/common.py +79 -75
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/compdat.py +27 -32
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/csv2res.py +5 -9
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/equil.py +24 -29
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/faults.py +2 -7
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/fipreports.py +10 -14
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/grid.py +58 -63
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/gruptree.py +33 -35
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/inferdims.py +6 -9
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/nnc.py +5 -10
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/parameters.py +12 -12
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/pillars.py +24 -31
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/pvt.py +29 -34
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/res2csv.py +10 -15
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/res2csvlogger.py +1 -3
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/resdatafiles.py +8 -8
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/rft.py +36 -42
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/satfunc.py +22 -28
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/summary.py +57 -60
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/trans.py +16 -38
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/version.py +3 -3
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/vfp/__init__.py +1 -1
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/vfp/_vfp.py +28 -33
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/vfp/_vfpcommon.py +18 -19
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/vfp/_vfpdefs.py +2 -3
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/vfp/_vfpinj.py +23 -58
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/vfp/_vfpprod.py +28 -64
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/wcon.py +4 -11
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/wellcompletiondata.py +26 -26
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/wellconnstatus.py +4 -5
- {res2df-1.3.7 → res2df-1.3.8/src/res2df.egg-info}/PKG-INFO +4 -2
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df.egg-info/SOURCES.txt +1 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df.egg-info/requires.txt +3 -1
- res2df-1.3.8/tests/__init__.py +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_common.py +6 -7
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_compdat.py +6 -8
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_eclfiles.py +1 -3
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_equil.py +26 -27
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_ert_hooks.py +2 -6
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_faults.py +1 -2
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_fipreports.py +5 -5
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_grid.py +24 -8
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_gruptree.py +3 -4
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_hook_implementations.py +1 -3
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_inferdims.py +3 -4
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_integration.py +1 -2
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_logging.py +1 -3
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_nnc.py +3 -6
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_parameters.py +3 -2
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_pvt.py +5 -7
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_rft.py +20 -22
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_satfunc.py +1 -2
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_summary.py +17 -18
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_summary_restarts.py +4 -6
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_trans.py +1 -10
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_userapi.py +1 -2
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_vfp.py +1 -1
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_wcon.py +1 -2
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_wellcompletiondata.py +2 -2
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_wellconnstatus.py +2 -4
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_welopen.py +1 -2
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_wlist.py +1 -2
- {res2df-1.3.7 → res2df-1.3.8}/.gitignore +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/CONTRIBUTING.md +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/LICENSE +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/README.md +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/SECURITY.md +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/ci/testkomodo.sh +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/_static/equinor-logo.png +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/_static/equinor-logo2.jpg +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/_static/equinor_logo.jpg +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/_static/equinor_logo_only.jpg +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/_templates/layout.html +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/contribution.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/csv2res.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/glossary.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/history.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/index.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/installation.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/introduction.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/res2csv.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/compdat.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/compdat.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/equil-example.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/equil.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/fipnum.inc +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/fipreports-example.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/fipreports-example.txt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/fipreports.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/grid.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/grid.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/gruptree.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/gruptree.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/gruptreenet.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/images/injectoranalysis.png +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/images/multibranch-rftanalysis.png +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/nnc.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/nnc.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/outflow.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/pillars-dyn1-stacked.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/pillars-dyn1-unstacked.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/pillars-example1.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/pillars.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/pvt.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/pvt.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/rft.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/rft_columns.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/satfunc.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/satfunc.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/summary.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/summary.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/trans-boundaries.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/trans-group.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/trans.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/trans1.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/wcon.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/wcon.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/well_connection_status.csv +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage/wellconnstatus.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/docs/usage.rst +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/setup.cfg +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/__version__.py +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/constants.py +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/hook_implementations/__init__.py +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/hook_implementations/forward_model_steps.py +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/BRANPROP +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/COMPDAT +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/COMPLUMP +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/COMPSEGS +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/DENSITY +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/EQLDIMS +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/EQUIL +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/FAULTS +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/GRUPNET +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/GRUPTREE +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/NODEPROP +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/PBVD +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/PDVD +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/PVDG +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/PVDO +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/PVTG +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/PVTO +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/PVTW +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/ROCK +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/RSVD +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/RVVD +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/SGFN +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/SGOF +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/SGWFN +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/SLGOF +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/SOF2 +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/SOF3 +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/SWFN +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/SWOF +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/TABDIMS +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/VFPINJ +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/VFPPROD +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WCONHIST +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WCONINJE +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WCONINJH +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WCONPROD +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WELOPEN +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WELSEGS +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WELSPECS +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WLIST +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WSEGAICD +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WSEGSICD +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/WSEGVALV +0 -0
- {res2df-1.3.7/tests → res2df-1.3.8/src/res2df/opmkeywords}/__init__.py +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/readme +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/opmkeywords/runmetoupdate.sh +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df/svg_color_keyword_names.txt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df.egg-info/dependency_links.txt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df.egg-info/entry_points.txt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df.egg-info/not-zip-safe +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/src/res2df.egg-info/top_level.txt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/conftest.py +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/EIGHTCELLS.DATA +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/EIGHTCELLS.EGRID +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/EIGHTCELLS.INIT +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/EIGHTCELLS.INSPEC +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/EIGHTCELLS.PRT +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/EIGHTCELLS.RFT +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/EIGHTCELLS.RSSPEC +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/EIGHTCELLS.SMSPEC +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/EIGHTCELLS.UNRST +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/EIGHTCELLS.UNSMRY +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/eightcells_duplicated_summary_vector/EIGHTCELLS_DUPES.DATA +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/eightcells_duplicated_summary_vector/EIGHTCELLS_DUPES.SMSPEC +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/eightcells_duplicated_summary_vector/EIGHTCELLS_DUPES.UNSMRY +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/eightcells/zones.lyr +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/fipreports/TEST1.PRT +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/grid/reek.faults +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/grid/reek.grid +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/grid/reek.multflt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/grid/reek.multz +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/grid/reek.perm +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/grid/reek.poro +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/props/let-sgof.txt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/props/let-swof.txt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/props/reek.endpoints +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/props/reek.pvt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/props/reek.swatinit +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/props/sgof.txt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/props/swof.inc +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/props/swof.txt +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/regions/reek.eqlnum +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/regions/reek.fipnum +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/schedule/op6_aicd1_gp.sch +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/schedule/op6_icd1_gp.sch +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/schedule/op6_valve1_gp.sch +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/schedule/reek_history.sch +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/solution/reek.equil +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/include/summary/reek.smry +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0-OPMFLOW.PRT +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0.ECLEND +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0.EGRID +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0.INIT +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0.LOG +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0.PRT +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0.RFT +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0.SMSPEC +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0.UNRST +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/2_R001_REEK-0.UNSMRY +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/reek/eclipse/model/zones.lyr +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP.DATA +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP.SMSPEC +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP.UNSMRY +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP_LONG.DATA +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP_LONG.SMSPEC +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP_LONG.UNSMRY +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP_WITH_TIMESTEP.DATA +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP_WITH_TIMESTEP.SMSPEC +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP_WITH_TIMESTEP.UNSMRY +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP_WITH_TIMESTEP_LONG.DATA +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP_WITH_TIMESTEP_LONG.SMSPEC +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/data/timesteps/SHORT_STEP_WITH_TIMESTEP_LONG.UNSMRY +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_init.py +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_pillars.py +0 -0
- {res2df-1.3.7 → res2df-1.3.8}/tests/test_zonemap.py +0 -0
@@ -26,12 +26,12 @@ jobs:
       - uses: actions/checkout@v5
 
       - name: Setup Python
-        uses: actions/setup-python@
+        uses: actions/setup-python@v6
         with:
           python-version: 3.11
 
       - name: Install uv
-        uses: astral-sh/setup-uv@
+        uses: astral-sh/setup-uv@v7
         with:
           enable-cache: true
           python-version: "3.11"

@@ -44,7 +44,7 @@ jobs:
         run: |
           pytest tests --disable-warnings --cov=res2df --cov-report=xml
           # Uninstall packages that res2df supports not being installed:
-          uv pip uninstall ert
+          uv pip uninstall ert opm
           # Run tests again in cov-append-mode:
           pytest tests --disable-warnings --cov=res2df --cov-report=xml --cov-append
           bash <(curl -s https://codecov.io/bash)

@@ -45,12 +45,12 @@ jobs:
         run: git fetch --unshallow --tags
 
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@
+        uses: actions/setup-python@v6
         with:
           python-version: ${{ matrix.python-version }}
 
       - name: Install uv
-        uses: astral-sh/setup-uv@
+        uses: astral-sh/setup-uv@v7
         with:
           enable-cache: true
           python-version: ${{ matrix.python-version }}

@@ -28,12 +28,12 @@ jobs:
           fetch-depth: 0
 
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@
+        uses: actions/setup-python@v6
         with:
           python-version: ${{ matrix.python-version }}
 
       - name: Install uv
-        uses: astral-sh/setup-uv@
+        uses: astral-sh/setup-uv@v7
         with:
           enable-cache: true
           python-version: ${{ matrix.python-version }}

@@ -28,12 +28,12 @@ jobs:
           fetch-depth: 0
 
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@
+        uses: actions/setup-python@v6
         with:
           python-version: ${{ matrix.python-version }}
 
       - name: Install uv
-        uses: astral-sh/setup-uv@
+        uses: astral-sh/setup-uv@v7
         with:
           enable-cache: true
           python-version: ${{ matrix.python-version }}
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: res2df
-Version: 1.3.
+Version: 1.3.8
 Summary: Convert reservoir simulator input and output to DataFrames
 Author-email: Håvard Berland <havb@equinor.com>
 License: GNU GENERAL PUBLIC LICENSE

@@ -696,6 +696,7 @@ Description-Content-Type: text/markdown
 License-File: LICENSE
 Requires-Dist: resdata>=5.0.0-b0
 Requires-Dist: resfo
+Requires-Dist: networkx
 Requires-Dist: numpy
 Requires-Dist: opm>=2020.10.2
 Requires-Dist: pandas

@@ -703,7 +704,6 @@ Requires-Dist: pyarrow
 Requires-Dist: pyyaml>=5.1
 Requires-Dist: treelib
 Provides-Extra: tests
-Requires-Dist: networkx; extra == "tests"
 Requires-Dist: pytest; extra == "tests"
 Requires-Dist: pytest-cov; extra == "tests"
 Requires-Dist: pytest-mock; extra == "tests"

@@ -715,6 +715,8 @@ Requires-Dist: mypy; extra == "types"
 Requires-Dist: types-PyYAML; extra == "types"
 Requires-Dist: types-python-dateutil; extra == "types"
 Requires-Dist: types-setuptools; extra == "types"
+Requires-Dist: types-networkx; extra == "types"
+Requires-Dist: pandas-stubs; extra == "types"
 Provides-Extra: docs
 Requires-Dist: autoapi; extra == "docs"
 Requires-Dist: ipython; extra == "docs"
@@ -35,6 +35,7 @@ dynamic = ["version"]
 dependencies= [
     "resdata>=5.0.0-b0",
     "resfo",
+    "networkx",
     "numpy",
     "opm>=2020.10.2",
     "pandas",

@@ -45,7 +46,6 @@ dependencies= [
 
 [project.optional-dependencies]
 tests = [
-    "networkx",
     "pytest",
     "pytest-cov",
     "pytest-mock",

@@ -56,7 +56,9 @@ types = [
     "mypy",
     "types-PyYAML",
     "types-python-dateutil",
-    "types-setuptools"
+    "types-setuptools",
+    "types-networkx",
+    "pandas-stubs",
 ]
 docs = [
     "autoapi",

@@ -97,12 +99,18 @@ res2df = "src/res2df"
 res2df = ["opmkeywords/*", "py.typed", "svg_color_keyword_names.txt"]
 
 [tool.mypy]
+strict_equality = true
+extra_checks = true
+
+[[tool.mypy.overrides]]
+module = [
+    "opm.*",
+    "resfo",
+    "pyarrow",  # should be removed for pyarrow > 21.0.0
+    "resdata.*",
+]
 ignore_missing_imports = true
 
-[tool.pylint]
-# Module docstrings are not required
-disable = "missing-module-docstring"
-
 [tool.pytest.ini_options]
 markers = [
     "integration: marks a test as an integration test",

@@ -127,6 +135,9 @@ select = [
     "PL",  # pylint
     "NPY", # numpy specific rules
     "C4",  # flake8-comprehensions
+    "PD",  # pandas-vet
+    "RUF", # ruff specific rules
+    "UP",  # pyupgrade
 ]
 preview = true
 ignore = [

@@ -143,10 +154,12 @@ ignore = [
     "PLR0904", # too-many-public-methods
     "PLR1702", # too-many-nested-blocks
     "PLW3201", # bad-dunder-method-name
+    "PD013",   # pandas-use-of-dot-stack
 ]
 
 [tool.ruff.lint.extend-per-file-ignores]
 "src/res2df/__init__.py" = ["PLC0414"]
+"tests/*" = ["RUF005"]
 
 [tool.ruff.lint.pylint]
 max-args = 20
@@ -1,11 +1,10 @@
 import importlib
-from typing import List
 
 from .__version__ import __version__ as __version__
 from .res2csvlogger import getLogger_res2csv as getLogger_res2csv
 from .resdatafiles import ResdataFiles as ResdataFiles
 
-SUBMODULES:
+SUBMODULES: list[str] = [
     "compdat",
     "equil",
     "faults",

@@ -26,5 +25,5 @@ SUBMODULES: List[str] = [
 ]
 
 
-for submodule in SUBMODULES
+for submodule in [*SUBMODULES, "res2csv", "csv2res"]:
     importlib.import_module("res2df." + submodule)
@@ -11,21 +11,24 @@ import shlex
 import signal
 import sys
 from collections import defaultdict
+from importlib import resources
 from pathlib import Path
-from typing import Any,
+from typing import Any, cast
 
 import dateutil.parser
 import numpy as np
 import pandas as pd
-import pyarrow
+import pyarrow as pa
+from pyarrow import (
+    feather,  # necessary as this module is not loaded unless explicitly imported
+)
 
 try:
-
-    import opm.io.deck  # lgtm [py/import-and-import-from]
+    import opm.io.deck
 
     # This import is seemingly not used, but necessary for some attributes
     # to be included in DeckItem objects.
-    from opm.io.deck import DeckKeyword  # noqa
+    from opm.io.deck import DeckKeyword  # noqa: F401
 except ImportError:
     # Allow parts of res2df to work without OPM:
     pass

@@ -35,7 +38,7 @@ from .constants import MAGIC_STDOUT
 
 # Parse named JSON files, this exposes a dict of dictionary describing the contents
 # of supported keyword data
-OPMKEYWORDS:
+OPMKEYWORDS: dict[str, dict] = {}
 for keyw in [
     "BRANPROP",
     "COMPDAT",

@@ -82,14 +85,14 @@ for keyw in [
     "WSEGVALV",
 ]:
     OPMKEYWORDS[keyw] = json.loads(
-        (
+        (resources.files(__package__) / "opmkeywords" / keyw).read_text()
     )
 
 
 SVG_COLOR_NAMES = [
     color.lower()
     for color in (
-        (
+        (resources.files(__package__) / "svg_color_keyword_names.txt")
         .read_text(encoding="utf-8")
         .splitlines()
     )

@@ -115,11 +118,11 @@ logger: logging.Logger = logging.getLogger(__name__)
 
 
 def write_dframe_stdout_file(
-    dframe:
+    dframe: pd.DataFrame | pa.Table,
     output: str,
     index: bool = False,
-    caller_logger:
-    logstr:
+    caller_logger: logging.Logger | None = None,
+    logstr: str | None = None,
 ) -> None:
     """Write a dataframe to either stdout or a file
 

@@ -144,13 +147,13 @@ def write_dframe_stdout_file(
     if caller_logger and isinstance(dframe, pd.DataFrame) and dframe.empty:
         caller_logger.warning("Empty dataframe being written to disk")
     if caller_logger and not logstr:
-        caller_logger.info("Writing to file %s",
+        caller_logger.info("Writing to file %s", output)
     elif caller_logger and logstr:
         caller_logger.info(logstr)
     if isinstance(dframe, pd.DataFrame):
         dframe.to_csv(output, index=index)
     else:
-
+        feather.write_feather(dframe, dest=output)
 
 
 def write_inc_stdout_file(string: str, outputfilename: str) -> None:

@@ -170,13 +173,13 @@ def parse_month(rdmonth: str) -> int:
     return MONTH2NUM[rdmonth]
 
 
-def datetime_to_ecldate(timestamp:
+def datetime_to_ecldate(timestamp: str | datetime.datetime | datetime.date) -> str:
     """Convert a Python timestamp or date to the Eclipse DATE format"""
     if isinstance(timestamp, str):
         if list(map(len, timestamp.split(" ")[0].split("-"))) != [4, 2, 2]:
             # Need this as dateutil.parser.isoparse() is not in Python 3.6.
             raise ValueError("Use ISO-format for dates")
-        timestamp = dateutil.parser.parse(timestamp)
+        timestamp = dateutil.parser.parse(timestamp)
     if not isinstance(timestamp, (datetime.datetime, datetime.date)):
         raise TypeError("Require string or datetime")
     string = f"{timestamp.day} '{NUM2MONTH[timestamp.month]}' {timestamp.year}"

@@ -188,9 +191,9 @@ def datetime_to_ecldate(timestamp: Union[str, datetime.datetime, datetime.date])
 def keyworddata_to_df(
     deck,
     keyword: str,
-    renamer:
-    recordcountername:
-    emptyrecordcountername:
+    renamer: dict[str, str | list[str]] | None = None,
+    recordcountername: str | None = None,
+    emptyrecordcountername: str | None = None,
 ) -> pd.DataFrame:
     """Extract data associated to a keyword into tabular form.
 

@@ -212,8 +215,8 @@ def keyworddata_to_df(
     row based on how many empty records is encountered. For PVTO f.ex,
     this gives the PVTNUM indexing.
     """
-
-
+    dict_records: list[dict[str, Any]] = []
+    df_records: list[pd.DataFrame] = []
     record_counter = 1
     emptyrecord_counter = 1
     for deckrecord in deck[keyword]:

@@ -235,41 +238,45 @@
         if "DATA" in recdict and isinstance(recdict["DATA"], list):
             assert renamer is not None
             # If DATA is sometimes used for something else in the jsons, redo this.
-
+            renamed_data = renamer.get("DATA", [])
+            if isinstance(renamed_data, str):
+                renamed_data = [renamed_data]
+            data_dim = len(renamed_data)  # The renamers must be in sync with json!
             data_chunks = int(len(recdict["DATA"]) / data_dim)
             try:
                 data_reshaped = np.reshape(recdict["DATA"], (data_chunks, data_dim))
             except ValueError as err:
                 raise ValueError(
-
-
-                    "Either your keyword is wrong, or your data is wrong"
-                )
+                    f"Wrong number count for keyword {keyword}. \n"
+                    "Either your keyword is wrong, or your data is wrong"
                 ) from err
-            data_df = pd.DataFrame(columns=
+            data_df = pd.DataFrame(columns=renamed_data, data=data_reshaped)
             # Assign the remaining items from the parsed dict to the dataframe:
             for key, value in recdict.items():
                 if key != "DATA":
                     data_df[key] = value
-
-            record_counter += 1
+            df_records.append(data_df)
         else:
-
-
-            if
-
-
-
-
+            dict_records.append(recdict)
+            record_counter += 1
+    if df_records and dict_records:
+        dict_df = pd.DataFrame(data=dict_records)
+        return pd.concat([*df_records, dict_df]).reset_index(drop=True)
+    elif df_records:  # trust that this is all one type?
+        return pd.concat(df_records).reset_index(drop=True)
+    elif dict_records:  # records contain lists.
+        return pd.DataFrame(data=dict_records).reset_index(drop=True)
+    else:
+        return pd.DataFrame()
 
 
 def parse_opmio_deckrecord(
-    record: "opm.
+    record: "opm.opmcommon_python.DeckRecord",
     keyword: str,
     itemlistname: str = "items",
-    recordindex:
-    renamer:
-) ->
+    recordindex: int | None = None,
+    renamer: dict[str, str] | dict[str, str | list[str]] | None = None,
+) -> dict[str, Any]:
     """
     Parse an opm.io.DeckRecord belonging to a certain keyword
 

@@ -289,7 +296,7 @@ def parse_opmio_deckrecord(
     if keyword not in OPMKEYWORDS:
         raise ValueError(f"Keyword {keyword} not supported by common.py")
 
-    rec_dict:
+    rec_dict: dict[str, Any] = {}
 
     if recordindex is None:  # Beware, 0 is different from None here.
         itemlist = OPMKEYWORDS[keyword][itemlistname]

@@ -329,14 +336,13 @@
                 # OPM DeckItem. A better solution has not yet
                 # been found in the OPM API. See also
                 # https://github.com/OPM/opm-common/issues/2598
-                # pylint: disable=protected-access
                 if record[item_idx].__defaulted(idx):
                     rec_dict[item_name][idx] = np.nan
         else:
             rec_dict[item_name] = jsonitem.get("default", None)
 
     if renamer:
-        renamed_dict:
+        renamed_dict: dict[str, Any] = {}
         for key, value in rec_dict.items():
             if key in renamer and not isinstance(renamer[key], list):
                 renamed_dict[renamer[key]] = value  # type: ignore

@@ -354,7 +360,7 @@ def parse_opmio_date_rec(record: "opm.io.DeckRecord") -> datetime.date:
     return datetime.date(year=year, month=parse_month(month), day=day)
 
 
-def parse_opmio_tstep_rec(record: "opm.io.DeckRecord") ->
+def parse_opmio_tstep_rec(record: "opm.io.DeckRecord") -> list[float | int]:
     """Parse a record with TSTEP data
 
     Return:

@@ -396,13 +402,13 @@ def merge_zones(
         return df
     zone_df = pd.DataFrame.from_dict(zonedict, orient="index", columns=[zoneheader])
     zone_df.index.name = "K"
-    zone_df.reset_index(
+    zone_df = zone_df.reset_index()
 
     df[zoneheader] = df[kname].map(defaultdict(lambda: None, zonedict))
     return df
 
 
-def comment_formatter(multiline:
+def comment_formatter(multiline: str | None, prefix: str = "-- ") -> str:
     """Prepends comment characters to every line in input
 
     If nothing is supplied, an empty string is returned.

@@ -422,11 +428,11 @@ def comment_formatter(multiline: Optional[str], prefix: str = "-- ") -> str:
 
 
 def handle_wanted_keywords(
-    wanted:
+    wanted: list[str] | None,
     deck: "opm.io.Deck",
-    supported:
+    supported: list[str],
     modulename: str = "",
-) ->
+) -> list[str]:
     """Handle three list of keywords, wanted, available and supported
 
     Args:

@@ -442,12 +448,12 @@ def handle_wanted_keywords(
         keywords = supported
     else:
         # Warn if some keywords are unsupported:
-        not_supported:
+        not_supported: set[str] = set(wanted) - set(supported)
         if not_supported:
             logger.warning(
                 "Requested keyword(s) not supported by res2df.%s: %s",
                 modulename,
-
+                not_supported,
             )
         # Reduce to only supported keywords:
         keywords = list(set(wanted) - set(not_supported))

@@ -455,9 +461,7 @@
     keywords_in_deck = [keyword for keyword in keywords if keyword in deck]
     not_in_deck = set(keywords) - set(keywords_in_deck)
     if not_in_deck:
-        logger.warning(
-            "Requested keyword(s) not present in deck: %s", str(not_in_deck)
-        )
+        logger.warning("Requested keyword(s) not present in deck: %s", not_in_deck)
     # Reduce again to only present keywords, but without warning:
     keywords = [keyword for keyword in keywords if keyword in deck]
 

@@ -506,11 +510,11 @@ def fill_reverse_parser(
 
 def df2res(
     dataframe: pd.DataFrame,
-    keywords:
-    comments:
-    supported:
-    consecutive:
-    filename:
+    keywords: str | list[str] | list[str | None] | None = None,
+    comments: dict[str, str] | None = None,
+    supported: list[str] | None = None,
+    consecutive: str | None = None,
+    filename: str | None = None,
 ) -> str:
     """Generate resdata :term:`include file` content from dataframes in res2df format.
 

@@ -552,7 +556,7 @@ def df2res(
         logger.critical(
             "%s inconsistent in input dataframe, got the values %s",
             consecutive,
-
+            dataframe[consecutive].unique(),
         )
         raise ValueError
 

@@ -572,18 +576,18 @@ def df2res(
         # Warn if some keywords are unsupported:
         assert keywords is not None
         assert supported is not None
-        not_supported:
+        not_supported: set[str | None] = set(keywords) - set(supported)
         if not_supported:
             logger.warning(
                 "Requested keyword(s) not supported by %s: %s",
                 calling_module.__name__,  # type: ignore
-
+                not_supported,
             )
         # Warn if some requested keywords are not in frame:
         not_in_frame = set(keywords) - keywords_in_frame
         if not_in_frame:
             logger.warning(
-                "Requested keyword(s) not present in dataframe: %s",
+                "Requested keyword(s) not present in dataframe: %s", not_in_frame
             )
         keywords = [
             keyword

@@ -626,8 +630,8 @@ def df2res(
 def generic_deck_table(
     dframe: pd.DataFrame,
     keyword: str,
-    comment:
-    renamer:
+    comment: str | None = None,
+    renamer: dict[str, str] | None = None,
     drop_trailing_columns: bool = True,
 ) -> str:
     """Construct string contents of a :term:`.DATA file` table.

@@ -666,7 +670,7 @@ generic_deck_table(
     # sorting from that:
     if renamer is not None:
         inv_renamer = {value: key for key, value in renamer.items()}
-        dframe.rename(inv_renamer, axis="columns"
+        dframe = dframe.rename(inv_renamer, axis="columns")
 
     keyword_col_headers = [item["name"] for item in OPMKEYWORDS[keyword]["items"]]
 

@@ -680,7 +684,7 @@
     if rightmost_column == -1:
         # No relevant data in the dataframe
         return string
-    relevant_columns = keyword_col_headers[0 : rightmost_column + 1]
+    relevant_columns = keyword_col_headers[0 : rightmost_column + 1]
     for colname in relevant_columns:
         # Add those that are missing, as Eclipse defaults
         if colname not in dframe:

@@ -731,7 +735,7 @@
 
     # Now rename again to have prettier column names:
     if renamer is not None:
-        dframe.rename(renamer, axis="columns"
+        dframe = dframe.rename(renamer, axis="columns")
     # Add a final column with the end-slash, invisible header:
     dframe[" "] = "/"
     tablestring = dframe.to_string(header=True, index=False)

@@ -827,13 +831,13 @@ def stack_on_colnames(
     dframe.columns = pd.MultiIndex.from_tuples(
         tuplecolumns, names=["dummy", stackcolname]
     )
-    dframe = dframe.stack(future_stack=True)
+    dframe = cast(pd.DataFrame, dframe.stack(future_stack=True))
     staticcols = [col[0] for col in tuplecolumns if len(col) == 1]
     dframe[staticcols] = dframe[staticcols].ffill()
-    dframe.reset_index(
+    dframe = dframe.reset_index()
     # Drop rows stemming from the NaNs in the second tuple-element for
     # static columns:
-    dframe.dropna(axis="index", subset=["DATE"]
+    dframe = dframe.dropna(axis="index", subset=["DATE"])
     del dframe["level_0"]
     dframe.index.name = ""
     return dframe

@@ -851,7 +855,7 @@ def is_color(input_string: str) -> bool:
     return bool(re.match(regex, input_string))
 
 
-def parse_lyrfile(filename: str) ->
+def parse_lyrfile(filename: str) -> list[dict[str, Any]] | None:
     """Return a list of dicts representation of the lyr file.
 
     The lyr file contains data of the following format,

@@ -885,7 +889,7 @@ def parse_lyrfile(filename: str) -> Optional[List[Dict[str, Any]]]:
     Returns:
         A list of dictionaries representing the information in the lyr file.
 
-    """
+    """
 
     zonelines = Path(filename).read_text(encoding="utf-8").splitlines()
 

@@ -893,11 +897,11 @@ def parse_lyrfile(filename: str) -> Optional[List[Dict[str, Any]]]:
     zonelines = [line.split("--")[0].strip() for line in zonelines]
     zonelines = [line for line in zonelines if line and not line.startswith("#")]
 
-    lyrlist:
+    lyrlist: list[dict[str, Any]] = []
     for line in zonelines:
         try:
             linesplit = shlex.split(line)
-            zonedict:
+            zonedict: dict[str, Any] = {"name": linesplit[0]}
             zone_color = linesplit.pop(-1) if is_color(linesplit[-1]) else None
             if zone_color is not None:
                 zonedict["color"] = zone_color

@@ -923,7 +927,7 @@ def parse_lyrfile(filename: str) -> Optional[List[Dict[str, Any]]]:
     return lyrlist
 
 
-def convert_lyrlist_to_zonemap(lyrlist:
+def convert_lyrlist_to_zonemap(lyrlist: list[dict[str, Any]]) -> dict[int, str]:
     """Returns a layer to zone map as a dictionary
 
     Args: