From 6d0e944e90bc6e926868c72bb8c713b994818604 Mon Sep 17 00:00:00 2001
From: "Yngve S. Kristiansen"
Date: Tue, 14 Nov 2023 09:42:56 +0100
Subject: [PATCH 01/68] ecl2df->res2df

---
 .github/workflows/codecov.yml | 8 +-
 .github/workflows/ecl2df.yml | 24 +-
 .gitignore | 6 +-
 .pylintrc | 2 +-
 README.md | 14 +-
 docs/conf.py | 20 +-
 docs/contribution.rst | 34 +-
 docs/csv2ecl.rst | 6 +-
 docs/ecl2csv.rst | 4 +-
 docs/index.rst | 6 +-
 docs/installation.rst | 6 +-
 docs/introduction.rst | 10 +-
 docs/usage/compdat.rst | 2 +-
 docs/usage/equil.rst | 6 +-
 docs/usage/fipnum.inc | 2 +-
 docs/usage/fipreports.rst | 2 +-
 docs/usage/grid.rst | 16 +-
 docs/usage/nnc.rst | 4 +-
 docs/usage/pillars.rst | 16 +-
 docs/usage/pvt.rst | 6 +-
 docs/usage/satfunc.rst | 14 +-
 docs/usage/summary.rst | 4 +-
 docs/usage/trans.rst | 18 +-
 docs/usage/wcon.rst | 2 +-
 {ecl2df => res2df}/__init__.py | 4 +-
 {ecl2df => res2df}/common.py | 18 +-
 {ecl2df => res2df}/compdat.py | 6 +-
 {ecl2df => res2df}/config_jobs/CSV2ECL | 0
 {ecl2df => res2df}/config_jobs/ECL2CSV | 0
 {ecl2df => res2df}/constants.py | 2 +-
 {ecl2df => res2df}/csv2ecl.py | 16 +-
 {ecl2df => res2df}/ecl2csv.py | 16 +-
 {ecl2df => res2df}/eclfiles.py | 2 +-
 {ecl2df => res2df}/equil.py | 4 +-
 {ecl2df => res2df}/faults.py | 4 +-
 {ecl2df => res2df}/fipreports.py | 4 +-
 {ecl2df => res2df}/grid.py | 8 +-
 {ecl2df => res2df}/gruptree.py | 4 +-
 .../hook_implementations/__init__.py | 0
 .../hook_implementations/jobs.py | 12 +-
 {ecl2df => res2df}/inferdims.py | 6 +-
 {ecl2df => res2df}/nnc.py | 10 +-
 {ecl2df => res2df}/opmkeywords/BRANPROP | 0
 {ecl2df => res2df}/opmkeywords/COMPDAT | 0
 {ecl2df => res2df}/opmkeywords/COMPLUMP | 0
 {ecl2df => res2df}/opmkeywords/COMPSEGS | 0
 {ecl2df => res2df}/opmkeywords/DENSITY | 0
 {ecl2df => res2df}/opmkeywords/EQLDIMS | 0
 {ecl2df => res2df}/opmkeywords/EQUIL | 0
 {ecl2df => res2df}/opmkeywords/FAULTS | 0
 {ecl2df => res2df}/opmkeywords/GRUPNET | 0
 {ecl2df => res2df}/opmkeywords/GRUPTREE | 0
 {ecl2df => res2df}/opmkeywords/NODEPROP | 0
 {ecl2df => res2df}/opmkeywords/PBVD | 0
 {ecl2df => res2df}/opmkeywords/PDVD | 0
 {ecl2df => res2df}/opmkeywords/PVDG | 0
 {ecl2df => res2df}/opmkeywords/PVDO | 0
 {ecl2df => res2df}/opmkeywords/PVTG | 0
 {ecl2df => res2df}/opmkeywords/PVTO | 0
 {ecl2df => res2df}/opmkeywords/PVTW | 0
 {ecl2df => res2df}/opmkeywords/ROCK | 0
 {ecl2df => res2df}/opmkeywords/RSVD | 0
 {ecl2df => res2df}/opmkeywords/RVVD | 0
 {ecl2df => res2df}/opmkeywords/SGFN | 0
 {ecl2df => res2df}/opmkeywords/SGOF | 0
 {ecl2df => res2df}/opmkeywords/SGWFN | 0
 {ecl2df => res2df}/opmkeywords/SLGOF | 0
 {ecl2df => res2df}/opmkeywords/SOF2 | 0
 {ecl2df => res2df}/opmkeywords/SOF3 | 0
 {ecl2df => res2df}/opmkeywords/SWFN | 0
 {ecl2df => res2df}/opmkeywords/SWOF | 0
 {ecl2df => res2df}/opmkeywords/TABDIMS | 0
 {ecl2df => res2df}/opmkeywords/VFPINJ | 0
 {ecl2df => res2df}/opmkeywords/VFPPROD | 0
 {ecl2df => res2df}/opmkeywords/WCONHIST | 0
 {ecl2df => res2df}/opmkeywords/WCONINJE | 0
 {ecl2df => res2df}/opmkeywords/WCONINJH | 0
 {ecl2df => res2df}/opmkeywords/WCONPROD | 0
 {ecl2df => res2df}/opmkeywords/WELOPEN | 0
 {ecl2df => res2df}/opmkeywords/WELSEGS | 0
 {ecl2df => res2df}/opmkeywords/WELSPECS | 0
 {ecl2df => res2df}/opmkeywords/WLIST | 0
 {ecl2df => res2df}/opmkeywords/WSEGAICD | 0
 {ecl2df => res2df}/opmkeywords/WSEGSICD | 0
 {ecl2df => res2df}/opmkeywords/WSEGVALV | 0
 {ecl2df => res2df}/opmkeywords/readme | 2 +-
 .../opmkeywords/runmetoupdate.sh | 0
 {ecl2df => res2df}/parameters.py | 2 +-
 {ecl2df => res2df}/pillars.py | 2 +-
 {ecl2df => res2df}/pvt.py | 4 +-
 {ecl2df => res2df}/rft.py | 2 +-
 {ecl2df => res2df}/satfunc.py | 6 +-
 {ecl2df => res2df}/summary.py | 6 +-
 .../svg_color_keyword_names.txt | 0
 {ecl2df => res2df}/trans.py | 12 +-
 {ecl2df => res2df}/vfp/__init__.py | 2 +-
 {ecl2df => res2df}/vfp/_vfp.py | 2 +-
 {ecl2df => res2df}/vfp/_vfpcommon.py | 2 +-
 {ecl2df => res2df}/vfp/_vfpdefs.py | 2 +-
 {ecl2df => res2df}/vfp/_vfpinj.py | 2 +-
 {ecl2df => res2df}/vfp/_vfpprod.py | 2 +-
 {ecl2df => res2df}/wcon.py | 4 +-
 {ecl2df => res2df}/wellcompletiondata.py | 4 +-
 {ecl2df => res2df}/wellconnstatus.py | 4 +-
 setup.cfg | 2 +-
 setup.py | 20 +-
 tests/conftest.py | 8 +-
 .../eclipse/model/2_R001_REEK-0-OPMFLOW.PRT | 738 +++++++++---------
 tests/test_common.py | 4 +-
 tests/test_compdat.py | 2 +-
 tests/test_eclfiles.py | 2 +-
 tests/test_equil.py | 6 +-
 tests/test_ert_hooks.py | 14 +-
 tests/test_faults.py | 4 +-
 tests/test_fipreports.py | 6 +-
 tests/test_grid.py | 12 +-
 tests/test_gruptree.py | 4 +-
 tests/test_hook_implementations.py | 10 +-
 tests/test_inferdims.py | 2 +-
 tests/test_init.py | 18 +-
 tests/test_integration.py | 4 +-
 tests/test_logging.py | 26 +-
 tests/test_nnc.py | 4 +-
 tests/test_parameters.py | 4 +-
 tests/test_pillars.py | 6 +-
 tests/test_pvt.py | 4 +-
 tests/test_rft.py | 4 +-
 tests/test_satfunc.py | 4 +-
 tests/test_summary.py | 16 +-
 tests/test_trans.py | 6 +-
 tests/test_userapi.py | 36 +-
 tests/test_vfp.py | 56 +-
 tests/test_wcon.py | 4 +-
 tests/test_wellcompletiondata.py | 6 +-
 tests/test_wellconnstatus.py | 4 +-
 tests/test_welopen.py | 6 +-
 tests/test_wlist.py | 2 +-
 tests/test_zonemap.py | 36 +-
 138 files changed, 744 insertions(+), 744 deletions(-)
 rename {ecl2df => res2df}/__init__.py (96%)
 rename {ecl2df => res2df}/common.py (98%)
 rename {ecl2df => res2df}/compdat.py (99%)
 rename {ecl2df => res2df}/config_jobs/CSV2ECL (100%)
 rename {ecl2df => res2df}/config_jobs/ECL2CSV (100%)
 rename {ecl2df => res2df}/constants.py (86%)
 rename {ecl2df => res2df}/csv2ecl.py (88%)
 rename {ecl2df => res2df}/ecl2csv.py (97%)
 rename {ecl2df => res2df}/eclfiles.py (99%)
 rename {ecl2df => res2df}/equil.py (99%)
 rename {ecl2df => res2df}/faults.py (96%)
 rename {ecl2df => res2df}/fipreports.py (98%)
 rename {ecl2df => res2df}/grid.py (99%)
 rename {ecl2df => res2df}/gruptree.py (99%)
 rename {ecl2df => res2df}/hook_implementations/__init__.py (100%)
 rename {ecl2df => res2df}/hook_implementations/jobs.py (85%)
 rename {ecl2df => res2df}/inferdims.py (98%)
 rename {ecl2df => res2df}/nnc.py (97%)
 rename {ecl2df => res2df}/opmkeywords/BRANPROP (100%)
 rename {ecl2df => res2df}/opmkeywords/COMPDAT (100%)
 rename {ecl2df => res2df}/opmkeywords/COMPLUMP (100%)
 rename {ecl2df => res2df}/opmkeywords/COMPSEGS (100%)
 rename {ecl2df => res2df}/opmkeywords/DENSITY (100%)
 rename {ecl2df => res2df}/opmkeywords/EQLDIMS (100%)
 rename {ecl2df => res2df}/opmkeywords/EQUIL (100%)
 rename {ecl2df => res2df}/opmkeywords/FAULTS (100%)
 rename {ecl2df => res2df}/opmkeywords/GRUPNET (100%)
 rename {ecl2df => res2df}/opmkeywords/GRUPTREE (100%)
 rename {ecl2df => res2df}/opmkeywords/NODEPROP (100%)
 rename {ecl2df => res2df}/opmkeywords/PBVD (100%)
 rename {ecl2df => res2df}/opmkeywords/PDVD (100%)
 rename {ecl2df => res2df}/opmkeywords/PVDG (100%)
 rename {ecl2df => res2df}/opmkeywords/PVDO (100%)
 rename {ecl2df => res2df}/opmkeywords/PVTG (100%)
 rename {ecl2df => res2df}/opmkeywords/PVTO (100%)
 rename {ecl2df => res2df}/opmkeywords/PVTW (100%)
 rename {ecl2df => res2df}/opmkeywords/ROCK (100%)
 rename {ecl2df => res2df}/opmkeywords/RSVD (100%)
 rename {ecl2df => res2df}/opmkeywords/RVVD (100%)
 rename {ecl2df => res2df}/opmkeywords/SGFN (100%)
 rename {ecl2df => res2df}/opmkeywords/SGOF (100%)
 rename {ecl2df => res2df}/opmkeywords/SGWFN (100%)
 rename {ecl2df => res2df}/opmkeywords/SLGOF (100%)
 rename {ecl2df => res2df}/opmkeywords/SOF2 (100%)
 rename {ecl2df => res2df}/opmkeywords/SOF3 (100%)
 rename {ecl2df => res2df}/opmkeywords/SWFN (100%)
 rename {ecl2df => res2df}/opmkeywords/SWOF (100%)
 rename {ecl2df => res2df}/opmkeywords/TABDIMS (100%)
 rename {ecl2df => res2df}/opmkeywords/VFPINJ (100%)
 rename {ecl2df => res2df}/opmkeywords/VFPPROD (100%)
 rename {ecl2df => res2df}/opmkeywords/WCONHIST (100%)
 rename {ecl2df => res2df}/opmkeywords/WCONINJE (100%)
 rename {ecl2df => res2df}/opmkeywords/WCONINJH (100%)
 rename {ecl2df => res2df}/opmkeywords/WCONPROD (100%)
 rename {ecl2df => res2df}/opmkeywords/WELOPEN (100%)
 rename {ecl2df => res2df}/opmkeywords/WELSEGS (100%)
 rename {ecl2df => res2df}/opmkeywords/WELSPECS (100%)
 rename {ecl2df => res2df}/opmkeywords/WLIST (100%)
 rename {ecl2df => res2df}/opmkeywords/WSEGAICD (100%)
 rename {ecl2df => res2df}/opmkeywords/WSEGSICD (100%)
 rename {ecl2df => res2df}/opmkeywords/WSEGVALV (100%)
 rename {ecl2df => res2df}/opmkeywords/readme (79%)
 rename {ecl2df => res2df}/opmkeywords/runmetoupdate.sh (100%)
 rename {ecl2df => res2df}/parameters.py (99%)
 rename {ecl2df => res2df}/pillars.py (99%)
 rename {ecl2df => res2df}/pvt.py (99%)
 rename {ecl2df => res2df}/rft.py (99%)
 rename {ecl2df => res2df}/satfunc.py (98%)
 rename {ecl2df => res2df}/summary.py (99%)
 rename {ecl2df => res2df}/svg_color_keyword_names.txt (100%)
 rename {ecl2df => res2df}/trans.py (97%)
 rename {ecl2df => res2df}/vfp/__init__.py (83%)
 rename {ecl2df => res2df}/vfp/_vfp.py (99%)
 rename {ecl2df => res2df}/vfp/_vfpcommon.py (99%)
 rename {ecl2df => res2df}/vfp/_vfpdefs.py (99%)
 rename {ecl2df => res2df}/vfp/_vfpinj.py (99%)
 rename {ecl2df => res2df}/vfp/_vfpprod.py (99%)
 rename {ecl2df => res2df}/wcon.py (97%)
 rename {ecl2df => res2df}/wellcompletiondata.py (99%)
 rename {ecl2df => res2df}/wellconnstatus.py (97%)

diff --git a/.github/workflows/codecov.yml b/.github/workflows/codecov.yml
index 71688375e..599ff0b54 100644
--- a/.github/workflows/codecov.yml
+++ b/.github/workflows/codecov.yml
@@ -23,16 +23,16 @@ jobs:
       with:
         python-version: 3.8

-    - name: Install ecl2df
+    - name: Install res2df
       run: |
         pip install pip -U
         pip install .[tests,ert]

     - name: Generate coverage report and upload
       run: |
-        pytest tests --disable-warnings --cov=ecl2df --cov-report=xml
-        # Uninstall packages that ecl2df supports not being installed:
+        pytest tests --disable-warnings --cov=res2df --cov-report=xml
+        # Uninstall packages that res2df supports not being installed:
         pip uninstall --yes ert networkx opm
         # Run tests again in cov-append-mode:
-        pytest tests --disable-warnings --cov=ecl2df --cov-report=xml --cov-append
+        pytest tests --disable-warnings --cov=res2df --cov-report=xml --cov-append
         bash <(curl -s https://codecov.io/bash)
diff --git a/.github/workflows/ecl2df.yml b/.github/workflows/ecl2df.yml
index 1eaa793e0..bd0fbee9a 100644
--- a/.github/workflows/ecl2df.yml
+++ b/.github/workflows/ecl2df.yml
@@ -1,4 +1,4 @@
-name: ecl2df
+name: res2df

 on:
   push:
@@ -15,7 +15,7 @@ env:
   ERT_SHOW_BACKTRACE: 1

 jobs:
-  ecl2df:
+  res2df:
     runs-on: ubuntu-latest
     strategy:
       matrix:
@@ -40,11 +40,11 @@
       with:
         python-version: ${{ matrix.python-version }}

-    - name: Install ecl2df with dependencies
+    - name: Install res2df with dependencies
       run: |
         pip install --upgrade pip
         pip install .
- python -c "import ecl2df" + python -c "import res2df" - name: Install ert if: matrix.install-ert @@ -55,17 +55,17 @@ jobs: - name: Check code style and typing run: | - black --check ecl2df tests setup.py docs/conf.py - flake8 ecl2df tests - isort --check-only --profile black ecl2df tests - mypy ecl2df + black --check res2df tests setup.py docs/conf.py + flake8 res2df tests + isort --check-only --profile black res2df tests + mypy res2df - name: List all installed packages run: pip freeze - name: Run tests run: | - python -c "import ecl2df" + python -c "import res2df" pytest tests/ - name: Syntax check documentation @@ -81,8 +81,8 @@ jobs: run: | cp -R ./build/sphinx/html ../html - git config --local user.email "ecl2df-github-action" - git config --local user.name "ecl2df-github-action" + git config --local user.email "res2df-github-action" + git config --local user.name "res2df-github-action" git fetch origin gh-pages git checkout --track origin/gh-pages git clean -f -f -d -x @@ -103,7 +103,7 @@ jobs: if: github.event_name == 'release' && matrix.python-version == '3.8' env: TWINE_USERNAME: __token__ - TWINE_PASSWORD: ${{ secrets.ecl2df_pypi_token }} + TWINE_PASSWORD: ${{ secrets.res2df_pypi_token }} run: | python -m pip install --upgrade setuptools wheel twine python setup.py sdist bdist_wheel diff --git a/.gitignore b/.gitignore index 7deb22b05..fe6149219 100644 --- a/.gitignore +++ b/.gitignore @@ -1,13 +1,13 @@ .eggs .cache .coverage -ecl2df.egg-info +res2df.egg-info .tox *.swp *.pyc *~ docs/modules.rst -docs/ecl2df.rst -ecl2df/version.py +docs/res2df.rst +res2df/version.py \#* .\#* diff --git a/.pylintrc b/.pylintrc index ee7791fa6..aafe0859c 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,4 +1,4 @@ -# PYLINT: General settings for ecl2df +# PYLINT: General settings for res2df [GENERAL] disable=R0205, F0010, C0330, E1136, E0401,C0114 diff --git a/README.md b/README.md index 5d265bfad..c3d20f181 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,14 @@ -[![Build Status](https://img.shields.io/github/workflow/status/equinor/ecl2df/ecl2df)](https://github.com/equinor/ecl2df/actions?query=workflow%3Aecl2df) -[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/equinor/ecl2df.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/equinor/ecl2df/context:python) -[![Total alerts](https://img.shields.io/lgtm/alerts/g/equinor/ecl2df.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/equinor/ecl2df/alerts/) -[![codecov](https://codecov.io/gh/equinor/ecl2df/branch/master/graph/badge.svg)](https://codecov.io/gh/equinor/ecl2df) +[![Build Status](https://img.shields.io/github/workflow/status/equinor/res2df/res2df)](https://github.com/equinor/res2df/actions?query=workflow%3Ares2df) +[![Language grade: Python](https://img.shields.io/lgtm/grade/python/g/equinor/res2df.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/equinor/res2df/context:python) +[![Total alerts](https://img.shields.io/lgtm/alerts/g/equinor/res2df.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/equinor/res2df/alerts/) +[![codecov](https://codecov.io/gh/equinor/res2df/branch/master/graph/badge.svg)](https://codecov.io/gh/equinor/res2df) [![Python 3.8-3.10](https://img.shields.io/badge/python-3.8%20|%203.9%20|%203.10-blue.svg)](https://www.python.org) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://black.readthedocs.io/) [![License: GPL v3](https://img.shields.io/badge/License-GPLv3-blue.svg)](https://www.gnu.org/licenses/gpl-3.0) -# ecl2df +# res2df 
-ecl2df is a Pandas DataFrame wrapper around libecl and opm.io, which +res2df is a Pandas DataFrame wrapper around libecl and opm.io, which are used to access binary files outputted by the reservoir simulator Eclipse, or its input files --- or any other tool outputting to the same data format. @@ -24,7 +24,7 @@ There is a command line frontend for almost all functionality, called the dataframes to files in CSV format, and a similar `csv2ecl` for the reverse operation. -For documentation, see +For documentation, see ## License diff --git a/docs/conf.py b/docs/conf.py index 4d3297f8a..e7ba8b232 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -19,14 +19,14 @@ # -- Project information ----------------------------------------------------- -project = "ecl2df" +project = "res2df" author = "Håvard Berland" copyright = f"Equinor 2019-{datetime.datetime.now().year}" # The short X.Y version -import ecl2df # noqa +import res2df # noqa -release = metadata.version("ecl2df") +release = metadata.version("res2df") version = release # -- General configuration --------------------------------------------------- @@ -50,7 +50,7 @@ "sphinxarg.ext", ] -autoapi_modules: dict = {"ecl2df": None} +autoapi_modules: dict = {"res2df": None} autodoc_default_options = {"members": None} @@ -118,7 +118,7 @@ # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. -htmlhelp_basename = "ecl2dfdoc" +htmlhelp_basename = "res2dfdoc" # -- Options for LaTeX output ------------------------------------------------ @@ -141,14 +141,14 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). -latex_documents = [(master_doc, "ecl2df.tex", "ecl2df Documentation", author, "manual")] +latex_documents = [(master_doc, "res2df.tex", "res2df Documentation", author, "manual")] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). -man_pages = [(master_doc, "ecl2df", "ecl2df Documentation", [author], 1)] +man_pages = [(master_doc, "res2df", "res2df Documentation", [author], 1)] # -- Options for Texinfo output ---------------------------------------------- @@ -159,10 +159,10 @@ texinfo_documents = [ ( master_doc, - "ecl2df", - "ecl2df Documentation", + "res2df", + "res2df Documentation", author, - "ecl2df", + "res2df", "One line description of project.", "Miscellaneous", ) diff --git a/docs/contribution.rst b/docs/contribution.rst index 0ad8b9bbe..f6a4e8d4d 100644 --- a/docs/contribution.rst +++ b/docs/contribution.rst @@ -1,41 +1,41 @@ ====================== -Contributing to ecl2df +Contributing to res2df ====================== -Contributing to ecl2df is easiest on Linux computers. Windows has not been +Contributing to res2df is easiest on Linux computers. Windows has not been tested, and for Mac you will have to compile OPM yourself. Getting started as a developer ------------------------------ -The first thing to do, is to create a fork of ecl2df to your personal -github account. Go to https://github.com/equinor/ecl2df and click the Fork +The first thing to do, is to create a fork of res2df to your personal +github account. Go to https://github.com/equinor/res2df and click the Fork button. Clone your fork to your local computer: .. 
code-block:: console - git clone git@github.com:/ecl2df - cd ecl2df + git clone git@github.com:/res2df + cd res2df Then add the upstream repository: .. code-block:: console - git remote add upstream git@github.com:equinor/ecl2df + git remote add upstream git@github.com:equinor/res2df This requires a valid login setup with SSH keys for you github account, needed for write access. After cloning, you should make a Python virtual environment in which you install -ecl2df and its dependencies. If you want to create a new virtual environment for -ecl2df, you can do something like the following: +res2df and its dependencies. If you want to create a new virtual environment for +res2df, you can do something like the following: .. code-block:: console - python3 -m venv venv-ecl2df - source venv-ecl2df/bin/activate + python3 -m venv venv-res2df + source venv-res2df/bin/activate and then run ``pip`` : @@ -43,7 +43,7 @@ and then run ``pip`` : pip install -e .[tests,docs] -to install ecl2df in "edit"-mode together will all dependencies for ecl2df, its +to install res2df in "edit"-mode together will all dependencies for res2df, its test suite and documentation. A good start is to verify that all tests pass after having cloned the @@ -73,13 +73,13 @@ Komodo in order to prepare for the command: NB: For every monthly Komodo release, you might have to remake your komodo-venv. -Using ecl2df without OPM +Using res2df without OPM ------------------------ -OPM is only pip-installable on Linux. To use the non-OPM dependent ecl2df +OPM is only pip-installable on Linux. To use the non-OPM dependent res2df modules on something else than Linux (but with libecl installed), you should install all the dependencies (except OPM) using ``pip`` (see ``setup.py`` for -list of dependencies), and then install ecl2df with the ``--no-deps`` option +list of dependencies), and then install res2df with the ``--no-deps`` option to ``pip``. After this, the non-OPM dependent modules should work, and others will fail with import errors. @@ -88,7 +88,7 @@ Development workflow If you have a feature or bugfix, a typical procedure is to: -* Consider writing an issue on https://github.com/equinor/ecl2df/issues describing +* Consider writing an issue on https://github.com/equinor/res2df/issues describing what is not working or what is not present. * Make a new git branch for your contribution, from an updated master branch. * Write a test for the feature or a test proving the bug. Verify that ``pytest`` @@ -100,7 +100,7 @@ If you have a feature or bugfix, a typical procedure is to: * Check your code quality with pylint. New code should aim for maximal pylint score. Pylint exceptions should only be used when warranted. * Commit your changes, remember to add any new files. -* Push your branch to your fork on github, and go to github.com/equinor/ecl2df +* Push your branch to your fork on github, and go to github.com/equinor/res2df and make a pull request from your branch. Link your pull request to any relevant issue. * Fix any errors that pop up from automated checks. diff --git a/docs/csv2ecl.rst b/docs/csv2ecl.rst index 9087a99a7..ac5438806 100644 --- a/docs/csv2ecl.rst +++ b/docs/csv2ecl.rst @@ -1,8 +1,8 @@ csv2ecl ======= -Some of the modules inside ecl2df is able to write Eclipse include files -from dataframes (in the format dumped by ecl2df). This makes it possible +Some of the modules inside res2df is able to write Eclipse include files +from dataframes (in the format dumped by res2df). 
This makes it possible to produce Eclipse input data in any application that can write CSV files, and use this tool to convert it into Eclipse include files, or it can facilitate operations/manipulations of an existing deck using any tool @@ -18,5 +18,5 @@ supported keywords for a submodule which is also found in the CSV file provided, will be dumped to output file. .. argparse:: - :ref: ecl2df.csv2ecl.get_parser + :ref: res2df.csv2ecl.get_parser :prog: csv2ecl diff --git a/docs/ecl2csv.rst b/docs/ecl2csv.rst index 89f125e46..4646c0ac9 100644 --- a/docs/ecl2csv.rst +++ b/docs/ecl2csv.rst @@ -1,7 +1,7 @@ ecl2csv ======= -Most of the functionality in ecl2df is exposed to the command line through +Most of the functionality in res2df is exposed to the command line through the script *ecl2csv*. The first argument to this script is always the submodule (subcommand) from which you want functionality. Mandatory argument is always an Eclipse deck or sometimes individual Eclipse include files, and @@ -9,5 +9,5 @@ there is usually an ``--output`` option to specify which file to dump the CSV to. If you want output to your terminal, use ``-`` as the output filename. .. argparse:: - :ref: ecl2df.ecl2csv.get_parser + :ref: res2df.ecl2csv.get_parser :prog: ecl2csv diff --git a/docs/index.rst b/docs/index.rst index b61ddf06d..c1ed59c0e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,7 +1,7 @@ -ecl2df +res2df ====== -ecl2df is a Pandas DataFrame wrapper around libecl and opm.io, which +res2df is a Pandas DataFrame wrapper around libecl and opm.io, which are used to access binary files outputted by the reservoir simulator Eclipse, or its input files --- or any other tool outputting to the same data format. @@ -22,7 +22,7 @@ data format. :maxdepth: 10 :caption: Python API - ecl2df/ecl2df + res2df/res2df Indices and tables ================== diff --git a/docs/installation.rst b/docs/installation.rst index b7b6510d0..89a7a09a3 100644 --- a/docs/installation.rst +++ b/docs/installation.rst @@ -1,18 +1,18 @@ Installation ============ -Internally in Equinor, ecl2df is distributed through Komodo and +Internally in Equinor, res2df is distributed through Komodo and nothing is needed besides activating Komodo. See https://fmu-docs.equinor.com/docs/komodo/equinor_komodo_usage.html for Komodo instructions. -On Linux computers outside Equinor, ecl2df should be installed from +On Linux computers outside Equinor, res2df should be installed from https://pypi.org: .. code-block:: console - pip install ecl2df + pip install res2df For MacOS, the OPM dependency is not available from pypi, and OPM must be compiled manually. diff --git a/docs/introduction.rst b/docs/introduction.rst index e5a8aa131..95f50d06c 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -1,7 +1,7 @@ Introduction ============ -*ecl2df* is a `Pandas DataFrame `_ wrapper +*res2df* is a `Pandas DataFrame `_ wrapper around `libecl `_ and `opm.io `_, which are used to access binary files outputted by the reservoir simulator Eclipse, or its @@ -31,11 +31,11 @@ a Pandas Dataframe. .. 
code-block:: python - import ecl2df + import res2df - eclfiles = ecl2df.EclFiles("MYECLDECK.DATA") - smry = ecl2df.summary.df(eclfiles, column_keys="F*", time_index="monthly") - hc_contacts = ecl2df.pillars.df(eclfiles, rstdates="all") + eclfiles = res2df.EclFiles("MYECLDECK.DATA") + smry = res2df.summary.df(eclfiles, column_keys="F*", time_index="monthly") + hc_contacts = res2df.pillars.df(eclfiles, rstdates="all") See the API for more documentation and possibilities for each module. diff --git a/docs/usage/compdat.rst b/docs/usage/compdat.rst index 1c2ff78b0..e32a37ef5 100644 --- a/docs/usage/compdat.rst +++ b/docs/usage/compdat.rst @@ -10,7 +10,7 @@ statements from the actions in WELOPEN. compdat.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/compdat.csv', index=False) .. code-block:: python - from ecl2df import compdat, EclFiles + from res2df import compdat, EclFiles eclfiles = EclFiles("MYDATADECK.DATA") dframe = compdat.df(eclfiles) diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index d1433a780..ae1c41854 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -1,7 +1,7 @@ equil ----- -This is the ecl2df module for processing the ``SOLUTION`` section of +This is the res2df module for processing the ``SOLUTION`` section of the Eclipse input deck. Supported keywords are ``EQUIL``, ``RSVD``, ``RVVD``, ``PBVD`` and @@ -9,14 +9,14 @@ Supported keywords are ``EQUIL``, ``RSVD``, ``RVVD``, ``PBVD`` and .. code-block:: python - from ecl2df import equil, EclFiles + from res2df import equil, EclFiles dframe = equil.df(EclFiles('MYECLDECK.DATA')) Which will provide a dataframe similar to the example below. Note that the column `Z` is used both for datum depth and the depth values in ``RSVD`` tables. The amount of columns obtained depends on the input dataset, and should be possible -to link up with the Eclipse documentation. API doc: :func:`ecl2df.equil.df` +to link up with the Eclipse documentation. API doc: :func:`res2df.equil.df` .. dframe = equil.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) diff --git a/docs/usage/fipnum.inc b/docs/usage/fipnum.inc index 1e30880bb..456c130cf 100644 --- a/docs/usage/fipnum.inc +++ b/docs/usage/fipnum.inc @@ -1,4 +1,4 @@ --- Output file printed by ecl2df.grid 0.6.0 +-- Output file printed by res2df.grid 0.6.0 -- at 2020-04-23 10:46:22.529558 FIPNUM diff --git a/docs/usage/fipreports.rst b/docs/usage/fipreports.rst index 905b69894..fbf24c495 100644 --- a/docs/usage/fipreports.rst +++ b/docs/usage/fipreports.rst @@ -17,7 +17,7 @@ This table found in a PRT file will be parsed to the following dataframe: :header-rows: 1 In this particular example, ``FIPZON`` was selected explicitly, either using the command line client or the Python API -through an option to the :func:`ecl2df.fipreports.df` function. +through an option to the :func:`res2df.fipreports.df` function. Using this module is easiest through ``ecl2csv fipreports``. diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst index 35a4e51f6..7a1112bb9 100644 --- a/docs/usage/grid.rst +++ b/docs/usage/grid.rst @@ -9,12 +9,12 @@ Typical usage .. code-block:: python - from ecl2df import grid, EclFiles + from res2df import grid, EclFiles eclfiles = EclFiles('MYDATADECK.DATA') dframe = grid.df(eclfiles, rstdates='last') -where the API is documented at :func:`ecl2df.grid.df`. +where the API is documented at :func:`res2df.grid.df`. .. 
eclfiles = EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA') @@ -110,20 +110,20 @@ the whereabouts of the file: .. code-block:: python - from ecl2df import grid, EclFiles, common + from res2df import grid, EclFiles, common eclfiles = EclFiles("'MYDATADECK.DATA") dframe = grid.df(eclfiles) # The filename with layers is relative to DATA-file location # or an absolute path. - subzonemap = ecl2df.common.parse_zonemapfile("subzones.lyr") + subzonemap = res2df.common.parse_zonemapfile("subzones.lyr") dframe_with_subzones = common.merge_zones( dframe, subzonemap, zoneheader="SUBZONE", kname="K" ) For more control over merging of zones, check the documentation for -the function :func:`ecl2df.common.merge_zones` and -:meth:`ecl2df.common.parse_zonemapfile` +the function :func:`res2df.common.merge_zones` and +:meth:`res2df.common.parse_zonemapfile` Dynamic data ^^^^^^^^^^^^ @@ -147,7 +147,7 @@ Generating Eclipse include files from grid data If you have loaded grid data into a Pandas frame, some operations are easily performed, scaling porosity, permeability etc. Or remapping some region parameters. Using the -:func:`ecl2df.grid.df2ecl()` function these manipulated vectors can be written back as +:func:`res2df.grid.df2ecl()` function these manipulated vectors can be written back as include files to Eclipse. Say you want to change the FIPNUM, and that FIPNUM 6 should be removed, and set @@ -155,7 +155,7 @@ it to FIPNUM 5. This can be accomplished using .. code-block:: python - from ecl2df import grid, EclFiles, common + from res2df import grid, EclFiles, common eclfiles = EclFiles("'MYDATADECK.DATA") dframe = grid.df(eclfiles) diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index ba8205612..2e94cfb7b 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -15,7 +15,7 @@ Run in serial to get this output. .. code-block:: python - from ecl2df import nnc, EclFiles + from res2df import nnc, EclFiles eclfiles = EclFiles('MYDATADECK.DATA') dframe = nnc.df(eclfiles) @@ -36,7 +36,7 @@ average of the xyz for each of the cells involved in a connection pair) as extra columns. If you only want vertical connections, add the option ``--pillars`` or ``-vertical``, -or set ``pillars=True`` if using the Python API (:func:`ecl2df.nnc.df`) +or set ``pillars=True`` if using the Python API (:func:`res2df.nnc.df`) ``EDITNNC`` export ^^^^^^^^^^^^^^^^^^ diff --git a/docs/usage/pillars.rst b/docs/usage/pillars.rst index 5e8ba7936..e5949cfe5 100644 --- a/docs/usage/pillars.rst +++ b/docs/usage/pillars.rst @@ -13,9 +13,9 @@ Typical usage is to obtain property statistics, and compute contacts pr. pillar (and optionally pr some region parameter). .. - from ecl2df import pillars, EclFiles - pillars.df(ecl2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) - pillars.df(ecl2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head().to_csv("pillars-example1.csv"float_format="%.1f", index=False)) + from res2df import pillars, EclFiles + pillars.df(res2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) + pillars.df(res2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head().to_csv("pillars-example1.csv"float_format="%.1f", index=False)) .. csv-table:: Example pillar table :file: pillars-example1.csv @@ -33,7 +33,7 @@ repeated for each region value where it exists. 
Dynamic data, volumes and fluid contacts ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -The API :func:`ecl2df.pillars.df` and command line client allows specifying +The API :func:`res2df.pillars.df` and command line client allows specifying dates if dynamic data should be included through the ``rstdates`` option to the API or the ``--rstdates`` option on the command line. Providing dates as an option will trigger computation of phase volumes ``WATVOL``, ``OILVOL``, and @@ -55,7 +55,7 @@ Gas-water contact is only computed when ``SOIL`` is not present in the simulation (two-phase runs), it will be the deepest cell centre with gas saturation above sgascutoff, among those pillars with at least one cell above ``swatcutoff``. See the API documentation, -:func:`ecl2df.pillars.compute_pillar_contacts`. +:func:`res2df.pillars.compute_pillar_contacts`. The functionality is also available through the command line tool ``ecl2csv pillars`` as in the example: @@ -80,7 +80,7 @@ using ``--group`` to the command line client, and add optionally a ``--region`` parameter to group over a particular region, typically ``EQLNUM``. The Python API will group over any data that is supplied via the ``region`` -option, check :func:`ecl2df.pillars.df` +option, check :func:`res2df.pillars.df` Stacked version @@ -90,14 +90,14 @@ By default, dynamic data are added as a set of columns for every date, like in this example: .. - pillars.df(ecl2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), rstdates='all').dropna().head().to_csv('pillars-dyn1-unstacked.csv', float_format="%.1f", index=False) + pillars.df(res2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), rstdates='all').dropna().head().to_csv('pillars-dyn1-unstacked.csv', float_format="%.1f", index=False) .. csv-table:: Example pillar table with dynamical data, unstacked :file: pillars-dyn1-unstacked.csv :header-rows: 1 This may be what you want, however it is also possible to have ``DATE`` as a column, -obtained by triggering the stacking option in :func:`ecl2df.pillars.df` or +obtained by triggering the stacking option in :func:`res2df.pillars.df` or ``--stackdates`` on the command line and get data like this: diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index 265ab570c..fac399cb6 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -9,7 +9,7 @@ Example usage: .. code-block:: python - from ecl2df import pvt, EclFiles + from res2df import pvt, EclFiles eclfiles = EclFiles("MYDATADECK.DATA") dframe = pvt.df(eclfiles) @@ -79,8 +79,8 @@ Eclipse from your modified data by issuing When injecting this produced ``pvt.inc`` into any new Eclipse deck, ensure you check which keywords have been written out, compared to what you gave in to -`ecl2df.pvt` above. Any non-supported keywords will get lost in the import phase -and need to be catered for outside ecl2df. +`res2df.pvt` above. Any non-supported keywords will get lost in the import phase +and need to be catered for outside res2df. The last step can also be done using the ``csv2ecl`` command line utility if you dump to CSV from your Python code instead. diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index cdb25423a..05ef3dcdf 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -15,7 +15,7 @@ column. .. 
code-block:: python - from ecl2df import satfunc, EclFiles + from res2df import satfunc, EclFiles eclfiles = EclFiles('MYDATADECK.DATA') dframe = satfunc.df(eclfiles) @@ -57,7 +57,7 @@ the command For a dataframe or a CSV file in the format provided by this module, an Eclipse include file can be generated either with the Python API -:func:`ecl2df.satfunc.df2ecl` function or the command +:func:`res2df.satfunc.df2ecl` function or the command .. code-block:: console @@ -79,7 +79,7 @@ because you need to avoid SOWCR + SWCR overshooting 1, you can write a code .. code-block:: python - from ecl2df import satfunc + from res2df import satfunc # Read an Eclipse include file directly into a DataFrame with open("relperm.inc") as f_handle: @@ -109,13 +109,13 @@ The pyscal library Manipulation of curve shapes or potentially interpolation between curves is hard to do directly on the dataframes. Before doing manipulations of dataframes in -``ecl2df.satfunc``, consider if it is better to implement the manipulations +``res2df.satfunc``, consider if it is better to implement the manipulations through the `pyscal `_ library. Pyscal can create curves from parametrizations, and interpolate between curves. Pyscal can create initialize its relperm objects from Eclipse include files -though the parsing capabilities of ecl2df.satfunc. +though the parsing capabilities of res2df.satfunc. -The function ``pyscal.pyscallist.df()`` is analogous to ``ecl2df.satfunc.df()`` in -what it produces, and the :func:`ecl2df.satfunc.df2ecl()` can be used on both +The function ``pyscal.pyscallist.df()`` is analogous to ``res2df.satfunc.df()`` in +what it produces, and the :func:`res2df.satfunc.df2ecl()` can be used on both (potentially with some filtering needed.). diff --git a/docs/usage/summary.rst b/docs/usage/summary.rst index 0b7c01d06..968d37670 100644 --- a/docs/usage/summary.rst +++ b/docs/usage/summary.rst @@ -9,7 +9,7 @@ Pandas Dataframes. .. code-block:: python - from ecl2df import summary, EclFiles + from res2df import summary, EclFiles eclfiles = EclFiles("MYDATADECK.DATA") dframe = summary.df(eclfiles, column_keys="F*PT", time_index="yearly") @@ -21,7 +21,7 @@ Eclipse equivalent to ``time_index="raw"``, other options are *daily*, *weekly*, data. Additional arguments are available, see the -`API documentation `_ +`API documentation `_ for an extensive overview. .. csv-table:: Example summary table diff --git a/docs/usage/trans.rst b/docs/usage/trans.rst index d27c00b4c..8e18ebf9c 100644 --- a/docs/usage/trans.rst +++ b/docs/usage/trans.rst @@ -4,20 +4,20 @@ trans The trans module can extract transmissibilities (neighbour and non-neigbor-connections) from a simulation grid. -Python API: :func:`ecl2df.trans.df` +Python API: :func:`res2df.trans.df` Applied on an Eclipse deck, the *trans* module will give out a dataframe of neighbour connections .. code-block:: python - from ecl2df import trans, EclFiles + from res2df import trans, EclFiles eclfiles = EclFiles("MYDATADECK.DATA") - dframe = ecl2df.trans.df(eclfiles) + dframe = res2df.trans.df(eclfiles) .. - ecl2df.trans.df(ecl2df.EclFiles("2_R001_REEK-0.DATA")).sample(7)\ + res2df.trans.df(res2df.EclFiles("2_R001_REEK-0.DATA")).sample(7)\ .to_csv("trans1.csv", float_format="%.2f", index=False) .. csv-table:: Neighbour transmissibilities, sample rows from an example simulation. @@ -79,12 +79,12 @@ like this. Example: .. 
code-block:: python - dframe = ecl2df.trans.df(eclfiles, vectors="FIPNUM", boundaryfilter=True, addnnc=True) + dframe = res2df.trans.df(eclfiles, vectors="FIPNUM", boundaryfilter=True, addnnc=True) which gives the dataframe .. - ecl2df.trans.df(ecl2df.EclFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", boundaryfilter=True).sample(10).to_csv("trans-boundaries.csv", index=False, float_format="%.2f") + res2df.trans.df(res2df.EclFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", boundaryfilter=True).sample(10).to_csv("trans-boundaries.csv", index=False, float_format="%.2f") .. csv-table:: Sample rows from connections where FIPNUM is changing :file: trans-boundaries.csv @@ -105,13 +105,13 @@ over a region interface. This is accomplished by adding the ``group=True`` optio .. code-block:: python - from ecl2df import trans, EclFiles + from res2df import trans, EclFiles eclfiles = EclFiles("MYDATADECK.DATA") - dframe = ecl2df.trans.df(eclfiles, vectors="FIPNUM", addnnc=True, group=True) + dframe = res2df.trans.df(eclfiles, vectors="FIPNUM", addnnc=True, group=True) .. - ecl2df.trans.df(ecl2df.EclFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", group=True).to_csv("trans-group.csv", index=False, float_format="%.2f") + res2df.trans.df(res2df.EclFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", group=True).to_csv("trans-group.csv", index=False, float_format="%.2f") .. csv-table:: Transmissibilities summed over each FIPNUM interface :file: trans-group.csv diff --git a/docs/usage/wcon.rst b/docs/usage/wcon.rst index 35f8ea4a8..393607935 100644 --- a/docs/usage/wcon.rst +++ b/docs/usage/wcon.rst @@ -8,7 +8,7 @@ WCONPROD from an Eclipse deck. wcon.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/wcon.csv', index=False) .. code-block:: python - from ecl2df import wcon, EclFiles + from res2df import wcon, EclFiles eclfiles = EclFiles("MYDATADECK.DATA") dframe = wcon.df(eclfiles) diff --git a/ecl2df/__init__.py b/res2df/__init__.py similarity index 96% rename from ecl2df/__init__.py rename to res2df/__init__.py index cf264352f..e0b530b6d 100644 --- a/ecl2df/__init__.py +++ b/res2df/__init__.py @@ -35,7 +35,7 @@ def getLogger_ecl2csv( - module_name: str = "ecl2df", args_dict: Optional[Dict[str, Union[str, bool]]] = None + module_name: str = "res2df", args_dict: Optional[Dict[str, Union[str, bool]]] = None ) -> logging.Logger: # pylint: disable=invalid-name """Provide a custom logger for ecl2csv and csv2ecl @@ -92,4 +92,4 @@ def getLogger_ecl2csv( for submodule in SUBMODULES + ["ecl2csv", "csv2ecl"]: - importlib.import_module("ecl2df." + submodule) + importlib.import_module("res2df." + submodule) diff --git a/ecl2df/common.py b/res2df/common.py similarity index 98% rename from ecl2df/common.py rename to res2df/common.py index c9aef1e18..a59e2590a 100644 --- a/ecl2df/common.py +++ b/res2df/common.py @@ -1,4 +1,4 @@ -"""Common functions for ecl2df modules""" +"""Common functions for res2df modules""" import argparse import datetime @@ -26,10 +26,10 @@ # to be included in DeckItem objects. 
from opm.io.deck import DeckKeyword # noqa except ImportError: - # Allow parts of ecl2df to work without OPM: + # Allow parts of res2df to work without OPM: pass -from ecl2df import __version__ +from res2df import __version__ from .constants import MAGIC_STDOUT @@ -450,7 +450,7 @@ def handle_wanted_keywords( not_supported: Set[str] = set(wanted) - set(supported) if not_supported: logger.warning( - "Requested keyword(s) not supported by ecl2df.%s: %s", + "Requested keyword(s) not supported by res2df.%s: %s", modulename, str(not_supported), ) @@ -481,7 +481,7 @@ def fill_reverse_parser( defaultoutputfile: Default output filename """ parser.add_argument( - "csvfile", help="Name of CSV file with " + modulename + " data on ecl2df format" + "csvfile", help="Name of CSV file with " + modulename + " data on res2df format" ) parser.add_argument( "-o", @@ -516,7 +516,7 @@ def df2ecl( consecutive: Optional[str] = None, filename: Optional[str] = None, ) -> str: - """Generate Eclipse include strings from dataframes in ecl2df format. + """Generate Eclipse include strings from dataframes in res2df format. This function hands over the actual text generation pr. keyword to functions named df2ecl_ in the calling module. @@ -525,7 +525,7 @@ def df2ecl( for the actual string construction. Args: - dataframe: Dataframe with Eclipse data on ecl2df format. + dataframe: Dataframe with Eclipse data on res2df format. keywords: List of keywords to include. Will be reduced to the set of keywords available in dataframe and to those supported comments: Dictionary indexed by keyword with comments to be @@ -596,7 +596,7 @@ def df2ecl( return "" string = "" - ecl2df_header = ( + res2df_header = ( "Output file printed by " + calling_module.__name__ # type: ignore + " " @@ -605,7 +605,7 @@ def df2ecl( + " at " + str(datetime.datetime.now()) ) - string += comment_formatter(ecl2df_header) + string += comment_formatter(res2df_header) string += "\n" if "master" in comments: string += comment_formatter(comments["master"]) diff --git a/ecl2df/compdat.py b/res2df/compdat.py similarity index 99% rename from ecl2df/compdat.py rename to res2df/compdat.py index dace91cd4..7d5a23ae5 100644 --- a/ecl2df/compdat.py +++ b/res2df/compdat.py @@ -22,10 +22,10 @@ # pylint: disable=unused-import import opm.io.deck except ImportError: - # Allow parts of ecl2df to work without OPM: + # Allow parts of res2df to work without OPM: pass -from ecl2df import getLogger_ecl2csv +from res2df import getLogger_ecl2csv from .common import ( get_wells_matching_template, @@ -517,7 +517,7 @@ def unroll_complump(complump_df: pd.DataFrame) -> pd.DataFrame: ) if val_i == 0 or val_j == 0 or val_k1 == 0 or val_k2 == 0: raise ValueError( - f"Defaulted COMPLUMP coordinates are not supported in ecl2df: {row}" + f"Defaulted COMPLUMP coordinates are not supported in res2df: {row}" ) if val_k2 < val_k1: raise ValueError(f"K2 must be equal to or greater than K1: {row}") diff --git a/ecl2df/config_jobs/CSV2ECL b/res2df/config_jobs/CSV2ECL similarity index 100% rename from ecl2df/config_jobs/CSV2ECL rename to res2df/config_jobs/CSV2ECL diff --git a/ecl2df/config_jobs/ECL2CSV b/res2df/config_jobs/ECL2CSV similarity index 100% rename from ecl2df/config_jobs/ECL2CSV rename to res2df/config_jobs/ECL2CSV diff --git a/ecl2df/constants.py b/res2df/constants.py similarity index 86% rename from ecl2df/constants.py rename to res2df/constants.py index 545430ed8..75ed52176 100644 --- a/ecl2df/constants.py +++ b/res2df/constants.py @@ -1,4 +1,4 @@ -"""Constants for use in ecl2df.""" 
+"""Constants for use in res2df.""" # This is a magic filename that means read/write from/to stdout # This makes it impossible to write to a file called "-" on disk diff --git a/ecl2df/csv2ecl.py b/res2df/csv2ecl.py similarity index 88% rename from ecl2df/csv2ecl.py rename to res2df/csv2ecl.py index 4cab5970b..279ba4c69 100644 --- a/ecl2df/csv2ecl.py +++ b/res2df/csv2ecl.py @@ -1,13 +1,13 @@ #!/usr/bin/env python """ -Convert dataframes (in ecl2df format) to Eclipse include files, +Convert dataframes (in res2df format) to Eclipse include files, for selected keywords """ import argparse import sys -from ecl2df import __version__, equil, pvt, satfunc, summary, vfp +from res2df import __version__, equil, pvt, satfunc, summary, vfp # String constants in use for generating ERT forward model documentation: DESCRIPTION: str = """Convert CSV files into Eclipse include files. Uses the command @@ -30,8 +30,8 @@ def get_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( - "csv2ecl (" + __version__ + ") is a command line frontend to ecl2df. " - "Documentation at https://equinor.github.io/ecl2df/ " + "csv2ecl (" + __version__ + ") is a command line frontend to res2df. " + "Documentation at https://equinor.github.io/res2df/ " ), ) parser.add_argument( @@ -62,7 +62,7 @@ def get_parser() -> argparse.ArgumentParser: help="Write SOLUTION include files", description=( "Write SOLUTION keywords (EQUIL, RSVD, RVVD) " - "to Eclipse include files from CSV in ecl2df format." + "to Eclipse include files from CSV in res2df format." ), ) equil.fill_reverse_parser(equil_parser) @@ -72,7 +72,7 @@ def get_parser() -> argparse.ArgumentParser: "pvt", help="Write PVT include files", description=( - "Write Eclipse include files from CSV files on the ecl2df format." + "Write Eclipse include files from CSV files on the res2df format." ), ) pvt.fill_reverse_parser(pvt_parser) @@ -83,7 +83,7 @@ def get_parser() -> argparse.ArgumentParser: help="Write saturation function include files", description=( "Write saturation function include files from CSV files on " - "the ecl2df format." + "the res2df format." ), ) satfunc.fill_reverse_parser(satfunc_parser) @@ -93,7 +93,7 @@ def get_parser() -> argparse.ArgumentParser: "vfp", help="Write VFPPROD/VFPINJ include files", description=( - "Write VFPPROD/VFPINJ include files from CSV files on the ecl2df format." + "Write VFPPROD/VFPINJ include files from CSV files on the res2df format." ), ) vfp.fill_reverse_parser(vfp_parser) diff --git a/ecl2df/ecl2csv.py b/res2df/ecl2csv.py similarity index 97% rename from ecl2df/ecl2csv.py rename to res2df/ecl2csv.py index b0b702370..57e487987 100644 --- a/ecl2df/ecl2csv.py +++ b/res2df/ecl2csv.py @@ -1,7 +1,7 @@ #!/usr/bin/env python """ End-user command line tool for accessing functionality -in ecl2df +in res2df """ import argparse import functools @@ -9,7 +9,7 @@ import sys from typing import Optional -from ecl2df import __version__ +from res2df import __version__ # String constants in use for generating ERT forward model documentation: DESCRIPTION: str = """Convert Eclipse input and output files into CSV files, @@ -19,7 +19,7 @@ For supplying options to subcommands, you can use the arguments ```` where ``n`` goes from 1 to 10. -For more documentation, see https://equinor.github.io/ecl2df/. +For more documentation, see https://equinor.github.io/res2df/. 
""" CATEGORY: str = "utility.eclipse" EXAMPLES: str = """ @@ -44,8 +44,8 @@ def get_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( - "ecl2csv (" + __version__ + ") is a command line frontend to ecl2df. " - "Documentation at https://equinor.github.io/ecl2df/ " + "ecl2csv (" + __version__ + ") is a command line frontend to res2df. " + "Documentation at https://equinor.github.io/res2df/ " ), ) parser.add_argument( @@ -244,7 +244,7 @@ def get_parser() -> argparse.ArgumentParser: for submodule, subparser in subparsers_dict.items(): # Use the submodule's fill_parser() to add the submodule specific # arguments: - importlib.import_module("ecl2df." + submodule).fill_parser( # type: ignore + importlib.import_module("res2df." + submodule).fill_parser( # type: ignore subparser ) @@ -286,7 +286,7 @@ def run_subparser_main( Args: args (Namespace): argparse argument namespace - submodule: One of ecl2df's submodules. That module + submodule: One of res2df's submodules. That module must have a function called _main() parser: Used for raising errors. """ @@ -300,7 +300,7 @@ def run_subparser_main( if len(positionals) > 1 and parser is not None: parser.error(f"Unknown argument in {positionals}") - mod = importlib.import_module("ecl2df." + submodule) + mod = importlib.import_module("res2df." + submodule) main_func = getattr(mod, submodule + "_main") main_func(args) diff --git a/ecl2df/eclfiles.py b/res2df/eclfiles.py similarity index 99% rename from ecl2df/eclfiles.py rename to res2df/eclfiles.py index 91186b90b..2fca379eb 100644 --- a/ecl2df/eclfiles.py +++ b/res2df/eclfiles.py @@ -18,7 +18,7 @@ from resdata.resfile import ResdataFile from resdata.summary import Summary -from ecl2df import common +from res2df import common logger = logging.getLogger(__name__) diff --git a/ecl2df/equil.py b/res2df/equil.py similarity index 99% rename from ecl2df/equil.py rename to res2df/equil.py index 2a91e6796..2f7c73a82 100644 --- a/ecl2df/equil.py +++ b/res2df/equil.py @@ -9,7 +9,7 @@ import pandas as pd -from ecl2df import common, getLogger_ecl2csv, inferdims +from res2df import common, getLogger_ecl2csv, inferdims from .eclfiles import EclFiles @@ -363,7 +363,7 @@ def df2ecl( solution (EQUIL, RSVD++) data. Args: - equil_df: Dataframe with data on ecl2df format. + equil_df: Dataframe with data on res2df format. keywords: List of keywords to include. Must be supported and present in the incoming dataframe. 
comments: Dictionary indexed by keyword with comments to be diff --git a/ecl2df/faults.py b/res2df/faults.py similarity index 96% rename from ecl2df/faults.py rename to res2df/faults.py index c168106fe..3a2031d21 100644 --- a/ecl2df/faults.py +++ b/res2df/faults.py @@ -10,8 +10,8 @@ import pandas as pd -from ecl2df import EclFiles, getLogger_ecl2csv -from ecl2df.common import parse_opmio_deckrecord, write_dframe_stdout_file +from res2df import EclFiles, getLogger_ecl2csv +from res2df.common import parse_opmio_deckrecord, write_dframe_stdout_file try: # Needed for mypy diff --git a/ecl2df/fipreports.py b/res2df/fipreports.py similarity index 98% rename from ecl2df/fipreports.py rename to res2df/fipreports.py index de94f3329..4107161f2 100644 --- a/ecl2df/fipreports.py +++ b/res2df/fipreports.py @@ -10,8 +10,8 @@ import numpy as np import pandas as pd -from ecl2df import EclFiles, getLogger_ecl2csv -from ecl2df.common import parse_ecl_month, write_dframe_stdout_file +from res2df import EclFiles, getLogger_ecl2csv +from res2df.common import parse_ecl_month, write_dframe_stdout_file logger = logging.getLogger(__name__) diff --git a/ecl2df/grid.py b/res2df/grid.py similarity index 99% rename from ecl2df/grid.py rename to res2df/grid.py index 2af2755ca..98a782b61 100644 --- a/ecl2df/grid.py +++ b/res2df/grid.py @@ -24,7 +24,7 @@ import pyarrow.feather from resdata.resfile import ResdataFile -from ecl2df import __version__, common, getLogger_ecl2csv +from res2df import __version__, common, getLogger_ecl2csv from .eclfiles import EclFiles @@ -695,9 +695,9 @@ def df2ecl( active_cells = len(grid_df[grid_df.index >= 0]) logger.warning("Global grid size estimated to %s", str(global_size)) - ecl2df_header = ( + res2df_header = ( "Output file printed by " - + "ecl2df.grid " + + "res2df.grid " + __version__ + "\n" + " at " @@ -706,7 +706,7 @@ def df2ecl( string = "" if not nocomments: - string += common.comment_formatter(ecl2df_header) + string += common.comment_formatter(res2df_header) string += "\n" # If we have NaNs in the dataframe, we will be more careful (costs memory) diff --git a/ecl2df/gruptree.py b/res2df/gruptree.py similarity index 99% rename from ecl2df/gruptree.py rename to res2df/gruptree.py index 0e95344ac..882c347d8 100644 --- a/ecl2df/gruptree.py +++ b/res2df/gruptree.py @@ -19,8 +19,8 @@ except ImportError: pass -from ecl2df import EclFiles, getLogger_ecl2csv -from ecl2df.common import ( +from res2df import EclFiles, getLogger_ecl2csv +from res2df.common import ( parse_opmio_date_rec, parse_opmio_deckrecord, parse_opmio_tstep_rec, diff --git a/ecl2df/hook_implementations/__init__.py b/res2df/hook_implementations/__init__.py similarity index 100% rename from ecl2df/hook_implementations/__init__.py rename to res2df/hook_implementations/__init__.py diff --git a/ecl2df/hook_implementations/jobs.py b/res2df/hook_implementations/jobs.py similarity index 85% rename from ecl2df/hook_implementations/jobs.py rename to res2df/hook_implementations/jobs.py index d4f7485e0..7efcf09c2 100644 --- a/ecl2df/hook_implementations/jobs.py +++ b/res2df/hook_implementations/jobs.py @@ -18,7 +18,7 @@ def decorator(func): def _get_jobs_from_directory(directory): - resource_directory = Path(sys.modules["ecl2df"].__file__).parent / directory + resource_directory = Path(sys.modules["res2df"].__file__).parent / directory all_files = [ resource_directory / filename @@ -29,7 +29,7 @@ def _get_jobs_from_directory(directory): @hook_implementation -@plugin_response(plugin_name="ecl2df") 
+@plugin_response(plugin_name="res2df") def installable_jobs(): return _get_jobs_from_directory("config_jobs") @@ -44,13 +44,13 @@ def _get_module_variable_if_exists(module_name, variable_name, default=""): @hook_implementation -@plugin_response(plugin_name="ecl2df") +@plugin_response(plugin_name="res2df") def job_documentation(job_name): - ecl2df_jobs = set(installable_jobs().data.keys()) - if job_name not in ecl2df_jobs: + res2df_jobs = set(installable_jobs().data.keys()) + if job_name not in res2df_jobs: return None - module_name = f"ecl2df.{job_name.lower()}" + module_name = f"res2df.{job_name.lower()}" description = _get_module_variable_if_exists( module_name=module_name, variable_name="DESCRIPTION" diff --git a/ecl2df/inferdims.py b/res2df/inferdims.py similarity index 98% rename from ecl2df/inferdims.py rename to res2df/inferdims.py index ed76f049c..aa0f9f5eb 100644 --- a/ecl2df/inferdims.py +++ b/res2df/inferdims.py @@ -9,10 +9,10 @@ try: import opm.io except ImportError: - # Let parts of ecl2df work without OPM: + # Let parts of res2df work without OPM: pass -from ecl2df import EclFiles +from res2df import EclFiles logger = logging.getLogger(__name__) @@ -48,7 +48,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: if dimitem not in [0]: raise ValueError("Only item 0 in EQLDIMS can be estimated") - # A less than ecl2df-standard permissive opm.io, when using + # A less than res2df-standard permissive opm.io, when using # this one opm.io will fail if there are extra records # in tables (if NTSFUN in TABDIMS is wrong f.ex): opmioparser_recovery_fail_extra_records = [ diff --git a/ecl2df/nnc.py b/res2df/nnc.py similarity index 97% rename from ecl2df/nnc.py rename to res2df/nnc.py index b201ecac2..b5ce0e56b 100644 --- a/ecl2df/nnc.py +++ b/res2df/nnc.py @@ -10,8 +10,8 @@ import pandas as pd -from ecl2df import EclFiles, __version__, common, getLogger_ecl2csv, grid -from ecl2df.common import write_dframe_stdout_file +from res2df import EclFiles, __version__, common, getLogger_ecl2csv, grid +from res2df.common import write_dframe_stdout_file logger: logging.Logger = logging.getLogger(__name__) @@ -234,8 +234,8 @@ def df2ecl_editnnc( """ string = "" - ecl2df_header = ( - "Output file printed by ecl2df.nnc" + res2df_header = ( + "Output file printed by res2df.nnc" + " " + __version__ + "\n" @@ -243,7 +243,7 @@ def df2ecl_editnnc( + str(datetime.datetime.now()) ) if not nocomments: - string += common.comment_formatter(ecl2df_header) + string += common.comment_formatter(res2df_header) string += "\n" if "DIR" in nnc_df: diff --git a/ecl2df/opmkeywords/BRANPROP b/res2df/opmkeywords/BRANPROP similarity index 100% rename from ecl2df/opmkeywords/BRANPROP rename to res2df/opmkeywords/BRANPROP diff --git a/ecl2df/opmkeywords/COMPDAT b/res2df/opmkeywords/COMPDAT similarity index 100% rename from ecl2df/opmkeywords/COMPDAT rename to res2df/opmkeywords/COMPDAT diff --git a/ecl2df/opmkeywords/COMPLUMP b/res2df/opmkeywords/COMPLUMP similarity index 100% rename from ecl2df/opmkeywords/COMPLUMP rename to res2df/opmkeywords/COMPLUMP diff --git a/ecl2df/opmkeywords/COMPSEGS b/res2df/opmkeywords/COMPSEGS similarity index 100% rename from ecl2df/opmkeywords/COMPSEGS rename to res2df/opmkeywords/COMPSEGS diff --git a/ecl2df/opmkeywords/DENSITY b/res2df/opmkeywords/DENSITY similarity index 100% rename from ecl2df/opmkeywords/DENSITY rename to res2df/opmkeywords/DENSITY diff --git a/ecl2df/opmkeywords/EQLDIMS b/res2df/opmkeywords/EQLDIMS similarity index 100% rename from 
ecl2df/opmkeywords/EQLDIMS rename to res2df/opmkeywords/EQLDIMS diff --git a/ecl2df/opmkeywords/EQUIL b/res2df/opmkeywords/EQUIL similarity index 100% rename from ecl2df/opmkeywords/EQUIL rename to res2df/opmkeywords/EQUIL diff --git a/ecl2df/opmkeywords/FAULTS b/res2df/opmkeywords/FAULTS similarity index 100% rename from ecl2df/opmkeywords/FAULTS rename to res2df/opmkeywords/FAULTS diff --git a/ecl2df/opmkeywords/GRUPNET b/res2df/opmkeywords/GRUPNET similarity index 100% rename from ecl2df/opmkeywords/GRUPNET rename to res2df/opmkeywords/GRUPNET diff --git a/ecl2df/opmkeywords/GRUPTREE b/res2df/opmkeywords/GRUPTREE similarity index 100% rename from ecl2df/opmkeywords/GRUPTREE rename to res2df/opmkeywords/GRUPTREE diff --git a/ecl2df/opmkeywords/NODEPROP b/res2df/opmkeywords/NODEPROP similarity index 100% rename from ecl2df/opmkeywords/NODEPROP rename to res2df/opmkeywords/NODEPROP diff --git a/ecl2df/opmkeywords/PBVD b/res2df/opmkeywords/PBVD similarity index 100% rename from ecl2df/opmkeywords/PBVD rename to res2df/opmkeywords/PBVD diff --git a/ecl2df/opmkeywords/PDVD b/res2df/opmkeywords/PDVD similarity index 100% rename from ecl2df/opmkeywords/PDVD rename to res2df/opmkeywords/PDVD diff --git a/ecl2df/opmkeywords/PVDG b/res2df/opmkeywords/PVDG similarity index 100% rename from ecl2df/opmkeywords/PVDG rename to res2df/opmkeywords/PVDG diff --git a/ecl2df/opmkeywords/PVDO b/res2df/opmkeywords/PVDO similarity index 100% rename from ecl2df/opmkeywords/PVDO rename to res2df/opmkeywords/PVDO diff --git a/ecl2df/opmkeywords/PVTG b/res2df/opmkeywords/PVTG similarity index 100% rename from ecl2df/opmkeywords/PVTG rename to res2df/opmkeywords/PVTG diff --git a/ecl2df/opmkeywords/PVTO b/res2df/opmkeywords/PVTO similarity index 100% rename from ecl2df/opmkeywords/PVTO rename to res2df/opmkeywords/PVTO diff --git a/ecl2df/opmkeywords/PVTW b/res2df/opmkeywords/PVTW similarity index 100% rename from ecl2df/opmkeywords/PVTW rename to res2df/opmkeywords/PVTW diff --git a/ecl2df/opmkeywords/ROCK b/res2df/opmkeywords/ROCK similarity index 100% rename from ecl2df/opmkeywords/ROCK rename to res2df/opmkeywords/ROCK diff --git a/ecl2df/opmkeywords/RSVD b/res2df/opmkeywords/RSVD similarity index 100% rename from ecl2df/opmkeywords/RSVD rename to res2df/opmkeywords/RSVD diff --git a/ecl2df/opmkeywords/RVVD b/res2df/opmkeywords/RVVD similarity index 100% rename from ecl2df/opmkeywords/RVVD rename to res2df/opmkeywords/RVVD diff --git a/ecl2df/opmkeywords/SGFN b/res2df/opmkeywords/SGFN similarity index 100% rename from ecl2df/opmkeywords/SGFN rename to res2df/opmkeywords/SGFN diff --git a/ecl2df/opmkeywords/SGOF b/res2df/opmkeywords/SGOF similarity index 100% rename from ecl2df/opmkeywords/SGOF rename to res2df/opmkeywords/SGOF diff --git a/ecl2df/opmkeywords/SGWFN b/res2df/opmkeywords/SGWFN similarity index 100% rename from ecl2df/opmkeywords/SGWFN rename to res2df/opmkeywords/SGWFN diff --git a/ecl2df/opmkeywords/SLGOF b/res2df/opmkeywords/SLGOF similarity index 100% rename from ecl2df/opmkeywords/SLGOF rename to res2df/opmkeywords/SLGOF diff --git a/ecl2df/opmkeywords/SOF2 b/res2df/opmkeywords/SOF2 similarity index 100% rename from ecl2df/opmkeywords/SOF2 rename to res2df/opmkeywords/SOF2 diff --git a/ecl2df/opmkeywords/SOF3 b/res2df/opmkeywords/SOF3 similarity index 100% rename from ecl2df/opmkeywords/SOF3 rename to res2df/opmkeywords/SOF3 diff --git a/ecl2df/opmkeywords/SWFN b/res2df/opmkeywords/SWFN similarity index 100% rename from ecl2df/opmkeywords/SWFN rename to res2df/opmkeywords/SWFN diff --git 
a/ecl2df/opmkeywords/SWOF b/res2df/opmkeywords/SWOF similarity index 100% rename from ecl2df/opmkeywords/SWOF rename to res2df/opmkeywords/SWOF diff --git a/ecl2df/opmkeywords/TABDIMS b/res2df/opmkeywords/TABDIMS similarity index 100% rename from ecl2df/opmkeywords/TABDIMS rename to res2df/opmkeywords/TABDIMS diff --git a/ecl2df/opmkeywords/VFPINJ b/res2df/opmkeywords/VFPINJ similarity index 100% rename from ecl2df/opmkeywords/VFPINJ rename to res2df/opmkeywords/VFPINJ diff --git a/ecl2df/opmkeywords/VFPPROD b/res2df/opmkeywords/VFPPROD similarity index 100% rename from ecl2df/opmkeywords/VFPPROD rename to res2df/opmkeywords/VFPPROD diff --git a/ecl2df/opmkeywords/WCONHIST b/res2df/opmkeywords/WCONHIST similarity index 100% rename from ecl2df/opmkeywords/WCONHIST rename to res2df/opmkeywords/WCONHIST diff --git a/ecl2df/opmkeywords/WCONINJE b/res2df/opmkeywords/WCONINJE similarity index 100% rename from ecl2df/opmkeywords/WCONINJE rename to res2df/opmkeywords/WCONINJE diff --git a/ecl2df/opmkeywords/WCONINJH b/res2df/opmkeywords/WCONINJH similarity index 100% rename from ecl2df/opmkeywords/WCONINJH rename to res2df/opmkeywords/WCONINJH diff --git a/ecl2df/opmkeywords/WCONPROD b/res2df/opmkeywords/WCONPROD similarity index 100% rename from ecl2df/opmkeywords/WCONPROD rename to res2df/opmkeywords/WCONPROD diff --git a/ecl2df/opmkeywords/WELOPEN b/res2df/opmkeywords/WELOPEN similarity index 100% rename from ecl2df/opmkeywords/WELOPEN rename to res2df/opmkeywords/WELOPEN diff --git a/ecl2df/opmkeywords/WELSEGS b/res2df/opmkeywords/WELSEGS similarity index 100% rename from ecl2df/opmkeywords/WELSEGS rename to res2df/opmkeywords/WELSEGS diff --git a/ecl2df/opmkeywords/WELSPECS b/res2df/opmkeywords/WELSPECS similarity index 100% rename from ecl2df/opmkeywords/WELSPECS rename to res2df/opmkeywords/WELSPECS diff --git a/ecl2df/opmkeywords/WLIST b/res2df/opmkeywords/WLIST similarity index 100% rename from ecl2df/opmkeywords/WLIST rename to res2df/opmkeywords/WLIST diff --git a/ecl2df/opmkeywords/WSEGAICD b/res2df/opmkeywords/WSEGAICD similarity index 100% rename from ecl2df/opmkeywords/WSEGAICD rename to res2df/opmkeywords/WSEGAICD diff --git a/ecl2df/opmkeywords/WSEGSICD b/res2df/opmkeywords/WSEGSICD similarity index 100% rename from ecl2df/opmkeywords/WSEGSICD rename to res2df/opmkeywords/WSEGSICD diff --git a/ecl2df/opmkeywords/WSEGVALV b/res2df/opmkeywords/WSEGVALV similarity index 100% rename from ecl2df/opmkeywords/WSEGVALV rename to res2df/opmkeywords/WSEGVALV diff --git a/ecl2df/opmkeywords/readme b/res2df/opmkeywords/readme similarity index 79% rename from ecl2df/opmkeywords/readme rename to res2df/opmkeywords/readme index fe7237899..fd341e5df 100644 --- a/ecl2df/opmkeywords/readme +++ b/res2df/opmkeywords/readme @@ -1,6 +1,6 @@ This directory contains JSON files downloaded from https://github.com/OPM/opm-common/tree/master/src/opm/parser/eclipse/share/keywords/000_Eclipse100 -When a new keyword is to be supported by ecl2df, add it in +When a new keyword is to be supported by res2df, add it in runmetoupdate.sh AND add it to the list of supported keywords in common.py diff --git a/ecl2df/opmkeywords/runmetoupdate.sh b/res2df/opmkeywords/runmetoupdate.sh similarity index 100% rename from ecl2df/opmkeywords/runmetoupdate.sh rename to res2df/opmkeywords/runmetoupdate.sh diff --git a/ecl2df/parameters.py b/res2df/parameters.py similarity index 99% rename from ecl2df/parameters.py rename to res2df/parameters.py index b97af9c57..6b7ea1b57 100644 --- a/ecl2df/parameters.py +++ 
b/res2df/parameters.py @@ -10,7 +10,7 @@ import pandas as pd import yaml -from ecl2df.eclfiles import EclFiles +from res2df.eclfiles import EclFiles logger = logging.getLogger(__name__) diff --git a/ecl2df/pillars.py b/res2df/pillars.py similarity index 99% rename from ecl2df/pillars.py rename to res2df/pillars.py index 5d6bf63f5..96000238e 100644 --- a/ecl2df/pillars.py +++ b/res2df/pillars.py @@ -8,7 +8,7 @@ import dateutil.parser import pandas as pd -from ecl2df import EclFiles, common, getLogger_ecl2csv, grid +from res2df import EclFiles, common, getLogger_ecl2csv, grid logger: logging.Logger = logging.getLogger(__name__) diff --git a/ecl2df/pvt.py b/res2df/pvt.py similarity index 99% rename from ecl2df/pvt.py rename to res2df/pvt.py index 3dd0a6101..05d9ee894 100644 --- a/ecl2df/pvt.py +++ b/res2df/pvt.py @@ -11,7 +11,7 @@ import pandas as pd -from ecl2df import EclFiles, common, getLogger_ecl2csv, inferdims +from res2df import EclFiles, common, getLogger_ecl2csv, inferdims try: # Needed for mypy @@ -332,7 +332,7 @@ def df2ecl( """Generate Eclipse include strings from PVT dataframes Args: - pvt_df: Dataframe with PVT data on ecl2df format. + pvt_df: Dataframe with PVT data in res2df format. keywords: List of keywords to include. Must be supported and present in the incoming dataframe. comments: Dictionary indexed by keyword with comments to be diff --git a/ecl2df/rft.py b/res2df/rft.py similarity index 99% rename from ecl2df/rft.py rename to res2df/rft.py index 9cb22ec68..7dbcde11b 100644 --- a/ecl2df/rft.py +++ b/res2df/rft.py @@ -23,7 +23,7 @@ import pandas as pd from resdata.resfile import ResdataFile -from ecl2df import getLogger_ecl2csv +from res2df import getLogger_ecl2csv from .common import merge_zones, write_dframe_stdout_file from .eclfiles import EclFiles diff --git a/ecl2df/satfunc.py b/res2df/satfunc.py similarity index 98% rename from ecl2df/satfunc.py rename to res2df/satfunc.py index 227038755..5f1d00c46 100644 --- a/ecl2df/satfunc.py +++ b/res2df/satfunc.py @@ -25,7 +25,7 @@ except ImportError: pass -from ecl2df import common, getLogger_ecl2csv, inferdims +from res2df import common, getLogger_ecl2csv, inferdims from .common import write_dframe_stdout_file from .eclfiles import EclFiles @@ -130,7 +130,7 @@ def df( def interpolate_defaults(dframe: pd.DataFrame) -> pd.DataFrame: """Interpolate NaN's linearly in saturation. Saturation function tables in Eclipse decks can have certain values defaulted. - When parsed by common.ecl2df, these values are returned as np.nan. + When parsed by res2df.common, these values are returned as np.nan. The incoming dataframe must be associated to one keyword only, but can consist of multiple SATNUMs. """ @@ -241,7 +241,7 @@ def df2ecl( saturation functions (SWOF, SGOF, ...) Args: - satfunc_df: Dataframe with data on ecl2df format. + satfunc_df: Dataframe with data in res2df format. keywords: List of keywords to include. Must be supported and present in the incoming dataframe. Keywords are printed in the order defined by this list. diff --git a/ecl2df/summary.py b/res2df/summary.py similarity index 99% rename from ecl2df/summary.py rename to res2df/summary.py index 666b51e50..8941a426a 100644 --- a/ecl2df/summary.py +++ b/res2df/summary.py @@ -16,7 +16,7 @@ import pyarrow.feather from resdata.summary import Summary, SummaryKeyWordVector -from ecl2df import getLogger_ecl2csv +from res2df import getLogger_ecl2csv from .
import parameters from .common import write_dframe_stdout_file @@ -36,14 +36,14 @@ # Any frequency mnemonics not mentioned here will be # passed on to Pandas. } -"""Mapping from ecl2df custom offset strings to Pandas DateOffset strings. +"""Mapping from res2df custom offset strings to Pandas DateOffset strings. See https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#dateoffset-objects """ # noqa def date_range(start_date: dt.date, end_date: dt.date, freq: str) -> List[dt.datetime]: - """Wrapper for pandas.date_range to allow for extra ecl2df specific mnemonics + """Wrapper for pandas.date_range to allow for extra res2df specific mnemonics 'yearly', 'daily', 'weekly', mapped over to pandas DateOffsets. Args: diff --git a/ecl2df/svg_color_keyword_names.txt b/res2df/svg_color_keyword_names.txt similarity index 100% rename from ecl2df/svg_color_keyword_names.txt rename to res2df/svg_color_keyword_names.txt diff --git a/ecl2df/trans.py b/res2df/trans.py similarity index 97% rename from ecl2df/trans.py rename to res2df/trans.py index b289e3bef..d67640599 100644 --- a/ecl2df/trans.py +++ b/res2df/trans.py @@ -8,10 +8,10 @@ import pandas as pd -import ecl2df.grid -import ecl2df.nnc -from ecl2df import getLogger_ecl2csv -from ecl2df.common import write_dframe_stdout_file +import res2df.grid +import res2df.nnc +from res2df import getLogger_ecl2csv +from res2df.common import write_dframe_stdout_file from .eclfiles import EclFiles @@ -101,7 +101,7 @@ def df( "Filtering to both k and to ij simultaneously results in empty dataframe" ) - grid_df = ecl2df.grid.df(eclfiles) + grid_df = res2df.grid.df(eclfiles) existing_vectors = [vec for vec in vectors if vec in grid_df.columns] if len(existing_vectors) < len(vectors): logger.warning( @@ -149,7 +149,7 @@ def df( if addnnc: logger.info("Adding NNC data") - nnc_df = ecl2df.nnc.df(eclfiles, coords=False, pillars=False) + nnc_df = res2df.nnc.df(eclfiles, coords=False, pillars=False) nnc_df["DIR"] = "NNC" trans_df = pd.concat([trans_df, nnc_df], sort=False) diff --git a/ecl2df/vfp/__init__.py b/res2df/vfp/__init__.py similarity index 83% rename from ecl2df/vfp/__init__.py rename to res2df/vfp/__init__.py index 4a202ddca..6f3a7dd8e 100644 --- a/ecl2df/vfp/__init__.py +++ b/res2df/vfp/__init__.py @@ -1,4 +1,4 @@ -""" Module with interface for ecl2df to VFPPROD and VFPINJ +""" Module with interface for res2df to VFPPROD and VFPINJ keywords in Eclipse. """ from ._vfp import ( # noqa F:401 diff --git a/ecl2df/vfp/_vfp.py b/res2df/vfp/_vfp.py similarity index 99% rename from ecl2df/vfp/_vfp.py rename to res2df/vfp/_vfp.py index 416884b04..92eefec33 100755 --- a/ecl2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -25,7 +25,7 @@ except ImportError: pass -from ecl2df import EclFiles, common, getLogger_ecl2csv +from res2df import EclFiles, common, getLogger_ecl2csv from . import _vfpinj as vfpinj from . 
import _vfpprod as vfpprod diff --git a/ecl2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py similarity index 99% rename from ecl2df/vfp/_vfpcommon.py rename to res2df/vfp/_vfpcommon.py index 0d4a75a5b..4eb2ce41c 100755 --- a/ecl2df/vfp/_vfpcommon.py +++ b/res2df/vfp/_vfpcommon.py @@ -25,7 +25,7 @@ except ImportError: pass -from ecl2df import common +from res2df import common logger = logging.getLogger(__name__) diff --git a/ecl2df/vfp/_vfpdefs.py b/res2df/vfp/_vfpdefs.py similarity index 99% rename from ecl2df/vfp/_vfpdefs.py rename to res2df/vfp/_vfpdefs.py index a95d00dd1..fcb71bd65 100755 --- a/ecl2df/vfp/_vfpdefs.py +++ b/res2df/vfp/_vfpdefs.py @@ -2,7 +2,7 @@ Some definitions and parameters used to define VFPPROD and VFPINJ keywords in Eclipse. This includes definitions of rates, thp, wfr (water fractions), gfr (gas fractions), alq (artificial-lift-quantities), units and so on. Used for consistency check in IO -routines for VFPPROD and VFPINJ keywords in ecl2df. +routines for VFPPROD and VFPINJ keywords in res2df. """ from enum import Enum diff --git a/ecl2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py similarity index 99% rename from ecl2df/vfp/_vfpinj.py rename to res2df/vfp/_vfpinj.py index 963f0bbde..33f4bc462 100755 --- a/ecl2df/vfp/_vfpinj.py +++ b/res2df/vfp/_vfpinj.py @@ -29,7 +29,7 @@ except ImportError: pass -from ecl2df import common +from res2df import common from ._vfpcommon import ( _deckrecord2list, diff --git a/ecl2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py similarity index 99% rename from ecl2df/vfp/_vfpprod.py rename to res2df/vfp/_vfpprod.py index b782ed262..fc9a9262f 100755 --- a/ecl2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -29,7 +29,7 @@ except ImportError: pass -from ecl2df import common +from res2df import common from ._vfpcommon import ( _deckrecord2list, diff --git a/ecl2df/wcon.py b/res2df/wcon.py similarity index 97% rename from ecl2df/wcon.py rename to res2df/wcon.py index 2ee0a3f55..c0da856d4 100644 --- a/ecl2df/wcon.py +++ b/res2df/wcon.py @@ -15,8 +15,8 @@ except ImportError: pass -from ecl2df import EclFiles, getLogger_ecl2csv -from ecl2df.common import ( +from res2df import EclFiles, getLogger_ecl2csv +from res2df.common import ( parse_opmio_date_rec, parse_opmio_deckrecord, write_dframe_stdout_file, diff --git a/ecl2df/wellcompletiondata.py b/res2df/wellcompletiondata.py similarity index 99% rename from ecl2df/wellcompletiondata.py rename to res2df/wellcompletiondata.py index e7d225a04..403f3ed68 100644 --- a/ecl2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -10,8 +10,8 @@ import pyarrow import pyarrow.feather -from ecl2df import common, compdat, getLogger_ecl2csv, wellconnstatus -from ecl2df.eclfiles import EclFiles +from res2df import common, compdat, getLogger_ecl2csv, wellconnstatus +from res2df.eclfiles import EclFiles from .common import write_dframe_stdout_file diff --git a/ecl2df/wellconnstatus.py b/res2df/wellconnstatus.py similarity index 97% rename from ecl2df/wellconnstatus.py rename to res2df/wellconnstatus.py index 87ef90c3c..600f6198c 100644 --- a/ecl2df/wellconnstatus.py +++ b/res2df/wellconnstatus.py @@ -8,8 +8,8 @@ import numpy as np import pandas as pd -from ecl2df import getLogger_ecl2csv, summary -from ecl2df.eclfiles import EclFiles +from res2df import getLogger_ecl2csv, summary +from res2df.eclfiles import EclFiles from .common import write_dframe_stdout_file diff --git a/setup.cfg b/setup.cfg index ce6e3e7c4..b2fbe9cd8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -6,7 +6,7 @@ test=pytest [flake8] 
max-line-length = 88 -exclude = docs, ecl2df/__init__.py +exclude = docs, res2df/__init__.py [tool:pytest] markers = diff --git a/setup.py b/setup.py index e75d37e5b..0dab95bf6 100644 --- a/setup.py +++ b/setup.py @@ -44,20 +44,20 @@ } setup( - name="ecl2df", - use_scm_version={"write_to": "ecl2df/version.py"}, + name="res2df", + use_scm_version={"write_to": "res2df/version.py"}, cmdclass=cmdclass, description="Convert Eclipse 100 input and output to DataFrames", long_description=LONG_DESCRIPTION, long_description_content_type="text/markdown", - url="http://github.com/equinor/ecl2df", + url="http://github.com/equinor/res2df", author="Håvard Berland", author_email="havb@equinor.com", license="GPLv3", - packages=find_packages(include=["ecl2df*"]), - package_dir={"ecl2df": "ecl2df"}, + packages=find_packages(include=["res2df*"]), + package_dir={"res2df": "res2df"}, package_data={ - "ecl2df": [ + "res2df": [ "opmkeywords/*", "config_jobs/*", "py.typed", @@ -67,11 +67,11 @@ zip_safe=False, entry_points={ "console_scripts": [ - "csv2ecl=ecl2df.csv2ecl:main", - "ecl2csv=ecl2df.ecl2csv:main", - "ecl2arrow=ecl2df.ecl2csv:main", + "csv2ecl=res2df.csv2ecl:main", + "ecl2csv=res2df.ecl2csv:main", + "ecl2arrow=res2df.ecl2csv:main", ], - "ert": ["ecl2df_jobs = ecl2df.hook_implementations.jobs"], + "ert": ["res2df_jobs = res2df.hook_implementations.jobs"], }, test_suite="tests", install_requires=REQUIREMENTS, diff --git a/tests/conftest.py b/tests/conftest.py index 2440e3ce4..3db98cd74 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -2,10 +2,10 @@ import pytest -import ecl2df +import res2df @pytest.fixture -def path_to_ecl2df(): - """Path to installed ecl2df module""" - return Path(ecl2df.__file__).parent +def path_to_res2df(): + """Path to installed res2df module""" + return Path(res2df.__file__).parent diff --git a/tests/data/reek/eclipse/model/2_R001_REEK-0-OPMFLOW.PRT b/tests/data/reek/eclipse/model/2_R001_REEK-0-OPMFLOW.PRT index 477f9678f..3c8940fd2 100644 --- a/tests/data/reek/eclipse/model/2_R001_REEK-0-OPMFLOW.PRT +++ b/tests/data/reek/eclipse/model/2_R001_REEK-0-OPMFLOW.PRT @@ -198,404 +198,404 @@ VtkWriteWaterFormationVolumeFactor="0" ZoltanImbalanceTol="1.1" Reading deck file '2_R001_REEK-0.DATA' - 0 Reading RUNSPEC in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 11 - 1 Reading TITLE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 14 - 2 Reading SAVE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 19 - 3 Reading NOECHO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 22 - 4 Reading DIMENS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 25 - 5 Reading START in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 29 - 6 Reading OIL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 36 - 7 Reading GAS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 37 - 8 Reading WATER in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 38 - 9 Reading METRIC in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 42 - 10 Reading GRIDOPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 44 - 11 Reading EQLOPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 52 - 12 
Reading TABDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 56 - 13 Reading EQLDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 61 - 14 Reading REGDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 65 - 15 Reading FAULTDIM in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 70 - 16 Reading WELLDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 74 - 17 Reading VFPPDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 78 - 18 Reading VFPIDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 83 - 19 Reading SMRYDIMS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 87 - 20 Reading UNIFIN in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 91 - 21 Reading UNIFOUT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 92 - 22 Reading NOINSPEC in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 95 - 23 Reading NORSSPEC in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 98 - 24 Reading GRID in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 104 - 25 Reading NOECHO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 107 - 26 Reading INIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 110 - 27 Reading GRIDFILE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 113 - 28 Reading MESSAGES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 117 - 29 Reading PINCH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 121 - 30 Reading NOECHO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 125 - 31 Reading NOECHO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 4 - 32 Reading MAPUNITS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 12 - 33 Reading MAPAXES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 15 - 34 Reading GRIDUNIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 19 - 35 Reading SPECGRID in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 22 - 36 Reading GDORIENT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 25 - 37 Reading COORD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 29 - 38 Reading ZCORN in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 3229 - 39 Reading ACTNUM in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 34211 - 40 Reading ECHO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.grid line 34222 - 41 Reading FAULTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.faults line 10 - 42 Reading PORO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.poro line 11 - 43 Reading PERMX in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.perm line 11 - 44 Reading PERMY in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.perm line 7167 - 45 Reading PERMZ in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.perm line 14323 - 46 Reading MULTIPLY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 146 - 47 Reading MULTFLT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.multflt line 1 - 48 Reading EQUALS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/grid/reek.multz line 1 - 49 Reading EDIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 163 - 50 Reading PROPS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 170 - 51 Reading SWOF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/swof.inc line 9 - 52 Reading SGOF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/sgof.txt line 9 - 53 Reading EQUALS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.endpoints line 1 - 54 Reading COPY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 184 - 55 Reading MULTIPLY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 190 - 56 Reading ADD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 193 - 57 Reading SCALECRS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 197 - 58 Reading ROCKOPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 1 - 59 Reading ROCK in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 4 - 60 Reading PVTW in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 7 - 61 Reading PVTO in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 10 - 62 Reading PVDG in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 139 - 63 Reading DENSITY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.pvt line 157 - 64 Reading SWATINIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/props/reek.swatinit line 11 - 65 Reading REGIONS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 212 - 66 Reading EQLNUM in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/regions/reek.eqlnum line 11 - 67 Reading FIPNUM in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/regions/reek.fipnum line 11 - 68 Reading EQUALS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 222 - 69 Reading SOLUTION in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 231 - 70 Reading RTEMP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 233 - 71 Reading EQUIL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/solution/reek.equil line 1 - 72 Reading RSVD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 240 - 73 Reading RPTSOL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 246 - 74 Reading RPTRST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 249 - 75 Reading SUMMARY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 256 - 76 Reading FOPR in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 7 - 77 Reading FOPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 8 - 78 Reading FGPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 9 - 79 Reading FGPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 10 - 80 Reading FWPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 11 - 81 Reading FWPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 12 - 82 Reading FLPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 13 - 83 Reading FLPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 14 - 84 Reading FVPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 15 - 85 Reading FOPRF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 16 - 86 Reading FOPRS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 17 - 87 Reading FGSR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 18 - 88 Reading FGPRF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 19 - 89 Reading FGPRS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 20 - 90 Reading FOPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 24 - 91 Reading FWPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 25 - 92 Reading FGPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 26 - 93 Reading FMWPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 30 - 94 Reading FMWIN in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 31 - 95 Reading FVIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 33 - 96 Reading FWIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 34 - 97 Reading FWIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 35 - 98 Reading FGIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 36 - 99 Reading FGIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 37 - 100 Reading FGLIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 39 - 101 Reading FMCTP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 43 - 102 Reading FVPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 48 - 103 Reading FOPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 49 - 104 Reading FOPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 50 - 105 Reading FWPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 51 - 106 Reading FWPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 52 - 107 Reading FGPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 53 - 108 Reading FGPTH in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 54 - 109 Reading FWIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 55 - 110 Reading FWITH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 56 - 111 Reading FGIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 57 - 112 Reading FGITH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 58 - 113 Reading FOPTF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 59 - 114 Reading FOPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 60 - 115 Reading FWIP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 64 - 116 Reading FOIP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 65 - 117 Reading FGIP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 66 - 118 Reading FWCT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 68 - 119 Reading FWCTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 69 - 120 Reading FGOR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 70 - 121 Reading FGORH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 71 - 122 Reading FGLR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 72 - 123 Reading FWGR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 73 - 124 Reading FPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 75 - 125 Reading RPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 77 - 126 Reading ROIP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 79 - 127 Reading ROIPL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 81 - 128 Reading ROIPG in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 83 - 129 Reading RGIP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 85 - 130 Reading RGIPL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 87 - 131 Reading RGIPG in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 89 - 132 Reading RGPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 91 - 133 Reading RGPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 93 - 134 Reading GMWPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 101 - 135 Reading GGLIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 104 - 136 Reading GOPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 106 - 137 Reading GOPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 108 - 138 Reading GGPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 110 - 139 Reading GGPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 112 - 140 Reading GWPR in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 114 - 141 Reading GWPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 116 - 142 Reading GVPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 118 - 143 Reading GLPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 120 - 144 Reading GOPRF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 122 - 145 Reading GOPRS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 124 - 146 Reading GGPRF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 126 - 147 Reading GGPRS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 128 - 148 Reading GWCT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 130 - 149 Reading GWCTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 132 - 150 Reading GGOR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 134 - 151 Reading GGORH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 136 - 152 Reading GWGR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 138 - 153 Reading GGLR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 140 - 154 Reading GOPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 142 - 155 Reading GOPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 144 - 156 Reading GGPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 146 - 157 Reading GGPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 148 - 158 Reading GWPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 150 - 159 Reading GWPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 152 - 160 Reading GVPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 154 - 161 Reading GLPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 156 - 162 Reading GOPTF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 158 - 163 Reading GOPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 160 - 164 Reading GGPTF in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 162 - 165 Reading GGPTS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 164 - 166 Reading GWIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 166 - 167 Reading GVIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 168 - 168 Reading GWIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 170 - 169 Reading GGIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 172 - 170 Reading GGIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 174 - 171 Reading GOPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 177 - 172 
Reading GGPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 179 - 173 Reading GWPP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 181 - 174 Reading GMCTP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 186 - 175 Reading WOPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 190 - 176 Reading WOPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 192 - 177 Reading WGPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 194 - 178 Reading WGPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 196 - 179 Reading WWPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 198 - 180 Reading WWPRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 200 - 181 Reading WOPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 203 - 182 Reading WWPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 205 - 183 Reading WGPT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 207 - 184 Reading WOPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 209 - 185 Reading WWPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 211 - 186 Reading WGPTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 213 - 187 Reading WWCT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 216 - 188 Reading WWCTH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 218 - 189 Reading WGOR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 220 - 190 Reading WGORH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 222 - 191 Reading WWIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 224 - 192 Reading WWIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 226 - 193 Reading WGIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 228 - 194 Reading WGIRH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 230 - 195 Reading WWIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 232 - 196 Reading WWITH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 234 - 197 Reading WGIT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 236 - 198 Reading WGITH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 238 - 199 Reading WBHP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 241 - 200 Reading WTHP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 243 - 201 Reading WPI in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 245 - 202 Reading WVPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 247 - 203 Reading WBP in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 
249 - 204 Reading WBP4 in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 251 - 205 Reading WBP9 in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 253 - 206 Reading WMCTL in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 255 - 207 Reading WLPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 257 - 208 Reading WGLIR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 260 - 209 Reading WOGLR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 265 - 210 Reading BPR in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 269 - 211 Reading TCPU in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 277 - 212 Reading TCPUDAY in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/summary/reek.smry line 278 - 213 Reading SCHEDULE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 265 - 214 Reading TUNING in /home/berland/projects/ecl2df/tests/data/reek/eclipse/model/2_R001_REEK-0.DATA line 267 - 215 Reading GRUPTREE in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 4 - 216 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 9 - 217 Reading WELSPECS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 13 - 218 Reading COMPORD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 21 - 219 Reading COMPDAT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 28 - 220 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 96 - 221 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 103 - 222 Reading WRFTPLT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 108 - 223 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 116 - 224 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 120 - 225 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 127 - 226 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 132 - 227 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 136 - 228 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 143 - 229 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 147 - 230 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 154 - 231 Reading WELSPECS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 158 - 232 Reading COMPORD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 163 - 233 Reading COMPDAT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 167 - 234 
Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 187 - 235 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 194 - 236 Reading WRFTPLT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 199 - 237 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 204 - 238 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 208 - 239 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 215 - 240 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 220 - 241 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 224 - 242 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 231 - 243 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 236 - 244 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 240 - 245 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 247 - 246 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 252 - 247 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 256 - 248 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 263 - 249 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 268 - 250 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 272 - 251 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 279 - 252 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 284 - 253 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 288 - 254 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 295 - 255 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 300 - 256 Reading WELSPECS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 304 - 257 Reading COMPORD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 310 - 258 Reading COMPDAT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 315 - 259 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 350 - 260 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 359 - 261 Reading WRFTPLT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 364 - 262 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 370 - 263 Reading 
WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 374 - 264 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 383 - 265 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 388 - 266 Reading WELSPECS in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 392 - 267 Reading COMPORD in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 397 - 268 Reading COMPDAT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 401 - 269 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 421 - 270 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 430 - 271 Reading WRFTPLT in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 436 - 272 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 441 - 273 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 445 - 274 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 454 - 275 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 460 - 276 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 464 - 277 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 473 - 278 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 478 - 279 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 482 - 280 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 491 - 281 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 496 - 282 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 500 - 283 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 509 - 284 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 514 - 285 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 518 - 286 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 527 - 287 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 532 - 288 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 536 - 289 Reading WCONINJH in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 545 - 290 Reading DATES in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 550 - 291 Reading WCONHIST in /home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 554 - 292 Reading WCONINJH in 
/home/berland/projects/ecl2df/tests/data/reek/eclipse/include/schedule/reek_history.sch line 563
[Several hundred further lines of this regenerated OPM Flow log
(tests/data/reek/eclipse/model/2_R001_REEK-0-OPMFLOW.PRT) are elided here:
matching pairs of "-" and "+" lines repeat the "Reading <KEYWORD> in <file>
line <n>" messages, the "keyword not supported" warnings, the fault
transmissibility multiplier and report-step log, and the "Unhandled summary
keyword" warnings, changing nothing except rewriting every
/home/berland/projects/ecl2df/... path to /home/berland/projects/res2df/...]
 ===============Saturation Functions Diagnostics===============
diff --git a/tests/test_common.py b/tests/test_common.py
index e602ff15b..ed5547312 100644
--- a/tests/test_common.py
+++ b/tests/test_common.py
@@ -1,4 +1,4 @@
-"""Test module for ecl2df.common"""
+"""Test module for res2df.common"""
 
 import datetime
 import os
@@ -9,7 +9,7 @@
 import pandas as pd
 import pytest
 
-from ecl2df import common, eclfiles, equil
+from res2df import common, eclfiles, equil
 
 try:
     # pylint: disable=unused-import
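(Editor's sketch, not part of the patch: the renamed test modules above and below all exercise the same user API, namely wrapping a DATA file in an EclFiles object and asking a submodule for a DataFrame. A minimal illustration under that assumption, using only names visible in these diffs plus the submodule df() accessor:)

from res2df import EclFiles, compdat

# The Reek test deck referenced throughout this patch:
eclfiles = EclFiles("tests/data/reek/eclipse/model/2_R001_REEK-0.DATA")

# Each submodule exposes df(); for compdat this is roughly one row per
# well connection per report date:
dframe = compdat.df(eclfiles)
print(dframe.head())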
diff --git a/tests/test_compdat.py b/tests/test_compdat.py
index d3e20593e..4da6fba65 100644
--- a/tests/test_compdat.py
+++ b/tests/test_compdat.py
@@ -5,7 +5,7 @@
 import pandas as pd
 import pytest
 
-from ecl2df import EclFiles, compdat, ecl2csv
+from res2df import EclFiles, compdat, ecl2csv
 
 try:
     # pylint: disable=unused-import
diff --git a/tests/test_eclfiles.py b/tests/test_eclfiles.py
index 69e3548b4..1102a6ad0 100644
--- a/tests/test_eclfiles.py
+++ b/tests/test_eclfiles.py
@@ -3,7 +3,7 @@
 
 import pytest
 
-from ecl2df import EclFiles
+from res2df import EclFiles
 
 try:
     # pylint: disable=unused-import
diff --git a/tests/test_equil.py b/tests/test_equil.py
index bb15e8f7f..24238de66 100644
--- a/tests/test_equil.py
+++ b/tests/test_equil.py
@@ -8,8 +8,8 @@
 import pandas as pd
 import pytest
 
-from ecl2df import csv2ecl, ecl2csv, equil
-from ecl2df.eclfiles import EclFiles
+from res2df import csv2ecl, ecl2csv, equil
+from res2df.eclfiles import EclFiles
 
 try:
     # pylint: disable=unused-import
@@ -215,7 +215,7 @@ def test_decks():
 WATER
 GAS
 
--- Output file printed by ecl2df.equil 0.5.2.dev12+g785dc0d.d20200402
+-- Output file printed by res2df.equil 0.5.2.dev12+g785dc0d.d20200402
 -- at 2020-04-03 16:18:57.450100
 
 EQUIL
diff --git a/tests/test_ert_hooks.py b/tests/test_ert_hooks.py
index 645276d29..c2fde782c 100644
--- a/tests/test_ert_hooks.py
+++ b/tests/test_ert_hooks.py
@@ -5,8 +5,8 @@
 import pandas as pd
 import pytest
 
-import ecl2df
-from ecl2df.hook_implementations import jobs
+import res2df
+from res2df.hook_implementations import jobs
 
 try:
     # pylint: disable=unused-import
@@ -55,7 +55,7 @@ def test_ecl2csv_through_ert(tmp_path):
     csv2ecl_subcommands = ["equil", "pvt", "satfunc"]
 
-    for subcommand in ecl2df.SUBMODULES:
+    for subcommand in res2df.SUBMODULES:
         ert_config.append(
             f"FORWARD_MODEL ECL2CSV(<SUBCOMMAND>={subcommand}, "
             f"<OUTPUT>={subcommand}.csv)"
         )
@@ -96,7 +96,7 @@ def test_ecl2csv_through_ert(tmp_path):
     assert Path("OK").is_file()
 
-    for subcommand in ecl2df.SUBMODULES:
+    for subcommand in res2df.SUBMODULES:
         assert Path(subcommand + ".csv").is_file()
 
     # Check the custom output where options were supplied to the subcommands:
@@ -137,9 +137,9 @@ def test_get_module_variable():
     # pylint: disable=protected-access
     assert jobs._get_module_variable_if_exists("foo", "bar") == ""
     assert jobs._get_module_variable_if_exists(
-        "ecl2df.ecl2csv", "DESCRIPTION"
+        "res2df.ecl2csv", "DESCRIPTION"
     ).startswith("Convert Eclipse input and output")
-    assert jobs._get_module_variable_if_exists("ecl2df.ecl2csv", "NOPE") == ""
+    assert jobs._get_module_variable_if_exists("res2df.ecl2csv", "NOPE") == ""
 
 
 @pytest.mark.skipif(HAVE_ERT, reason="Tested only when ERT is not available")
@@ -147,4 +147,4 @@ def test_no_erthooks():
     """Test that we can import the hook implementations even when ERT is unavailable."""
     # pylint: disable=redefined-outer-name, unused-import
     # pylint: disable=reimported, import-outside-toplevel
-    from ecl2df.hook_implementations import jobs  # noqa
+    from res2df.hook_implementations import jobs  # noqa
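(Editor's sketch: in the test_ert_hooks.py hunk above, the ECL2CSV forward-model argument names appear with their angle brackets stripped in the source text; they are restored as <SUBCOMMAND> and <OUTPUT> in the rewritten hunk, an assumption based on the usual ERT argument convention. The config lines the test generates can be reproduced with:)

import res2df

# One ECL2CSV forward model per submodule, as assembled in
# test_ecl2csv_through_ert above:
ert_config_lines = [
    f"FORWARD_MODEL ECL2CSV(<SUBCOMMAND>={submodule}, <OUTPUT>={submodule}.csv)"
    for submodule in res2df.SUBMODULES
]
print("\n".join(ert_config_lines))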
diff --git a/tests/test_faults.py b/tests/test_faults.py
index 0ea9f5167..aec60a633 100644
--- a/tests/test_faults.py
+++ b/tests/test_faults.py
@@ -7,8 +7,8 @@
 import pandas as pd
 import pytest
 
-from ecl2df import ecl2csv, faults
-from ecl2df.eclfiles import EclFiles
+from res2df import ecl2csv, faults
+from res2df.eclfiles import EclFiles
 
 try:
     # pylint: disable=unused-import
diff --git a/tests/test_fipreports.py b/tests/test_fipreports.py
index 6faf25fe1..bc4f6b2ab 100644
--- a/tests/test_fipreports.py
+++ b/tests/test_fipreports.py
@@ -8,9 +8,9 @@
 import pandas as pd
 import pytest
 
-from ecl2df import ecl2csv, fipreports
-from ecl2df.eclfiles import EclFiles
-from ecl2df.fipreports import report_block_lineparser as parser
+from res2df import ecl2csv, fipreports
+from res2df.eclfiles import EclFiles
+from res2df.fipreports import report_block_lineparser as parser
 
 TESTDIR = Path(__file__).absolute().parent
 DATAFILE = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA")
diff --git a/tests/test_grid.py b/tests/test_grid.py
index 86cbabdee..07343b2ee 100644
--- a/tests/test_grid.py
+++ b/tests/test_grid.py
@@ -1,4 +1,4 @@
-"""Test module for ecl2df.grid"""
+"""Test module for res2df.grid"""
 import datetime
 import os
 from pathlib import Path
@@ -8,8 +8,8 @@
 import pyarrow
 import pytest
 
-from ecl2df import common, ecl2csv, grid
-from ecl2df.eclfiles import EclFiles
+from res2df import common, ecl2csv, grid
+from res2df.eclfiles import EclFiles
 
 TESTDIR = Path(__file__).absolute().parent
 REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA")
@@ -50,7 +50,7 @@ def test_gridgeometry2df(mocker):
         grid.gridgeometry2df(None)
 
     with pytest.raises(ValueError, match="No EGRID file supplied"):
-        mocker.patch("ecl2df.eclfiles.EclFiles.get_egridfile", return_value=None)
+        mocker.patch("res2df.eclfiles.EclFiles.get_egridfile", return_value=None)
         grid.gridgeometry2df(eclfiles)
@@ -150,7 +150,7 @@ def test_init2df():
     # The KRO data from the INIT file in Reek contains only NaN's,
     # but libecl gives out a large negative integer/float.
-    # ecl2df should ensure this comes out as a NaN (but it
+    # res2df should ensure this comes out as a NaN (but it
     # should be allowed later to drop columns which have only NaNs))
     if "KRO" in init_df:
         assert np.isnan(init_df["KRO"].unique()).all()
@@ -195,7 +195,7 @@ def test_df2ecl(tmp_path):
         grid.df2ecl(grid_df, "FIPNUM", dtype="foo")
 
     assert "FIPNUM" in fipnum_str
-    assert "-- Output file printed by ecl2df.grid" in fipnum_str
+    assert "-- Output file printed by res2df.grid" in fipnum_str
     assert "35817 active cells" in fipnum_str  # (comment at the end)
     assert "35840 total cell count" in fipnum_str  # (comment at the end)
     assert len(fipnum_str) > 100
diff --git a/tests/test_gruptree.py b/tests/test_gruptree.py
index 9902bb4cb..05878c0ab 100644
--- a/tests/test_gruptree.py
+++ b/tests/test_gruptree.py
@@ -7,8 +7,8 @@
 import pandas as pd
 import pytest
 
-from ecl2df import ecl2csv, gruptree
-from ecl2df.eclfiles import EclFiles
+from res2df import ecl2csv, gruptree
+from res2df.eclfiles import EclFiles
 
 try:
     # pylint: disable=unused-import
diff --git a/tests/test_hook_implementations.py b/tests/test_hook_implementations.py
index 7ed1b5639..ffd89fde4 100644
--- a/tests/test_hook_implementations.py
+++ b/tests/test_hook_implementations.py
@@ -16,22 +16,22 @@
 
 from ert.shared.plugins.plugin_manager import ErtPluginManager
 
-import ecl2df.hook_implementations.jobs
+import res2df.hook_implementations.jobs
 
 
 @pytest.fixture(name="expected_jobs")
-def fixture_expected_jobs(path_to_ecl2df: Path) -> Dict[str, Path]:
+def fixture_expected_jobs(path_to_res2df: Path) -> Dict[str, Path]:
     """Dictionary of installed jobs with location to job configuration"""
     expected_job_names = [
         "ECL2CSV",
         "CSV2ECL",
     ]
-    return {name: path_to_ecl2df / "config_jobs" / name for name in expected_job_names}
+    return {name: path_to_res2df / "config_jobs" / name for name in expected_job_names}
 
 
 def test_hook_implementations(expected_jobs):
     """Test that the expected jobs can be found using an ERT plugin manager"""
-    plugin_m = ErtPluginManager(plugins=[ecl2df.hook_implementations.jobs])
+    plugin_m = ErtPluginManager(plugins=[res2df.hook_implementations.jobs])
     installable_jobs = plugin_m.get_installable_jobs()
 
     for wf_name, wf_location in expected_jobs.items():
@@ -75,7 +75,7 @@ def test_executables(expected_jobs):
 
 
 def test_hook_implementations_job_docs():
     """Test extracting docs from ERT hooks"""
-    plugin_m = ErtPluginManager(plugins=[ecl2df.hook_implementations.jobs])
+    plugin_m = ErtPluginManager(plugins=[res2df.hook_implementations.jobs])
 
     installable_jobs = plugin_m.get_installable_jobs()
diff --git a/tests/test_inferdims.py b/tests/test_inferdims.py
index c61b05e3a..85288ca4b 100644
--- a/tests/test_inferdims.py
+++ b/tests/test_inferdims.py
@@ -3,7 +3,7 @@
 
 import pytest
 
-from ecl2df import inferdims
+from res2df import inferdims
 
 try:
     # pylint: disable=unused-import
diff --git a/tests/test_init.py b/tests/test_init.py
index 26d11db69..5fcdd7af9 100644
--- a/tests/test_init.py
+++ b/tests/test_init.py
@@ -1,21 +1,21 @@
-"""Check that ecl2df's submodules are always imported"""
+"""Check that res2df's submodules are always imported"""
 
 import sys
 
 # This file tests what happens when we do this import:
-import ecl2df
+import res2df
 
 
 def test_init():
-    """Test the top level properties of the ecl2df package"""
-    assert "ecl2df.compdat" in sys.modules
+    """Test the top level properties of the res2df package"""
+    assert "res2df.compdat" in sys.modules
 
     # This should be a list of all submodules
-    assert ecl2df.SUBMODULES
+    assert res2df.SUBMODULES
 
-    for submodule in ecl2df.SUBMODULES:
-        assert "ecl2df." + submodule in sys.modules
+    for submodule in res2df.SUBMODULES:
+        assert "res2df." + submodule in sys.modules
 
     # The Eclfiles object inside eclfiles should be lifted up to top-level:
-    assert hasattr(ecl2df, "EclFiles")
+    assert hasattr(res2df, "EclFiles")
 
-    assert isinstance(ecl2df.__version__, str)
+    assert isinstance(res2df.__version__, str)
diff --git a/tests/test_integration.py b/tests/test_integration.py
index 99c93085a..4e076ca31 100644
--- a/tests/test_integration.py
+++ b/tests/test_integration.py
@@ -4,7 +4,7 @@
 
 import pytest
 
-import ecl2df
+import res2df
 
 try:
     # pylint: disable=unused-import
@@ -33,7 +33,7 @@ def test_integration():
     # ref: https://stackoverflow.com/questions/23714542/ \
     #      why-does-pythons-argparse-use-an-error-code-of-2-for-systemexit
-    for submodule in ecl2df.SUBMODULES:
+    for submodule in res2df.SUBMODULES:
         helptext = subprocess.check_output(["ecl2csv", submodule, "-h"])
         # Test that this option is hidden, the argument is only there
         # to support optional number of arguments in ERT forward models.
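(Editor's sketch: tests/test_init.py above pins down the import contract of the renamed package. Using only the names asserted in that test, user code may rely on the following:)

import sys

import res2df

# Importing the top-level package eagerly imports every submodule:
assert all("res2df." + sub in sys.modules for sub in res2df.SUBMODULES)

# EclFiles is lifted to the top level, and a version string is available:
assert hasattr(res2df, "EclFiles")
assert isinstance(res2df.__version__, str)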
diff --git a/tests/test_logging.py b/tests/test_logging.py
index 5e5b05482..6fd2c14a5 100644
--- a/tests/test_logging.py
+++ b/tests/test_logging.py
@@ -2,7 +2,7 @@
 
 import pytest
 
-import ecl2df
+import res2df
 
 from .test_grid import EIGHTCELLS, REEK
 
@@ -18,7 +18,7 @@
 def test_default_logger_levels_and_split(capsys):
     """Verify that the intended usage of this logger have expected results"""
-    splitlogger = ecl2df.getLogger_ecl2csv("test_levels_split")
+    splitlogger = res2df.getLogger_ecl2csv("test_levels_split")
 
     splitlogger.debug("This DEBUG-text is not to be seen")
     captured = capsys.readouterr()
@@ -40,7 +40,7 @@ def test_default_logger_levels_and_split(capsys):
     assert "ERROR-text" in captured.err
 
     # If output is written to stdout, all logs should go to stderr:
-    nosplit_logger = ecl2df.getLogger_ecl2csv(
+    nosplit_logger = res2df.getLogger_ecl2csv(
         "test_levels_nosplit", args_dict={"output": "-", "debug": True}
     )
     nosplit_logger.debug("This DEBUG-text is to be seen in stderr")
@@ -66,10 +66,10 @@ def test_default_logger_levels_and_split(capsys):
 
 @pytest.mark.skipif(not HAVE_OPM, reason="Command line client requires OPM")
 @pytest.mark.parametrize(
-    "ecl2df_module, verbose, fileexport",
-    itertools.product(ecl2df.SUBMODULES, [False, True], [True, False]),
+    "res2df_module, verbose, fileexport",
+    itertools.product(res2df.SUBMODULES, [False, True], [True, False]),
 )
-def test_ecl2csv_logging(tmp_path, ecl2df_module, verbose, fileexport, mocker, capsys):
+def test_ecl2csv_logging(tmp_path, res2df_module, verbose, fileexport, mocker, capsys):
     """Test that the command line client for each submodule logs correctly.
 
     Each submodule should write logs to stdout for INFO and WARNING messages
@@ -84,24 +84,24 @@ def test_ecl2csv_logging(tmp_path, ecl2df_module, verbose, fileexport, mocker, c
     test invocation.
     """
     # pylint: disable=too-many-arguments
-    if ecl2df_module == "nnc":
+    if res2df_module == "nnc":
         # There are no nnc's in EIGHTCELLS, so for that test
         # we need the REEK dataset:
-        commands = ["ecl2csv", ecl2df_module, REEK, "--output"]
+        commands = ["ecl2csv", res2df_module, REEK, "--output"]
     else:
-        commands = ["ecl2csv", ecl2df_module, EIGHTCELLS, "--output"]
+        commands = ["ecl2csv", res2df_module, EIGHTCELLS, "--output"]
 
     if fileexport:
         commands.append(str(tmp_path / "output.csv"))
     else:
-        commands.append(ecl2df.common.MAGIC_STDOUT)
+        commands.append(res2df.common.MAGIC_STDOUT)
 
     if verbose:
         commands.append("-v")
 
     mocker.patch("sys.argv", commands)
-    ecl2df.ecl2csv.main()
+    res2df.ecl2csv.main()
     captured = capsys.readouterr()
     stdout_output = captured.out
     stderr_output = captured.err
@@ -124,8 +124,8 @@ def test_ecl2csv_logging(tmp_path, ecl2df_module, verbose, fileexport, mocker, c
 
 def test_repeated_logger_construction(capsys):
     """If we repeatedly call getLogger(), ensure handlers are not added on top"""
-    logger = ecl2df.getLogger_ecl2csv("nodouble")
-    logger = ecl2df.getLogger_ecl2csv("nodouble")
+    logger = res2df.getLogger_ecl2csv("nodouble")
+    logger = res2df.getLogger_ecl2csv("nodouble")
     logger.warning("Don't repeat me")
     captured = capsys.readouterr()
     assert captured.out.count("Don't repeat me") == 1
diff --git a/tests/test_nnc.py b/tests/test_nnc.py
index b51363be0..be3eecd2b 100644
--- a/tests/test_nnc.py
+++ b/tests/test_nnc.py
@@ -8,8 +8,8 @@
 import pandas as pd
 import pytest
 
-from ecl2df import ecl2csv, faults, nnc, trans
-from ecl2df.eclfiles import EclFiles
+from res2df import ecl2csv, faults, nnc, trans
+from res2df.eclfiles import EclFiles
 
 try:
     # pylint: disable=unused-import
diff --git a/tests/test_parameters.py b/tests/test_parameters.py
index 253e1e91b..a4920045d 100644
--- a/tests/test_parameters.py
+++ b/tests/test_parameters.py
@@ -7,8 +7,8 @@
 import pytest
 import yaml
 
-from ecl2df.eclfiles import EclFiles
-from ecl2df.parameters import find_parameter_files, load, load_all
+from res2df.eclfiles import EclFiles
+from res2df.parameters import find_parameter_files, load, load_all
 
 TESTDIR = Path(__file__).absolute().parent
 DATAFILE = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA")
diff --git a/tests/test_pillars.py b/tests/test_pillars.py
index e962119f4..319a7ff78 100644
--- a/tests/test_pillars.py
+++ b/tests/test_pillars.py
@@ -1,12 +1,12 @@
-"""Test module for ecl2df.pillars"""
+"""Test module for res2df.pillars"""
 
 from pathlib import Path
 
 import pandas as pd
 import pytest
 
-from ecl2df import ecl2csv, grid, pillars
-from ecl2df.eclfiles import EclFiles
+from res2df import ecl2csv, grid, pillars
+from res2df.eclfiles import EclFiles
 
 TESTDIR = Path(__file__).absolute().parent
 REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA")
diff --git a/tests/test_pvt.py b/tests/test_pvt.py
index 3dd0b1dc7..9a975d938 100644
--- a/tests/test_pvt.py
+++ b/tests/test_pvt.py
@@ -8,8 +8,8 @@
 import pandas as pd
 import pytest
 
-from ecl2df import csv2ecl, ecl2csv, pvt
-from ecl2df.eclfiles import EclFiles
+from res2df import csv2ecl, ecl2csv, pvt
+from res2df.eclfiles import EclFiles
 
 try:
     # pylint: disable=unused-import
Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index d7fb9f90d..5ce131bf8 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -8,8 +8,8 @@ import pandas as pd import pytest -from ecl2df import csv2ecl, ecl2csv, inferdims, satfunc -from ecl2df.eclfiles import EclFiles +from res2df import csv2ecl, ecl2csv, inferdims, satfunc +from res2df.eclfiles import EclFiles try: # pylint: disable=unused-import diff --git a/tests/test_summary.py b/tests/test_summary.py index 138d544d8..8e6e0c5b6 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -10,9 +10,9 @@ import yaml from resdata.summary import Summary -from ecl2df import csv2ecl, ecl2csv, summary -from ecl2df.eclfiles import EclFiles -from ecl2df.summary import ( +from res2df import csv2ecl, ecl2csv, summary +from res2df.eclfiles import EclFiles +from res2df.summary import ( _df2pyarrow, _fallback_date_roll, _fix_dframe_for_libecl, @@ -764,7 +764,7 @@ def test_resample_smry_dates(): == 2 + 300 # boundary dates + 2001-01-01 to 2300-01-01 ) - # Verify boundary date bug up to and including ecl2df v0.13.2 + # Verify boundary date bug up to and including res2df v0.13.2 assert resample_smry_dates( ecldates, start_date="2300-06-05", @@ -838,7 +838,7 @@ def test_smry_meta(): def test_smry_meta_synthetic(): """What does meta look like when we start from a synthetic summary? - ecl2df currently does not try to set the units to anything when + res2df currently does not try to set the units to anything when making synthetic summary. """ dframe = pd.DataFrame( @@ -1056,10 +1056,10 @@ def test_duplicated_summary_vectors(caplog): if the user has inserted a vector name twice in the SUMMARY section - ecl2df.summary.df() should deduplicate this, and give a warning. + res2df.summary.df() should deduplicate this, and give a warning. """ - # ecl2df.df2eclsum() is not able to mock such a UNSMRY file. + # res2df.df2eclsum() is not able to mock such a UNSMRY file. dupe_datafile = ( TESTDIR / "data" @@ -1158,7 +1158,7 @@ def test_df2pyarrow_strings(): @pytest.mark.skipif(not HAVE_OPM, reason="Test requires OPM") -def test_ecl2df_errors(tmp_path): +def test_res2df_errors(tmp_path): """Test error handling on bogus/corrupted summary files""" os.chdir(tmp_path) Path("FOO.UNSMRY").write_bytes(os.urandom(100)) diff --git a/tests/test_trans.py b/tests/test_trans.py index ae8860184..9424996bb 100644 --- a/tests/test_trans.py +++ b/tests/test_trans.py @@ -1,4 +1,4 @@ -"""Test module for ecl2df.trans""" +"""Test module for res2df.trans""" from pathlib import Path @@ -13,8 +13,8 @@ import pandas as pd -from ecl2df import ecl2csv, trans -from ecl2df.eclfiles import EclFiles +from res2df import ecl2csv, trans +from res2df.eclfiles import EclFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") diff --git a/tests/test_userapi.py b/tests/test_userapi.py index 153221be8..deb6fbe77 100644 --- a/tests/test_userapi.py +++ b/tests/test_userapi.py @@ -1,10 +1,10 @@ -"""Test module for user API for ecl2df""" +"""Test module for user API for res2df""" from pathlib import Path import pytest -import ecl2df +import res2df try: # pylint: disable=unused-import @@ -28,22 +28,22 @@ def test_userapi(): To the user reading the source: Skip all 'assert' lines, read the rest. 
""" - eclfiles = ecl2df.EclFiles(REEK) - - compdatdf = ecl2df.compdat.df(eclfiles) - equil = ecl2df.equil.df(eclfiles) - faults = ecl2df.faults.df(eclfiles) - fipreports = ecl2df.fipreports.df(eclfiles) - grid_df = ecl2df.grid.df(eclfiles) - grst_df = ecl2df.grid.df(eclfiles, rstdates="last") - gruptree = ecl2df.gruptree.df(eclfiles) - nnc = ecl2df.nnc.df(eclfiles) - pillars = ecl2df.pillars.df(eclfiles) - rft = ecl2df.rft.df(eclfiles) - satfunc = ecl2df.satfunc.df(eclfiles) - smry = ecl2df.summary.df(eclfiles, datetime=True) - trans = ecl2df.trans.df(eclfiles) - wcon = ecl2df.wcon.df(eclfiles) + eclfiles = res2df.EclFiles(REEK) + + compdatdf = res2df.compdat.df(eclfiles) + equil = res2df.equil.df(eclfiles) + faults = res2df.faults.df(eclfiles) + fipreports = res2df.fipreports.df(eclfiles) + grid_df = res2df.grid.df(eclfiles) + grst_df = res2df.grid.df(eclfiles, rstdates="last") + gruptree = res2df.gruptree.df(eclfiles) + nnc = res2df.nnc.df(eclfiles) + pillars = res2df.pillars.df(eclfiles) + rft = res2df.rft.df(eclfiles) + satfunc = res2df.satfunc.df(eclfiles) + smry = res2df.summary.df(eclfiles, datetime=True) + trans = res2df.trans.df(eclfiles) + wcon = res2df.wcon.df(eclfiles) assert "PORV" in grid_df assert "SOIL" not in grid_df diff --git a/tests/test_vfp.py b/tests/test_vfp.py index 0692a3d33..b77cf3a0a 100644 --- a/tests/test_vfp.py +++ b/tests/test_vfp.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from ecl2df import EclFiles, vfp +from res2df import EclFiles, vfp try: import opm # noqa @@ -991,8 +991,8 @@ @pytest.mark.parametrize("test_input, expected", VFPPROD_CASES) -def test_ecl2df_vfpprod(test_input, expected): - """Test ecl2df for VFPPROD""" +def test_res2df_vfpprod(test_input, expected): + """Test res2df for VFPPROD""" deck = EclFiles.str2deck(test_input) vfpdf = vfp.df(deck, "VFPPROD") @@ -1007,7 +1007,7 @@ def test_ecl2pyarrow_vfpprod(test_input, expected): vfppa = vfp.pyarrow_tables(deck, "VFPPROD") # Convert pyarrow table to basic data types for VFPPROD vfpprod_data = vfp.pyarrow2basic_data(vfppa[0]) - # Convert basic data types to ecl2df DataFrame for VFPPROD + # Convert basic data types to res2df DataFrame for VFPPROD vfpdf = vfp.basic_data2df(vfpprod_data) # Check that all steps lead to desired end result @@ -1037,8 +1037,8 @@ def test_pyarrow2ecl_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", VFPINJ_CASES) -def test_ecl2df_vfpinj(test_input, expected): - """Test ecl2df for VFPINJ""" +def test_res2df_vfpinj(test_input, expected): + """Test res2df for VFPINJ""" deck = EclFiles.str2deck(test_input) vfpdf = vfp.df(deck, "VFPINJ") @@ -1068,8 +1068,8 @@ def test_pyarrow2ecl_vfpinj(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpprods(test_input, expected): - """Test ecl2df for files with multiple VFPPROD""" +def test_res2df_vfpprods(test_input, expected): + """Test res2df for files with multiple VFPPROD""" deck = EclFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD") @@ -1080,7 +1080,7 @@ def test_ecl2df_vfpprods(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_ecl2pyarrow_vfpprods(test_input, expected): - """Test ecl2df with pyarrow for files with multiple VFPPROD""" + """Test res2df with pyarrow for files with multiple VFPPROD""" deck = EclFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPPROD") @@ -1092,8 +1092,8 @@ def test_ecl2pyarrow_vfpprods(test_input, expected): 
@pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpinjs(test_input, expected): - """Test ecl2df for files with multiple VFPINJ""" +def test_res2df_vfpinjs(test_input, expected): + """Test res2df for files with multiple VFPINJ""" deck = EclFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ") @@ -1104,7 +1104,7 @@ def test_ecl2df_vfpinjs(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_eclpyarrow_vfpinjs(test_input, expected): - """Test ecl2df for pyarrow for files with multiple VFPINJ""" + """Test res2df for pyarrow for files with multiple VFPINJ""" deck = EclFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ") @@ -1116,8 +1116,8 @@ def test_eclpyarrow_vfpinjs(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpprod_no(test_input, expected): - """Test ecl2df for files with multiple VFPPROD with vfp number argument""" +def test_res2df_vfpprod_no(test_input, expected): + """Test res2df for files with multiple VFPPROD with vfp number argument""" deck = EclFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD", "2") @@ -1127,7 +1127,7 @@ def test_ecl2df_vfpprod_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_ecl2pyarrow_vfpprod_no(test_input, expected): - """Test ecl2df for pyarrow for files with multiple + """Test res2df for pyarrow for files with multiple VFPPROD with vfp number argument """ deck = EclFiles.str2deck(test_input) @@ -1140,8 +1140,8 @@ def test_ecl2pyarrow_vfpprod_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpinj_no(test_input, expected): - """Test ecl2df for files with multiple VFPINJ with vfp number argument""" +def test_res2df_vfpinj_no(test_input, expected): + """Test res2df for files with multiple VFPINJ with vfp number argument""" deck = EclFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ", "4") @@ -1151,7 +1151,7 @@ def test_ecl2df_vfpinj_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_ecl2pyarrow_vfpinj_no(test_input, expected): - """Test ecl2df for pyarrow files with multiple VFPINJ with vfp number argument""" + """Test res2df for pyarrow files with multiple VFPINJ with vfp number argument""" deck = EclFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ", "4") @@ -1163,8 +1163,8 @@ def test_ecl2pyarrow_vfpinj_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2df_vfpprods_no(test_input, expected): - """Test ecl2df for files with multiple VFPPROD with vfp number argument as range""" +def test_res2df_vfpprods_no(test_input, expected): + """Test res2df for files with multiple VFPPROD with vfp number argument as range""" deck = EclFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD", "[1:2]") @@ -1175,7 +1175,7 @@ def test_ecl2df_vfpprods_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_ecl2pyarrow_vfpprods_no(test_input, expected): - """Test ecl2df for pyarrow for files with multiple VFPPROD + """Test res2df for pyarrow for files with multiple VFPPROD with vfp number argument as range """ deck = EclFiles.str2deck(test_input) @@ -1189,8 +1189,8 @@ def test_ecl2pyarrow_vfpprods_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def 
test_ecl2df_vfpinjs_no(test_input, expected): - """Test ecl2df for files with multiple VFPINJ with vfp number +def test_res2df_vfpinjs_no(test_input, expected): + """Test res2df for files with multiple VFPINJ with vfp number argument as range """ deck = EclFiles.str2deck(test_input) @@ -1203,7 +1203,7 @@ def test_ecl2df_vfpinjs_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_ecl2pyarrow_vfpinjs_no(test_input, expected): - """Test ecl2df for pyararow for files with multiple VFPINJ with vfp + """Test res2df for pyarrow for files with multiple VFPINJ with vfp number argument as range """ deck = EclFiles.str2deck(test_input) @@ -1218,7 +1218,7 @@ def test_ecl2pyarrow_vfpinjs_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_basic_data_vfpprods_no(test_input, expected): - """Test ecl2df basic_data reading for files with multiple VFPPROD + """Test res2df basic_data reading for files with multiple VFPPROD with vfp number argument as range """ deck = EclFiles.str2deck(test_input) @@ -1232,7 +1232,7 @@ def test_basic_data_vfpprods_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_basic_data_vfpinjs_no(test_input, expected): - """Test ecl2df basic_data reading for files with multiple VFPINJ with vfp + """Test res2df basic_data reading for files with multiple VFPINJ with vfp number argument as range """ deck = EclFiles.str2deck(test_input) @@ -1246,7 +1246,7 @@ def test_basic_data_vfpinjs_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_pyarrow2basic_data_vfpprods_no(test_input, expected): - """Test ecl2df pyarrow2basic_data for files with multiple VFPPROD + """Test res2df pyarrow2basic_data for files with multiple VFPPROD with vfp number argument as range """ deck = EclFiles.str2deck(test_input) @@ -1261,7 +1261,7 @@ def test_pyarrow2basic_data_vfpprods_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_pyarrow2basic_data_vfpinjs_no(test_input, expected): - """Test ecl2df pyarrow2basic_data for files with multiple VFPINJ with vfp + """Test res2df pyarrow2basic_data for files with multiple VFPINJ with vfp number argument as range """ deck = EclFiles.str2deck(test_input) diff --git a/tests/test_wcon.py b/tests/test_wcon.py index 7cb1fbe24..4bc00e71d 100644 --- a/tests/test_wcon.py +++ b/tests/test_wcon.py @@ -7,8 +7,8 @@ import pandas as pd import pytest -from ecl2df import ecl2csv, wcon -from ecl2df.eclfiles import EclFiles +from res2df import ecl2csv, wcon +from res2df.eclfiles import EclFiles try: # pylint: disable=unused-import diff --git a/tests/test_wellcompletiondata.py b/tests/test_wellcompletiondata.py index 07101459e..aeaff2129 100644 --- a/tests/test_wellcompletiondata.py +++ b/tests/test_wellcompletiondata.py @@ -5,9 +5,9 @@ import pandas as pd import pytest -from ecl2df import common, compdat, wellcompletiondata -from ecl2df.eclfiles import EclFiles -from ecl2df.wellcompletiondata import ( +from res2df import common, compdat, wellcompletiondata +from res2df.eclfiles import EclFiles +from res2df.wellcompletiondata import ( _aggregate_layer_to_zone, _df2pyarrow, _excl_well_startswith, diff --git a/tests/test_wellconnstatus.py b/tests/test_wellconnstatus.py index 291e27b4e..037d76640 100644 --- a/tests/test_wellconnstatus.py +++ b/tests/test_wellconnstatus.py @@ -3,8 +3,8 @@ import pandas as pd import pytest -from ecl2df
import wellconnstatus -from ecl2df.eclfiles import EclFiles +from res2df import wellconnstatus +from res2df.eclfiles import EclFiles try: # pylint: disable=unused-import diff --git a/tests/test_welopen.py b/tests/test_welopen.py index df297645b..f7a7d0540 100644 --- a/tests/test_welopen.py +++ b/tests/test_welopen.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from ecl2df import EclFiles, compdat +from res2df import EclFiles, compdat try: # pylint: disable=unused-import @@ -1025,7 +1025,7 @@ def test_welopen(test_input, expected): 'IN2' 2 1 1 1 'OPEN' / / WELOPEN - -- In ecl2df, the WELOPEN is allowed to be before WLIST + -- In res2df, the WELOPEN is allowed to be before WLIST '*OP' 'SHUT' 0 0 0 / / WLIST @@ -1300,7 +1300,7 @@ def test_welopen_df(): id="complump_defaults", marks=pytest.mark.xfail( raises=ValueError, - match="Defaulted COMPLUMP coordinates are not supported in ecl2df", + match="Defaulted COMPLUMP coordinates are not supported in res2df", ), ), pytest.param( diff --git a/tests/test_wlist.py b/tests/test_wlist.py index 88b1a1821..87c4e02c3 100644 --- a/tests/test_wlist.py +++ b/tests/test_wlist.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from ecl2df import EclFiles, compdat +from res2df import EclFiles, compdat try: # pylint: disable=unused-import diff --git a/tests/test_zonemap.py b/tests/test_zonemap.py index 32f6fd0fc..650fa28cb 100644 --- a/tests/test_zonemap.py +++ b/tests/test_zonemap.py @@ -5,7 +5,7 @@ import pandas as pd import pytest -import ecl2df +import res2df TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -15,10 +15,10 @@ def test_stdzoneslyr(): """Test that we can read zones if the zonemap is in a standard location. The eclfiles object defines what is the standard location for the file, while - the actual parsing is done in ecl2df.common.parse_lyrfile() and + the actual parsing is done in res2df.common.parse_lyrfile() and converted to zonemap in common.convert_lyrlist_to_zonemap() """ - eclfiles = ecl2df.EclFiles(REEK) + eclfiles = res2df.EclFiles(REEK) zonemap = eclfiles.get_zonemap() assert isinstance(zonemap, dict) @@ -37,7 +37,7 @@ def test_stdzoneslyr(): def test_nonexistingzones(): """Test an Eclipse case with non-existing zonemap (i.e. 
no zonemap file in the standard location)""" - eclfiles = ecl2df.EclFiles(REEK) + eclfiles = res2df.EclFiles(REEK) zonemap = eclfiles.get_zonemap("foobar") # (we got a warning and an empty dict) assert not zonemap @@ -52,7 +52,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.common.parse_lyrfile(lyrfile) is None + assert res2df.common.parse_lyrfile(lyrfile) is None assert "Could not parse lyr file" in caplog.text assert "Failed on content: foo" in caplog.text @@ -64,7 +64,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.common.parse_lyrfile(lyrfile) is None + assert res2df.common.parse_lyrfile(lyrfile) is None assert "Failed on content: foo 1 2 3" in caplog.text lyrfile = tmp_path / "formations.lyr" @@ -74,7 +74,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None assert "From_layer higher than to_layer" in caplog.text lyrfile = tmp_path / "formations.lyr" @@ -85,7 +85,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None assert "Failed on content: foo 3- 4 #FFGGHH" in caplog.text lyrfile = tmp_path / "formations.lyr" @@ -96,7 +96,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None assert "Failed on content: foo 3- 4 bluez" in caplog.text lyrfile.write_text( @@ -105,7 +105,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert ecl2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None def test_lyrlist_format(tmp_path): @@ -123,7 +123,7 @@ def test_lyrlist_format(tmp_path): """, encoding="utf-8", ) - lyrlist = ecl2df.common.parse_lyrfile(lyrfile) + lyrlist = res2df.common.parse_lyrfile(lyrfile) assert lyrlist == [ {"name": "ZoneA", "from_layer": 1, "to_layer": 5, "color": "#FFE5F7"}, @@ -155,8 +155,8 @@ def test_convert_lyrlist_to_zonemap(tmp_path): """, encoding="utf-8", ) - lyrlist = ecl2df.common.parse_lyrfile(lyrfile) - zonemap = ecl2df.common.convert_lyrlist_to_zonemap(lyrlist) + lyrlist = res2df.common.parse_lyrfile(lyrfile) + zonemap = res2df.common.convert_lyrlist_to_zonemap(lyrlist) assert zonemap assert len(lyrlist) == 3 assert len(zonemap) == 20 @@ -176,8 +176,8 @@ def test_nonstandardzones(tmp_path): # Difficult quote parsing above, might not run in ResInsight. 
""" lyrfile.write_text(lyrfilecontent) - lyrlist = ecl2df.common.parse_lyrfile(lyrfile) - zonemap = ecl2df.common.convert_lyrlist_to_zonemap(lyrlist) + lyrlist = res2df.common.parse_lyrfile(lyrfile) + zonemap = res2df.common.convert_lyrlist_to_zonemap(lyrlist) assert 0 not in zonemap assert zonemap[1] == "Eiriksson" assert zonemap[10] == "Eiriksson" @@ -252,7 +252,7 @@ def test_nonstandardzones(tmp_path): def test_merge_zones(dframe, zonedict, zoneheader, kname, expected_df): """Test merging of zone information into a (mocked) grid dataframe""" pd.testing.assert_frame_equal( - ecl2df.common.merge_zones(dframe, zonedict, zoneheader, kname), + res2df.common.merge_zones(dframe, zonedict, zoneheader, kname), expected_df, check_like=True, ) @@ -264,8 +264,8 @@ def test_repeated_merge_zone(): dframe = pd.DataFrame([{"K1": 1, "ZONE": "upper"}]) pd.testing.assert_frame_equal( - ecl2df.common.merge_zones(dframe, {1: "upper"}, "ZONE"), dframe + res2df.common.merge_zones(dframe, {1: "upper"}, "ZONE"), dframe ) pd.testing.assert_frame_equal( - ecl2df.common.merge_zones(dframe, {1: "lower"}, "ZONE"), dframe + res2df.common.merge_zones(dframe, {1: "lower"}, "ZONE"), dframe ) From 06ae725956d39f0b993c03efbc1b732882ed7649 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 14 Nov 2023 09:48:04 +0100 Subject: [PATCH 02/68] ecl2csv->res2csv --- README.md | 2 +- docs/csv2ecl.rst | 2 +- docs/index.rst | 2 +- docs/introduction.rst | 14 ++++----- docs/{ecl2csv.rst => res2csv.rst} | 8 ++--- docs/usage/compdat.rst | 2 +- docs/usage/fipreports.rst | 4 +-- docs/usage/grid.rst | 6 ++-- docs/usage/gruptree.rst | 2 +- docs/usage/nnc.rst | 2 +- docs/usage/pillars.rst | 6 ++-- docs/usage/rft.rst | 2 +- docs/usage/satfunc.rst | 2 +- docs/usage/trans.rst | 4 +-- res2df/__init__.py | 6 ++-- res2df/compdat.py | 4 +-- res2df/config_jobs/{ECL2CSV => RES2CSV} | 2 +- res2df/equil.py | 6 ++-- res2df/faults.py | 4 +-- res2df/fipreports.py | 4 +-- res2df/grid.py | 4 +-- res2df/gruptree.py | 4 +-- res2df/nnc.py | 6 ++-- res2df/pillars.py | 4 +-- res2df/pvt.py | 6 ++-- res2df/{ecl2csv.py => res2csv.py} | 8 ++--- res2df/rft.py | 4 +-- res2df/satfunc.py | 6 ++-- res2df/summary.py | 6 ++-- res2df/trans.py | 4 +-- res2df/vfp/_vfp.py | 6 ++-- res2df/wcon.py | 4 +-- res2df/wellcompletiondata.py | 4 +-- res2df/wellconnstatus.py | 4 +-- setup.py | 4 +-- tests/test_compdat.py | 14 ++++----- tests/test_equil.py | 14 ++++----- tests/test_ert_hooks.py | 22 ++++++------- tests/test_faults.py | 8 ++--- tests/test_fipreports.py | 14 ++++----- tests/test_grid.py | 22 ++++++------- tests/test_gruptree.py | 22 ++++++------- tests/test_hook_implementations.py | 2 +- tests/test_integration.py | 6 ++-- tests/test_logging.py | 16 +++++----- tests/test_nnc.py | 8 ++--- tests/test_pillars.py | 42 ++++++++++++------------- tests/test_pvt.py | 18 +++++------ tests/test_rft.py | 12 +++---- tests/test_satfunc.py | 14 ++++----- tests/test_summary.py | 42 ++++++++++++------------- tests/test_trans.py | 6 ++-- tests/test_wcon.py | 8 ++--- 53 files changed, 224 insertions(+), 224 deletions(-) rename docs/{ecl2csv.rst => res2csv.rst} (76%) rename res2df/config_jobs/{ECL2CSV => RES2CSV} (94%) rename res2df/{ecl2csv.py => res2csv.py} (97%) diff --git a/README.md b/README.md index c3d20f181..56a2eaf80 100644 --- a/README.md +++ b/README.md @@ -20,7 +20,7 @@ The package consists of a module pr. datatype, e.g. one module for summary files (.UNSMRY), one for completion data etc. 
There is a command line frontend for almost all functionality, called -`ecl2csv`, which converts the Eclipse data to DataFrames, and then dumps +`res2csv`, which converts the Eclipse data to DataFrames, and then dumps the dataframes to files in CSV format, and a similar `csv2ecl` for the reverse operation. diff --git a/docs/csv2ecl.rst b/docs/csv2ecl.rst index ac5438806..4cb8ea1f8 100644 --- a/docs/csv2ecl.rst +++ b/docs/csv2ecl.rst @@ -6,7 +6,7 @@ from dataframes (in the format dumped by res2df). This makes it possible to produce Eclipse input data in any application that can write CSV files, and use this tool to convert it into Eclipse include files, or it can facilitate operations/manipulations of an existing deck using any tool -that can work on CSV files, by first running ecl2csv on an input file, +that can work on CSV files, by first running res2csv on an input file, transforming it, and writing back using csv2ecl. Mandatory argument for csv2ecl is diff --git a/docs/index.rst b/docs/index.rst index c1ed59c0e..eddab93f3 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -11,7 +11,7 @@ data format. introduction usage - ecl2csv + res2csv csv2ecl installation contribution diff --git a/docs/introduction.rst b/docs/introduction.rst index 95f50d06c..b26e87d85 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -9,7 +9,7 @@ input files --- or any other tool outputting to the same data format, f.ex. `flow `_. Most of the features can be reached from the command line, through the -command line program ``ecl2csv``. Use the command line tool to dump the +command line program ``res2csv``. Use the command line tool to dump the extracted or computed data to a CSV file, and use any other tool to view the CSV data. @@ -18,15 +18,15 @@ Examples .. code-block:: console - > ecl2csv --help - > ecl2csv summary --help - > ecl2csv summary --column_keys "F*" --time_index monthly --output output.csv MYECLDECK.DATA - > ecl2csv pillars --help - > ecl2csv pillars --rstdates all MYECLDECK.DATA + > res2csv --help + > res2csv summary --help + > res2csv summary --column_keys "F*" --time_index monthly --output output.csv MYECLDECK.DATA + > res2csv pillars --help + > res2csv pillars --rstdates all MYECLDECK.DATA If you access the module from within a Python script, for each submodule there is a function called ``df()`` which provides more or less the same -functionality as through ``ecl2csv`` from the command line, but which returns +functionality as through ``res2csv`` from the command line, but which returns a Pandas Dataframe. .. code-block:: python diff --git a/docs/ecl2csv.rst b/docs/res2csv.rst similarity index 76% rename from docs/ecl2csv.rst rename to docs/res2csv.rst index 4646c0ac9..0dff56ddd 100644 --- a/docs/ecl2csv.rst +++ b/docs/res2csv.rst @@ -1,13 +1,13 @@ -ecl2csv +res2csv ======= Most of the functionality in res2df is exposed to the command line through -the script *ecl2csv*. The first argument to this script is always +the script *res2csv*. The first argument to this script is always the submodule (subcommand) from which you want functionality. Mandatory argument is always an Eclipse deck or sometimes individual Eclipse include files, and there is usually an ``--output`` option to specify which file to dump the CSV to. If you want output to your terminal, use ``-`` as the output filename. .. 
argparse:: - :ref: res2df.ecl2csv.get_parser - :prog: ecl2csv + :ref: res2df.res2csv.get_parser + :prog: res2csv diff --git a/docs/usage/compdat.rst b/docs/usage/compdat.rst index e32a37ef5..f7850fb98 100644 --- a/docs/usage/compdat.rst +++ b/docs/usage/compdat.rst @@ -39,5 +39,5 @@ be added to the returned data through the option ``--initvectors``: .. code-block:: console - ecl2csv compdat --verbose MYDATADECK.DATA --initvectors FIPNUM PERMX + res2csv compdat --verbose MYDATADECK.DATA --initvectors FIPNUM PERMX # (put the DATA file first, if not it will be interpreted as a vector) diff --git a/docs/usage/fipreports.rst b/docs/usage/fipreports.rst index fbf24c495..2e06dc1ec 100644 --- a/docs/usage/fipreports.rst +++ b/docs/usage/fipreports.rst @@ -9,7 +9,7 @@ from these tables: This table found in a PRT file will be parsed to the following dataframe: .. - Generated with ecl2csv fipreports -v --fipname FIPZON fipreports-example.PRT -o fipreports-example.csv + Generated with res2csv fipreports -v --fipname FIPZON fipreports-example.PRT -o fipreports-example.csv Date added manually .. csv-table:: FIPZON table from PRT file @@ -19,7 +19,7 @@ This table found in a PRT file will be parsed to the following dataframe: In this particular example, ``FIPZON`` was selected explicitly, either using the command line client or the Python API through an option to the :func:`res2df.fipreports.df` function. -Using this module is easiest through ``ecl2csv fipreports``. +Using this module is easiest through ``res2csv fipreports``. diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst index 7a1112bb9..3df33b081 100644 --- a/docs/usage/grid.rst +++ b/docs/usage/grid.rst @@ -29,8 +29,8 @@ Alternatively, the same data can be produced as a CSV file using the command lin .. code-block:: console - ecl2csv grid --help # Will display some help text - ecl2csv grid MYDATADECK.DATA --rstdates last --verbose --output grid.csv + res2csv grid --help # Will display some help text + res2csv grid MYDATADECK.DATA --rstdates last --verbose --output grid.csv Select which vectors to include (INIT and/or restart vectors) with the @@ -38,7 +38,7 @@ Select which vectors to include (INIT and/or restart vectors) with the .. code-block:: console - ecl2csv grid --verbose MYDATADECK.DATA --vectors PRESSURE PERMX + res2csv grid --verbose MYDATADECK.DATA --vectors PRESSURE PERMX Example computations on a grid dataframe ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ diff --git a/docs/usage/gruptree.rst b/docs/usage/gruptree.rst index a33ba774a..11ae29596 100644 --- a/docs/usage/gruptree.rst +++ b/docs/usage/gruptree.rst @@ -35,7 +35,7 @@ available (here also wells from WELSPECS is included): .. code-block:: console - > ecl2csv gruptree --prettyprint MYDATADECK.DATA + > res2csv gruptree --prettyprint MYDATADECK.DATA Date: 2000-01-01 └── NORTHSEA └── AREA diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index 2e94cfb7b..604a5ce45 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -29,7 +29,7 @@ Alternatively, the same data can be produced as a CSV file using the command lin .. 
code-block:: console - ecl2csv nnc MYDATADECK.DATA --verbose --output nnc.csv + res2csv nnc MYDATADECK.DATA --verbose --output nnc.csv It is possible to add *xyz* coordinates for each connection (as the average of the xyz for each of the cells involved in a connection pair) as diff --git a/docs/usage/pillars.rst b/docs/usage/pillars.rst index e5949cfe5..391d0b47d 100644 --- a/docs/usage/pillars.rst +++ b/docs/usage/pillars.rst @@ -57,13 +57,13 @@ saturation above sgascutoff, among those pillars with at least one cell above ``swatcutoff``. See the API documentation, :func:`res2df.pillars.compute_pillar_contacts`. -The functionality is also available through the command line tool ``ecl2csv pillars`` +The functionality is also available through the command line tool ``res2csv pillars`` as in the example: .. code-block:: console - ecl2csv pillars --help # This will display some help text - ecl2csv pillars MYDATAFILE.DATA --rstdates all --stackdates + res2csv pillars --help # This will display some help text + res2csv pillars MYDATAFILE.DATA --rstdates all --stackdates It is *strongly* recommended to play with the cutoffs to get the desired result. Also calibrate the computed contacts with the initial contacts, you may see that diff --git a/docs/usage/rft.rst b/docs/usage/rft.rst index 01b87a618..03b1f3310 100644 --- a/docs/usage/rft.rst +++ b/docs/usage/rft.rst @@ -9,7 +9,7 @@ Typical usage is to generate the CSV from the command line: .. code-block:: console - ecl2csv rft MYDATADECK.DATA --verbose --output rft.csv + res2csv rft MYDATADECK.DATA --verbose --output rft.csv It is possible to a specific well, a date (YYYY-MM-DD). If you enable debug mode through ``--debug``, more information is printed, including an ASCII representation diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index 05ef3dcdf..eec07d6e1 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -28,7 +28,7 @@ Alternatively, the same data can be produced as a CSV file using the command lin .. code-block:: console - ecl2csv satfunc MYDATADECK.DATA --verbose --output satfunc.csv + res2csv satfunc MYDATADECK.DATA --verbose --output satfunc.csv It is possible to extract keywords one at a time using the ``--keywords`` command line option. diff --git a/docs/usage/trans.rst b/docs/usage/trans.rst index 8e18ebf9c..9a26f7558 100644 --- a/docs/usage/trans.rst +++ b/docs/usage/trans.rst @@ -33,7 +33,7 @@ command line: .. code-block:: console - ecl2csv trans MYDATADECK.DATA --verbose --output trans.csv + res2csv trans MYDATADECK.DATA --verbose --output trans.csv Adding more data for each connection ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -121,5 +121,5 @@ where this last table can also be exported directly from the command line using .. 
code-block:: console - ecl2csv trans MYDATADECK.DATA --vectors FIPNUM --nnc --group --output fipnuminterfaces.csv + res2csv trans MYDATADECK.DATA --vectors FIPNUM --nnc --group --output fipnuminterfaces.csv diff --git a/res2df/__init__.py b/res2df/__init__.py index e0b530b6d..a7623df92 100644 --- a/res2df/__init__.py +++ b/res2df/__init__.py @@ -34,11 +34,11 @@ ] -def getLogger_ecl2csv( +def getLogger_res2csv( module_name: str = "res2df", args_dict: Optional[Dict[str, Union[str, bool]]] = None ) -> logging.Logger: # pylint: disable=invalid-name - """Provide a custom logger for ecl2csv and csv2ecl + """Provide a custom logger for res2csv and csv2ecl Logging output is by default split by logging levels (split between WARNING and ERROR) to stdout and stderr, each log occurs in only one of the streams. @@ -91,5 +91,5 @@ def getLogger_ecl2csv( return logger -for submodule in SUBMODULES + ["ecl2csv", "csv2ecl"]: +for submodule in SUBMODULES + ["res2csv", "csv2ecl"]: importlib.import_module("res2df." + submodule) diff --git a/res2df/compdat.py b/res2df/compdat.py index 7d5a23ae5..e6fcd8e79 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -25,7 +25,7 @@ # Allow parts of res2df to work without OPM: pass -from res2df import getLogger_ecl2csv +from res2df import getLogger_res2csv from .common import ( get_wells_matching_template, @@ -970,7 +970,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def compdat_main(args): """Entry-point for module, for command line utility""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclfiles = EclFiles(args.DATAFILE) diff --git a/res2df/config_jobs/ECL2CSV b/res2df/config_jobs/RES2CSV similarity index 94% rename from res2df/config_jobs/ECL2CSV rename to res2df/config_jobs/RES2CSV index 3f879339b..8be172aa1 100644 --- a/res2df/config_jobs/ECL2CSV +++ b/res2df/config_jobs/RES2CSV @@ -1,4 +1,4 @@ -EXECUTABLE ecl2csv +EXECUTABLE res2csv DEFAULT <XARG1> "" DEFAULT <XARG2> "" diff --git a/res2df/equil.py b/res2df/equil.py index 2f7c73a82..8458343c9 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -9,7 +9,7 @@ import pandas as pd -from res2df import common, getLogger_ecl2csv, inferdims +from res2df import common, getLogger_res2csv, inferdims from .eclfiles import EclFiles @@ -311,7 +311,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar def equil_main(args) -> None: """Read from disk and write CSV back to disk""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclfiles = EclFiles(args.DATAFILE) @@ -343,7 +343,7 @@ def equil_main(args) -> None: def equil_reverse_main(args) -> None: """Entry-point for module, for command line utility for CSV to Eclipse""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) equil_df = pd.read_csv(args.csvfile) diff --git a/res2df/faults.py b/res2df/faults.py index 3a2031d21..98599540b 100644 --- a/res2df/faults.py +++ b/res2df/faults.py @@ -10,7 +10,7 @@ import pandas as pd -from res2df import EclFiles, getLogger_ecl2csv +from res2df import EclFiles, getLogger_res2csv from res2df.common import parse_opmio_deckrecord, write_dframe_stdout_file try: @@ -83,7 +83,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def
faults_main(args) -> None: """Read from disk and write CSV back to disk""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclfiles = EclFiles(args.DATAFILE) diff --git a/res2df/fipreports.py b/res2df/fipreports.py index 4107161f2..ed3c741ae 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -10,7 +10,7 @@ import numpy as np import pandas as pd -from res2df import EclFiles, getLogger_ecl2csv +from res2df import EclFiles, getLogger_res2csv from res2df.common import parse_ecl_month, write_dframe_stdout_file logger = logging.getLogger(__name__) @@ -211,7 +211,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fipreports_main(args) -> None: """Command line API""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) if args.PRTFILE.endswith(".PRT"): diff --git a/res2df/grid.py b/res2df/grid.py index 98a782b61..4aab554cb 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -24,7 +24,7 @@ import pyarrow.feather from resdata.resfile import ResdataFile -from res2df import __version__, common, getLogger_ecl2csv +from res2df import __version__, common, getLogger_res2csv from .eclfiles import EclFiles @@ -760,7 +760,7 @@ def df2ecl( def grid_main(args) -> None: """This is the command line API""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclfiles = EclFiles(args.DATAFILE) diff --git a/res2df/gruptree.py b/res2df/gruptree.py index 882c347d8..d7422c4cd 100644 --- a/res2df/gruptree.py +++ b/res2df/gruptree.py @@ -19,7 +19,7 @@ except ImportError: pass -from res2df import EclFiles, getLogger_ecl2csv +from res2df import EclFiles, getLogger_res2csv from res2df.common import ( parse_opmio_date_rec, parse_opmio_deckrecord, @@ -447,7 +447,7 @@ def prettyprint(dframe: pd.DataFrame) -> str: def gruptree_main(args) -> None: """Entry-point for module, for command line utility.""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) if not args.output and not args.prettyprint: diff --git a/res2df/nnc.py b/res2df/nnc.py index b5ce0e56b..f755309b0 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -10,7 +10,7 @@ import pandas as pd -from res2df import EclFiles, __version__, common, getLogger_ecl2csv, grid +from res2df import EclFiles, __version__, common, getLogger_res2csv, grid from res2df.common import write_dframe_stdout_file logger: logging.Logger = logging.getLogger(__name__) @@ -275,8 +275,8 @@ def df2ecl_editnnc( def nnc_main(args) -> None: - """Command line access point from main() or from ecl2csv via subparser""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + """Command line access point from main() or from res2csv via subparser""" + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclfiles = EclFiles(args.DATAFILE) diff --git a/res2df/pillars.py b/res2df/pillars.py index 96000238e..e94421ec7 100644 --- a/res2df/pillars.py +++ b/res2df/pillars.py @@ -8,7 +8,7 @@ import dateutil.parser import pandas as pd -from res2df import EclFiles, common, getLogger_ecl2csv, grid +from res2df import EclFiles, common, getLogger_res2csv, grid logger: logging.Logger = 
logging.getLogger(__name__) @@ -411,7 +411,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def pillars_main(args) -> None: """This is the command line API""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) diff --git a/res2df/pvt.py b/res2df/pvt.py index 05d9ee894..5d9b3e640 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -11,7 +11,7 @@ import pandas as pd -from res2df import EclFiles, common, getLogger_ecl2csv, inferdims +from res2df import EclFiles, common, getLogger_res2csv, inferdims try: # Needed for mypy @@ -280,7 +280,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar def pvt_main(args) -> None: """Entry-point for module, for command line utility for Eclipse to CSV""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclfiles = EclFiles(args.DATAFILE) @@ -314,7 +314,7 @@ def pvt_main(args) -> None: def pvt_reverse_main(args) -> None: """Entry-point for module, for command line utility for CSV to Eclipse""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) pvt_df = pd.read_csv(args.csvfile) diff --git a/res2df/ecl2csv.py b/res2df/res2csv.py similarity index 97% rename from res2df/ecl2csv.py rename to res2df/res2csv.py index 57e487987..2a62ec544 100644 --- a/res2df/ecl2csv.py +++ b/res2df/res2csv.py @@ -13,7 +13,7 @@ # String constants in use for generating ERT forward model documentation: DESCRIPTION: str = """Convert Eclipse input and output files into CSV files, -with the command line utility ``ecl2csv``. Run ``ecl2csv --help`` to see +with the command line utility ``res2csv``. Run ``res2csv --help`` to see which subcommands are supported. For supplying options to subcommands, you can use the arguments ``<XARG1>`` @@ -27,12 +27,12 @@ Outputting the EQUIL data from an Eclipse deck. The ECLBASE variable from your ERT config is supplied implicitly:: - FORWARD_MODEL ECL2CSV(<SUBCOMMAND>=equil, <OUTPUT>=equil.csv) + FORWARD_MODEL RES2CSV(<SUBCOMMAND>=equil, <OUTPUT>=equil.csv) For a yearly summary export of the realization, options have to be supplied with the XARG options:: - FORWARD_MODEL ECL2CSV(<SUBCOMMAND>=summary, <OUTPUT>=yearly.csv, <XARG1>="--time_index", <XARG2>="yearly") + FORWARD_MODEL RES2CSV(<SUBCOMMAND>=summary, <OUTPUT>=yearly.csv, <XARG1>="--time_index", <XARG2>="yearly") The quotes around double-dashed options are critical to avoid ERT taking for a comment. For more options, use ```` etc. @@ -44,7 +44,7 @@ def get_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( - "ecl2csv (" + __version__ + ") is a command line frontend to res2df. 
" "Documentation at https://equinor.github.io/res2df/ " ), ) diff --git a/res2df/rft.py b/res2df/rft.py index 7dbcde11b..42a422931 100644 --- a/res2df/rft.py +++ b/res2df/rft.py @@ -23,7 +23,7 @@ import pandas as pd from resdata.resfile import ResdataFile -from res2df import getLogger_ecl2csv +from res2df import getLogger_res2csv from .common import merge_zones, write_dframe_stdout_file from .eclfiles import EclFiles @@ -674,7 +674,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def rft_main(args) -> None: """Entry-point for module, for command line utility""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) if args.DATAFILE.endswith(".RFT"): diff --git a/res2df/satfunc.py b/res2df/satfunc.py index 5f1d00c46..fc17096bb 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -25,7 +25,7 @@ except ImportError: pass -from res2df import common, getLogger_ecl2csv, inferdims +from res2df import common, getLogger_res2csv, inferdims from .common import write_dframe_stdout_file from .eclfiles import EclFiles @@ -189,7 +189,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar def satfunc_main(args) -> None: """Entry-point for module, for command line utility""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclfiles = EclFiles(args.DATAFILE) @@ -222,7 +222,7 @@ def satfunc_main(args) -> None: def satfunc_reverse_main(args) -> None: """For command line utility for CSV to Eclipse""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) satfunc_df = pd.read_csv(args.csvfile) diff --git a/res2df/summary.py b/res2df/summary.py index 8941a426a..73fbbb969 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -16,7 +16,7 @@ import pyarrow.feather from resdata.summary import Summary, SummaryKeyWordVector -from res2df import getLogger_ecl2csv +from res2df import getLogger_res2csv from . 
import parameters from .common import write_dframe_stdout_file @@ -897,7 +897,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar def summary_main(args) -> None: """Read summary data from disk and write CSV back to disk""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclbase = ( @@ -924,7 +924,7 @@ def summary_main(args) -> None: def summary_reverse_main(args) -> None: """Entry point for usage with "csv2ecl summary" on the command line""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) diff --git a/res2df/trans.py b/res2df/trans.py index d67640599..af94e9979 100644 --- a/res2df/trans.py +++ b/res2df/trans.py @@ -10,7 +10,7 @@ import res2df.grid import res2df.nnc -from res2df import getLogger_ecl2csv +from res2df import getLogger_res2csv from res2df.common import write_dframe_stdout_file from .eclfiles import EclFiles @@ -303,7 +303,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def trans_main(args): """This is the command line API""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclfiles = EclFiles(args.DATAFILE) diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 92eefec33..4448df815 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -25,7 +25,7 @@ except ImportError: pass -from res2df import EclFiles, common, getLogger_ecl2csv +from res2df import EclFiles, common, getLogger_res2csv from . import _vfpinj as vfpinj from . import _vfpprod as vfpprod @@ -482,7 +482,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar def vfp_main(args) -> None: """Entry-point for module, for command line utility.""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) if args.keyword: @@ -524,7 +524,7 @@ def vfp_main(args) -> None: def vfp_reverse_main(args) -> None: """Entry-point for module, for command line utility for CSV to Eclipse""" - logger = getLogger_ecl2csv( # pylint: disable=redefined-outer-name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) vfp_df = pd.read_csv(args.csvfile) diff --git a/res2df/wcon.py b/res2df/wcon.py index c0da856d4..8246e0d68 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -15,7 +15,7 @@ except ImportError: pass -from res2df import EclFiles, getLogger_ecl2csv +from res2df import EclFiles, getLogger_res2csv from res2df.common import ( parse_opmio_date_rec, parse_opmio_deckrecord, @@ -91,7 +91,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def wcon_main(args) -> None: """Read from disk and write CSV back to disk""" - logger = getLogger_ecl2csv( # pylint: disable:redefined-outer_name + logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) eclfiles = EclFiles(args.DATAFILE) diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index 403f3ed68..8b0af8dcc 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -10,7 +10,7 @@ import pyarrow import pyarrow.feather -from res2df import common, compdat, getLogger_ecl2csv, wellconnstatus +from res2df import common,
compdat, getLogger_res2csv, wellconnstatus from res2df.eclfiles import EclFiles from .common import write_dframe_stdout_file @@ -286,7 +286,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def wellcompletiondata_main(args): """Entry-point for module, for command line utility""" - logger = getLogger_ecl2csv(__name__, vars(args)) + logger = getLogger_res2csv(__name__, vars(args)) eclfiles = EclFiles(args.DATAFILE) if not Path(args.zonemap).is_file(): diff --git a/res2df/wellconnstatus.py b/res2df/wellconnstatus.py index 600f6198c..1dcebec8e 100644 --- a/res2df/wellconnstatus.py +++ b/res2df/wellconnstatus.py @@ -8,7 +8,7 @@ import numpy as np import pandas as pd -from res2df import getLogger_ecl2csv, summary +from res2df import getLogger_res2csv, summary from res2df.eclfiles import EclFiles from .common import write_dframe_stdout_file @@ -114,7 +114,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def wellconnstatus_main(args): """Entry-point for module, for command line utility""" - logger = getLogger_ecl2csv(__name__, vars(args)) + logger = getLogger_res2csv(__name__, vars(args)) eclfiles = EclFiles(args.DATAFILE) wellconnstatus_df = df(eclfiles) diff --git a/setup.py b/setup.py index 0dab95bf6..52f489818 100644 --- a/setup.py +++ b/setup.py @@ -68,8 +68,8 @@ entry_points={ "console_scripts": [ "csv2ecl=res2df.csv2ecl:main", - "ecl2csv=res2df.ecl2csv:main", - "ecl2arrow=res2df.ecl2csv:main", + "res2csv=res2df.res2csv:main", + "ecl2arrow=res2df.res2csv:main", ], "ert": ["res2df_jobs = res2df.hook_implementations.jobs"], }, diff --git a/tests/test_compdat.py b/tests/test_compdat.py index 4da6fba65..4fc483b75 100644 --- a/tests/test_compdat.py +++ b/tests/test_compdat.py @@ -5,7 +5,7 @@ import pandas as pd import pytest -from res2df import EclFiles, compdat, ecl2csv +from res2df import EclFiles, compdat, res2csv try: # pylint: disable=unused-import @@ -291,9 +291,9 @@ def test_main_subparsers(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "compdat.csv" mocker.patch( - "sys.argv", ["ecl2csv", "compdat", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] + "sys.argv", ["res2csv", "compdat", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -303,7 +303,7 @@ def test_main_subparsers(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "compdat", EIGHTCELLS, "--initvectors", @@ -312,7 +312,7 @@ def test_main_subparsers(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -322,7 +322,7 @@ def test_main_subparsers(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "compdat", EIGHTCELLS, "--initvectors", @@ -332,7 +332,7 @@ def test_main_subparsers(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) diff --git a/tests/test_equil.py b/tests/test_equil.py index 24238de66..a534d258d 100644 --- a/tests/test_equil.py +++ b/tests/test_equil.py @@ -8,7 +8,7 @@ import pandas as pd import pytest -from res2df import csv2ecl, ecl2csv, equil +from res2df import csv2ecl, equil, res2csv from res2df.eclfiles import EclFiles try: @@ -440,8 +440,8 @@ def test_rsvd_via_file(tmp_path, mocker): 60 1000 /""" rsvd_df = equil.df(deckstr) Path("rsvd.inc").write_text(deckstr, encoding="utf8") - 
mocker.patch("sys.argv", ["ecl2csv", "equil", "-v", "rsvd.inc", "-o", "rsvd.csv"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "equil", "-v", "rsvd.inc", "-o", "rsvd.csv"]) + res2csv.main() rsvd_df_fromcsv = pd.read_csv("rsvd.csv") pd.testing.assert_frame_equal(rsvd_df, rsvd_df_fromcsv) @@ -541,8 +541,8 @@ def test_main_subparser(tmp_path, mocker): """Test command line interface""" os.chdir(tmp_path) tmpcsvfile = "equil.csv" - mocker.patch("sys.argv", ["ecl2csv", "equil", "-v", REEK, "-o", tmpcsvfile]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "equil", "-v", REEK, "-o", tmpcsvfile]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(tmpcsvfile) @@ -588,8 +588,8 @@ def test_main_subparser(tmp_path, mocker): """, encoding="utf8", ) - mocker.patch("sys.argv", ["ecl2csv", "equil", "-v", "poro.inc", "-o", "empty.csv"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "equil", "-v", "poro.inc", "-o", "empty.csv"]) + res2csv.main() assert not Path("empty.csv").read_text(encoding="utf8").strip() diff --git a/tests/test_ert_hooks.py b/tests/test_ert_hooks.py index c2fde782c..13bd9aa56 100644 --- a/tests/test_ert_hooks.py +++ b/tests/test_ert_hooks.py @@ -24,7 +24,7 @@ @pytest.mark.skipif( not HAVE_ERT, reason="ERT is not installed, skipping hook implementation tests." ) -def test_ecl2csv_through_ert(tmp_path): +def test_res2csv_through_ert(tmp_path): """Test running the ERT executable on a mocked config file""" os.chdir(tmp_path) @@ -57,25 +57,25 @@ def test_ecl2csv_through_ert(tmp_path): for subcommand in res2df.SUBMODULES: ert_config.append( - f"FORWARD_MODEL ECL2CSV(={subcommand}, " + f"FORWARD_MODEL RES2CSV(={subcommand}, " f"={subcommand}.csv)" ) # Test what we can also supply additional options for some submodules: ert_config.append( - "FORWARD_MODEL ECL2CSV(=summary, " + "FORWARD_MODEL RES2CSV(=summary, " '=summary-yearly.csv, ="--time_index", =yearly)' ) ert_config.append( - "FORWARD_MODEL ECL2CSV(=equil, " + "FORWARD_MODEL RES2CSV(=equil, " '=equil-rsvd.csv, ="--keywords", ="RSVD")' ) ert_config.append( - "FORWARD_MODEL ECL2CSV(=pvt, " + "FORWARD_MODEL RES2CSV(=pvt, " '=pvt-custom.csv, ="--keywords", ="PVTO")' ) ert_config.append( - "FORWARD_MODEL ECL2CSV(=satfunc, " + "FORWARD_MODEL RES2CSV(=satfunc, " '=satfunc-swof.csv, ="--keywords", ="SWOF")' ) @@ -89,7 +89,7 @@ def test_ecl2csv_through_ert(tmp_path): "=SUMYEARLY)" ) - ert_config_filename = "ecl2csv_test.ert" + ert_config_filename = "res2csv_test.ert" Path(ert_config_filename).write_text("\n".join(ert_config), encoding="utf-8") subprocess.call(["ert", "test_run", ert_config_filename]) @@ -114,7 +114,7 @@ def test_job_documentation(): """Test that for registered ERT forward models the documentation is non-empty""" if HAVE_ERT: assert ( - type(jobs.job_documentation("ECL2CSV")) + type(jobs.job_documentation("RES2CSV")) == ert.shared.plugins.plugin_response.PluginResponse ) assert ( @@ -123,7 +123,7 @@ def test_job_documentation(): ) else: - assert jobs.job_documentation("ECL2CSV") is None + assert jobs.job_documentation("RES2CSV") is None assert jobs.job_documentation("CSV2ECL") is None assert jobs.job_documentation("foobar") is None @@ -137,9 +137,9 @@ def test_get_module_variable(): # pylint: disable=protected-access assert jobs._get_module_variable_if_exists("foo", "bar") == "" assert jobs._get_module_variable_if_exists( - "res2df.ecl2csv", "DESCRIPTION" + "res2df.res2csv", "DESCRIPTION" ).startswith("Convert Eclipse input and output") - assert 
jobs._get_module_variable_if_exists("res2df.ecl2csv", "NOPE") == "" + assert jobs._get_module_variable_if_exists("res2df.res2csv", "NOPE") == "" @pytest.mark.skipif(HAVE_ERT, reason="Tested only when ERT is not available") diff --git a/tests/test_faults.py b/tests/test_faults.py index aec60a633..99d652c67 100644 --- a/tests/test_faults.py +++ b/tests/test_faults.py @@ -7,7 +7,7 @@ import pandas as pd import pytest -from res2df import ecl2csv, faults +from res2df import faults, res2csv from res2df.eclfiles import EclFiles try: @@ -82,8 +82,8 @@ def test_multiplestr2df(): def test_main_subparser(tmp_path, mocker): """Test command line interface with subparsers""" tmpcsvfile = tmp_path / "faultsdf.csv" - mocker.patch("sys.argv", ["ecl2csv", "faults", REEK, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "faults", REEK, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -93,7 +93,7 @@ def test_main_subparser(tmp_path, mocker): def test_magic_stdout(): """Test that we can pipe the output into a dataframe""" result = subprocess.run( - ["ecl2csv", "faults", "-o", "-", REEK], check=True, stdout=subprocess.PIPE + ["res2csv", "faults", "-o", "-", REEK], check=True, stdout=subprocess.PIPE ) df_stdout = pd.read_csv(io.StringIO(result.stdout.decode())) assert not df_stdout.empty diff --git a/tests/test_fipreports.py b/tests/test_fipreports.py index bc4f6b2ab..7a75ea301 100644 --- a/tests/test_fipreports.py +++ b/tests/test_fipreports.py @@ -8,7 +8,7 @@ import pandas as pd import pytest -from res2df import ecl2csv, fipreports +from res2df import fipreports, res2csv from res2df.eclfiles import EclFiles from res2df.fipreports import report_block_lineparser as parser @@ -440,9 +440,9 @@ def test_cmdline(tmp_path, mocker): tmpcsvfile = tmp_path / "TMP-fipreports.csv" mocker.patch( "sys.argv", - ["ecl2csv", "fipreports", "-v", DATAFILE, "--output", str(tmpcsvfile)], + ["res2csv", "fipreports", "-v", DATAFILE, "--output", str(tmpcsvfile)], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(tmpcsvfile) @@ -454,7 +454,7 @@ def test_cmdline(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "fipreports", "--debug", DATAFILE, @@ -462,19 +462,19 @@ def test_cmdline(tmp_path, mocker): "debugmode.csv", ], ) - ecl2csv.main() + res2csv.main() pd.testing.assert_frame_equal(pd.read_csv("debugmode.csv"), disk_df) # Directly on PRT file: mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "fipreports", DATAFILE.replace("DATA", "PRT"), "--output", "fromprtfile.csv", ], ) - ecl2csv.main() + res2csv.main() pd.testing.assert_frame_equal(pd.read_csv("fromprtfile.csv"), disk_df) diff --git a/tests/test_grid.py b/tests/test_grid.py index 07343b2ee..e9df854b4 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -8,7 +8,7 @@ import pyarrow import pytest -from res2df import common, ecl2csv, grid +from res2df import common, grid, res2csv from res2df.eclfiles import EclFiles TESTDIR = Path(__file__).absolute().parent @@ -421,7 +421,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "grid", EIGHTCELLS, "-o", @@ -432,7 +432,7 @@ def test_main(tmp_path, mocker): "PORO", ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert not disk_df.empty @@ -442,7 +442,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "grid", "--verbose", 
EIGHTCELLS, @@ -455,7 +455,7 @@ def test_main(tmp_path, mocker): "PERMX", ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert not disk_df.empty @@ -465,9 +465,9 @@ def test_main(tmp_path, mocker): # Test with constants dropping mocker.patch( - "sys.argv", ["ecl2csv", "grid", REEK, "-o", str(tmpcsvfile), "--dropconstants"] + "sys.argv", ["res2csv", "grid", REEK, "-o", str(tmpcsvfile), "--dropconstants"] ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) # That PVTNUM is constant is a particular feature @@ -480,15 +480,15 @@ def test_main_arrow(tmp_path, mocker): """Check that we can export grid in arrow format""" mocker.patch( "sys.argv", - ["ecl2csv", "grid", "--arrow", EIGHTCELLS, "-o", str(tmp_path / "grid.arrow")], + ["res2csv", "grid", "--arrow", EIGHTCELLS, "-o", str(tmp_path / "grid.arrow")], ) - ecl2csv.main() + res2csv.main() # Obtain the CSV version for comparison: mocker.patch( - "sys.argv", ["ecl2csv", "grid", EIGHTCELLS, "-o", str(tmp_path / "grid.csv")] + "sys.argv", ["res2csv", "grid", EIGHTCELLS, "-o", str(tmp_path / "grid.csv")] ) - ecl2csv.main() + res2csv.main() # Read from disk and verify similarity disk_frame_arrow = pyarrow.feather.read_table(tmp_path / "grid.arrow").to_pandas() diff --git a/tests/test_gruptree.py b/tests/test_gruptree.py index 05878c0ab..5adfb6c8d 100644 --- a/tests/test_gruptree.py +++ b/tests/test_gruptree.py @@ -7,7 +7,7 @@ import pandas as pd import pytest -from res2df import ecl2csv, gruptree +from res2df import gruptree, res2csv from res2df.eclfiles import EclFiles try: @@ -430,16 +430,16 @@ def test_emptytree_commandlinetool(tmp_path, mocker, caplog): """Test the command line tool on an Eclipse deck which is empty""" os.chdir(tmp_path) Path("EMPTY.DATA").write_text("", encoding="utf8") - mocker.patch("sys.argv", ["ecl2csv", "gruptree", "--prettyprint", "EMPTY.DATA"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "gruptree", "--prettyprint", "EMPTY.DATA"]) + res2csv.main() assert "No tree data to prettyprint" in caplog.text def test_cli_nothing_to_do(mocker, capsys): """Test that the client says nothing to do when DATA is supplied, but no action.""" - mocker.patch("sys.argv", ["ecl2csv", "gruptree", "EMPTY.DATA"]) + mocker.patch("sys.argv", ["res2csv", "gruptree", "EMPTY.DATA"]) with pytest.raises(SystemExit): - ecl2csv.main() + res2csv.main() assert "Nothing to do" in capsys.readouterr().out @@ -470,8 +470,8 @@ def test_tstep(): def test_main(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "gruptree.csv" - mocker.patch("sys.argv", ["ecl2csv", "gruptree", REEK, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "gruptree", REEK, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -480,8 +480,8 @@ def test_main(tmp_path, mocker): def test_prettyprint_commandline(mocker, capsys): """Test pretty printing via command line interface""" - mocker.patch("sys.argv", ["ecl2csv", "gruptree", REEK, "--prettyprint"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "gruptree", REEK, "--prettyprint"]) + res2csv.main() stdout = capsys.readouterr().out.strip() print(stdout) assert ( @@ -551,8 +551,8 @@ def test_prettyprint_commandline(mocker, capsys): def test_main_subparser(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "gruptree.csv" - mocker.patch("sys.argv", 
["ecl2csv", "gruptree", "-v", REEK, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "gruptree", "-v", REEK, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) diff --git a/tests/test_hook_implementations.py b/tests/test_hook_implementations.py index ffd89fde4..8bde36253 100644 --- a/tests/test_hook_implementations.py +++ b/tests/test_hook_implementations.py @@ -23,7 +23,7 @@ def fixture_expected_jobs(path_to_res2df: Path) -> Dict[str, Path]: """Dictionary of installed jobs with location to job configuration""" expected_job_names = [ - "ECL2CSV", + "RES2CSV", "CSV2ECL", ] return {name: path_to_res2df / "config_jobs" / name for name in expected_job_names} diff --git a/tests/test_integration.py b/tests/test_integration.py index 4e076ca31..9d0953f48 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -19,13 +19,13 @@ @pytest.mark.integration def test_integration(): """Test that all endpoints that are to be installed are installed""" - assert subprocess.check_output(["ecl2csv", "-h"]) # nosec + assert subprocess.check_output(["res2csv", "-h"]) # nosec assert subprocess.check_output(["csv2ecl", "-h"]) # nosec # The subparsers should exit "cleanly" with exit code 2 ("Incorrect usage") # when no more options are provided on the command line with pytest.raises(subprocess.CalledProcessError) as exception: - subprocess.check_output(["ecl2csv"]) # nosec + subprocess.check_output(["res2csv"]) # nosec assert exception.value.returncode == 2 with pytest.raises(subprocess.CalledProcessError) as exception: subprocess.check_output(["csv2ecl"]) # nosec @@ -34,7 +34,7 @@ def test_integration(): # why-does-pythons-argparse-use-an-error-code-of-2-for-systemexit for submodule in res2df.SUBMODULES: - helptext = subprocess.check_output(["ecl2csv", submodule, "-h"]) + helptext = subprocess.check_output(["res2csv", submodule, "-h"]) # Test that this option is hidden, the argument is only there # to support optional number of arguments in ERT forward models. assert "hiddenemptyplaceholders" not in str(helptext) diff --git a/tests/test_logging.py b/tests/test_logging.py index 6fd2c14a5..8281773ab 100644 --- a/tests/test_logging.py +++ b/tests/test_logging.py @@ -18,7 +18,7 @@ def test_default_logger_levels_and_split(capsys): """Verify that the intended usage of this logger have expected results""" - splitlogger = res2df.getLogger_ecl2csv("test_levels_split") + splitlogger = res2df.getLogger_res2csv("test_levels_split") splitlogger.debug("This DEBUG-text is not to be seen") captured = capsys.readouterr() @@ -40,7 +40,7 @@ def test_default_logger_levels_and_split(capsys): assert "ERROR-text" in captured.err # If output is written to stdout, all logs should go to stderr: - nosplit_logger = res2df.getLogger_ecl2csv( + nosplit_logger = res2df.getLogger_res2csv( "test_levels_nosplit", args_dict={"output": "-", "debug": True} ) nosplit_logger.debug("This DEBUG-text is to be seen in stderr") @@ -69,7 +69,7 @@ def test_default_logger_levels_and_split(capsys): "res2df_module, verbose, fileexport", itertools.product(res2df.SUBMODULES, [False, True], [True, False]), ) -def test_ecl2csv_logging(tmp_path, res2df_module, verbose, fileexport, mocker, capsys): +def test_res2csv_logging(tmp_path, res2df_module, verbose, fileexport, mocker, capsys): """Test that the command line client for each submodule logs correctly. 
Each submodule should write logs to stdout for INFO and WARNING messages @@ -87,9 +87,9 @@ def test_ecl2csv_logging(tmp_path, res2df_module, verbose, fileexport, mocker, c if res2df_module == "nnc": # There are no nnc's in EIGHTCELLS, so for that test # we need the REEK dataset: - commands = ["ecl2csv", res2df_module, REEK, "--output"] + commands = ["res2csv", res2df_module, REEK, "--output"] else: - commands = ["ecl2csv", res2df_module, EIGHTCELLS, "--output"] + commands = ["res2csv", res2df_module, EIGHTCELLS, "--output"] if fileexport: commands.append(str(tmp_path / "output.csv")) @@ -101,7 +101,7 @@ def test_ecl2csv_logging(tmp_path, res2df_module, verbose, fileexport, mocker, c mocker.patch("sys.argv", commands) - res2df.ecl2csv.main() + res2df.res2csv.main() captured = capsys.readouterr() stdout_output = captured.out stderr_output = captured.err @@ -124,8 +124,8 @@ def test_ecl2csv_logging(tmp_path, res2df_module, verbose, fileexport, mocker, c def test_repeated_logger_construction(capsys): """If we repeatedly call getLogger(), ensure handlers are not added on top""" - logger = res2df.getLogger_ecl2csv("nodouble") - logger = res2df.getLogger_ecl2csv("nodouble") + logger = res2df.getLogger_res2csv("nodouble") + logger = res2df.getLogger_res2csv("nodouble") logger.warning("Don't repeat me") captured = capsys.readouterr() assert captured.out.count("Don't repeat me") == 1 diff --git a/tests/test_nnc.py b/tests/test_nnc.py index be3eecd2b..9aaa3b817 100644 --- a/tests/test_nnc.py +++ b/tests/test_nnc.py @@ -8,7 +8,7 @@ import pandas as pd import pytest -from res2df import ecl2csv, faults, nnc, trans +from res2df import faults, nnc, res2csv, trans from res2df.eclfiles import EclFiles try: @@ -120,8 +120,8 @@ def test_df2ecl_editnnc(tmp_path): def test_main(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "nnc.csv" - mocker.patch("sys.argv", ["ecl2csv", "nnc", "-v", REEK, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "nnc", "-v", REEK, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -134,7 +134,7 @@ def test_main(tmp_path, mocker): def test_magic_stdout(): """Test that we can pipe the output into a dataframe""" result = subprocess.run( - ["ecl2csv", "nnc", "-o", "-", REEK], check=True, stdout=subprocess.PIPE + ["res2csv", "nnc", "-o", "-", REEK], check=True, stdout=subprocess.PIPE ) df_stdout = pd.read_csv(io.StringIO(result.stdout.decode())) assert not df_stdout.empty diff --git a/tests/test_pillars.py b/tests/test_pillars.py index 319a7ff78..c604a9009 100644 --- a/tests/test_pillars.py +++ b/tests/test_pillars.py @@ -5,7 +5,7 @@ import pandas as pd import pytest -from res2df import ecl2csv, grid, pillars +from res2df import grid, pillars, res2csv from res2df.eclfiles import EclFiles TESTDIR = Path(__file__).absolute().parent @@ -326,8 +326,8 @@ def test_compute_volumes(dframe, datestr, expectedrows): def test_main(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "pillars.csv" - mocker.patch("sys.argv", ["ecl2csv", "pillars", REEK, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "pillars", REEK, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" in disk_df @@ -338,7 +338,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -348,7 +348,7 @@ def 
test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of grouping @@ -362,7 +362,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -371,7 +371,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" in disk_df @@ -383,7 +383,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -393,7 +393,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of grouping @@ -404,7 +404,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -416,7 +416,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of region averaging @@ -433,7 +433,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -445,7 +445,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of region averaging @@ -462,7 +462,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -474,7 +474,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of region averaging @@ -489,7 +489,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -502,7 +502,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" not in disk_df # because of region averaging @@ -519,7 +519,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", REEK, "--region", @@ -531,7 +531,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" in disk_df @@ -545,7 +545,7 @@ def test_main(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "pillars", "-v", REEK, @@ -556,7 +556,7 @@ def test_main(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert "PILLAR" in disk_df diff --git a/tests/test_pvt.py b/tests/test_pvt.py index 9a975d938..21cd465a0 100644 --- a/tests/test_pvt.py +++ b/tests/test_pvt.py @@ -8,7 +8,7 @@ import pandas as pd import pytest -from res2df import csv2ecl, ecl2csv, pvt +from res2df import csv2ecl, pvt, res2csv from res2df.eclfiles import EclFiles try: @@ -395,9 +395,9 @@ def test_main(tmp_path, mocker): os.chdir(tmp_path) tmpcsvfile = tmp_path / "pvt.csv" 
mocker.patch( - "sys.argv", ["ecl2csv", "pvt", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] + "sys.argv", ["res2csv", "pvt", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(tmpcsvfile) @@ -428,8 +428,8 @@ def test_main(tmp_path, mocker): """, encoding="utf8", ) - mocker.patch("sys.argv", ["ecl2csv", "pvt", "-v", "pvto.inc", "-o", "pvto.csv"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "pvt", "-v", "pvto.inc", "-o", "pvto.csv"]) + res2csv.main() assert Path("pvto.csv").is_file() # Empty data: @@ -440,8 +440,8 @@ def test_main(tmp_path, mocker): """, encoding="utf8", ) - mocker.patch("sys.argv", ["ecl2csv", "pvt", "-v", "empty.inc", "-o", "empty.csv"]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "pvt", "-v", "empty.inc", "-o", "empty.csv"]) + res2csv.main() assert not Path("empty.csv").read_text(encoding="utf8").strip() @@ -449,14 +449,14 @@ def test_magic_stdout(tmp_path): """Test writing dataframes and include files to stdout""" os.chdir(tmp_path) result = subprocess.run( - ["ecl2csv", "pvt", "-o", "-", EIGHTCELLS], check=True, stdout=subprocess.PIPE + ["res2csv", "pvt", "-o", "-", EIGHTCELLS], check=True, stdout=subprocess.PIPE ) df_stdout = pd.read_csv(io.StringIO(result.stdout.decode())) assert not df_stdout.empty # Verbose options should not ruin it: result = subprocess.run( - ["ecl2csv", "pvt", "--verbose", "-o", "-", EIGHTCELLS], + ["res2csv", "pvt", "--verbose", "-o", "-", EIGHTCELLS], check=True, stdout=subprocess.PIPE, ) diff --git a/tests/test_rft.py b/tests/test_rft.py index e812eb018..a35e8ad2f 100644 --- a/tests/test_rft.py +++ b/tests/test_rft.py @@ -7,7 +7,7 @@ import pandas as pd import pytest -from res2df import ecl2csv, rft +from res2df import res2csv, rft from res2df.eclfiles import EclFiles TESTDIR = Path(__file__).absolute().parent @@ -489,8 +489,8 @@ def test_rft2df(): def test_main_subparsers(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / ".TMP-rft.csv" - mocker.patch("sys.argv", ["ecl2csv", "rft", EIGHTCELLS, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "rft", EIGHTCELLS, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -509,7 +509,7 @@ def test_main_subparsers(tmp_path, mocker): str(tmpcsvfile), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert not disk_df.empty @@ -519,9 +519,9 @@ def test_main_debugmode(tmp_path, mocker): """Test debug mode""" os.chdir(tmp_path) mocker.patch( - "sys.argv", ["ecl2csv", "rft", "--debug", EIGHTCELLS, "-o", "indebugmode.csv"] + "sys.argv", ["res2csv", "rft", "--debug", EIGHTCELLS, "-o", "indebugmode.csv"] ) - ecl2csv.main() + res2csv.main() # Extra files emitted in debug mode: assert not pd.read_csv("con.csv").empty assert Path("seg.csv").exists() # too simple example data, no segments. 
diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index 5ce131bf8..fc7b7f38c 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -8,7 +8,7 @@ import pandas as pd import pytest -from res2df import csv2ecl, ecl2csv, inferdims, satfunc +from res2df import csv2ecl, inferdims, res2csv, satfunc from res2df.eclfiles import EclFiles try: @@ -280,9 +280,9 @@ def test_sgof_satnuminferrer(tmp_path, mocker): sgoffile = "__sgof_tmp.txt" Path(sgoffile).write_text(sgofstr, encoding="utf8") mocker.patch( - "sys.argv", ["ecl2csv", "satfunc", "-v", sgoffile, "-o", sgoffile + ".csv"] + "sys.argv", ["res2csv", "satfunc", "-v", sgoffile, "-o", sgoffile + ".csv"] ) - ecl2csv.main() + res2csv.main() parsed_sgof = pd.read_csv(sgoffile + ".csv") assert len(parsed_sgof["SATNUM"].unique()) == 3 @@ -657,8 +657,8 @@ def test_multiple_keywords_family2(): def test_main_subparsers(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "satfunc.csv" - mocker.patch("sys.argv", ["ecl2csv", "satfunc", EIGHTCELLS, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "satfunc", EIGHTCELLS, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -669,7 +669,7 @@ def test_main_subparsers(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "satfunc", EIGHTCELLS, "--keywords", @@ -678,7 +678,7 @@ def test_main_subparsers(tmp_path, mocker): str(tmpcsvfile2), ], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile2).is_file() disk_df = pd.read_csv(str(tmpcsvfile2)) diff --git a/tests/test_summary.py b/tests/test_summary.py index 8e6e0c5b6..a93670727 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -10,7 +10,7 @@ import yaml from resdata.summary import Summary -from res2df import csv2ecl, ecl2csv, summary +from res2df import csv2ecl, res2csv, summary from res2df.eclfiles import EclFiles from res2df.summary import ( _df2pyarrow, @@ -130,14 +130,14 @@ def test_summary2df_dates(): @pytest.mark.integration -def test_ecl2csv_summary(tmp_path, mocker): - """Test that the command line utility ecl2csv is installed and +def test_res2csv_summary(tmp_path, mocker): + """Test that the command line utility res2csv is installed and works with summary data""" tmpcsvfile = tmp_path / "sum.csv" mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "summary", "-v", REEK, @@ -149,7 +149,7 @@ def test_ecl2csv_summary(tmp_path, mocker): "2003-01-02", ], ) - ecl2csv.main() + res2csv.main() disk_df = pd.read_csv(tmpcsvfile) assert len(disk_df) == 97 # Includes timestamps assert str(disk_df["DATE"].values[0]) == "2002-01-02 00:00:00" @@ -159,7 +159,7 @@ def test_ecl2csv_summary(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "summary", REEK, "-o", @@ -172,7 +172,7 @@ def test_ecl2csv_summary(tmp_path, mocker): "2003-01-02", ], ) - ecl2csv.main() + res2csv.main() disk_df = pd.read_csv(tmpcsvfile) assert len(disk_df) == 366 # Pandas' csv export writes datetime64 as pure date @@ -198,9 +198,9 @@ def test_paramsupport(tmp_path, mocker): parameterstxt.unlink() parameterstxt.write_text("FOO 1\nBAR 3", encoding="utf-8") mocker.patch( - "sys.argv", ["ecl2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile), "-p"] + "sys.argv", ["res2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile), "-p"] ) - ecl2csv.main() + res2csv.main() disk_df = pd.read_csv(tmpcsvfile) assert "FOPT" in disk_df assert "FOO" in disk_df @@ -213,9 +213,9 @@ def test_paramsupport(tmp_path, mocker): 
parametersyml.unlink() parametersyml.write_text(yaml.dump({"FOO": 1, "BAR": 3}), encoding="utf-8") mocker.patch( - "sys.argv", ["ecl2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile), "-p"] + "sys.argv", ["res2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile), "-p"] ) - ecl2csv.main() + res2csv.main() disk_df = pd.read_csv(str(tmpcsvfile)) assert "FOPT" in disk_df assert "FOO" in disk_df @@ -258,7 +258,7 @@ def test_paramsupport_explicitfile(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "summary", "--verbose", EIGHTCELLS, @@ -268,7 +268,7 @@ def test_paramsupport_explicitfile(tmp_path, mocker): str(randomparamfile), # Absolute filepath ], ) - ecl2csv.main() + res2csv.main() assert pd.read_csv(tmpcsvfile)["FOO"].unique() == ["barrbarr"] assert pd.read_csv(tmpcsvfile)["COM"].unique() == [1234] @@ -278,7 +278,7 @@ def test_paramsupport_explicitfile(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2csv", + "res2csv", "summary", "--verbose", EIGHTCELLS, @@ -288,15 +288,15 @@ def test_paramsupport_explicitfile(tmp_path, mocker): Path(randomparamfile).name, # A relative filepath ], ) - ecl2csv.main() + res2csv.main() assert "FOO" not in pd.read_csv("smry_noparams.csv") def test_main_subparser(tmp_path, mocker): """Test command line interface with output to both CSV and arrow/feather.""" tmpcsvfile = tmp_path / "sum.csv" - mocker.patch("sys.argv", ["ecl2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "summary", EIGHTCELLS, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -307,9 +307,9 @@ def test_main_subparser(tmp_path, mocker): tmparrowfile = tmp_path / "sum.arrow" mocker.patch( "sys.argv", - ["ecl2csv", "summary", "--arrow", EIGHTCELLS, "-o", str(tmparrowfile)], + ["res2csv", "summary", "--arrow", EIGHTCELLS, "-o", str(tmparrowfile)], ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_arraydf = pyarrow.feather.read_table(tmparrowfile).to_pandas() assert "FOPT" in disk_arraydf @@ -319,7 +319,7 @@ def test_main_subparser(tmp_path, mocker): mocker.patch( "sys.argv", ["ecl2arrow", "summary", EIGHTCELLS, "-o", str(tmparrowfile_alt)] ) - ecl2csv.main() + res2csv.main() pd.testing.assert_frame_equal( disk_arraydf, pyarrow.feather.read_table(str(tmparrowfile_alt)).to_pandas() ) @@ -327,7 +327,7 @@ def test_main_subparser(tmp_path, mocker): # Not possible (yet?) 
to write arrow to stdout: mocker.patch("sys.argv", ["ecl2arrow", "summary", EIGHTCELLS, "-o", "-"]) with pytest.raises(SystemExit): - ecl2csv.main() + res2csv.main() def test_datenormalization(): diff --git a/tests/test_trans.py b/tests/test_trans.py index 9424996bb..5bd819235 100644 --- a/tests/test_trans.py +++ b/tests/test_trans.py @@ -13,7 +13,7 @@ import pandas as pd -from res2df import ecl2csv, trans +from res2df import res2csv, trans from res2df.eclfiles import EclFiles TESTDIR = Path(__file__).absolute().parent @@ -97,9 +97,9 @@ def test_main(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / "trans.csv" mocker.patch( - "sys.argv", ["ecl2csv", "trans", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] + "sys.argv", ["res2csv", "trans", "-v", EIGHTCELLS, "-o", str(tmpcsvfile)] ) - ecl2csv.main() + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) assert not disk_df.empty diff --git a/tests/test_wcon.py b/tests/test_wcon.py index 4bc00e71d..0978a1dcd 100644 --- a/tests/test_wcon.py +++ b/tests/test_wcon.py @@ -7,7 +7,7 @@ import pandas as pd import pytest -from res2df import ecl2csv, wcon +from res2df import res2csv, wcon from res2df.eclfiles import EclFiles try: @@ -219,8 +219,8 @@ def test_tstep(): def test_main_subparsers(tmp_path, mocker): """Test command line interface""" tmpcsvfile = tmp_path / ".TMP-wcondf.csv" - mocker.patch("sys.argv", ["ecl2csv", "wcon", EIGHTCELLS, "-o", str(tmpcsvfile)]) - ecl2csv.main() + mocker.patch("sys.argv", ["res2csv", "wcon", EIGHTCELLS, "-o", str(tmpcsvfile)]) + res2csv.main() assert Path(tmpcsvfile).is_file() disk_df = pd.read_csv(str(tmpcsvfile)) @@ -230,7 +230,7 @@ def test_main_subparsers(tmp_path, mocker): def test_magic_stdout(): """Test that we can pipe the output into a dataframe""" result = subprocess.run( - ["ecl2csv", "wcon", "-v", "-o", "-", EIGHTCELLS], + ["res2csv", "wcon", "-v", "-o", "-", EIGHTCELLS], check=True, stdout=subprocess.PIPE, ) From 25dad26096886f25be26289d49895608f001cf98 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 14 Nov 2023 10:37:02 +0100 Subject: [PATCH 03/68] csv2ecl->csv2res --- README.md | 2 +- docs/{csv2ecl.rst => csv2res.rst} | 10 +++++----- docs/index.rst | 2 +- docs/usage/equil.rst | 2 +- docs/usage/pvt.rst | 2 +- docs/usage/satfunc.rst | 2 +- res2df/__init__.py | 4 ++-- res2df/config_jobs/{CSV2ECL => CSV2RES} | 2 +- res2df/{csv2ecl.py => csv2res.py} | 8 ++++---- res2df/summary.py | 2 +- setup.py | 2 +- tests/test_equil.py | 8 ++++---- tests/test_ert_hooks.py | 14 +++++++------- tests/test_hook_implementations.py | 2 +- tests/test_integration.py | 4 ++-- tests/test_pvt.py | 10 +++++----- tests/test_satfunc.py | 10 +++++----- tests/test_summary.py | 14 +++++++------- 18 files changed, 50 insertions(+), 50 deletions(-) rename docs/{csv2ecl.rst => csv2res.rst} (85%) rename res2df/config_jobs/{CSV2ECL => CSV2RES} (79%) rename res2df/{csv2ecl.py => csv2res.py} (93%) diff --git a/README.md b/README.md index 56a2eaf80..e548974bc 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ files (.UNSMRY), one for completion data etc. There is a command line frontend for almost all functionality, called `res2csv`, which converts the Eclipse data to DataFrames, and then dumps -the dataframes to files in CSV format, and a similar `csv2ecl` for the +the dataframes to files in CSV format, and a similar `csv2res` for the reverse operation. 
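An illustrative round trip with these two console scripts, invoked here through ``subprocess`` the same way as in this patch series' integration tests (the deck and file names are placeholders):

.. code-block:: python

    import subprocess

    # Dump EQUIL data from a deck to CSV with res2csv ...
    subprocess.run(
        ["res2csv", "equil", "MYDECK.DATA", "--output", "equil.csv"], check=True
    )
    # ... and regenerate an include file from the CSV with csv2res.
    subprocess.run(
        ["csv2res", "equil", "equil.csv", "--output", "equil.inc"], check=True
    )
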
For documentation, see diff --git a/docs/csv2ecl.rst b/docs/csv2res.rst similarity index 85% rename from docs/csv2ecl.rst rename to docs/csv2res.rst index 4cb8ea1f8..f89291aeb 100644 --- a/docs/csv2ecl.rst +++ b/docs/csv2res.rst @@ -1,4 +1,4 @@ -csv2ecl +csv2res ======= Some of the modules inside res2df is able to write Eclipse include files @@ -7,9 +7,9 @@ to produce Eclipse input data in any application that can write CSV files, and use this tool to convert it into Eclipse include files, or it can facilitate operations/manipulations of an existing deck using any tool that can work on CSV files, by first running res2csv on an input file, -transforming it, and writing back using csv2ecl. +transforming it, and writing back using csv2res. -Mandatory argument for csv2ecl is +Mandatory argument for csv2res is always the submodule responsible, a CSV file, and an ``--output`` option to specify which include file to write to. If you want output to your terminal, use ``-`` as the output filename. Unless @@ -18,5 +18,5 @@ supported keywords for a submodule which is also found in the CSV file provided, will be dumped to output file. .. argparse:: - :ref: res2df.csv2ecl.get_parser - :prog: csv2ecl + :ref: res2df.csv2res.get_parser + :prog: csv2res diff --git a/docs/index.rst b/docs/index.rst index eddab93f3..c80403a24 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -12,7 +12,7 @@ data format. introduction usage res2csv - csv2ecl + csv2res installation contribution history diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index ae1c41854..6004587f4 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -61,5 +61,5 @@ Eclipse from your modified data by issuing equil.df2ecl(dframe, filename="solution.inc") -The last step can also be done using the ``csv2ecl`` command line utility +The last step can also be done using the ``csv2res`` command line utility if you dump to CSV from your Python code instead. diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index fac399cb6..fedd33886 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -82,6 +82,6 @@ check which keywords have been written out, compared to what you gave in to `res2df.pvt` above. Any non-supported keywords will get lost in the import phase and need to be catered for outside res2df. -The last step can also be done using the ``csv2ecl`` command line utility +The last step can also be done using the ``csv2res`` command line utility if you dump to CSV from your Python code instead. diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index eec07d6e1..44b605733 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -61,7 +61,7 @@ include file can be generated either with the Python API .. code-block:: console - csv2ecl satfunc satfunc.csv --output relperm.inc --keywords SWOF SGOF --verbose + csv2res satfunc satfunc.csv --output relperm.inc --keywords SWOF SGOF --verbose which should give a file ``relperm.inc`` that can be parsed by Eclipse. 
The command above will only pick the keywords ``SWOF`` and ``SGOF`` (in the case there are diff --git a/res2df/__init__.py b/res2df/__init__.py index a7623df92..f4244fc5a 100644 --- a/res2df/__init__.py +++ b/res2df/__init__.py @@ -38,7 +38,7 @@ def getLogger_res2csv( module_name: str = "res2df", args_dict: Optional[Dict[str, Union[str, bool]]] = None ) -> logging.Logger: # pylint: disable=invalid-name - """Provide a custom logger for res2csv and csv2ecl + """Provide a custom logger for res2csv and csv2res Logging output is by default split by logging levels (split between WARNING and ERROR) to stdout and stderr, each log occurs in only one of the streams. @@ -91,5 +91,5 @@ def getLogger_res2csv( return logger -for submodule in SUBMODULES + ["res2csv", "csv2ecl"]: +for submodule in SUBMODULES + ["res2csv", "csv2res"]: importlib.import_module("res2df." + submodule) diff --git a/res2df/config_jobs/CSV2ECL b/res2df/config_jobs/CSV2RES similarity index 79% rename from res2df/config_jobs/CSV2ECL rename to res2df/config_jobs/CSV2RES index 65890bc2a..1f3d42955 100644 --- a/res2df/config_jobs/CSV2ECL +++ b/res2df/config_jobs/CSV2RES @@ -1,4 +1,4 @@ -EXECUTABLE csv2ecl +EXECUTABLE csv2res ARGLIST <SUBCOMMAND> "--verbose" "--output" <OUTPUT> <CSVFILE> diff --git a/res2df/csv2ecl.py b/res2df/csv2res.py similarity index 93% rename from res2df/csv2ecl.py rename to res2df/csv2res.py index 279ba4c69..6c3b40904 100644 --- a/res2df/csv2ecl.py +++ b/res2df/csv2res.py @@ -11,16 +11,16 @@ # String constants in use for generating ERT forward model documentation: DESCRIPTION: str = """Convert CSV files into Eclipse include files. Uses the command -line utility ``csv2ecl``. Run ``csv2ecl --help`` to see which subcommands are supported. +line utility ``csv2res``. Run ``csv2res --help`` to see which subcommands are supported. No options other than the output file are possible when used directly as a forward model. When writing synthetic summary files, the ECLBASE with no filename suffix is expected as the OUTPUT argument.""" CATEGORY: str = "utility.eclipse" EXAMPLES: str = ( "``FORWARD_MODEL " - "CSV2ECL(<SUBCOMMAND>=equil, <CSVFILE>=equil.csv, " + "CSV2RES(<SUBCOMMAND>=equil, <CSVFILE>=equil.csv, " "<OUTPUT>=eclipse/include/equil.inc)``" - "CSV2ECL(<SUBCOMMAND>=summary, <CSVFILE>=summary-monthly.csv, " + "CSV2RES(<SUBCOMMAND>=summary, <CSVFILE>=summary-monthly.csv, " "<OUTPUT>=eclipse/model/MONTHLYSUMMARY)``" ) @@ -30,7 +30,7 @@ def get_parser() -> argparse.ArgumentParser: parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( - "csv2ecl (" + __version__ + ") is a command line frontend to res2df. " + "csv2res (" + __version__ + ") is a command line frontend to res2df. 
" "Documentation at https://equinor.github.io/res2df/ " ), ) diff --git a/res2df/summary.py b/res2df/summary.py index 73fbbb969..ab9cf4569 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -923,7 +923,7 @@ def summary_main(args) -> None: def summary_reverse_main(args) -> None: - """Entry point for usage with "csv2ecl summary" on the command line""" + """Entry point for usage with "csv2res summary" on the command line""" logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) diff --git a/setup.py b/setup.py index 52f489818..ceed4c504 100644 --- a/setup.py +++ b/setup.py @@ -67,7 +67,7 @@ zip_safe=False, entry_points={ "console_scripts": [ - "csv2ecl=res2df.csv2ecl:main", + "csv2res=res2df.csv2res:main", "res2csv=res2df.res2csv:main", "ecl2arrow=res2df.res2csv:main", ], diff --git a/tests/test_equil.py b/tests/test_equil.py index a534d258d..e886c6939 100644 --- a/tests/test_equil.py +++ b/tests/test_equil.py @@ -8,7 +8,7 @@ import pandas as pd import pytest -from res2df import csv2ecl, equil, res2csv +from res2df import csv2res, equil, res2csv from res2df.eclfiles import EclFiles try: @@ -550,9 +550,9 @@ def test_main_subparser(tmp_path, mocker): # Test the reverse operation: mocker.patch( - "sys.argv", ["csv2ecl", "equil", "-v", "--output", "equil.inc", tmpcsvfile] + "sys.argv", ["csv2res", "equil", "-v", "--output", "equil.inc", tmpcsvfile] ) - csv2ecl.main() + csv2res.main() # NB: cvs2ecl does not output the phase configuration! phases = "WATER\nGAS\nOIL\n\n" ph_equil_inc = Path("phasesequil.inc") @@ -567,7 +567,7 @@ def test_main_subparser(tmp_path, mocker): # Test via stdout: result = subprocess.run( - ["csv2ecl", "equil", "--output", "-", tmpcsvfile], + ["csv2res", "equil", "--output", "-", tmpcsvfile], stdout=subprocess.PIPE, check=True, ) diff --git a/tests/test_ert_hooks.py b/tests/test_ert_hooks.py index 13bd9aa56..db2999e0b 100644 --- a/tests/test_ert_hooks.py +++ b/tests/test_ert_hooks.py @@ -53,7 +53,7 @@ def test_res2csv_through_ert(tmp_path): "RUNPATH .", ] - csv2ecl_subcommands = ["equil", "pvt", "satfunc"] + csv2res_subcommands = ["equil", "pvt", "satfunc"] for subcommand in res2df.SUBMODULES: ert_config.append( @@ -79,13 +79,13 @@ def test_res2csv_through_ert(tmp_path): '=satfunc-swof.csv, ="--keywords", ="SWOF")' ) - for subcommand in csv2ecl_subcommands: + for subcommand in csv2res_subcommands: ert_config.append( - f"FORWARD_MODEL CSV2ECL(={subcommand}, " + f"FORWARD_MODEL CSV2RES(={subcommand}, " f"={subcommand}.csv, ={subcommand}.inc)" ) ert_config.append( - "FORWARD_MODEL CSV2ECL(=summary, =summary-yearly.csv, " + "FORWARD_MODEL CSV2RES(=summary, =summary-yearly.csv, " "=SUMYEARLY)" ) @@ -105,7 +105,7 @@ def test_res2csv_through_ert(tmp_path): assert set(pd.read_csv("pvt-custom.csv")["KEYWORD"]) == set(["PVTO"]) assert set(pd.read_csv("satfunc-swof.csv")["KEYWORD"]) == set(["SWOF"]) - for subcommand in csv2ecl_subcommands: + for subcommand in csv2res_subcommands: assert Path(subcommand + ".inc").is_file() @@ -118,13 +118,13 @@ def test_job_documentation(): == ert.shared.plugins.plugin_response.PluginResponse ) assert ( - type(jobs.job_documentation("CSV2ECL")) + type(jobs.job_documentation("CSV2RES")) == ert.shared.plugins.plugin_response.PluginResponse ) else: assert jobs.job_documentation("RES2CSV") is None - assert jobs.job_documentation("CSV2ECL") is None + assert jobs.job_documentation("CSV2RES") is None assert jobs.job_documentation("foobar") is None diff --git a/tests/test_hook_implementations.py 
b/tests/test_hook_implementations.py index 8bde36253..f03471707 100644 --- a/tests/test_hook_implementations.py +++ b/tests/test_hook_implementations.py @@ -24,7 +24,7 @@ def fixture_expected_jobs(path_to_res2df: Path) -> Dict[str, Path]: """Dictionary of installed jobs with location to job configuration""" expected_job_names = [ "RES2CSV", - "CSV2ECL", + "CSV2RES", ] return {name: path_to_res2df / "config_jobs" / name for name in expected_job_names} diff --git a/tests/test_integration.py b/tests/test_integration.py index 9d0953f48..2205e5061 100644 --- a/tests/test_integration.py +++ b/tests/test_integration.py @@ -20,7 +20,7 @@ def test_integration(): """Test that all endpoints that are to be installed are installed""" assert subprocess.check_output(["res2csv", "-h"]) # nosec - assert subprocess.check_output(["csv2ecl", "-h"]) # nosec + assert subprocess.check_output(["csv2res", "-h"]) # nosec # The subparsers should exit "cleanly" with exit code 2 ("Incorrect usage") # when no more options are provided on the command line @@ -28,7 +28,7 @@ def test_integration(): subprocess.check_output(["res2csv"]) # nosec assert exception.value.returncode == 2 with pytest.raises(subprocess.CalledProcessError) as exception: - subprocess.check_output(["csv2ecl"]) # nosec + subprocess.check_output(["csv2res"]) # nosec assert exception.value.returncode == 2 # ref: https://stackoverflow.com/questions/23714542/ \ # why-does-pythons-argparse-use-an-error-code-of-2-for-systemexit diff --git a/tests/test_pvt.py b/tests/test_pvt.py index 21cd465a0..8f707a182 100644 --- a/tests/test_pvt.py +++ b/tests/test_pvt.py @@ -8,7 +8,7 @@ import pandas as pd import pytest -from res2df import csv2ecl, pvt, res2csv +from res2df import csv2res, pvt, res2csv from res2df.eclfiles import EclFiles try: @@ -408,9 +408,9 @@ def test_main(tmp_path, mocker): # Write back to include file: incfile = tmp_path / "pvt.inc" mocker.patch( - "sys.argv", ["csv2ecl", "pvt", "-v", str(tmpcsvfile), "-o", str(incfile)] + "sys.argv", ["csv2res", "pvt", "-v", str(tmpcsvfile), "-o", str(incfile)] ) - csv2ecl.main() + csv2res.main() # Reparse the include file on disk back to dataframe # and check dataframe equality @@ -464,10 +464,10 @@ def test_magic_stdout(tmp_path): # pylint: disable=no-member # false positive on Dataframes assert not df_stdout.empty - # Pipe back to csv2ecl: + # Pipe back to csv2res: df_stdout.to_csv("pvt.csv", index=False) result = subprocess.run( - ["csv2ecl", "pvt", "--verbose", "-o", "-", "pvt.csv"], + ["csv2res", "pvt", "--verbose", "-o", "-", "pvt.csv"], check=True, stdout=subprocess.PIPE, ) diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index fc7b7f38c..cf1940432 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -8,7 +8,7 @@ import pandas as pd import pytest -from res2df import csv2ecl, inferdims, res2csv, satfunc +from res2df import csv2res, inferdims, res2csv, satfunc from res2df.eclfiles import EclFiles try: @@ -685,7 +685,7 @@ def test_main_subparsers(tmp_path, mocker): assert set(disk_df["KEYWORD"].unique()) == {"SWOF"} -def test_csv2ecl(tmp_path, mocker): +def test_csv2res(tmp_path, mocker): """Test command line interface for csv to Eclipse include files""" os.chdir(tmp_path) tmpcsvfile = "satfunc.csv" @@ -695,8 +695,8 @@ def test_csv2ecl(tmp_path, mocker): data=[["SWOF", 0.0, 0.0, 1.0, 0.0], ["SWOF", 1.0, 1.0, 0.0, 0.0]], ) swof_df.to_csv(tmpcsvfile, index=False) - mocker.patch("sys.argv", ["csv2ecl", "satfunc", "--output", "swof.inc", tmpcsvfile]) - csv2ecl.main() + 
mocker.patch("sys.argv", ["csv2res", "satfunc", "--output", "swof.inc", tmpcsvfile]) + csv2res.main() pd.testing.assert_frame_equal( satfunc.df(Path("swof.inc").read_text(encoding="utf8")).drop( "SATNUM", axis="columns" @@ -707,7 +707,7 @@ def test_csv2ecl(tmp_path, mocker): # Test writing to stdout: result = subprocess.run( - ["csv2ecl", "satfunc", "--output", "-", tmpcsvfile], + ["csv2res", "satfunc", "--output", "-", tmpcsvfile], stdout=subprocess.PIPE, check=True, ) diff --git a/tests/test_summary.py b/tests/test_summary.py index a93670727..e6f857896 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -10,7 +10,7 @@ import yaml from resdata.summary import Summary -from res2df import csv2ecl, res2csv, summary +from res2df import csv2res, res2csv, summary from res2df.eclfiles import EclFiles from res2df.summary import ( _df2pyarrow, @@ -1197,8 +1197,8 @@ def test_df2eclsum_errors(): @pytest.mark.integration -def test_csv2ecl_summary(tmp_path, mocker): - """Check that we can call df2eclsum through the csv2ecl command line +def test_csv2res_summary(tmp_path, mocker): + """Check that we can call df2eclsum through the csv2res command line utility""" dframe = pd.DataFrame( [ @@ -1211,7 +1211,7 @@ def test_csv2ecl_summary(tmp_path, mocker): mocker.patch( "sys.argv", [ - "csv2ecl", + "csv2res", "summary", "-v", "summary.csv", @@ -1219,7 +1219,7 @@ def test_csv2ecl_summary(tmp_path, mocker): "SYNTHETIC", ], ) - csv2ecl.main() + csv2res.main() assert Path("SYNTHETIC.UNSMRY").is_file() assert Path("SYNTHETIC.SMSPEC").is_file() @@ -1228,7 +1228,7 @@ def test_csv2ecl_summary(tmp_path, mocker): mocker.patch( "sys.argv", [ - "csv2ecl", + "csv2res", "summary", "--debug", "summary.csv", @@ -1236,6 +1236,6 @@ def test_csv2ecl_summary(tmp_path, mocker): str(Path("foo") / Path("SYNTHETIC")), ], ) - csv2ecl.main() + csv2res.main() assert ("foo" / Path("SYNTHETIC.UNSMRY")).is_file() assert ("foo" / Path("SYNTHETIC.SMSPEC")).is_file() From 16b4367c22ff4f96892e46a9aeb03ba010a4ed6d Mon Sep 17 00:00:00 2001 From: "Yngve S. 
Kristiansen" Date: Tue, 14 Nov 2023 12:04:06 +0100 Subject: [PATCH 04/68] eclfile -> resdatafile --- README.md | 9 +- docs/introduction.rst | 12 +- docs/usage/compdat.rst | 8 +- docs/usage/equil.rst | 6 +- docs/usage/grid.rst | 26 ++-- docs/usage/nnc.rst | 16 +-- docs/usage/pillars.rst | 8 +- docs/usage/pvt.rst | 8 +- docs/usage/satfunc.rst | 8 +- docs/usage/summary.rst | 8 +- docs/usage/trans.rst | 20 +-- docs/usage/wcon.rst | 8 +- res2df/__init__.py | 2 +- res2df/compdat.py | 14 +- res2df/equil.py | 14 +- res2df/faults.py | 12 +- res2df/fipreports.py | 10 +- res2df/grid.py | 79 +++++----- res2df/gruptree.py | 12 +- res2df/inferdims.py | 8 +- res2df/nnc.py | 26 ++-- res2df/parameters.py | 8 +- res2df/pillars.py | 14 +- res2df/pvt.py | 10 +- res2df/{eclfiles.py => resdatafiles.py} | 4 +- res2df/rft.py | 18 +-- res2df/satfunc.py | 14 +- res2df/summary.py | 38 ++--- res2df/trans.py | 20 +-- res2df/vfp/_vfp.py | 32 ++--- res2df/wcon.py | 12 +- res2df/wellcompletiondata.py | 24 ++-- res2df/wellconnstatus.py | 10 +- tests/test_common.py | 4 +- tests/test_compdat.py | 88 ++++++------ tests/test_eclfiles.py | 46 +++--- tests/test_equil.py | 16 ++- tests/test_faults.py | 14 +- tests/test_fipreports.py | 4 +- tests/test_grid.py | 183 +++++++++++++----------- tests/test_gruptree.py | 28 ++-- tests/test_init.py | 4 +- tests/test_nnc.py | 28 ++-- tests/test_parameters.py | 34 ++--- tests/test_pillars.py | 20 +-- tests/test_pvt.py | 32 ++--- tests/test_rft.py | 12 +- tests/test_satfunc.py | 14 +- tests/test_summary.py | 92 ++++++------ tests/test_trans.py | 36 ++--- tests/test_userapi.py | 32 ++--- tests/test_vfp.py | 54 +++---- tests/test_wcon.py | 16 +-- tests/test_wellcompletiondata.py | 30 ++-- tests/test_wellconnstatus.py | 10 +- tests/test_welopen.py | 10 +- tests/test_wlist.py | 4 +- tests/test_zonemap.py | 18 +-- 58 files changed, 691 insertions(+), 656 deletions(-) rename res2df/{eclfiles.py => resdatafiles.py} (98%) diff --git a/README.md b/README.md index e548974bc..7658f2e30 100644 --- a/README.md +++ b/README.md @@ -9,18 +9,17 @@ # res2df res2df is a Pandas DataFrame wrapper around libecl and opm.io, which -are used to access binary files outputted by the reservoir simulator -Eclipse, or its input files --- or any other tool outputting to the same -data format. +are used to access binary files outputted by reservoir simulators, +or its input files --- or any other tool outputting to the same data format. -The reverse operation, from a Pandas DataFrame to Eclipse include files, +The reverse operation, from a Pandas DataFrame to reservoir include files, is provided for some of the modules. The package consists of a module pr. datatype, e.g. one module for summary files (.UNSMRY), one for completion data etc. There is a command line frontend for almost all functionality, called -`res2csv`, which converts the Eclipse data to DataFrames, and then dumps +`res2csv`, which converts the reservoir data to DataFrames, and then dumps the dataframes to files in CSV format, and a similar `csv2res` for the reverse operation. diff --git a/docs/introduction.rst b/docs/introduction.rst index b26e87d85..79a55b611 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -33,9 +33,9 @@ a Pandas Dataframe. 
import res2df - eclfiles = res2df.EclFiles("MYECLDECK.DATA") - smry = res2df.summary.df(eclfiles, column_keys="F*", time_index="monthly") - hc_contacts = res2df.pillars.df(eclfiles, rstdates="all") + resdatafiles = res2df.ResdataFiles("MYECLDECK.DATA") + smry = res2df.summary.df(resdatafiles, column_keys="F*", time_index="monthly") + hc_contacts = res2df.pillars.df(resdatafiles, rstdates="all") See the API for more documentation and possibilities for each module. @@ -169,11 +169,11 @@ associated data in a dataframe format. More documentation on :doc:`usage/wcon`. -``eclfiles`` -^^^^^^^^^^^^ +``resdatafiles`` +^^^^^^^^^^^^^^^^ This is an internal helper module in order to represent finished or -unfinished Eclipse decks and runs. The class EclFiles can cache binary +unfinished Eclipse decks and runs. The class ResdataFiles can cache binary files that are recently read, and is able to locate the various output files based on the basename or the `.DATA` filename. diff --git a/docs/usage/compdat.rst b/docs/usage/compdat.rst index f7850fb98..7cbe1ebcc 100644 --- a/docs/usage/compdat.rst +++ b/docs/usage/compdat.rst @@ -7,13 +7,13 @@ Additionally, it will parse WELOPEN statements and emit new COMPDAT statements from the actions in WELOPEN. .. - compdat.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/compdat.csv', index=False) + compdat.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/compdat.csv', index=False) .. code-block:: python - from res2df import compdat, EclFiles + from res2df import compdat, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = compdat.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = compdat.df(resdatafiles) .. csv-table:: Example COMPDAT table :file: compdat.csv diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index 6004587f4..eebacb641 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -9,9 +9,9 @@ Supported keywords are ``EQUIL``, ``RSVD``, ``RVVD``, ``PBVD`` and .. code-block:: python - from res2df import equil, EclFiles + from res2df import equil, ResdataFiles - dframe = equil.df(EclFiles('MYECLDECK.DATA')) + dframe = equil.df(ResdataFiles('MYECLDECK.DATA')) Which will provide a dataframe similar to the example below. Note that the column `Z` is used both for datum depth and the depth values in ``RSVD`` tables. The @@ -19,7 +19,7 @@ amount of columns obtained depends on the input dataset, and should be possible to link up with the Eclipse documentation. API doc: :func:`res2df.equil.df` .. - dframe = equil.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) + dframe = equil.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) dframe[['EQLNUM', 'KEYWORD', 'Z', 'PRESSURE', 'OWC', 'GOC', 'RS']]\ .to_csv(index=False)) diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst index 3df33b081..633ef27a9 100644 --- a/docs/usage/grid.rst +++ b/docs/usage/grid.rst @@ -9,16 +9,16 @@ Typical usage .. code-block:: python - from res2df import grid, EclFiles + from res2df import grid, ResdataFiles - eclfiles = EclFiles('MYDATADECK.DATA') - dframe = grid.df(eclfiles, rstdates='last') + resdatafiles = ResdataFiles('MYDATADECK.DATA') + dframe = grid.df(resdatafiles, rstdates='last') where the API is documented at :func:`res2df.grid.df`. .. 
- eclfiles = EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA') - grid.df(eclfiles).sample(10).to_csv('docs/usage/grid.csv', float_format="%.2f", index=False) + resdatafiles = ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA') + grid.df(resdatafiles).sample(10).to_csv('docs/usage/grid.csv', float_format="%.2f", index=False) .. csv-table:: Example grid table :file: grid.csv @@ -110,10 +110,10 @@ the whereabouts of the file: .. code-block:: python - from res2df import grid, EclFiles, common + from res2df import grid, ResdataFiles, common - eclfiles = EclFiles("'MYDATADECK.DATA") - dframe = grid.df(eclfiles) + resdatafiles = ResdataFiles("'MYDATADECK.DATA") + dframe = grid.df(resdatafiles) # The filename with layers is relative to DATA-file location # or an absolute path. subzonemap = res2df.common.parse_zonemapfile("subzones.lyr") @@ -155,21 +155,21 @@ it to FIPNUM 5. This can be accomplished using .. code-block:: python - from res2df import grid, EclFiles, common + from res2df import grid, ResdataFiles, common - eclfiles = EclFiles("'MYDATADECK.DATA") - dframe = grid.df(eclfiles) + resdatafiles = ResdataFiles("'MYDATADECK.DATA") + dframe = grid.df(resdatafiles) # Change FIPNUM 6 to FIPNUM 5: rows_to_touch = dframe["FIPNUM"] == 6 dframe.loc[rows_to_touch, "FIPNUM"] = 5 # Write back to new include file, ensure datatype is integer. - grid.df2ecl(dframe, "FIPNUM", dtype=int, filename="fipnum.inc", eclfiles=eclfiles) + grid.df2ecl(dframe, "FIPNUM", dtype=int, filename="fipnum.inc", resdatafiles=resdatafiles) This will produce the file `fipnum.inc` with the contents: .. literalinclude:: fipnum.inc -It is recommended to supply the ``eclfiles`` object to ``df2ecl``, if not, correct grid +It is recommended to supply the ``resdatafiles`` object to ``df2ecl``, if not, correct grid size can not be ensured. diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index 604a5ce45..b368899b7 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -11,14 +11,14 @@ Note: Eclipse300 will not export TRANNNC data in parallel mode. Run in serial to get this output. .. - nnc.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/nnc.csv', index=False) + nnc.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/nnc.csv', index=False) .. code-block:: python - from res2df import nnc, EclFiles + from res2df import nnc, ResdataFiles - eclfiles = EclFiles('MYDATADECK.DATA') - dframe = nnc.df(eclfiles) + resdatafiles = ResdataFiles('MYDATADECK.DATA') + dframe = nnc.df(resdatafiles) .. csv-table:: Example nnc table :file: nnc.csv @@ -49,10 +49,10 @@ to an Eclipse include file: .. code-block:: python - from ecl2f import nnc, EclFiles + from ecl2f import nnc, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - nnc_df = nnc.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + nnc_df = nnc.df(resdatafiles) nnc_df["TRANM"] = 0.1 # Reduce all NNC transmissibilities nnc.df2ecl_editnnc(nnc_df, filename="editnnc.inc") @@ -60,7 +60,7 @@ to an Eclipse include file: and the contents of the exported file can be: .. - print(nnc.df2ecl_editnnc(nnc.df(eclfiles).head(4).assign(TRANM=0.1))) + print(nnc.df2ecl_editnnc(nnc.df(resdatafiles).head(4).assign(TRANM=0.1))) .. 
code-block:: console diff --git a/docs/usage/pillars.rst b/docs/usage/pillars.rst index 391d0b47d..33ed94e9d 100644 --- a/docs/usage/pillars.rst +++ b/docs/usage/pillars.rst @@ -13,9 +13,9 @@ Typical usage is to obtain property statistics, and compute contacts pr. pillar (and optionally pr some region parameter). .. - from res2df import pillars, EclFiles - pillars.df(res2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) - pillars.df(res2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head().to_csv("pillars-example1.csv"float_format="%.1f", index=False)) + from res2df import pillars, ResdataFiles + pillars.df(res2df.ResdataFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) + pillars.df(res2df.ResdataFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head().to_csv("pillars-example1.csv"float_format="%.1f", index=False)) .. csv-table:: Example pillar table :file: pillars-example1.csv @@ -90,7 +90,7 @@ By default, dynamic data are added as a set of columns for every date, like in this example: .. - pillars.df(res2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), rstdates='all').dropna().head().to_csv('pillars-dyn1-unstacked.csv', float_format="%.1f", index=False) + pillars.df(res2df.ResdataFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), rstdates='all').dropna().head().to_csv('pillars-dyn1-unstacked.csv', float_format="%.1f", index=False) .. csv-table:: Example pillar table with dynamical data, unstacked :file: pillars-dyn1-unstacked.csv diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index fedd33886..198308288 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -9,10 +9,10 @@ Example usage: .. code-block:: python - from res2df import pvt, EclFiles + from res2df import pvt, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = pvt.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = pvt.df(resdatafiles) Alternatively, we may also read directly from an include file if we read the contents of the file and supply it as a string: @@ -22,7 +22,7 @@ if we read the contents of the file and supply it as a string: dframe = pvt.df(open("pvt.inc").read()) .. - pvt.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).tail(15).to_csv('docs/usage/pvt.csv', index=False) + pvt.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).tail(15).to_csv('docs/usage/pvt.csv', index=False) .. csv-table:: Example PVT table (last 15 rows to show non-Nan data) diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index 44b605733..19223d913 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -11,14 +11,14 @@ column. .. import numpy as np - satfunc.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).iloc[np.r_[0:5, 37:42, -5:0]].to_csv('docs/usage/satfunc.csv', index=False) + satfunc.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).iloc[np.r_[0:5, 37:42, -5:0]].to_csv('docs/usage/satfunc.csv', index=False) .. code-block:: python - from res2df import satfunc, EclFiles + from res2df import satfunc, ResdataFiles - eclfiles = EclFiles('MYDATADECK.DATA') - dframe = satfunc.df(eclfiles) + resdatafiles = ResdataFiles('MYDATADECK.DATA') + dframe = satfunc.df(resdatafiles) .. 
csv-table:: Example satfunc table (only a subset of the rows are shown) :file: satfunc.csv diff --git a/docs/usage/summary.rst b/docs/usage/summary.rst index 968d37670..47abf0c08 100644 --- a/docs/usage/summary.rst +++ b/docs/usage/summary.rst @@ -5,14 +5,14 @@ This module extracts summary information from UNSMRY-files into Pandas Dataframes. .. - summary.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), column_keys="F*PT", time_index='yearly').to_csv("summary.csv") + summary.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), column_keys="F*PT", time_index='yearly').to_csv("summary.csv") .. code-block:: python - from res2df import summary, EclFiles + from res2df import summary, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = summary.df(eclfiles, column_keys="F*PT", time_index="yearly") + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = summary.df(resdatafiles, column_keys="F*PT", time_index="yearly") If you don't specify ``column_keys``, all included summary vectors will be retrieved. Default for ``time_index`` is the report datetimes written by diff --git a/docs/usage/trans.rst b/docs/usage/trans.rst index 9a26f7558..f0c3f743f 100644 --- a/docs/usage/trans.rst +++ b/docs/usage/trans.rst @@ -11,13 +11,13 @@ connections .. code-block:: python - from res2df import trans, EclFiles + from res2df import trans, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = res2df.trans.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = res2df.trans.df(resdatafiles) .. - res2df.trans.df(res2df.EclFiles("2_R001_REEK-0.DATA")).sample(7)\ + res2df.trans.df(res2df.ResdataFiles("2_R001_REEK-0.DATA")).sample(7)\ .to_csv("trans1.csv", float_format="%.2f", index=False) .. csv-table:: Neighbour transmissibilities, sample rows from an example simulation. @@ -79,12 +79,12 @@ like this. Example: .. code-block:: python - dframe = res2df.trans.df(eclfiles, vectors="FIPNUM", boundaryfilter=True, addnnc=True) + dframe = res2df.trans.df(resdatafiles, vectors="FIPNUM", boundaryfilter=True, addnnc=True) which gives the dataframe .. - res2df.trans.df(res2df.EclFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", boundaryfilter=True).sample(10).to_csv("trans-boundaries.csv", index=False, float_format="%.2f") + res2df.trans.df(res2df.ResdataFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", boundaryfilter=True).sample(10).to_csv("trans-boundaries.csv", index=False, float_format="%.2f") .. csv-table:: Sample rows from connections where FIPNUM is changing :file: trans-boundaries.csv @@ -105,13 +105,13 @@ over a region interface. This is accomplished by adding the ``group=True`` optio .. code-block:: python - from res2df import trans, EclFiles + from res2df import trans, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = res2df.trans.df(eclfiles, vectors="FIPNUM", addnnc=True, group=True) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = res2df.trans.df(resdatafiles, vectors="FIPNUM", addnnc=True, group=True) .. - res2df.trans.df(res2df.EclFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", group=True).to_csv("trans-group.csv", index=False, float_format="%.2f") + res2df.trans.df(res2df.ResdataFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", group=True).to_csv("trans-group.csv", index=False, float_format="%.2f") .. 
csv-table:: Transmissibilities summed over each FIPNUM interface :file: trans-group.csv diff --git a/docs/usage/wcon.rst b/docs/usage/wcon.rst index 393607935..e4b09e90c 100644 --- a/docs/usage/wcon.rst +++ b/docs/usage/wcon.rst @@ -5,13 +5,13 @@ This module extracts information from WCONHIST, WCONINJE, WCONINJH and WCONPROD from an Eclipse deck. .. - wcon.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/wcon.csv', index=False) + wcon.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/wcon.csv', index=False) .. code-block:: python - from res2df import wcon, EclFiles + from res2df import wcon, ResdataFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = wcon.df(eclfiles) + resdatafiles = ResdataFiles("MYDATADECK.DATA") + dframe = wcon.df(resdatafiles) .. csv-table:: Example WCON table :file: wcon.csv diff --git a/res2df/__init__.py b/res2df/__init__.py index f4244fc5a..90a6b180e 100644 --- a/res2df/__init__.py +++ b/res2df/__init__.py @@ -11,7 +11,7 @@ __version__ = "v0.0.0" from .constants import MAGIC_STDOUT -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles SUBMODULES: List[str] = [ "compdat", diff --git a/res2df/compdat.py b/res2df/compdat.py index e6fcd8e79..94a3cbc73 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -35,8 +35,8 @@ parse_opmio_tstep_rec, write_dframe_stdout_file, ) -from .eclfiles import EclFiles from .grid import merge_initvectors +from .resdatafiles import ResdataFiles logger = logging.getLogger(__name__) @@ -973,13 +973,13 @@ def compdat_main(args): logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - compdat_df = df(eclfiles, initvectors=args.initvectors) + resdatafiles = ResdataFiles(args.DATAFILE) + compdat_df = df(resdatafiles, initvectors=args.initvectors) write_dframe_stdout_file(compdat_df, args.output, index=False, caller_logger=logger) def df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, initvectors: Optional[List[str]] = None, zonemap: Optional[Dict[int, str]] = None, ) -> pd.DataFrame: @@ -992,17 +992,17 @@ def df( Returns: pd.Dataframe with one row pr cell to well connection """ - compdat_df = deck2dfs(eclfiles.get_ecldeck())["COMPDAT"] + compdat_df = deck2dfs(resdatafiles.get_ecldeck())["COMPDAT"] compdat_df = unrolldf(compdat_df) if initvectors: compdat_df = merge_initvectors( - eclfiles, compdat_df, initvectors, ijknames=["I", "J", "K1"] + resdatafiles, compdat_df, initvectors, ijknames=["I", "J", "K1"] ) if zonemap is None: # If no zonemap is submitted, search for zonemap in default location - zonemap = eclfiles.get_zonemap() + zonemap = resdatafiles.get_zonemap() if zonemap: logger.info("Merging zonemap into compdat") diff --git a/res2df/equil.py b/res2df/equil.py index 8458343c9..8ae4ba98f 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -11,7 +11,7 @@ from res2df import common, getLogger_res2csv, inferdims -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -72,7 +72,7 @@ def df( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"], keywords: Optional[List[str]] = None, ntequl: Optional[int] = None, ) -> pd.DataFrame: @@ -89,7 +89,7 @@ def df( that we have to infer the correct number of EQUIL lines from what gives us successful parsing from OPM. 
 In those cases, the deck must be supplied as a string, if not, extra EQUIL lines
-    are possibly already removed by the OPM parser in eclfiles.str2deck().
+    are possibly already removed by the OPM parser in resdatafiles.str2deck().

     Arguments:
         deck: Eclipse deck or string with deck. If
@@ -101,7 +101,7 @@ def df(
     Return:
         pd.DataFrame, at least with columns KEYWORD and EQLNUM
     """
-    if isinstance(deck, EclFiles):
+    if isinstance(deck, ResdataFiles):
         deck = deck.get_ecldeck()
     deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl)
@@ -314,9 +314,9 @@ def equil_main(args) -> None:
     logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
         __name__, vars(args)
     )
-    eclfiles = EclFiles(args.DATAFILE)
-    if eclfiles:
-        deck = eclfiles.get_ecldeck()
+    resdatafiles = ResdataFiles(args.DATAFILE)
+    if resdatafiles:
+        deck = resdatafiles.get_ecldeck()
     if "EQLDIMS" in deck:
         # Things are easier when a full deck with (correct) EQLDIMS
         # is supplied:
diff --git a/res2df/faults.py b/res2df/faults.py
index 98599540b..c62a66cce 100644
--- a/res2df/faults.py
+++ b/res2df/faults.py
@@ -10,7 +10,7 @@

 import pandas as pd

-from res2df import EclFiles, getLogger_res2csv
+from res2df import ResdataFiles, getLogger_res2csv
 from res2df.common import parse_opmio_deckrecord, write_dframe_stdout_file

 try:
@@ -30,7 +30,7 @@

 ALLOWED_FACES = ["X", "Y", "Z", "I", "J", "K", "X-", "Y-", "Z-", "I-", "J-", "K-"]


-def df(deck: Union[EclFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame:
+def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame:
     """Produce a dataframe of fault data from a deck

     All data for the keyword FAULTS will be returned.
@@ -38,7 +38,7 @@

     Args:
         deck: Eclipse deck
     """
-    if isinstance(deck, EclFiles):
+    if isinstance(deck, ResdataFiles):
         deck = deck.get_ecldeck()

     # In[91]: list(deck['FAULTS'][0])
@@ -86,9 +86,9 @@ def faults_main(args) -> None:
     logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
         __name__, vars(args)
     )
-    eclfiles = EclFiles(args.DATAFILE)
-    if eclfiles:
-        deck = eclfiles.get_ecldeck()
+    resdatafiles = ResdataFiles(args.DATAFILE)
+    if resdatafiles:
+        deck = resdatafiles.get_ecldeck()
     faults_df = df(deck)
     write_dframe_stdout_file(
         faults_df,
diff --git a/res2df/fipreports.py b/res2df/fipreports.py
index ed3c741ae..5aec98573 100644
--- a/res2df/fipreports.py
+++ b/res2df/fipreports.py
@@ -10,7 +10,7 @@
 import numpy as np
 import pandas as pd

-from res2df import EclFiles, getLogger_res2csv
+from res2df import ResdataFiles, getLogger_res2csv
 from res2df.common import parse_ecl_month, write_dframe_stdout_file

 logger = logging.getLogger(__name__)
@@ -97,7 +97,7 @@ def float_or_nan(string: str) -> float:
 )


-def df(prtfile: Union[str, EclFiles], fipname: str = "FIPNUM") -> pd.DataFrame:
+def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFrame:
     """
     Parses a PRT file from Eclipse and finds FIPXXXX REGION REPORT blocks
     and organizes those numbers into a dataframe
@@ -106,12 +106,12 @@
     DATE and region index added.

     Args:
-        prtfile: filename (PRT) or an EclFiles object
+        prtfile: filename (PRT) or a ResdataFiles object
         fipname: The name of the report regions, FIPNUM, FIPZON or whatever
             Max length of the string is 8, the first three characters
             must be FIP, and the next 3 characters must be unique for a
             given Eclipse deck.
""" - if isinstance(prtfile, EclFiles): + if isinstance(prtfile, ResdataFiles): prtfile = prtfile.get_prtfilename() if not fipname.startswith("FIP"): raise ValueError("fipname must start with FIP") @@ -217,6 +217,6 @@ def fipreports_main(args) -> None: if args.PRTFILE.endswith(".PRT"): prtfile = args.PRTFILE else: - prtfile = EclFiles(args.PRTFILE).get_prtfilename() + prtfile = ResdataFiles(args.PRTFILE).get_prtfilename() dframe = df(prtfile, args.fipname) write_dframe_stdout_file(dframe, args.output, index=False, caller_logger=logger) diff --git a/res2df/grid.py b/res2df/grid.py index 4aab554cb..11de9d0be 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -26,27 +26,28 @@ from res2df import __version__, common, getLogger_res2csv -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles logger = logging.getLogger(__name__) -def get_available_rst_dates(eclfiles: EclFiles) -> List[datetime.date]: +def get_available_rst_dates(resdatafiles: ResdataFiles) -> List[datetime.date]: """Return a list of datetime objects for the available dates in the RST file""" - report_indices = ResdataFile.file_report_list(eclfiles.get_rstfilename()) + report_indices = ResdataFile.file_report_list(resdatafiles.get_rstfilename()) logger.info( "Restart report indices (count %s): %s", str(len(report_indices)), str(report_indices), ) return [ - eclfiles.get_rstfile().iget_restart_sim_time(index).date() + resdatafiles.get_rstfile().iget_restart_sim_time(index).date() for index in range(0, len(report_indices)) ] def dates2rstindices( - eclfiles: EclFiles, dates: Optional[Union[str, datetime.date, List[datetime.date]]] + resdatafiles: ResdataFiles, + dates: Optional[Union[str, datetime.date, List[datetime.date]]], ) -> Tuple[List[int], List[datetime.date], List[str]]: """Return the restart index/indices for a given datetime or list of datetimes @@ -68,7 +69,7 @@ def dates2rstindices( if not dates: return ([], [], []) - availabledates = get_available_rst_dates(eclfiles) + availabledates = get_available_rst_dates(resdatafiles) supportedmnemonics = ["first", "last", "all"] @@ -141,7 +142,7 @@ def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table: def rst2df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, date: Union[str, datetime.date, List[datetime.date]], vectors: Optional[Union[str, List[str]]] = None, dateinheaders: bool = False, @@ -155,7 +156,7 @@ def rst2df( when merging with the grid geometry dataframe. Args: - eclfiles: EclFiles object + resdatafiles: ResdataFiles object date: datetime.date or list of datetime.date, must correspond to an existing date. If list, it forces dateinheaders to be True. 
@@ -183,16 +184,16 @@ def rst2df( # First task is to determine the restart index to extract # data for: - (rstindices, chosendates, isodates) = dates2rstindices(eclfiles, date) + (rstindices, chosendates, isodates) = dates2rstindices(resdatafiles, date) logger.info("Extracting restart information at dates %s", str(isodates)) # Determine the available restart vectors, we only include # those with correct length, meaning that they are defined # for all active cells: - activecells = eclfiles.get_egrid().getNumActive() + activecells = resdatafiles.get_egrid().getNumActive() rstvectors = [] - for vec in eclfiles.get_rstfile().headers: + for vec in resdatafiles.get_rstfile().headers: if vec[1] == activecells and any( fnmatch.fnmatch(vec[0], key) for key in vectors ): @@ -211,7 +212,7 @@ def rst2df( present_rstvectors = [] for vec in rstvectors: try: - if eclfiles.get_rstfile().iget_named_kw(vec, rstindex): + if resdatafiles.get_rstfile().iget_named_kw(vec, rstindex): present_rstvectors.append(vec) except IndexError: pass @@ -232,7 +233,7 @@ def rst2df( columns=present_rstvectors, data=np.hstack( [ - eclfiles.get_rstfile() + resdatafiles.get_rstfile() .iget_named_kw(vec, rstindex) .numpyView() .reshape(-1, 1) @@ -279,7 +280,7 @@ def rst2df( def gridgeometry2df( - eclfiles: EclFiles, zonemap: Optional[Dict[int, str]] = None + resdatafiles: ResdataFiles, zonemap: Optional[Dict[int, str]] = None ) -> pd.DataFrame: """Produce a Pandas Dataframe with Eclipse gridgeometry @@ -287,7 +288,7 @@ def gridgeometry2df( when merging with other dataframes with cell-data. Args: - eclfiles: object holding the Eclipse output files. + resdatafiles: object holding the Eclipse output files. zonemap: A zonemap dictionary mapping every K index to a string, which will be put in a column ZONE. If none is provided, a zonemap from a default file will be looked for. Provide an empty @@ -299,8 +300,8 @@ def gridgeometry2df( pr. cell. The index of the dataframe are the global indices. If a zonemap is provided, zone information will be in the column ZONE. """ - egrid_file = eclfiles.get_egridfile() - grid = eclfiles.get_egrid() + egrid_file = resdatafiles.get_egridfile() + grid = resdatafiles.get_egrid() if not egrid_file or not grid: raise ValueError("No EGRID file supplied") @@ -348,7 +349,7 @@ def gridgeometry2df( if zonemap is None: # Look for default zonemap file: - zonemap = eclfiles.get_zonemap() + zonemap = resdatafiles.get_zonemap() if zonemap: logger.info("Merging zonemap into grid") grid_df = common.merge_zones(grid_df, zonemap, kname="K") @@ -357,7 +358,7 @@ def gridgeometry2df( def merge_initvectors( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, dframe: pd.DataFrame, initvectors: List[str], ijknames: Optional[List[str]] = None, @@ -368,7 +369,7 @@ def merge_initvectors( for API users to only use the df() function. Args: - eclfiles: Object representing the Eclipse output files + resdatafiles: Object representing the Eclipse output files dframe: Table data to merge with initvectors: Names of INIT vectors to merge in. 
ijknames: Three strings that determine the I, J and K columns to use @@ -385,7 +386,7 @@ def merge_initvectors( if len(ijknames) != 3: raise ValueError("ijknames must be a list of length 3") assert isinstance(dframe, pd.DataFrame) - assert isinstance(eclfiles, EclFiles) + assert isinstance(resdatafiles, ResdataFiles) if not set(ijknames).issubset(dframe.columns): raise ValueError( @@ -398,12 +399,12 @@ def merge_initvectors( assert isinstance(initvectors, list) logger.info("Merging INIT data %s into dataframe", str(initvectors)) - ijkinit = df(eclfiles, vectors=initvectors)[["I", "J", "K"] + initvectors] + ijkinit = df(resdatafiles, vectors=initvectors)[["I", "J", "K"] + initvectors] return pd.merge(dframe, ijkinit, left_on=ijknames, right_on=["I", "J", "K"]) def init2df( - eclfiles: EclFiles, vectors: Optional[Union[str, List[str]]] = None + resdatafiles: ResdataFiles, vectors: Optional[Union[str, List[str]]] = None ) -> pd.DataFrame: """Extract information from INIT file with cell data @@ -413,7 +414,7 @@ def init2df( Order is significant, as index is used for merging Args: - eclfiles: Object that can serve the EGRID and INIT files + resdatafiles: Object that can serve the EGRID and INIT files vectors: List of vectors to include, glob-style wildcards supported. """ @@ -422,8 +423,8 @@ def init2df( if not isinstance(vectors, list): vectors = [vectors] - init = eclfiles.get_initfile() - egrid = eclfiles.get_egrid() + init = resdatafiles.get_initfile() + egrid = resdatafiles.get_egrid() # Build list of vector names to include: usevectors = [] @@ -470,7 +471,7 @@ def init2df( def df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, vectors: Union[str, List[str]] = "*", dropconstants: bool = False, rstdates: Optional[Union[str, datetime.date, List[datetime.date]]] = None, @@ -486,7 +487,7 @@ def df( any time dependent data from Restart files. Args: - eclfiles: Handle to an Eclipse case + resdatafiles: Handle to an Eclipse case vectors: Vectors to include, wildcards supported. Used to match both INIT vectors and RESTART vectors. @@ -506,12 +507,12 @@ def df( dictionary to avoid looking for the default file, and no ZONE column will be added. """ - gridgeom = gridgeometry2df(eclfiles, zonemap) - initdf = init2df(eclfiles, vectors=vectors) + gridgeom = gridgeometry2df(resdatafiles, zonemap) + initdf = init2df(resdatafiles, vectors=vectors) rst_df = None if rstdates: rst_df = rst2df( - eclfiles, + resdatafiles, rstdates, vectors=vectors, dateinheaders=dateinheaders, @@ -617,7 +618,7 @@ def drop_constant_columns( def df2ecl( grid_df: pd.DataFrame, keywords: Union[str, List[str]], - eclfiles: Optional[EclFiles] = None, + resdatafiles: Optional[ResdataFiles] = None, dtype: Optional[Type] = None, filename: Optional[str] = None, nocomments: bool = False, @@ -643,7 +644,7 @@ def df2ecl( The grid can contain both active and inactive cells. keywords: The keyword(s) to export, with one value for every cell. - eclfiles: If provided, the total cell count for the grid + resdatafiles: If provided, the total cell count for the grid will be requested from this object. If not, it will be *guessed* from the maximum number of GLOBAL_INDEX, which can be under-estimated in the corner-case that the last cells are inactive. 
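A short sketch of the guard this argument provides, again with a hypothetical deck path:

.. code-block:: python

    from res2df import grid, ResdataFiles

    resdatafiles = ResdataFiles("MYDATADECK.DATA")  # hypothetical path
    grid_df = grid.df(resdatafiles)

    # Supplying resdatafiles lets df2ecl obtain the true global cell count
    # from the EGRID file instead of guessing it from GLOBAL_INDEX:
    grid.df2ecl(
        grid_df, "FIPNUM", dtype=int, filename="fipnum.inc", resdatafiles=resdatafiles
    )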
@@ -670,10 +671,10 @@ def df2ecl( # Figure out the total number of cells for which we need to export data for: global_size = None active_cells = None - if eclfiles is not None: - if eclfiles.get_egrid() is not None: - global_size = eclfiles.get_egrid().get_global_size() - active_cells = eclfiles.get_egrid().getNumActive() + if resdatafiles is not None: + if resdatafiles.get_egrid() is not None: + global_size = resdatafiles.get_egrid().get_global_size() + active_cells = resdatafiles.get_egrid().getNumActive() if "GLOBAL_INDEX" not in grid_df: logger.warning( @@ -763,9 +764,9 @@ def grid_main(args) -> None: logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) + resdatafiles = ResdataFiles(args.DATAFILE) grid_df = df( - eclfiles, + resdatafiles, vectors=args.vectors, rstdates=args.rstdates, dropconstants=args.dropconstants, diff --git a/res2df/gruptree.py b/res2df/gruptree.py index d7422c4cd..fdafb661e 100644 --- a/res2df/gruptree.py +++ b/res2df/gruptree.py @@ -19,7 +19,7 @@ except ImportError: pass -from res2df import EclFiles, getLogger_res2csv +from res2df import ResdataFiles, getLogger_res2csv from res2df.common import ( parse_opmio_date_rec, parse_opmio_deckrecord, @@ -31,7 +31,7 @@ def df( - deck: Union[EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"], startdate: Optional[datetime.date] = None, welspecs: bool = True, ) -> pd.DataFrame: @@ -55,7 +55,7 @@ def df( startdate is only relevant when START is not in the deck. Args: - deck: opm.io Deck object or EclFiles + deck: opm.io Deck object or ResdataFiles Returns: pd.DataFrame with one row pr edge. Empty dataframe if no @@ -68,7 +68,7 @@ def df( else: date = None - if isinstance(deck, EclFiles): + if isinstance(deck, ResdataFiles): deck = deck.get_ecldeck() edgerecords = [] # list of dict of rows containing an edge. @@ -453,8 +453,8 @@ def gruptree_main(args) -> None: if not args.output and not args.prettyprint: print("Nothing to do. 
Set --output or --prettyprint") sys.exit(0) - eclfiles = EclFiles(args.DATAFILE) - dframe = df(eclfiles.get_ecldeck(), startdate=args.startdate) + resdatafiles = ResdataFiles(args.DATAFILE) + dframe = df(resdatafiles.get_ecldeck(), startdate=args.startdate) if args.prettyprint: if "DATE" in dframe: print(prettyprint(dframe)) diff --git a/res2df/inferdims.py b/res2df/inferdims.py index aa0f9f5eb..ebb4b42cd 100644 --- a/res2df/inferdims.py +++ b/res2df/inferdims.py @@ -12,7 +12,7 @@ # Let parts of res2df work without OPM: pass -from res2df import EclFiles +from res2df import ResdataFiles logger = logging.getLogger(__name__) @@ -68,7 +68,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: deckstring, dimkeyword, dimitem, dimcountguess, nowarn=True ) try: - EclFiles.str2deck( + ResdataFiles.str2deck( deck_candidate, parsecontext=opm.io.ParseContext( opmioparser_recovery_fail_extra_records @@ -170,7 +170,7 @@ def inject_xxxdims_ntxxx( if xxxdims in deck and ntxxx_value is None: # Then we have nothing to do, but ensure we parse a potential string to a deck if isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResdataFiles.str2deck(deck) return deck if xxxdims in deck and ntxxx_value is not None: @@ -195,6 +195,6 @@ def inject_xxxdims_ntxxx( str(deck), xxxdims, DIMS_POS[ntxxx_name], ntxxx_estimate, nowarn=True ) # Overwrite the deck object - deck = EclFiles.str2deck(augmented_strdeck) + deck = ResdataFiles.str2deck(augmented_strdeck) return deck diff --git a/res2df/nnc.py b/res2df/nnc.py index f755309b0..d8b17fe2d 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -10,13 +10,15 @@ import pandas as pd -from res2df import EclFiles, __version__, common, getLogger_res2csv, grid +from res2df import ResdataFiles, __version__, common, getLogger_res2csv, grid from res2df.common import write_dframe_stdout_file logger: logging.Logger = logging.getLogger(__name__) -def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.DataFrame: +def df( + resdatafiles: ResdataFiles, coords: bool = False, pillars: bool = False +) -> pd.DataFrame: """Produce a Pandas Dataframe with NNC information A NNC is a pair of cells that are not next to each other @@ -28,7 +30,7 @@ def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.Da between the two cells) Args: - eclfiles: object that can serve EclFile and EclGrid + resdatafiles: object that can serve EclFile and EclGrid on demand coords: Set to True if you want the midpoint of the two connected cells to be computed and added to the columns @@ -39,9 +41,9 @@ def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.Da Returns: Empty if no NNC information found. 
""" - egrid_file = eclfiles.get_egridfile() - egrid_grid = eclfiles.get_egrid() - init_file = eclfiles.get_initfile() + egrid_file = resdatafiles.get_egridfile() + egrid_grid = resdatafiles.get_egrid() + init_file = resdatafiles.get_initfile() if not ("NNC1" in egrid_file and "NNC2" in egrid_file): logger.warning("No NNC data in EGRID") @@ -98,11 +100,11 @@ def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.Da if pillars: nncdf = filter_vertical(nncdf) if coords: - nncdf = add_nnc_coords(nncdf, eclfiles) + nncdf = add_nnc_coords(nncdf, resdatafiles) return nncdf -def add_nnc_coords(nncdf: pd.DataFrame, eclfiles: EclFiles) -> pd.DataFrame: +def add_nnc_coords(nncdf: pd.DataFrame, resdatafiles: ResdataFiles) -> pd.DataFrame: """Add columns X, Y and Z for the connection midpoint This extracts x, y and z for (I1, J1, K1) and (I2, J2, K2) @@ -110,12 +112,12 @@ def add_nnc_coords(nncdf: pd.DataFrame, eclfiles: EclFiles) -> pd.DataFrame: Arguments: nncdf: With grid index columns (I1, J1, K1, I2, J2, K2) - eclfiles: Object used to fetch grid data from EGRID. + resdatafiles: Object used to fetch grid data from EGRID. Returns: Incoming dataframe augmented with the columns X, Y and Z. """ - gridgeometry = grid.gridgeometry2df(eclfiles) + gridgeometry = grid.gridgeometry2df(resdatafiles) gnncdf = pd.merge( nncdf, gridgeometry, @@ -279,8 +281,8 @@ def nnc_main(args) -> None: logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - nncdf = df(eclfiles, coords=args.coords, pillars=args.pillars) + resdatafiles = ResdataFiles(args.DATAFILE) + nncdf = df(resdatafiles, coords=args.coords, pillars=args.pillars) write_dframe_stdout_file( nncdf, args.output, diff --git a/res2df/parameters.py b/res2df/parameters.py index 6b7ea1b57..601d04e8c 100644 --- a/res2df/parameters.py +++ b/res2df/parameters.py @@ -10,13 +10,13 @@ import pandas as pd import yaml -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles logger = logging.getLogger(__name__) def find_parameter_files( - ecldeck_or_eclpath: Union[EclFiles, str, Path], filebase: str = "parameters" + ecldeck_or_eclpath: Union[ResdataFiles, str, Path], filebase: str = "parameters" ) -> List[Path]: """Locate a default prioritized list of files to try to read as key-value @@ -24,7 +24,7 @@ def find_parameter_files( current dir, one directory up, and two directories up. 
     Args:
-        ecldeck_or_eclpath: Either an EclFiles object of
+        ecldeck_or_eclpath: Either a ResdataFiles object of
             an Eclipse output set (only the corresponding path will be used),
             or path to a file or directory, that will be used as a starting
             point for locating parameter files
@@ -35,7 +35,7 @@
     """
     eclbasepath: Path
     fname: str
-    if isinstance(ecldeck_or_eclpath, EclFiles):
+    if isinstance(ecldeck_or_eclpath, ResdataFiles):
         eclbasepath = Path(ecldeck_or_eclpath.get_path())
     elif isinstance(ecldeck_or_eclpath, (str, Path)):
         eclbasepath = Path(ecldeck_or_eclpath).parent.absolute()
diff --git a/res2df/pillars.py b/res2df/pillars.py
index e94421ec7..9814f7686 100644
--- a/res2df/pillars.py
+++ b/res2df/pillars.py
@@ -8,7 +8,7 @@
 import dateutil.parser
 import pandas as pd

-from res2df import EclFiles, common, getLogger_res2csv, grid
+from res2df import ResdataFiles, common, getLogger_res2csv, grid

 logger: logging.Logger = logging.getLogger(__name__)

@@ -33,7 +33,7 @@


 def df(
-    eclfiles: EclFiles,
+    resdatafiles: ResdataFiles,
     region: Optional[str] = None,
     rstdates: Optional[Union[str, datetime.date, List[datetime.date]]] = None,
     soilcutoff: float = 0.2,
@@ -83,9 +83,11 @@
     if region:
         vectors.append(region)
     vectors.extend(["POR*", "PERM*", "SWAT", "SGAS", "1OVERBO", "1OVERBG"])
-    grid_df = grid.df(eclfiles, rstdates=rstdates, vectors=vectors, dateinheaders=True)
+    grid_df = grid.df(
+        resdatafiles, rstdates=rstdates, vectors=vectors, dateinheaders=True
+    )

-    rstdates_iso = grid.dates2rstindices(eclfiles, rstdates)[2]
+    rstdates_iso = grid.dates2rstindices(resdatafiles, rstdates)[2]

     grid_df["PILLAR"] = grid_df["I"].astype(str) + "-" + grid_df["J"].astype(str)
     logger.info("Computing pillar statistics")
@@ -415,9 +417,9 @@ def pillars_main(args) -> None:
         __name__, vars(args)
     )

-    eclfiles = EclFiles(args.DATAFILE)
+    resdatafiles = ResdataFiles(args.DATAFILE)
     dframe = df(
-        eclfiles,
+        resdatafiles,
         region=args.region,
         rstdates=args.rstdates,
         soilcutoff=args.soilcutoff,
diff --git a/res2df/pvt.py b/res2df/pvt.py
index 5d9b3e640..2afc33fc6 100644
--- a/res2df/pvt.py
+++ b/res2df/pvt.py
@@ -11,7 +11,7 @@

 import pandas as pd

-from res2df import EclFiles, common, getLogger_res2csv, inferdims
+from res2df import ResdataFiles, common, getLogger_res2csv, inferdims

 try:
     # Needed for mypy
@@ -217,7 +217,7 @@ def df(
     Return:
         pd.DataFrame
     """
-    if isinstance(deck, EclFiles):
+    if isinstance(deck, ResdataFiles):
         deck = deck.get_ecldeck()
     deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt)
@@ -283,10 +283,10 @@ def pvt_main(args) -> None:
     logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
         __name__, vars(args)
     )
-    eclfiles = EclFiles(args.DATAFILE)
+    resdatafiles = ResdataFiles(args.DATAFILE)
     logger.info("Parsed %s", args.DATAFILE)
-    if eclfiles:
-        deck = eclfiles.get_ecldeck()
+    if resdatafiles:
+        deck = resdatafiles.get_ecldeck()
     if "TABDIMS" in deck:
         # Things are easier when a full deck with correct TABDIMS
         # is supplied:
diff --git a/res2df/eclfiles.py b/res2df/resdatafiles.py
similarity index 98%
rename from res2df/eclfiles.py
rename to res2df/resdatafiles.py
index 2fca379eb..bb3834923 100644
--- a/res2df/eclfiles.py
+++ b/res2df/resdatafiles.py
@@ -39,7 +39,7 @@
 ]


-class EclFiles(object):
+class ResdataFiles(object):
     """
     Class for holding an Eclipse deck with result files
@@ -109,7 +109,7 @@ def str2deck(

     @staticmethod
     def file2deck(filename: Union[str, Path]) -> "opm.libopmcommon_python.Deck":
         """Try to convert standalone files into opm.io
Deck objects""" - return EclFiles.str2deck(Path(filename).read_text(encoding="utf-8")) + return ResdataFiles.str2deck(Path(filename).read_text(encoding="utf-8")) def get_egrid(self) -> Grid: """Find and return EGRID file as an Grid object""" diff --git a/res2df/rft.py b/res2df/rft.py index 42a422931..cbba20c42 100644 --- a/res2df/rft.py +++ b/res2df/rft.py @@ -26,8 +26,8 @@ from res2df import getLogger_res2csv from .common import merge_zones, write_dframe_stdout_file -from .eclfiles import EclFiles from .gruptree import tree_from_dict +from .resdatafiles import ResdataFiles logger: logging.Logger = logging.getLogger(__name__) @@ -515,18 +515,20 @@ def add_extras(dframe: pd.DataFrame, inplace: bool = True) -> pd.DataFrame: def df( - eclfiles: EclFiles, wellname: Optional[str] = None, date: Optional[str] = None + resdatafiles: ResdataFiles, + wellname: Optional[str] = None, + date: Optional[str] = None, ) -> pd.DataFrame: """Loop over an RFT file and construct a dataframe representation of the data, ordered by well and date. Args: - eclfiles: Object used to locate the RFT file + resdatafiles: Object used to locate the RFT file wellname: If provided, only wells matching this string exactly will be included date: If provided, all other dates will be ignored. YYYY-MM-DD. """ - rftfile = eclfiles.get_rftfile() + rftfile = resdatafiles.get_rftfile() rftdata = [] for rftrecord in rftrecords(rftfile): @@ -632,7 +634,7 @@ def df( if rftdata_df.HOSTGRID.unique()[0].strip() == "": del rftdata_df["HOSTGRID"] - zonemap = eclfiles.get_zonemap() + zonemap = resdatafiles.get_zonemap() if zonemap: if "K" in rftdata_df: kname = "K" @@ -679,10 +681,10 @@ def rft_main(args) -> None: ) if args.DATAFILE.endswith(".RFT"): # Support the RFT file as an argument also: - eclfiles = EclFiles(args.DATAFILE.replace(".RFT", "") + ".DATA") + resdatafiles = ResdataFiles(args.DATAFILE.replace(".RFT", "") + ".DATA") else: - eclfiles = EclFiles(args.DATAFILE) - rft_df = df(eclfiles, wellname=args.wellname, date=args.date) + resdatafiles = ResdataFiles(args.DATAFILE) + rft_df = df(resdatafiles, wellname=args.wellname, date=args.date) if rft_df.empty: if args.wellname is not None or args.date is not None: logger.warning("No data. Check your well and/or date filter") diff --git a/res2df/satfunc.py b/res2df/satfunc.py index fc17096bb..07c101dd6 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -28,7 +28,7 @@ from res2df import common, getLogger_res2csv, inferdims from .common import write_dframe_stdout_file -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles logger: logging.Logger = logging.getLogger(__name__) @@ -75,7 +75,7 @@ def df( and stating how many saturation functions there should be. If you have a string with TABDIMS missing, you must supply this as a string to this function, and not a parsed deck, as - the default parser in EclFiles is very permissive (and only + the default parser in ResdataFiles is very permissive (and only returning the first function by default). Arguments: @@ -91,7 +91,7 @@ def df( Return: pd.DataFrame, columns 'KEYWORD', 'SW', 'KRW', 'KROW', 'PC', .. 
""" - if isinstance(deck, EclFiles): + if isinstance(deck, ResdataFiles): # NB: If this is done on include files and not on DATA files # we can loose data for SATNUM > 1 deck = deck.get_ecldeck() @@ -192,13 +192,13 @@ def satfunc_main(args) -> None: logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + resdatafiles = ResdataFiles(args.DATAFILE) + if resdatafiles: + deck = resdatafiles.get_ecldeck() if "TABDIMS" in deck: # Things are easier when a full deck with (correct) TABDIMS # is supplied: - satfunc_df = df(eclfiles, keywords=args.keywords) + satfunc_df = df(resdatafiles, keywords=args.keywords) else: # This might be an include file for which we have to infer/guess # TABDIMS. Then we send it to df() as a string diff --git a/res2df/summary.py b/res2df/summary.py index ab9cf4569..7a19ec6a9 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -20,7 +20,7 @@ from . import parameters from .common import write_dframe_stdout_file -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles logger: logging.Logger = logging.getLogger(__name__) @@ -297,7 +297,7 @@ def resample_smry_dates( def df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, time_index: Optional[str] = None, column_keys: Optional[Union[List[str], str]] = None, start_date: Optional[Union[str, dt.date]] = None, @@ -321,7 +321,7 @@ def df( is always named "DATE". Arguments: - eclfiles: EclFiles object representing the Eclipse deck. Alternatively + resdatafiles: ResdataFiles object representing the Eclipse deck. Alternatively an Summary object. time_index: string indicating a resampling frequency, 'yearly', 'monthly', 'daily', 'last' or 'raw', the latter will @@ -354,11 +354,11 @@ def df( if isinstance(column_keys, str): column_keys = [column_keys] - if isinstance(eclfiles, Summary): - eclsum = eclfiles + if isinstance(resdatafiles, Summary): + eclsum = resdatafiles else: try: - eclsum = eclfiles.get_eclsum(include_restart=include_restart) + eclsum = resdatafiles.get_eclsum(include_restart=include_restart) except OSError: logger.warning("Error reading summary instance, returning empty dataframe") return pd.DataFrame() @@ -412,7 +412,7 @@ def df( ) dframe.index.name = "DATE" if params or paramfile: - dframe = _merge_params(dframe, paramfile, eclfiles) + dframe = _merge_params(dframe, paramfile, resdatafiles) # Add metadata as an attribute the dataframe, using experimental Pandas features: meta = smry_meta(eclsum) @@ -539,7 +539,7 @@ def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table: def _merge_params( dframe: pd.DataFrame, paramfile: Optional[Union[str, Path]] = None, - eclfiles: Optional[Union[str, EclFiles]] = None, + resdatafiles: Optional[Union[str, ResdataFiles]] = None, ) -> pd.DataFrame: """Locate parameters in a file and add to the dataframe. @@ -547,16 +547,18 @@ def _merge_params( the parameters.txt file based on the location of an Eclise run. 
""" - if paramfile is None and eclfiles is not None: - param_files = parameters.find_parameter_files(eclfiles) + if paramfile is None and resdatafiles is not None: + param_files = parameters.find_parameter_files(resdatafiles) logger.info("Loading parameters from files: %s", str(param_files)) param_dict = parameters.load_all(param_files) elif ( paramfile is not None - and eclfiles is not None + and resdatafiles is not None and not Path(paramfile).is_absolute() ): - param_files = parameters.find_parameter_files(eclfiles, filebase=str(paramfile)) + param_files = parameters.find_parameter_files( + resdatafiles, filebase=str(paramfile) + ) logger.info("Loading parameters from files: %s", str(param_files)) param_dict = parameters.load_all(param_files) elif paramfile is not None and Path(paramfile).is_absolute(): @@ -574,7 +576,7 @@ def _merge_params( return dframe -def smry_meta(eclfiles: EclFiles) -> Dict[str, Dict[str, Any]]: +def smry_meta(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]: """Provide metadata for summary data vectors. A dictionary indexed by summary vector name is returned, and each @@ -589,10 +591,10 @@ def smry_meta(eclfiles: EclFiles) -> Dict[str, Dict[str, Any]]: * keyword (str) * wgname (str or None) """ - if isinstance(eclfiles, Summary): - eclsum = eclfiles + if isinstance(resdatafiles, Summary): + eclsum = resdatafiles else: - eclsum = eclfiles.get_eclsum() + eclsum = resdatafiles.get_eclsum() meta: Dict[str, Dict[str, Any]] = {} for col in eclsum.keys(): @@ -904,9 +906,9 @@ def summary_main(args) -> None: args.DATAFILE.replace(".DATA", "").replace(".UNSMRY", "").replace(".SMSPEC", "") ) - eclfiles = EclFiles(eclbase) + resdatafiles = ResdataFiles(eclbase) sum_df = df( - eclfiles, + resdatafiles, time_index=args.time_index, column_keys=args.column_keys, start_date=args.start_date, diff --git a/res2df/trans.py b/res2df/trans.py index af94e9979..77c644507 100644 --- a/res2df/trans.py +++ b/res2df/trans.py @@ -13,7 +13,7 @@ from res2df import getLogger_res2csv from res2df.common import write_dframe_stdout_file -from .eclfiles import EclFiles +from .resdatafiles import ResdataFiles try: import networkx @@ -26,7 +26,7 @@ def df( - eclfiles: EclFiles, + resdatafiles: ResdataFiles, vectors: Optional[Union[str, List[str]]] = None, boundaryfilter: bool = False, group: bool = False, @@ -57,7 +57,7 @@ def df( you will get a corresponding FIPNUM1 and FIPNUM2 added. Args: - eclfiles: An object representing your Eclipse run + resdatafiles: An object representing your Eclipse run vectors: Eclipse INIT vectors that you want to include boundaryfilter: Set to true if you want to filter where one INIT vector change. Only use for integer INIT vectors. 
@@ -101,7 +101,7 @@ def df( "Filtering to both k and to ij simultaneously results in empty dataframe" ) - grid_df = res2df.grid.df(eclfiles) + grid_df = res2df.grid.df(resdatafiles) existing_vectors = [vec for vec in vectors if vec in grid_df.columns] if len(existing_vectors) < len(vectors): logger.warning( @@ -149,7 +149,7 @@ def df( if addnnc: logger.info("Adding NNC data") - nnc_df = res2df.nnc.df(eclfiles, coords=False, pillars=False) + nnc_df = res2df.nnc.df(resdatafiles, coords=False, pillars=False) nnc_df["DIR"] = "NNC" trans_df = pd.concat([trans_df, nnc_df], sort=False) @@ -236,12 +236,14 @@ def df( return trans_df -def make_nx_graph(eclfiles: EclFiles, region: str = "FIPNUM") -> "networkx.Graph": +def make_nx_graph( + resdatafiles: ResdataFiles, region: str = "FIPNUM" +) -> "networkx.Graph": """Construct a networkx graph for the transmissibilities.""" if not HAVE_NETWORKX: logger.error("Please install networkx for this function to work") return None - trans_df = df(eclfiles, vectors=[region], coords=True, group=True) + trans_df = df(resdatafiles, vectors=[region], coords=True, group=True) reg1 = region + "1" reg2 = region + "2" graph = networkx.Graph() @@ -306,9 +308,9 @@ def trans_main(args): logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) + resdatafiles = ResdataFiles(args.DATAFILE) trans_df = df( - eclfiles, + resdatafiles, vectors=args.vectors, boundaryfilter=args.boundaryfilter, onlykdir=args.onlyk, diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 4448df815..badfb3fe5 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -25,7 +25,7 @@ except ImportError: pass -from res2df import EclFiles, common, getLogger_res2csv +from res2df import ResdataFiles, common, getLogger_res2csv from . import _vfpinj as vfpinj from . import _vfpprod as vfpprod @@ -35,7 +35,7 @@ def basic_data( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[Dict[str, Any]]: @@ -51,10 +51,10 @@ def basic_data( Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ - if isinstance(deck, EclFiles): + if isinstance(deck, ResdataFiles): deck = deck.get_ecldeck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResdataFiles.str2deck(deck) if keyword not in SUPPORTED_KEYWORDS: raise ValueError( @@ -241,7 +241,7 @@ def pyarrow2basic_data(pa_table: pa.Table) -> Union[Dict[str, Any], None]: def dfs( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[pd.DataFrame]: @@ -255,10 +255,10 @@ def dfs( vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. 
""" - if isinstance(deck, EclFiles): + if isinstance(deck, ResdataFiles): deck = deck.get_ecldeck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResdataFiles.str2deck(deck) if keyword not in SUPPORTED_KEYWORDS: raise ValueError( @@ -284,7 +284,7 @@ def dfs( def pyarrow_tables( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[pa.Table]: @@ -298,10 +298,10 @@ def pyarrow_tables( vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ - if isinstance(deck, EclFiles): + if isinstance(deck, ResdataFiles): deck = deck.get_ecldeck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResdataFiles.str2deck(deck) if keyword not in SUPPORTED_KEYWORDS: raise ValueError( @@ -409,7 +409,7 @@ def df2ecl( def df( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResdataFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> pd.DataFrame: @@ -427,10 +427,10 @@ def df( logger.warning("No keywords provided to vfp.df. Empty dataframe returned") return pd.DataFrame() - if isinstance(deck, EclFiles): + if isinstance(deck, ResdataFiles): deck = deck.get_ecldeck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResdataFiles.str2deck(deck) # Extract all VFPROD/VFPINJ as separate dataframes dfs_vfp = dfs(deck, keyword, vfpnumbers_str) @@ -495,12 +495,12 @@ def vfp_main(args) -> None: if "vfpnumbers" in args: vfpnumbers = str(args.vfpnumbers) - eclfiles = EclFiles(args.DATAFILE) + resdatafiles = ResdataFiles(args.DATAFILE) if args.arrow: outputfile = args.output outputfile.replace(".arrow", "") vfp_arrow_tables = pyarrow_tables( - eclfiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers + resdatafiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers ) for vfp_table in vfp_arrow_tables: table_number = int( @@ -513,7 +513,7 @@ def vfp_main(args) -> None: logger.info(f"Parsed file {args.DATAFILE} for vfp.dfs_arrow") else: dframe = df( - eclfiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers + resdatafiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers ) if args.output: common.write_dframe_stdout_file( diff --git a/res2df/wcon.py b/res2df/wcon.py index 8246e0d68..4f88b257f 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -15,7 +15,7 @@ except ImportError: pass -from res2df import EclFiles, getLogger_res2csv +from res2df import ResdataFiles, getLogger_res2csv from res2df.common import ( parse_opmio_date_rec, parse_opmio_deckrecord, @@ -28,13 +28,13 @@ WCONKEYS = ["WCONHIST", "WCONINJE", "WCONINJH", "WCONPROD"] -def df(deck: Union[EclFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: +def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: """Loop through the deck and pick up information found The loop over the deck is a state machine, as it has to pick up dates """ - if isinstance(deck, EclFiles): + if isinstance(deck, ResdataFiles): deck = deck.get_ecldeck() wconrecords = [] # List of dicts of every line in input file @@ -94,9 +94,9 @@ def wcon_main(args) -> None: logger = getLogger_res2csv( # pylint: disable:redefined-outer_name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + resdatafiles = 
ResdataFiles(args.DATAFILE)
+    if resdatafiles:
+        deck = resdatafiles.get_ecldeck()
     wcon_df = df(deck)
     write_dframe_stdout_file(
         wcon_df,
diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py
index 8b0af8dcc..461306ed4 100644
--- a/res2df/wellcompletiondata.py
+++ b/res2df/wellcompletiondata.py
@@ -11,7 +11,7 @@
 import pyarrow.feather

 from res2df import common, compdat, getLogger_res2csv, wellconnstatus
-from res2df.eclfiles import EclFiles
+from res2df.resdatafiles import ResdataFiles

 from .common import write_dframe_stdout_file

@@ -33,7 +33,7 @@ class KHUnit(Enum):


 def df(
-    eclfiles: EclFiles,
+    resdatafiles: ResdataFiles,
     zonemap: Dict[int, str],
     use_wellconnstatus: bool = False,
     excl_well_startswith: Optional[str] = None,
@@ -49,14 +49,14 @@
     only.

     Args:
-        eclfiles; EclFiles object
+        resdatafiles: ResdataFiles object
         zonemap: dictionary with layer->zone mapping
        use_wellconnstatus: boolean

     Returns:
         pd.DataFrame with one row per unique combination of well, zone and date.
     """
-    compdat_df = compdat.df(eclfiles, zonemap=zonemap)
+    compdat_df = compdat.df(resdatafiles, zonemap=zonemap)
     if "ZONE" not in compdat_df.columns:
         logger.warning(
             "ZONE column not generated in compdat table. "
@@ -75,13 +75,13 @@
         compdat_df = _excl_well_startswith(compdat_df, excl_well_startswith)

     if use_wellconnstatus:
-        wellconnstatus_df = wellconnstatus.df(eclfiles)
+        wellconnstatus_df = wellconnstatus.df(resdatafiles)
         compdat_df = _merge_compdat_and_connstatus(compdat_df, wellconnstatus_df)

     compdat_df = _aggregate_layer_to_zone(compdat_df)

     # Add metadata as an attribute of the dataframe
-    meta = _get_metadata(eclfiles)
+    meta = _get_metadata(resdatafiles)
     # Slice meta to dataframe columns:
     compdat_df.attrs["meta"] = {
         column_key: meta[column_key] for column_key in compdat_df if column_key in meta
@@ -90,7 +90,7 @@
     return compdat_df


-def _get_ecl_unit_system(eclfiles: EclFiles) -> EclipseUnitSystem:
+def _get_ecl_unit_system(resdatafiles: ResdataFiles) -> EclipseUnitSystem:
     """Returns the unit system of an eclipse deck. The options are \
     METRIC, FIELD, LAB and PVT-M.
@@ -98,16 +98,16 @@
     default unit system in Eclipse.
""" unit_systems = [unitsystem.value for unitsystem in EclipseUnitSystem] - for keyword in eclfiles.get_ecldeck(): + for keyword in resdatafiles.get_ecldeck(): if keyword.name in unit_systems: return EclipseUnitSystem(keyword.name) return EclipseUnitSystem.METRIC -def _get_metadata(eclfiles: EclFiles) -> Dict[str, Dict[str, Any]]: +def _get_metadata(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]: """Provide metadata for the well completion data export""" meta: Dict[str, Dict[str, str]] = {} - unitsystem = _get_ecl_unit_system(eclfiles) + unitsystem = _get_ecl_unit_system(resdatafiles) kh_units = { EclipseUnitSystem.METRIC: KHUnit.METRIC, EclipseUnitSystem.FIELD: KHUnit.FIELD, @@ -288,14 +288,14 @@ def wellcompletiondata_main(args): """Entry-point for module, for command line utility""" logger = getLogger_res2csv(__name__, vars(args)) - eclfiles = EclFiles(args.DATAFILE) + resdatafiles = ResdataFiles(args.DATAFILE) if not Path(args.zonemap).is_file(): wellcompletiondata_df = pd.DataFrame() logger.info(f"Zonemap not found: {args.zonemap}") else: zonemap = common.convert_lyrlist_to_zonemap(common.parse_lyrfile(args.zonemap)) wellcompletiondata_df = df( - eclfiles, zonemap, args.use_wellconnstatus, args.excl_well_startswith + resdatafiles, zonemap, args.use_wellconnstatus, args.excl_well_startswith ) logger.info( f"Well completion data successfully generated with zonemap: {zonemap}" diff --git a/res2df/wellconnstatus.py b/res2df/wellconnstatus.py index 1dcebec8e..765b4ee01 100644 --- a/res2df/wellconnstatus.py +++ b/res2df/wellconnstatus.py @@ -9,14 +9,14 @@ import pandas as pd from res2df import getLogger_res2csv, summary -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles from .common import write_dframe_stdout_file logger = logging.getLogger(__name__) -def df(eclfiles: EclFiles) -> pd.DataFrame: +def df(resdatafiles: ResdataFiles) -> pd.DataFrame: """Exctracts connection status history for each compdat connection that is included in the summary data on the form CPI:WELL,I,J,K. CPI stands for connection productivity index. @@ -28,7 +28,7 @@ def df(eclfiles: EclFiles) -> pd.DataFrame: The output data set is very sparse compared to the CPI summary data. 
""" - smry = summary.df(eclfiles, column_keys="CPI*") + smry = summary.df(resdatafiles, column_keys="CPI*") return _extract_status_changes(smry) @@ -115,9 +115,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def wellconnstatus_main(args): """Entry-point for module, for command line utility""" logger = getLogger_res2csv(__name__, vars(args)) - eclfiles = EclFiles(args.DATAFILE) + resdatafiles = ResdataFiles(args.DATAFILE) - wellconnstatus_df = df(eclfiles) + wellconnstatus_df = df(resdatafiles) write_dframe_stdout_file( wellconnstatus_df, args.output, index=False, caller_logger=logger ) diff --git a/tests/test_common.py b/tests/test_common.py index ed5547312..284c3525b 100644 --- a/tests/test_common.py +++ b/tests/test_common.py @@ -9,7 +9,7 @@ import pandas as pd import pytest -from res2df import common, eclfiles, equil +from res2df import common, equil, resdatafiles try: # pylint: disable=unused-import @@ -147,7 +147,7 @@ def test_parse_opmio_deckrecord(): ) def test_handle_wanted_keywords(wanted, deckstr, supported, expected): """Test that we can handle list of wanted, supported and available keywords.""" - deck = eclfiles.EclFiles.str2deck(deckstr) + deck = resdatafiles.ResdataFiles.str2deck(deckstr) assert common.handle_wanted_keywords(wanted, deck, supported) == expected diff --git a/tests/test_compdat.py b/tests/test_compdat.py index 4fc483b75..a2738da25 100644 --- a/tests/test_compdat.py +++ b/tests/test_compdat.py @@ -5,7 +5,7 @@ import pandas as pd import pytest -from res2df import EclFiles, compdat, res2csv +from res2df import ResdataFiles, compdat, res2csv try: # pylint: disable=unused-import @@ -32,8 +32,8 @@ def test_df(): """Test main dataframe API, only testing that something comes out""" - eclfiles = EclFiles(EIGHTCELLS) - compdat_df = compdat.df(eclfiles) + resdatafiles = ResdataFiles(EIGHTCELLS) + compdat_df = compdat.df(resdatafiles) assert not compdat_df.empty assert "ZONE" in compdat_df assert "K1" in compdat_df @@ -42,8 +42,8 @@ def test_df(): def test_comp2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(EIGHTCELLS) - compdfs = compdat.deck2dfs(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(EIGHTCELLS) + compdfs = compdat.deck2dfs(resdatafiles.get_ecldeck()) assert not compdfs["COMPDAT"].empty assert not compdfs["WELSEGS"].empty @@ -53,7 +53,7 @@ def test_comp2df(): def test_schfile2df(): """Test that we can process individual files""" - deck = EclFiles.file2deck(SCHFILE) + deck = ResdataFiles.file2deck(SCHFILE) compdfs = compdat.deck2dfs(deck) assert not compdfs["COMPDAT"].columns.empty assert not compdfs["COMPDAT"].empty @@ -67,7 +67,7 @@ def test_str_compdat(): -- comments. 
/ """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) compdfs = compdat.deck2dfs(deck) compdat_df = compdfs["COMPDAT"] assert compdat_df.loc[0, "SATN"] == 0 @@ -79,7 +79,7 @@ def test_str_compdat(): 'FOO' 303 1010 031 39 / / """ - compdat_df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"] + compdat_df = compdat.deck2dfs(ResdataFiles.str2deck(schstr))["COMPDAT"] assert len(compdat_df) == 9 assert not compdat_df["DFACT"].values[0] assert not compdat_df["TRAN"].values[0] @@ -114,7 +114,7 @@ def test_str2df(): 'OP1' 166 1 7.4294683E-06 0 / icd on segment 17, cell 41 125 29 / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) compdfs = compdat.deck2dfs(deck) compdat_df = compdfs["COMPDAT"] welsegs = compdfs["WELSEGS"] @@ -182,7 +182,7 @@ def test_tstep(): 'OP1' 35 111 33 33 'SHUT' / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) compdf = compdat.deck2dfs(deck)["COMPDAT"] dates = [str(x) for x in compdf["DATE"].unique()] assert len(dates) == 3 @@ -202,7 +202,7 @@ def test_tstep(): 'OP1' 34 111 32 32 'OPEN' / / """ - assert compdat.deck2dfs(EclFiles.str2deck(schstr_nodate)) == {} + assert compdat.deck2dfs(ResdataFiles.str2deck(schstr_nodate)) == {} # (critical error logged) @@ -215,14 +215,14 @@ def test_unrollcompdatk1k2(): 'OP1' 33 44 10 20 / / """ - df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"] + df = compdat.deck2dfs(ResdataFiles.str2deck(schstr))["COMPDAT"] assert df["I"].unique() == 33 assert df["J"].unique() == 44 assert (df["K1"].values == range(10, 20 + 1)).all() assert (df["K2"].values == range(10, 20 + 1)).all() # Check that we can read withoug unrolling: - df_noroll = compdat.deck2dfs(EclFiles.str2deck(schstr), unroll=False)["COMPDAT"] + df_noroll = compdat.deck2dfs(ResdataFiles.str2deck(schstr), unroll=False)["COMPDAT"] assert len(df_noroll) == 1 @@ -234,7 +234,7 @@ def test_samecellperf(): 'OP2' 1 1 1 1 / / """ - df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"] + df = compdat.deck2dfs(ResdataFiles.str2deck(schstr))["COMPDAT"] assert len(df) == 2 @@ -248,10 +248,10 @@ def test_unrollwelsegs(): 2 3 1 1 1923.9 1689.000 0.1172 0.000015 / / """ - df = compdat.deck2dfs(EclFiles.str2deck(schstr))["WELSEGS"] + df = compdat.deck2dfs(ResdataFiles.str2deck(schstr))["WELSEGS"] assert len(df) == 2 - df = compdat.deck2dfs(EclFiles.str2deck(schstr), unroll=False)["WELSEGS"] + df = compdat.deck2dfs(ResdataFiles.str2deck(schstr), unroll=False)["WELSEGS"] assert len(df) == 1 @@ -267,24 +267,24 @@ def test_unrollbogus(): def test_initmerging(): """Test that we can ask for INIT vectors to be merged into the data""" - eclfiles = EclFiles(REEK) - noinit_df = compdat.df(eclfiles) - df = compdat.df(eclfiles, initvectors=[]) + resdatafiles = ResdataFiles(REEK) + noinit_df = compdat.df(resdatafiles) + df = compdat.df(resdatafiles, initvectors=[]) assert isinstance(df, pd.DataFrame) assert not df.empty - df = compdat.df(eclfiles, initvectors=["FIPNUM", "EQLNUM", "SATNUM"]) + df = compdat.df(resdatafiles, initvectors=["FIPNUM", "EQLNUM", "SATNUM"]) assert "FIPNUM" in df assert "EQLNUM" in df assert "SATNUM" in df assert len(df) == len(noinit_df) - df = compdat.df(eclfiles, initvectors="FIPNUM") + df = compdat.df(resdatafiles, initvectors="FIPNUM") assert "FIPNUM" in df assert len(df) == len(noinit_df) with pytest.raises(AssertionError): - compdat.df(eclfiles, initvectors=2) + compdat.df(resdatafiles, initvectors=2) def test_main_subparsers(tmp_path, mocker): @@ -369,37 +369,39 @@ def 
test_defaulted_compdat_i_j(): # pylint: disable=expression-not-assigned with pytest.raises(ValueError, match="WELSPECS must be provided when I"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_i))["COMPDAT"] + compdat.deck2dfs(ResdataFiles.str2deck(compdat_str_def_i))["COMPDAT"] # I value of 0 also means defaulted: with pytest.raises(ValueError, match="WELSPECS must be provided when I"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_i.replace("1*", "0")))[ + compdat.deck2dfs(ResdataFiles.str2deck(compdat_str_def_i.replace("1*", "0")))[ "COMPDAT" ] with pytest.raises(ValueError, match="WELSPECS must be provided when J"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_j))["COMPDAT"] + compdat.deck2dfs(ResdataFiles.str2deck(compdat_str_def_j))["COMPDAT"] # J value of 0 also means defaulted: with pytest.raises(ValueError, match="WELSPECS must be provided when J"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_j.replace("1*", "0")))[ + compdat.deck2dfs(ResdataFiles.str2deck(compdat_str_def_j.replace("1*", "0")))[ "COMPDAT" ] with pytest.raises(ValueError, match="WELSPECS must be provided"): # Wrong order: - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_i + welspecs_str))["COMPDAT"] + compdat.deck2dfs(ResdataFiles.str2deck(compdat_str_def_i + welspecs_str))[ + "COMPDAT" + ] # Simplest example: - compdat_df = compdat.deck2dfs(EclFiles.str2deck(welspecs_str + compdat_str_def_i))[ - "COMPDAT" - ] + compdat_df = compdat.deck2dfs( + ResdataFiles.str2deck(welspecs_str + compdat_str_def_i) + )["COMPDAT"] assert compdat_df["I"].unique() == [20] assert compdat_df["J"].unique() == [30] # Two wells: compdat_df = compdat.deck2dfs( - EclFiles.str2deck( + ResdataFiles.str2deck( welspecs_str.replace("OP1", "OP2").replace("30", "99") + welspecs_str + compdat_str_def_i @@ -408,14 +410,14 @@ def test_defaulted_compdat_i_j(): # Partial defaulting compdat_df = compdat.deck2dfs( - EclFiles.str2deck(welspecs_str + compdat_str_def_i + compdat_str_nodefaults) + ResdataFiles.str2deck(welspecs_str + compdat_str_def_i + compdat_str_nodefaults) )["COMPDAT"] assert set(compdat_df["I"].unique()) == {20, 55} assert set(compdat_df["J"].unique()) == {30, 66} compdat_df = compdat.deck2dfs( - EclFiles.str2deck( + ResdataFiles.str2deck( welspecs_str.replace("OP1", "OP2").replace("30", "99") + welspecs_str + compdat_str_def_i @@ -430,7 +432,7 @@ def test_defaulted_compdat_i_j(): # Same well redrilled to new location compdat_df = compdat.deck2dfs( - EclFiles.str2deck( + ResdataFiles.str2deck( "DATES\n 1 JAN 2030 /\n/\n" + welspecs_str + compdat_str_def_i @@ -450,17 +452,17 @@ def test_defaulted_compdat_i_j(): # Multisegement well testing def test_msw_schfile2df(): """Test that we can process individual files with AICD and ICD MSW""" - deck = EclFiles.file2deck(SCHFILE_AICD) + deck = ResdataFiles.file2deck(SCHFILE_AICD) compdfs = compdat.deck2dfs(deck) assert not compdfs["WSEGAICD"].empty assert not compdfs["WSEGAICD"].columns.empty - deck = EclFiles.file2deck(SCHFILE_ICD) + deck = ResdataFiles.file2deck(SCHFILE_ICD) compdfs = compdat.deck2dfs(deck) assert not compdfs["WSEGSICD"].empty assert not compdfs["WSEGSICD"].columns.empty - deck = EclFiles.file2deck(SCHFILE_VALV) + deck = ResdataFiles.file2deck(SCHFILE_VALV) compdfs = compdat.deck2dfs(deck) assert not compdfs["WSEGVALV"].empty assert not compdfs["WSEGVALV"].columns.empty @@ -507,7 +509,7 @@ def test_msw_str2df(): OP_6 31 0.0084252 0.00075 1* / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) compdfs = 
compdat.deck2dfs(deck) wsegaicd = compdfs["WSEGAICD"] wsegsicd = compdfs["WSEGSICD"] @@ -539,7 +541,7 @@ def test_wsegaicd(): OPEN 1.0 1.0 1.0 2.43 1.18 10.0 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wsegaicd = compdat.deck2dfs(deck)["WSEGAICD"] pd.testing.assert_frame_equal( wsegaicd, @@ -585,7 +587,7 @@ def test_wsegsicd(): OPEN / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wsegsicd = compdat.deck2dfs(deck)["WSEGSICD"] pd.testing.assert_frame_equal( wsegsicd, @@ -620,7 +622,7 @@ def test_wsegvalv(): WELL_A 31 0.0084252 0.00075 0.5 0.216 0.0005 0.0366 SHUT 0.0008 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"] pd.testing.assert_frame_equal( wsegvalv, @@ -654,7 +656,7 @@ def test_wsegvalv_max_blank(): WELL_A 31 0.0084252 0.00075 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"] pd.testing.assert_frame_equal( wsegvalv, @@ -688,7 +690,7 @@ def test_wsegvalv_max_default(): WELL_A 31 0.0084252 0.00075 6* / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"] pd.testing.assert_frame_equal( wsegvalv, diff --git a/tests/test_eclfiles.py b/tests/test_eclfiles.py index 1102a6ad0..d9f219e69 100644 --- a/tests/test_eclfiles.py +++ b/tests/test_eclfiles.py @@ -3,7 +3,7 @@ import pytest -from res2df import EclFiles +from res2df import ResdataFiles try: # pylint: disable=unused-import @@ -29,52 +29,52 @@ def test_filedescriptors(): pre_fd_count = len(list(fd_dir.glob("*"))) - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) # No opened files yet: assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.close() + resdatafiles.close() # No change, no files to close: assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.get_egrid() + resdatafiles.get_egrid() # This should not leave any file descriptor open assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.get_initfile() + resdatafiles.get_initfile() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._initfile is not None - eclfiles.close() + assert resdatafiles._initfile is not None + resdatafiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._initfile is None + assert resdatafiles._initfile is None - eclfiles.get_rstfile() + resdatafiles.get_rstfile() # Automatically closed by libecl assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._rstfile is not None - eclfiles.close() + assert resdatafiles._rstfile is not None + resdatafiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._rstfile is None + assert resdatafiles._rstfile is None - eclfiles.get_eclsum() + resdatafiles.get_eclsum() assert len(list(fd_dir.glob("*"))) == pre_fd_count + 1 - eclfiles.close() + resdatafiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.get_egridfile() + resdatafiles.get_egridfile() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._egridfile is not None - eclfiles.close() + assert resdatafiles._egridfile is not None + resdatafiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._egridfile is None + assert resdatafiles._egridfile is None - eclfiles.get_rftfile() + resdatafiles.get_rftfile() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert 
eclfiles._rftfile is not None - eclfiles.close() + assert resdatafiles._rftfile is not None + resdatafiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._rftfile is None + assert resdatafiles._rftfile is None - eclfiles.get_ecldeck() + resdatafiles.get_ecldeck() # This should not leave any file descriptor open assert len(list(fd_dir.glob("*"))) == pre_fd_count diff --git a/tests/test_equil.py b/tests/test_equil.py index e886c6939..45bb4e54d 100644 --- a/tests/test_equil.py +++ b/tests/test_equil.py @@ -9,7 +9,7 @@ import pytest from res2df import csv2res, equil, res2csv -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -28,8 +28,8 @@ def test_equil2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - equildf = equil.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + equildf = equil.df(resdatafiles) expected = {} expected["EQUIL"] = pd.DataFrame( [ @@ -85,8 +85,8 @@ def test_equil2df(): def test_df2ecl(tmp_path): """Test that we can write include files to disk""" os.chdir(tmp_path) - eclfiles = EclFiles(EIGHTCELLS) - equildf = equil.df(eclfiles) + resdatafiles = ResdataFiles(EIGHTCELLS) + equildf = equil.df(resdatafiles) equil.df2ecl(equildf, filename="equil.inc") assert Path("equil.inc").is_file() @@ -255,7 +255,9 @@ def test_equil_fromdeck(): assert len(equil.equil_fromdeck(deckstr)) == 2 # correct assert len(equil.equil_fromdeck(deckstr, 2)) == 2 assert len(equil.equil_fromdeck(deckstr, 1)) == 1 - assert len(equil.equil_fromdeck(EclFiles.str2deck(deckstr))) == 1 # (watch out!) + assert ( + len(equil.equil_fromdeck(ResdataFiles.str2deck(deckstr))) == 1 + ) # (watch out!) wrongdeck = """ EQUIL @@ -611,7 +613,7 @@ def test_main_subparser(tmp_path, mocker): ) def test_phases_from_deck(deckstring, expected): """Test that we can extract phase configuration from a deck""" - deck = EclFiles.str2deck(deckstring) + deck = ResdataFiles.str2deck(deckstring) assert equil.phases_from_deck(deck) == expected diff --git a/tests/test_faults.py b/tests/test_faults.py index 99d652c67..0ac482ce1 100644 --- a/tests/test_faults.py +++ b/tests/test_faults.py @@ -8,7 +8,7 @@ import pytest from res2df import faults, res2csv -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -26,8 +26,8 @@ def test_faults2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - faultsdf = faults.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + faultsdf = faults.df(resdatafiles.get_ecldeck()) assert "NAME" in faultsdf assert "I" in faultsdf @@ -46,7 +46,7 @@ def test_str2df(): 'B' 2 3 4 5 6 7 'J' / / """ - deck = EclFiles.str2deck(deckstr) + deck = ResdataFiles.str2deck(deckstr) faultsdf = faults.df(deck) assert len(faultsdf) == 16 @@ -54,8 +54,8 @@ def test_str2df(): def test_nofaults(): """Test on a dataset with no faults""" - eclfiles = EclFiles(EIGHTCELLS) - faultsdf = faults.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(EIGHTCELLS) + faultsdf = faults.df(resdatafiles.get_ecldeck()) assert faultsdf.empty @@ -71,7 +71,7 @@ def test_multiplestr2df(): 'D' 2 2 4 4 10 10 'J' / / """ - deck = EclFiles.str2deck(deckstr) + deck = ResdataFiles.str2deck(deckstr) faultsdf = faults.df(deck).set_index("NAME") assert len(faultsdf) == 23 diff --git a/tests/test_fipreports.py b/tests/test_fipreports.py index 7a75ea301..5f4b0660c 100644 --- a/tests/test_fipreports.py +++ b/tests/test_fipreports.py @@ 
-9,8 +9,8 @@ import pytest from res2df import fipreports, res2csv -from res2df.eclfiles import EclFiles from res2df.fipreports import report_block_lineparser as parser +from res2df.resdatafiles import ResdataFiles TESTDIR = Path(__file__).absolute().parent DATAFILE = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -19,7 +19,7 @@ def test_fipreports2df(): """Test parsing of Reek dataset""" - prtfile = EclFiles(DATAFILE).get_prtfilename() + prtfile = ResdataFiles(DATAFILE).get_prtfilename() fipreport_df = fipreports.df(prtfile) assert len(fipreport_df["REGION"].unique()) == 6 assert len(fipreport_df["DATE"].unique()) == 1 diff --git a/tests/test_grid.py b/tests/test_grid.py index e9df854b4..400782997 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -9,7 +9,7 @@ import pytest from res2df import common, grid, res2csv -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -18,8 +18,8 @@ def test_gridgeometry2df(mocker): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - grid_geom = grid.gridgeometry2df(eclfiles) + resdatafiles = ResdataFiles(REEK) + grid_geom = grid.gridgeometry2df(resdatafiles) assert isinstance(grid_geom, pd.DataFrame) assert not grid_geom.empty @@ -50,38 +50,40 @@ def test_gridgeometry2df(mocker): grid.gridgeometry2df(None) with pytest.raises(ValueError, match="No EGRID file supplied"): - mocker.patch("res2df.eclfiles.EclFiles.get_egridfile", return_value=None) - grid.gridgeometry2df(eclfiles) + mocker.patch( + "res2df.resdatafiles.ResdataFiles.get_egridfile", return_value=None ) + grid.gridgeometry2df(resdatafiles) def test_wrongfile(): - """Test the EclFiles object on nonexistent files""" + """Test the ResdataFiles object on nonexistent files""" # pylint: disable=invalid-name,redefined-builtin # We can initialize this object with bogus: - eclfiles = EclFiles("FOO.DATA") + resdatafiles = ResdataFiles("FOO.DATA") # but when we try to use it, things should fail: with pytest.raises(FileNotFoundError): - grid.init2df(eclfiles) + grid.init2df(resdatafiles) def test_gridzonemap(): """Check that zonemap can be merged automatically by default, and also that there is some API for supplying the zonemap directly as a dictionary""" - eclfiles = EclFiles(EIGHTCELLS) - grid_geom = grid.gridgeometry2df(eclfiles, zonemap=None) + resdatafiles = ResdataFiles(EIGHTCELLS) + grid_geom = grid.gridgeometry2df(resdatafiles, zonemap=None) default_zonemap = grid_geom["ZONE"] - grid_no_zone = grid.gridgeometry2df(eclfiles, zonemap={}) + grid_no_zone = grid.gridgeometry2df(resdatafiles, zonemap={}) assert "ZONE" not in grid_no_zone - assert (grid.df(eclfiles, zonemap=None)["ZONE"] == default_zonemap).all() + assert (grid.df(resdatafiles, zonemap=None)["ZONE"] == default_zonemap).all() - df_no_zone = grid.df(eclfiles, zonemap={}) + df_no_zone = grid.df(resdatafiles, zonemap={}) assert "ZONE" not in df_no_zone - df_custom_zone = grid.gridgeometry2df(eclfiles, zonemap={1: "FIRSTLAYER"}) + df_custom_zone = grid.gridgeometry2df(resdatafiles, zonemap={1: "FIRSTLAYER"}) assert "ZONE" in df_custom_zone assert set(df_custom_zone[df_custom_zone["K"] == 1]["ZONE"].unique()) == set( ["FIRSTLAYER"] @@ -89,14 +91,14 @@ def test_gridzonemap(): assert len(df_custom_zone) == len(grid_no_zone) df_bogus_zones = grid.gridgeometry2df( - eclfiles, zonemap={999999: "nonexistinglayer"} + resdatafiles, zonemap={999999: "nonexistinglayer"} )
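# (The zonemap argument exercised above is a plain dict from K layer index to
# zone name. A minimal, hedged usage sketch outside these tests, with a
# hypothetical "SOME_CASE.DATA" deck path:
#
#     from res2df import grid
#     from res2df.resdatafiles import ResdataFiles
#
#     dframe = grid.gridgeometry2df(
#         ResdataFiles("SOME_CASE.DATA"), zonemap={1: "UPPER", 2: "LOWER"}
#     )
#
# Layers absent from the mapping end up with NaN in the ZONE column, which is
# what the assertion below verifies for the bogus key 999999.)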
assert pd.isnull(df_bogus_zones["ZONE"]).all() # Test a custom "subzone" map via direct usage of merge_zones on a dataframe # where ZONE already exists: - dframe = grid.df(eclfiles) + dframe = grid.df(resdatafiles) subzonemap = {1: "SUBZONE1", 2: "SUBZONE2"} dframe = common.merge_zones(dframe, subzonemap, zoneheader="SUBZONE", kname="K") assert (dframe["ZONE"] == default_zonemap).all() @@ -107,20 +109,22 @@ def test_gridzonemap(): def test_merge_initvectors(): """Test merging of INIT-vectors into the grid dataframe""" - eclfiles = EclFiles(REEK) - assert grid.merge_initvectors(eclfiles, pd.DataFrame(), []).empty + resdatafiles = ResdataFiles(REEK) + assert grid.merge_initvectors(resdatafiles, pd.DataFrame(), []).empty foo_df = pd.DataFrame([{"FOO": 1}]) - pd.testing.assert_frame_equal(grid.merge_initvectors(eclfiles, foo_df, []), foo_df) + pd.testing.assert_frame_equal( + grid.merge_initvectors(resdatafiles, foo_df, []), foo_df + ) with pytest.raises(ValueError, match="All of the columns"): - grid.merge_initvectors(eclfiles, foo_df, ["NONEXISTING"]) + grid.merge_initvectors(resdatafiles, foo_df, ["NONEXISTING"]) minimal_df = pd.DataFrame([{"I": 10, "J": 11, "K": 12}]) with pytest.raises(KeyError): - grid.merge_initvectors(eclfiles, minimal_df, ["NONEXISTING"]) + grid.merge_initvectors(resdatafiles, minimal_df, ["NONEXISTING"]) - withporo = grid.merge_initvectors(eclfiles, minimal_df, ["PORO"]) + withporo = grid.merge_initvectors(resdatafiles, minimal_df, ["PORO"]) pd.testing.assert_frame_equal( withporo, minimal_df.assign(PORO=0.221848), check_dtype=False ) @@ -128,18 +132,20 @@ def test_merge_initvectors(): with pytest.raises(ValueError): # ijknames must be length 3 grid.merge_initvectors( - eclfiles, minimal_df, ["PORO"], ijknames=["I", "J", "K", "L"] + resdatafiles, minimal_df, ["PORO"], ijknames=["I", "J", "K", "L"] ) with pytest.raises(ValueError): - grid.merge_initvectors(eclfiles, minimal_df, ["PORO"], ijknames=["I", "J"]) + grid.merge_initvectors(resdatafiles, minimal_df, ["PORO"], ijknames=["I", "J"]) with pytest.raises(ValueError, match="All of the columns"): - grid.merge_initvectors(eclfiles, minimal_df, ["PORO"], ijknames=["A", "B", "C"]) + grid.merge_initvectors( + resdatafiles, minimal_df, ["PORO"], ijknames=["A", "B", "C"] ) def test_init2df(): """Test that dataframe with INIT vectors can be produced""" - eclfiles = EclFiles(REEK) - init_df = grid.init2df(eclfiles) + resdatafiles = ResdataFiles(REEK) + init_df = grid.init2df(resdatafiles) assert isinstance(init_df, pd.DataFrame) # pylint: disable=unsupported-membership-test # false positive on Dataframe @@ -158,8 +164,8 @@ def test_grid_df(): """Test that dataframe with INIT vectors and coordinates can be produced""" - eclfiles = EclFiles(EIGHTCELLS) - grid_df = grid.df(eclfiles) + resdatafiles = ResdataFiles(EIGHTCELLS) + grid_df = grid.df(resdatafiles) assert isinstance(grid_df, pd.DataFrame) assert not grid_df.empty @@ -184,8 +190,8 @@ def test_df2ecl(tmp_path): """Test if we are able to output include files for grid data""" - eclfiles = EclFiles(REEK) - grid_df = grid.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + grid_df = grid.df(resdatafiles) fipnum_str = grid.df2ecl(grid_df, "FIPNUM", dtype=int) assert grid.df2ecl(grid_df, "FIPNUM", dtype="int", nocomments=True) == grid.df2ecl( @@ -203,7 +209,7 @@ fipnum_str_nocomment = grid.df2ecl(grid_df, "FIPNUM", dtype=int, nocomments=True) assert "--" not in fipnum_str_nocomment fipnum2_str = grid.df2ecl( -
grid_df, "FIPNUM", dtype=int, eclfiles=eclfiles, nocomments=True + grid_df, "FIPNUM", dtype=int, resdatafiles=resdatafiles, nocomments=True ) # This would mean that we guessed the correct global size in the first run assert fipnum_str_nocomment == fipnum2_str @@ -237,20 +243,20 @@ def test_df2ecl(tmp_path): grid.df2ecl(grid_df, ["PERMRR"]) # Check when we have restart info included: - gr_rst = grid.df(eclfiles, rstdates="all") + gr_rst = grid.df(resdatafiles, rstdates="all") fipnum_str_rst = grid.df2ecl(gr_rst, "FIPNUM", dtype=int, nocomments=True) assert fipnum_str_rst == fipnum_str_nocomment # When dates are stacked, there are NaN's in the FIPNUM column, # which should be gracefully ignored. - gr_rst_stacked = grid.df(eclfiles, rstdates="all", stackdates=True) + gr_rst_stacked = grid.df(resdatafiles, rstdates="all", stackdates=True) fipnum_str_rst = grid.df2ecl(gr_rst_stacked, "FIPNUM", dtype=int, nocomments=True) assert fipnum_str_rst == fipnum_str_nocomment # dateinheaders here will be ignored due to stackdates: pd.testing.assert_frame_equal( gr_rst_stacked, - grid.df(eclfiles, rstdates="all", stackdates=True, dateinheaders=True), + grid.df(resdatafiles, rstdates="all", stackdates=True, dateinheaders=True), ) @@ -267,25 +273,25 @@ def test_df2ecl_mock(): def test_subvectors(): """Test that we can ask for a few vectors only""" - eclfiles = EclFiles(EIGHTCELLS) - init_df = grid.init2df(eclfiles, "PORO") + resdatafiles = ResdataFiles(EIGHTCELLS) + init_df = grid.init2df(resdatafiles, "PORO") assert "PORO" in init_df assert "PERMX" not in init_df assert "PORV" not in init_df - init_df = grid.init2df(eclfiles, "P*") + init_df = grid.init2df(resdatafiles, "P*") assert "PORO" in init_df assert "PERMX" in init_df assert "PVTNUM" in init_df assert "SATNUM" not in init_df - init_df = grid.init2df(eclfiles, ["P*"]) + init_df = grid.init2df(resdatafiles, ["P*"]) assert "PORO" in init_df assert "PERMX" in init_df assert "PVTNUM" in init_df assert "SATNUM" not in init_df - init_df = grid.init2df(eclfiles, ["P*", "*NUM"]) + init_df = grid.init2df(resdatafiles, ["P*", "*NUM"]) assert "PORO" in init_df assert "PERMX" in init_df assert "PVTNUM" in init_df @@ -313,55 +319,59 @@ def test_dropconstants(): def test_df(): """Test the df function""" - eclfiles = EclFiles(REEK) + resdatafiles = ResdataFiles(REEK) # assert error.. with pytest.raises(TypeError): # pylint: disable=no-value-for-parameter grid.df() - grid_df = grid.df(eclfiles) + grid_df = grid.df(resdatafiles) assert not grid_df.empty assert "I" in grid_df # From GRID assert "PORO" in grid_df # From INIT assert "SOIL" not in grid_df # We do not get RST unless we ask for it. - grid_df = grid.df(eclfiles, vectors="*") + grid_df = grid.df(resdatafiles, vectors="*") assert "I" in grid_df # From GRID assert "PORO" in grid_df # From INIT assert "SOIL" not in grid_df # We do not get RST unless we ask for it. - grid_df = grid.df(eclfiles, vectors=["*"]) + grid_df = grid.df(resdatafiles, vectors=["*"]) assert "I" in grid_df # From GRID assert "PORO" in grid_df # From INIT assert "SOIL" not in grid_df # We do not get RST unless we ask for it. 
- grid_df = grid.df(eclfiles, vectors="PRESSURE") + grid_df = grid.df(resdatafiles, vectors="PRESSURE") assert "I" in grid_df assert "PRESSURE" not in grid_df # that vector is only in RST assert len(grid_df) == 35817 assert "VOLUME" in grid_df - grid_df = grid.df(eclfiles, vectors=["PRESSURE"]) + grid_df = grid.df(resdatafiles, vectors=["PRESSURE"]) assert "I" in grid_df assert not grid_df.empty assert "PRESSURE" not in grid_df geometry_cols = len(grid_df.columns) - grid_df = grid.df(eclfiles, vectors=["PRESSURE"], rstdates="last", stackdates=True) + grid_df = grid.df( + resdatafiles, vectors=["PRESSURE"], rstdates="last", stackdates=True + ) assert "PRESSURE" in grid_df assert len(grid_df.columns) == geometry_cols + 2 assert "DATE" in grid_df # Present because of stackdates - grid_df = grid.df(eclfiles, vectors="PRESSURE", rstdates="last") + grid_df = grid.df(resdatafiles, vectors="PRESSURE", rstdates="last") assert "PRESSURE" in grid_df assert len(grid_df.columns) == geometry_cols + 1 - grid_df = grid.df(eclfiles, vectors="PRESSURE", rstdates="last", dateinheaders=True) + grid_df = grid.df( + resdatafiles, vectors="PRESSURE", rstdates="last", dateinheaders=True + ) assert "PRESSURE" not in grid_df assert "PRESSURE@2001-08-01" in grid_df grid_df = grid.df( - eclfiles, vectors=["PORO", "PRESSURE"], rstdates="all", stackdates=True + resdatafiles, vectors=["PORO", "PRESSURE"], rstdates="all", stackdates=True ) assert "PRESSURE" in grid_df assert len(grid_df.columns) == geometry_cols + 3 @@ -393,20 +403,20 @@ def test_df(): pd.testing.assert_frame_equal(df1, df3) pd.testing.assert_frame_equal(df1, df4) - grid_df = grid.df(eclfiles, vectors="PORO") + grid_df = grid.df(resdatafiles, vectors="PORO") assert "I" in grid_df assert "PORO" in grid_df assert len(grid_df) == 35817 assert "DATE" not in grid_df - grid_df = grid.df(eclfiles, vectors="PORO", rstdates="all") + grid_df = grid.df(resdatafiles, vectors="PORO", rstdates="all") assert "I" in grid_df assert "PORO" in grid_df assert "DATE" not in grid_df # (no RST columns, so no DATE info in the dataframe) # (warnings should be printed) - grid_df = grid.df(eclfiles, vectors="PORO", rstdates="all", stackdates=True) + grid_df = grid.df(resdatafiles, vectors="PORO", rstdates="all", stackdates=True) assert "I" in grid_df assert "PORO" in grid_df assert "DATE" not in grid_df @@ -499,13 +509,13 @@ def test_main_arrow(tmp_path, mocker): def test_get_available_rst_dates(): """Test the support of dates in restart files""" - eclfiles = EclFiles(REEK) - # rstfile = eclfiles.get_rstfile() + resdatafiles = ResdataFiles(REEK) + # rstfile = resdatafiles.get_rstfile() - alldates = grid.get_available_rst_dates(eclfiles) + alldates = grid.get_available_rst_dates(resdatafiles) assert len(alldates) == 4 - didx = grid.dates2rstindices(eclfiles, "all") + didx = grid.dates2rstindices(resdatafiles, "all") assert len(didx[0]) == len(alldates) assert len(didx[1]) == len(alldates) assert isinstance(didx[0][0], int) @@ -513,38 +523,40 @@ def test_get_available_rst_dates(): assert didx[1][0] == alldates[0] assert didx[1][-1] == alldates[-1] - somedate = grid.dates2rstindices(eclfiles, "2000-07-01") + somedate = grid.dates2rstindices(resdatafiles, "2000-07-01") assert somedate[1] == [alldates[1]] with pytest.raises(ValueError, match="date 1999-09-09 not found in UNRST file"): - grid.dates2rstindices(eclfiles, "1999-09-09") + grid.dates2rstindices(resdatafiles, "1999-09-09") with pytest.raises(ValueError, match="date 1999-0909 not understood"): - grid.dates2rstindices(eclfiles, 
"1999-0909") + grid.dates2rstindices(resdatafiles, "1999-0909") - expl_date = grid.dates2rstindices(eclfiles, datetime.date(2000, 7, 1)) + expl_date = grid.dates2rstindices(resdatafiles, datetime.date(2000, 7, 1)) assert expl_date[1] == [alldates[1]] expl_datetime = grid.dates2rstindices( - eclfiles, datetime.datetime(2000, 7, 1, 0, 0, 0) + resdatafiles, datetime.datetime(2000, 7, 1, 0, 0, 0) ) assert expl_datetime[1] == [alldates[1]] - expl_list_datetime = grid.dates2rstindices(eclfiles, [datetime.date(2000, 7, 1)]) + expl_list_datetime = grid.dates2rstindices( + resdatafiles, [datetime.date(2000, 7, 1)] + ) assert expl_list_datetime[1] == [alldates[1]] # For list input, only datetime.date objects are allowed: expl_list2_date = grid.dates2rstindices( - eclfiles, [datetime.date(2000, 7, 1), datetime.date(2001, 2, 1)] + resdatafiles, [datetime.date(2000, 7, 1), datetime.date(2001, 2, 1)] ) assert expl_list2_date[1] == [alldates[1], alldates[2]] with pytest.raises(ValueError, match="None of the requested dates were found"): - grid.dates2rstindices(eclfiles, ["2000-07-01", "2001-02-01"]) + grid.dates2rstindices(resdatafiles, ["2000-07-01", "2001-02-01"]) with pytest.raises(ValueError, match="None of the requested dates were found"): grid.dates2rstindices( - eclfiles, + resdatafiles, [ datetime.datetime(2000, 7, 1, 0, 0, 0), datetime.datetime(2001, 2, 1, 0, 0, 0), @@ -552,40 +564,43 @@ def test_get_available_rst_dates(): ) with pytest.raises(ValueError, match="not understood"): - grid.dates2rstindices(eclfiles, {"2000-07-01": "2001-02-01"}) + grid.dates2rstindices(resdatafiles, {"2000-07-01": "2001-02-01"}) - first = grid.dates2rstindices(eclfiles, "first") + first = grid.dates2rstindices(resdatafiles, "first") assert first[1][0] == alldates[0] - last = grid.dates2rstindices(eclfiles, "last") + last = grid.dates2rstindices(resdatafiles, "last") assert last[1][0] == alldates[-1] - dates = grid.get_available_rst_dates(eclfiles) + dates = grid.get_available_rst_dates(resdatafiles) assert isinstance(dates, list) # Test with missing RST file: - eclfiles = EclFiles("BOGUS.DATA") + resdatafiles = ResdataFiles("BOGUS.DATA") with pytest.raises(IOError): - eclfiles.get_rstfile() + resdatafiles.get_rstfile() def test_rst2df(): """Test producing dataframes from restart files""" - eclfiles = EclFiles(REEK) - assert grid.rst2df(eclfiles, "first").shape == (35817, 24) - assert grid.rst2df(eclfiles, "last").shape == (35817, 24) - assert grid.rst2df(eclfiles, "all").shape == (35817, 23 * 4 + 1) + resdatafiles = ResdataFiles(REEK) + assert grid.rst2df(resdatafiles, "first").shape == (35817, 24) + assert grid.rst2df(resdatafiles, "last").shape == (35817, 24) + assert grid.rst2df(resdatafiles, "all").shape == (35817, 23 * 4 + 1) - assert "SOIL" in grid.rst2df(eclfiles, date="first", dateinheaders=False) + assert "SOIL" in grid.rst2df(resdatafiles, date="first", dateinheaders=False) assert ( - "SOIL@2000-01-01" in grid.rst2df(eclfiles, "first", dateinheaders=True).columns + "SOIL@2000-01-01" + in grid.rst2df(resdatafiles, "first", dateinheaders=True).columns ) - rst_df = grid.rst2df(eclfiles, "first", stackdates=True) + rst_df = grid.rst2df(resdatafiles, "first", stackdates=True) assert "DATE" in rst_df assert rst_df["DATE"].unique()[0] == "2000-01-01" - rst_df = grid.rst2df(eclfiles, "all", stackdates=True) - assert len(rst_df["DATE"].unique()) == len(grid.get_available_rst_dates(eclfiles)) + rst_df = grid.rst2df(resdatafiles, "all", stackdates=True) + assert len(rst_df["DATE"].unique()) == len( + 
grid.get_available_rst_dates(resdatafiles) + ) # "DATE" and "active" are now the extra columns: assert rst_df.shape == (4 * 35817, 23 + 2) @@ -599,21 +614,21 @@ def test_rst2df(): assert sum(nancols) == 1 # All other columns are "False" # Check vector slicing: - rst_df = grid.rst2df(eclfiles, "first", vectors="S???") + rst_df = grid.rst2df(resdatafiles, "first", vectors="S???") assert rst_df.shape == (35817, 4) assert "SGAS" in rst_df assert "SWAT" in rst_df assert "SOIL" in rst_df # This is actually computed assert "FIPWAT" not in rst_df - rst_df = grid.rst2df(eclfiles, "first", vectors=["PRESSURE", "SWAT"]) + rst_df = grid.rst2df(resdatafiles, "first", vectors=["PRESSURE", "SWAT"]) assert "PRESSURE" in rst_df assert "SWAT" in rst_df assert "SGAS" not in rst_df assert "SOIL" not in rst_df # Check that we can avoid getting SOIL if we are explicit: - rst_df = grid.rst2df(eclfiles, "first", vectors=["SGAS", "SWAT"]) + rst_df = grid.rst2df(resdatafiles, "first", vectors=["SGAS", "SWAT"]) assert "SOIL" not in rst_df assert "SGAS" in rst_df assert "SWAT" in rst_df diff --git a/tests/test_gruptree.py b/tests/test_gruptree.py index 5adfb6c8d..1163d027f 100644 --- a/tests/test_gruptree.py +++ b/tests/test_gruptree.py @@ -8,7 +8,7 @@ import pytest from res2df import gruptree, res2csv -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -27,8 +27,8 @@ def test_eightcells_dataset(): """Test Eightcells dataset""" - eclfiles = EclFiles(EIGHTCELLS) - gruptree_df = gruptree.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(EIGHTCELLS) + gruptree_df = gruptree.df(resdatafiles.get_ecldeck()) expected_dframe = pd.DataFrame( [ @@ -44,8 +44,8 @@ def test_eightcells_dataset(): def test_gruptree2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - grupdf = gruptree.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + grupdf = gruptree.df(resdatafiles.get_ecldeck()) assert not grupdf.empty assert len(grupdf["DATE"].unique()) == 5 @@ -53,7 +53,7 @@ def test_gruptree2df(): assert len(grupdf["PARENT"].dropna().unique()) == 3 assert set(grupdf["KEYWORD"].unique()) == set(["GRUPTREE", "WELSPECS"]) - grupdfnowells = gruptree.df(eclfiles.get_ecldeck(), welspecs=False) + grupdfnowells = gruptree.df(resdatafiles.get_ecldeck(), welspecs=False) assert len(grupdfnowells["KEYWORD"].unique()) == 1 assert grupdf["PARENT"].dropna().unique()[0] == "FIELD" @@ -75,7 +75,7 @@ def test_str2df(): / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck) assert grupdf.dropna().empty # the DATE is empty @@ -118,7 +118,7 @@ def test_grupnet_rst_docs(tmp_path): / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck) grupdf[["DATE", "CHILD", "PARENT", "KEYWORD"]].to_csv("gruptree.csv", index=False) grupdf.to_csv("gruptreenet.csv", index=False) @@ -161,7 +161,7 @@ def test_grupnetdf(): / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck, startdate="2000-01-01") print(grupdf) assert "TERMINAL_PRESSURE" in grupdf @@ -308,7 +308,7 @@ def test_dict2treelib_deprecated(): def test_grupnetroot(schstr, expected_dframe, expected_tree): """Test that terminal pressure of the tree root can be included in the dataframe (with an empty parent)""" - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck, startdate="2000-01-01") non_default_columns = 
["CHILD", "PARENT", "TERMINAL_PRESSURE"] pd.testing.assert_frame_equal( @@ -414,7 +414,7 @@ def test_edge_dataframe2dict(dframe, expected): def test_emptytree_strdeck(): """Test empty schedule sections. Don't want to crash""" schstr = "" - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck) assert grupdf.empty gruptreedict = gruptree.edge_dataframe2dict(grupdf) @@ -461,7 +461,7 @@ def test_tstep(): / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) grupdf = gruptree.df(deck) assert len(grupdf["DATE"].unique()) == 2 print(grupdf) @@ -724,7 +724,7 @@ def test_branprop_nodeprop(schstr, expected_dframe, check_columns): """Testing that the gruptree dataframe works correctly when the schedule string contains BRANPROP and NODEPROP """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) dframe = gruptree.df(deck).reset_index() expected_dframe.DATE = pd.to_datetime(expected_dframe.DATE) pd.testing.assert_frame_equal( @@ -789,5 +789,5 @@ def test_prettyprint(): """ - dframe = gruptree.df(EclFiles.str2deck(schstr)) + dframe = gruptree.df(ResdataFiles.str2deck(schstr)) assert gruptree.prettyprint(dframe).strip() == expected_prettyprint.strip() diff --git a/tests/test_init.py b/tests/test_init.py index 5fcdd7af9..d1c197708 100644 --- a/tests/test_init.py +++ b/tests/test_init.py @@ -15,7 +15,7 @@ def test_init(): for submodule in res2df.SUBMODULES: assert "res2df." + submodule in sys.modules - # The Eclfiles object inside eclfiles should be lifted up to top-level: - assert hasattr(res2df, "EclFiles") + # The Eclfiles object inside resdatafiles should be lifted up to top-level: + assert hasattr(res2df, "ResdataFiles") assert isinstance(res2df.__version__, str) diff --git a/tests/test_nnc.py b/tests/test_nnc.py index 9aaa3b817..48a64b94c 100644 --- a/tests/test_nnc.py +++ b/tests/test_nnc.py @@ -9,7 +9,7 @@ import pytest from res2df import faults, nnc, res2csv, trans -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -27,8 +27,8 @@ def test_nnc2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - nncdf = nnc.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + nncdf = nnc.df(resdatafiles) assert not nncdf.empty assert "I1" in nncdf @@ -48,14 +48,14 @@ def test_nnc2df(): def test_no_nnc(): """Test nnc on an Eclipse case with no NNCs""" - eclfiles = EclFiles(EIGHTCELLS) - assert nnc.df(eclfiles).empty + resdatafiles = ResdataFiles(EIGHTCELLS) + assert nnc.df(resdatafiles).empty def test_nnc2df_coords(): """Test that we are able to add coordinates""" - eclfiles = EclFiles(REEK) - gnncdf = nnc.df(eclfiles, coords=True) + resdatafiles = ResdataFiles(REEK) + gnncdf = nnc.df(resdatafiles, coords=True) assert not gnncdf.empty assert "X" in gnncdf assert "Y" in gnncdf @@ -65,9 +65,9 @@ def test_nnc2df_coords(): @pytest.mark.skipif(not HAVE_OPM, reason="Requires OPM") def test_nnc2df_faultnames(): """Add faultnames from FAULTS keyword to connections""" - eclfiles = EclFiles(REEK) - nncdf = nnc.df(eclfiles) - faultsdf = faults.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + nncdf = nnc.df(resdatafiles) + faultsdf = faults.df(resdatafiles.get_ecldeck()) merged = pd.merge( nncdf, @@ -89,8 +89,8 @@ def test_nnc2df_faultnames(): def test_df2ecl_editnnc(tmp_path): """Test generation of EDITNNC keyword""" - eclfiles = EclFiles(REEK) - nncdf = nnc.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + nncdf = 
nnc.df(resdatafiles) os.chdir(tmp_path) nncdf["TRANM"] = 2 @@ -109,11 +109,11 @@ assert "avg multiplier" not in editnnc # Test compatibility with trans module: - trans_df = trans.df(eclfiles, addnnc=True) + trans_df = trans.df(resdatafiles, addnnc=True) editnnc = nnc.df2ecl_editnnc(trans_df.assign(TRANM=0.3)) assert "avg multiplier 0.3" in editnnc or "avg multiplier 0.29999" in editnnc - print(nnc.df2ecl_editnnc(nnc.df(eclfiles).head(4).assign(TRANM=0.1))) + print(nnc.df2ecl_editnnc(nnc.df(resdatafiles).head(4).assign(TRANM=0.1))) @pytest.mark.skipif(not HAVE_OPM, reason="Requires OPM") diff --git a/tests/test_parameters.py b/tests/test_parameters.py index a4920045d..1f762e65c 100644 --- a/tests/test_parameters.py +++ b/tests/test_parameters.py @@ -7,8 +7,8 @@ import pytest import yaml -from res2df.eclfiles import EclFiles from res2df.parameters import find_parameter_files, load, load_all +from res2df.resdatafiles import ResdataFiles TESTDIR = Path(__file__).absolute().parent DATAFILE = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -16,12 +16,12 @@ def test_parameters(): """Test import of parameters.txt++""" - eclfiles = EclFiles(DATAFILE) + resdatafiles = ResdataFiles(DATAFILE) # NB: This test easily fails due to remnants of other test code.. - assert not find_parameter_files(eclfiles) + assert not find_parameter_files(resdatafiles) - parameterstxt = Path(eclfiles.get_path()) / "parameters.txt" + parameterstxt = Path(resdatafiles.get_path()) / "parameters.txt" # If this exists, it is a remnant from test code that has # crashed. It should NOT be in git. if parameterstxt.is_file(): @@ -32,10 +32,10 @@ assert "FOO" in param_dict assert "BAR" in param_dict - assert len(find_parameter_files(eclfiles)) == 1 + assert len(find_parameter_files(resdatafiles)) == 1 parameterstxt.unlink() - parameterstxt = Path(eclfiles.get_path()).parent / "parameters.txt" + parameterstxt = Path(resdatafiles.get_path()).parent / "parameters.txt" if parameterstxt.is_file(): parameterstxt.unlink() parameterstxt.write_text("FOO 1\nBAR 3\nCONTACT:BARF 2700", encoding="utf-8") @@ -45,33 +45,33 @@ assert "BAR" in param_dict assert param_dict["BAR"] == 3 assert param_dict["CONTACT:BARF"] == 2700 - assert len(find_parameter_files(eclfiles)) == 1 + assert len(find_parameter_files(resdatafiles)) == 1 parameterstxt.unlink() # Typical parameters.json structure: The group "CONTACT" is assumed to have # duplicate information, and is to be ignored dump_me = {"FOO": 1, "BAR": "com", "CONTACT:BARF": 2700, "CONTACT": {"BARF": 2700}} - parametersyml = Path(eclfiles.get_path()) / "parameters.yml" + parametersyml = Path(resdatafiles.get_path()) / "parameters.yml" if parametersyml.is_file(): parametersyml.unlink() parametersyml.write_text(yaml.dump(dump_me), encoding="utf-8") assert Path(parametersyml).is_file() - assert len(find_parameter_files(eclfiles)) == 1 + assert len(find_parameter_files(resdatafiles)) == 1 param_dict = load(parametersyml) assert "FOO" in param_dict assert "BAR" in param_dict assert param_dict["BAR"] == "com" parametersyml.unlink() - parametersjson = Path(eclfiles.get_path()) / "parameters.json" + parametersjson = Path(resdatafiles.get_path()) / "parameters.json" if parametersjson.is_file(): parametersjson.unlink() parametersjson.write_text(json.dumps(dump_me), encoding="utf-8") assert Path(parametersjson).is_file() - assert len(find_parameter_files(eclfiles)) == 1 - param_dict = load(find_parameter_files(eclfiles)[0])
- param_dict_m = load_all(find_parameter_files(eclfiles)) + assert len(find_parameter_files(resdatafiles)) == 1 + param_dict = load(find_parameter_files(resdatafiles)[0]) + param_dict_m = load_all(find_parameter_files(resdatafiles)) assert "FOO" in param_dict assert "BAR" in param_dict assert param_dict["BAR"] == "com" @@ -81,12 +81,12 @@ def test_parameters(): def test_multiple_parameters(): """Test what happens when we have duplicate parameter files""" - eclfiles = EclFiles(DATAFILE) - parametersjson = Path(eclfiles.get_path()) / "parameters.json" - parameterstxt = Path(eclfiles.get_path()).parent / "parameters.txt" + resdatafiles = ResdataFiles(DATAFILE) + parametersjson = Path(resdatafiles.get_path()) / "parameters.json" + parameterstxt = Path(resdatafiles.get_path()).parent / "parameters.txt" parameterstxt.write_text("FOO 1\nBAR 4", encoding="utf-8") parametersjson.write_text(json.dumps({"BAR": 5, "COM": 6}), encoding="utf-8") - param_dict = load_all(find_parameter_files(eclfiles)) + param_dict = load_all(find_parameter_files(resdatafiles)) assert len(param_dict) == 3 assert param_dict["BAR"] == 5 # json has precedence over txt parametersjson.unlink() diff --git a/tests/test_pillars.py b/tests/test_pillars.py index c604a9009..1edb0c6c4 100644 --- a/tests/test_pillars.py +++ b/tests/test_pillars.py @@ -6,7 +6,7 @@ import pytest from res2df import grid, pillars, res2csv -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -14,8 +14,8 @@ def test_pillars(): """Test that we can build a dataframe of pillar statistics""" - eclfiles = EclFiles(REEK) - pillars_df = pillars.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + pillars_df = pillars.df(resdatafiles) assert "PILLAR" in pillars_df assert "VOLUME" in pillars_df assert "PORV" in pillars_df @@ -30,25 +30,27 @@ def test_pillars(): assert "GOC" not in pillars_df assert len(pillars_df) == 2560 - pillars_df = pillars.df(eclfiles, region="FIPNUM") + pillars_df = pillars.df(resdatafiles, region="FIPNUM") assert "FIPNUM" in pillars_df assert len(pillars_df["FIPNUM"].unique()) == 6 assert "OILVOL" not in pillars_df - pillars_df = pillars.df(eclfiles, rstdates="first") - firstdate = str(grid.dates2rstindices(eclfiles, "first")[1][0]) + pillars_df = pillars.df(resdatafiles, rstdates="first") + firstdate = str(grid.dates2rstindices(resdatafiles, "first")[1][0]) assert "OILVOL@" + firstdate in pillars_df assert "GASVOL@" + firstdate in pillars_df assert "WATVOL@" + firstdate in pillars_df - pillars_df = pillars.df(eclfiles, rstdates="last", soilcutoff=0.2, sgascutoff=0.2) - lastdate = str(grid.dates2rstindices(eclfiles, "last")[1][0]) + pillars_df = pillars.df( + resdatafiles, rstdates="last", soilcutoff=0.2, sgascutoff=0.2 + ) + lastdate = str(grid.dates2rstindices(resdatafiles, "last")[1][0]) assert "OWC@" + lastdate in pillars_df assert "GOC@" + lastdate not in pillars_df # Because the dataset has no GAS... 
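# (Contact columns are suffixed with the restart date they were computed for,
# e.g. "OWC@2003-01-02", and the cutoff arguments steer how the contacts are
# picked from the saturations. A minimal sketch, reusing the REEK fixture
# from the top of this file:
#
#     contacts = pillars.df(ResdataFiles(REEK), rstdates="last",
#                           soilcutoff=0.2, sgascutoff=0.2)
#     owc_cols = [col for col in contacts.columns if col.startswith("OWC@")]
# )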
# Grouping by unknowns only triggers a warning pd.testing.assert_frame_equal( - pillars.df(eclfiles), pillars.df(eclfiles, region="FOOBAR") + pillars.df(resdatafiles), pillars.df(resdatafiles, region="FOOBAR") ) diff --git a/tests/test_pvt.py b/tests/test_pvt.py index 8f707a182..6b77beda2 100644 --- a/tests/test_pvt.py +++ b/tests/test_pvt.py @@ -9,7 +9,7 @@ import pytest from res2df import csv2res, pvt, res2csv -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -33,7 +33,7 @@ def test_pvto_strings(): 18 25 1.14 0.59 / / -- One table (pvtnum=1), two records (two gor's) """ - dframe = pvt.pvto_fromdeck(EclFiles.str2deck(pvto_deck)) + dframe = pvt.pvto_fromdeck(ResdataFiles.str2deck(pvto_deck)) assert "PVTNUM" in dframe assert "RS" in dframe assert "PRESSURE" in dframe @@ -64,7 +64,7 @@ def test_pvto_strings(): 19 30 1.14 0.59 / / """ - dframe = pvt.pvto_fromdeck(EclFiles.str2deck(pvto_deck)) + dframe = pvt.pvto_fromdeck(ResdataFiles.str2deck(pvto_deck)) assert len(dframe) == 6 assert "PVTNUM" in dframe assert set(dframe["PVTNUM"].astype(int).unique()) == {1, 2} @@ -160,8 +160,8 @@ def test_pvdo_string(): def test_pvt_reek(): """Test that the Reek PVT input can be parsed individually""" - eclfiles = EclFiles(REEK) - pvto_df = pvt.pvto_fromdeck(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + pvto_df = pvt.pvto_fromdeck(resdatafiles.get_ecldeck()) assert "PVTNUM" in pvto_df assert "PRESSURE" in pvto_df assert "VOLUMEFACTOR" in pvto_df @@ -180,7 +180,7 @@ def test_pvt_reek(): dframe_via_string = pvt.pvto_fromdeck(pvt.df2ecl_pvto(pvto_df)) pd.testing.assert_frame_equal(dframe_via_string, pvto_df) - density_df = pvt.density_fromdeck(eclfiles.get_ecldeck()) + density_df = pvt.density_fromdeck(resdatafiles.get_ecldeck()) pd.testing.assert_frame_equal( density_df, pd.DataFrame( @@ -192,14 +192,14 @@ dframe_via_string = pvt.density_fromdeck(pvt.df2ecl_density(density_df)) pd.testing.assert_frame_equal(dframe_via_string, density_df) - rock_df = pvt.rock_fromdeck(eclfiles.get_ecldeck()) + rock_df = pvt.rock_fromdeck(resdatafiles.get_ecldeck()) assert "PVTNUM" in rock_df assert len(rock_df) == 1 assert "PRESSURE" in rock_df assert "COMPRESSIBILITY" in rock_df assert rock_df["PRESSURE"].values[0] == 327.3 - pvtw_df = pvt.pvtw_fromdeck(eclfiles.get_ecldeck()) + pvtw_df = pvt.pvtw_fromdeck(resdatafiles.get_ecldeck()) assert "PVTNUM" in pvtw_df assert pvtw_df["PVTNUM"].values[0] == 1 assert len(pvtw_df) == 1 @@ -210,7 +210,7 @@ assert "VISCOSIBILITY" in pvtw_df assert pvtw_df["VISCOSITY"].values[0] == 0.25 - pvdg_df = pvt.pvdg_fromdeck(eclfiles.get_ecldeck()) + pvdg_df = pvt.pvdg_fromdeck(resdatafiles.get_ecldeck()) assert "PVTNUM" in pvdg_df assert "PRESSURE" in pvdg_df assert "VOLUMEFACTOR" in pvdg_df @@ -292,8 +292,8 @@ def test_pvtg_string(): def test_density(): """Test that DENSITY can be parsed from files and from strings""" - eclfiles = EclFiles(REEK) - density_df = pvt.density_fromdeck(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + density_df = pvt.density_fromdeck(resdatafiles.get_ecldeck()) assert len(density_df) == 1 assert "PVTNUM" in density_df assert "OILDENSITY" in density_df @@ -308,7 +308,7 @@ 800 950 1.05 / """ - density_df = pvt.density_fromdeck(EclFiles.str2deck(two_pvtnum_deck)) + density_df = pvt.density_fromdeck(ResdataFiles.str2deck(two_pvtnum_deck)) # (a warning will be printed that we cannot guess) assert len(density_df)
== 1 density_df = pvt.density_fromdeck(two_pvtnum_deck) @@ -329,7 +329,7 @@ def test_pvtw(): """Test that PVTW can be parsed from a string""" deck = """PVTW 327.3 1.03 4.51E-005 0.25 0 /""" - pvtw_df = pvt.pvtw_fromdeck(EclFiles.str2deck(deck)) + pvtw_df = pvt.pvtw_fromdeck(ResdataFiles.str2deck(deck)) pd.testing.assert_frame_equal( pvtw_df, pd.DataFrame( @@ -362,7 +362,7 @@ def test_rock(): """Test parsing of the ROCK keyword from a string""" deck = """ROCK 100 1.1 /""" - rock_df = pvt.rock_fromdeck(EclFiles.str2deck(deck)) + rock_df = pvt.rock_fromdeck(ResdataFiles.str2deck(deck)) assert len(rock_df) == 1 assert "PRESSURE" in rock_df assert "COMPRESSIBILITY" in rock_df @@ -377,8 +377,8 @@ def test_df(): """Test that aggregate dataframes are produced""" - eclfiles = EclFiles(REEK) - pvtdf = pvt.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + pvtdf = pvt.df(resdatafiles) assert not pvtdf.empty assert set(pvtdf["KEYWORD"]) == {"PVTO", "PVDG", "DENSITY", "ROCK", "PVTW"} diff --git a/tests/test_rft.py b/tests/test_rft.py index a35e8ad2f..de33e2b2e 100644 --- a/tests/test_rft.py +++ b/tests/test_rft.py @@ -8,7 +8,7 @@ import pytest from res2df import res2csv, rft -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -20,7 +20,7 @@ def test_rftrecords2df(): """Test that we can construct a dataframe for navigating in RFT records""" - rftrecs = rft._rftrecords2df(EclFiles(EIGHTCELLS).get_rftfile()) + rftrecs = rft._rftrecords2df(ResdataFiles(EIGHTCELLS).get_rftfile()) assert len(rftrecs[rftrecs["recordname"] == "TIME"]) == len( rftrecs["timeindex"].unique() ) @@ -35,7 +35,7 @@ def test_rftrecords_generator(): """Test the generator that will iterate over an EclFile/RFTFile and provide one yield per well per date""" - for rftrecord in rft.rftrecords(EclFiles(EIGHTCELLS).get_rftfile()): + for rftrecord in rft.rftrecords(ResdataFiles(EIGHTCELLS).get_rftfile()): assert isinstance(rftrecord, dict) assert "date" in rftrecord assert isinstance(rftrecord["date"], datetime.date) @@ -50,7 +50,7 @@ def test_get_con_seg_data(): """Get CON data.
Later add more code here to defend the name""" - rftfile = EclFiles(EIGHTCELLS).get_rftfile() + rftfile = ResdataFiles(EIGHTCELLS).get_rftfile() # Test the first record, it is a CON type (not multisegment) rftrecord = next(rft.rftrecords(rftfile)) @@ -464,8 +464,8 @@ def test_add_extras(dframe, inplace, expected): def test_rft2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - rftdf = rft.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + rftdf = rft.df(resdatafiles) assert "ZONE" in rftdf assert "LEAF" not in rftdf # Topology metadata should not be exported assert set(rftdf["WELLMODEL"]) == {"STANDARD"} diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index cf1940432..d20c3539c 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -9,7 +9,7 @@ import pytest from res2df import csv2res, inferdims, res2csv, satfunc -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -29,8 +29,8 @@ def test_ecldeck_to_satfunc_dframe(): """Test that dataframes can be produced from a full Eclipse deck (the example Reek case)""" - eclfiles = EclFiles(REEK) - satdf = satfunc.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + satdf = satfunc.df(resdatafiles.get_ecldeck()) assert set(satdf["KEYWORD"]) == {"SWOF", "SGOF"} assert set(satdf["SATNUM"]) == {1} @@ -56,8 +56,8 @@ def test_ecldeck_to_satfunc_dframe(): def test_satfunc_roundtrip(): """Test that we can produce a SATNUM dataframe from the Reek case, convert it back to an include file, and then reinterpret it to the same""" - eclfiles = EclFiles(EIGHTCELLS) - satdf = satfunc.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(EIGHTCELLS) + satdf = satfunc.df(resdatafiles.get_ecldeck()) inc = satfunc.df2ecl(satdf) df_from_inc = satfunc.df(inc) pd.testing.assert_frame_equal( @@ -69,8 +69,8 @@ def test_satfunc_roundtrip(): def test_df2ecl_order(): """Test that we can control the keyword order in generated strings by the list supplied in keywords argument""" - eclfiles = EclFiles(REEK) - satdf = satfunc.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(REEK) + satdf = satfunc.df(resdatafiles.get_ecldeck()) swof_sgof = satfunc.df2ecl(satdf, keywords=["SWOF", "SGOF"]) assert swof_sgof.find("SWOF") < swof_sgof.find("SGOF") diff --git a/tests/test_summary.py b/tests/test_summary.py index e6f857896..98d3a6324 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -11,7 +11,7 @@ from resdata.summary import Summary from res2df import csv2res, res2csv, summary -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles from res2df.summary import ( _df2pyarrow, _fallback_date_roll, @@ -44,8 +44,8 @@ def test_df(): """Test that dataframes are produced""" - eclfiles = EclFiles(EIGHTCELLS) - sumdf = summary.df(eclfiles) + resdatafiles = ResdataFiles(EIGHTCELLS) + sumdf = summary.df(resdatafiles) assert sumdf.index.name == "DATE" assert sumdf.index.dtype in ["datetime64[ns]", "datetime64"] @@ -55,7 +55,7 @@ def test_df(): assert not sumdf.columns.empty assert "FOPT" in sumdf.columns - sumdf = summary.df(eclfiles, datetime=True) + sumdf = summary.df(resdatafiles, datetime=True) # (datetime=True is implicit when raw time reports are requested) assert sumdf.index.name == "DATE" assert sumdf.index.dtype in ["datetime64[ns]", "datetime64"] @@ -68,7 +68,7 @@ def test_df(): def test_df_column_keys(): """Test that we can slice the dataframe on columns""" - sumdf = summary.df(EclFiles(REEK), 
column_keys="FOPT") + sumdf = summary.df(ResdataFiles(REEK), column_keys="FOPT") assert set(sumdf.columns) == {"FOPT"} assert set(sumdf.attrs["meta"].keys()) == {"FOPT"} @@ -83,29 +83,29 @@ def test_df_column_keys(): "FOPTF", "FOPP", } - sumdf = summary.df(EclFiles(REEK), column_keys="FOP*") + sumdf = summary.df(ResdataFiles(REEK), column_keys="FOP*") assert set(sumdf.columns) == fop_cols assert set(sumdf.attrs["meta"].keys()) == fop_cols - sumdf = summary.df(EclFiles(REEK), column_keys=["FOP*"]) + sumdf = summary.df(ResdataFiles(REEK), column_keys=["FOP*"]) assert set(sumdf.columns) == fop_cols assert set(sumdf.attrs["meta"].keys()) == fop_cols - sumdf = summary.df(EclFiles(REEK), column_keys=["FOPR", "FOPT"]) + sumdf = summary.df(ResdataFiles(REEK), column_keys=["FOPR", "FOPT"]) assert set(sumdf.columns) == {"FOPT", "FOPR"} assert set(sumdf.attrs["meta"].keys()) == {"FOPT", "FOPR"} - sumdf_no_columns = summary.df(EclFiles(REEK), column_keys=["BOGUS"]) + sumdf_no_columns = summary.df(ResdataFiles(REEK), column_keys=["BOGUS"]) assert sumdf_no_columns.columns.empty assert all(sumdf_no_columns.index == sumdf.index) def test_summary2df_dates(): """Test that we have some API possibilities with ISO dates""" - eclfiles = EclFiles(REEK) + resdatafiles = ResdataFiles(REEK) sumdf = summary.df( - eclfiles, + resdatafiles, start_date=datetime.date(2002, 1, 2), end_date="2002-03-01", time_index="daily", @@ -119,12 +119,12 @@ def test_summary2df_dates(): assert sumdf.index.values[0] == np.datetime64("2002-01-02") assert sumdf.index.values[-1] == np.datetime64("2002-03-01") - sumdf = summary.df(eclfiles, time_index="last", datetime=True) + sumdf = summary.df(resdatafiles, time_index="last", datetime=True) assert len(sumdf) == 1 assert sumdf.index.values[0] == np.datetime64("2003-01-02") # Leave this test for the datetime=False behaviour: - sumdf = summary.df(eclfiles, time_index="first") + sumdf = summary.df(resdatafiles, time_index="first") assert len(sumdf) == 1 assert str(sumdf.index.values[0]) == "2000-01-01" @@ -191,9 +191,9 @@ def test_paramsupport(tmp_path, mocker): """ tmpcsvfile = tmp_path / "sum.csv" - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) - parameterstxt = Path(eclfiles.get_path()) / "parameters.txt" + parameterstxt = Path(resdatafiles.get_path()) / "parameters.txt" if parameterstxt.is_file(): parameterstxt.unlink() parameterstxt.write_text("FOO 1\nBAR 3", encoding="utf-8") @@ -208,7 +208,7 @@ def test_paramsupport(tmp_path, mocker): assert disk_df["BAR"].unique()[0] == 3 parameterstxt.unlink() - parametersyml = Path(eclfiles.get_path()) / "parameters.yml" + parametersyml = Path(resdatafiles.get_path()) / "parameters.yml" if parametersyml.is_file(): parametersyml.unlink() parametersyml.write_text(yaml.dump({"FOO": 1, "BAR": 3}), encoding="utf-8") @@ -226,22 +226,24 @@ def test_paramsupport(tmp_path, mocker): assert disk_df["BAR"].unique()[0] == 3 # Test the merging from summary.df() explicitly: - assert "FOO" in summary.df(eclfiles, params=True, paramfile=None) - assert "FOO" not in summary.df(eclfiles, params=False, paramfile=None) - assert "FOO" not in summary.df(eclfiles, params=None, paramfile=None) + assert "FOO" in summary.df(resdatafiles, params=True, paramfile=None) + assert "FOO" not in summary.df(resdatafiles, params=False, paramfile=None) + assert "FOO" not in summary.df(resdatafiles, params=None, paramfile=None) - assert "FOO" in summary.df(eclfiles, params=False, paramfile=parametersyml) - assert "FOO" in summary.df(eclfiles, params=None, 
paramfile=parametersyml) - assert "FOO" in summary.df(eclfiles, params=None, paramfile="parameters.yml") + assert "FOO" in summary.df(resdatafiles, params=False, paramfile=parametersyml) + assert "FOO" in summary.df(resdatafiles, params=None, paramfile=parametersyml) + assert "FOO" in summary.df(resdatafiles, params=None, paramfile="parameters.yml") # Non-existing relative path is a soft error: assert "FOO" not in summary.df( - eclfiles, params=None, paramfile="notexisting/parameters.yml" + resdatafiles, params=None, paramfile="notexisting/parameters.yml" ) # Non-existing absolute path is a hard error: with pytest.raises(FileNotFoundError): - summary.df(eclfiles, params=None, paramfile="/tmp/notexisting/parameters.yml") + summary.df( + resdatafiles, params=None, paramfile="/tmp/notexisting/parameters.yml" ) parametersyml.unlink() @@ -334,15 +336,17 @@ def test_datenormalization(): """Test normalization of dates, where dates can be ensured to be on dategrid boundaries""" # realization-0 here has its last summary date at 2003-01-02 - eclfiles = EclFiles(REEK) - daily = summary.df(eclfiles, column_keys="FOPT", time_index="daily", datetime=True) + resdatafiles = ResdataFiles(REEK) + daily = summary.df( + resdatafiles, column_keys="FOPT", time_index="daily", datetime=True + ) assert str(daily.index[-1])[0:10] == "2003-01-02" monthly = summary.df( - eclfiles, column_keys="FOPT", time_index="monthly", datetime=True + resdatafiles, column_keys="FOPT", time_index="monthly", datetime=True ) assert str(monthly.index[-1])[0:10] == "2003-02-01" yearly = summary.df( - eclfiles, column_keys="FOPT", time_index="yearly", datetime=True + resdatafiles, column_keys="FOPT", time_index="yearly", datetime=True ) assert str(yearly.index[-1])[0:10] == "2004-01-01" @@ -350,9 +354,9 @@ def test_datenormalization(): def test_extrapolation(): """Summary data should be possible to extrapolate into the future, rates should be zero, cumulatives should be constant""" - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) lastfopt = summary.df( - eclfiles, column_keys="FOPT", time_index="last", datetime=True + resdatafiles, column_keys="FOPT", time_index="last", datetime=True )["FOPT"].values[0] answer = pd.DataFrame( # This is the maximal date for datetime64[ns] @@ -363,7 +367,7 @@ pd.testing.assert_frame_equal( summary.df( - eclfiles, + resdatafiles, column_keys=["FOPT", "FOPR"], time_index="2262-04-11", datetime=True, @@ -372,7 +376,7 @@ ) pd.testing.assert_frame_equal( summary.df( - eclfiles, + resdatafiles, column_keys=["FOPT", "FOPR"], time_index=[datetime.date(2262, 4, 11)], # NB: df() does not support datetime64 for time_index @@ -384,7 +388,7 @@ # Pandas does not support DatetimeIndex beyond 2262: with pytest.raises(pd.errors.OutOfBoundsDatetime): summary.df( - eclfiles, + resdatafiles, column_keys=["FOPT"], time_index=[datetime.date(2300, 1, 1)], datetime=True, @@ -392,7 +396,7 @@ # But without datetime, we can get it extrapolated by libecl: assert summary.df( - eclfiles, column_keys=["FOPT"], time_index=[datetime.date(2300, 1, 1)] + resdatafiles, column_keys=["FOPT"], time_index=[datetime.date(2300, 1, 1)] )["FOPT"].values == [lastfopt] @@ -629,9 +633,9 @@ def test_date_range(start, end, freq, expected): def test_resample_smry_dates(): """Test resampling of summary dates""" - eclfiles = EclFiles(REEK) + resdatafiles = ResdataFiles(REEK) - ecldates = eclfiles.get_eclsum().dates +
ecldates = resdatafiles.get_eclsum().dates assert isinstance(resample_smry_dates(ecldates), list) assert isinstance(resample_smry_dates(ecldates, freq="last"), list) @@ -792,7 +796,7 @@ def test_resample_smry_dates(): ], ) def test_unique_datetime_for_short_timesteps(filepath): - assert summary.df(EclFiles(filepath)).index.is_unique + assert summary.df(ResdataFiles(filepath)).index.is_unique @pytest.mark.parametrize( @@ -804,12 +808,12 @@ def test_unique_datetime_for_short_timesteps(filepath): ) def test_unique_datetime_retain_index_name(filepath): """Test _ensure_unique_datetime_index method retain index name""" - assert summary.df(EclFiles(filepath)).index.name == "DATE" + assert summary.df(ResdataFiles(filepath)).index.name == "DATE" def test_smry_meta(): """Test obtaining metadata dictionary for summary vectors from an EclSum object""" - meta = smry_meta(EclFiles(REEK)) + meta = smry_meta(ResdataFiles(REEK)) assert isinstance(meta, dict) assert "FOPT" in meta @@ -1068,7 +1072,7 @@ def test_duplicated_summary_vectors(caplog): / "EIGHTCELLS_DUPES.DATA" ) assert "SUMMARY\nFOPR\nFOPR" in dupe_datafile.read_text() - deduplicated_dframe = df(EclFiles(dupe_datafile)) + deduplicated_dframe = df(ResdataFiles(dupe_datafile)) assert (deduplicated_dframe.columns == ["YEARS", "FOPR"]).all() assert "Duplicated columns detected" in caplog.text @@ -1167,15 +1171,15 @@ def test_res2df_errors(tmp_path): # This is how libecl reacts to bogus binary data Summary("FOO.UNSMRY") - # But EclFiles should be more tolerant, as it should be possible + # But ResdataFiles should be more tolerant, as it should be possible # to extract other data if SMRY is corrupted Path("FOO.DATA").write_text("RUNSPEC", encoding="utf8") - assert str(EclFiles("FOO").get_ecldeck()).strip() == "RUNSPEC" + assert str(ResdataFiles("FOO").get_ecldeck()).strip() == "RUNSPEC" with pytest.raises(OSError): - EclFiles("FOO").get_eclsum() + ResdataFiles("FOO").get_eclsum() # Getting a dataframe from bogus data should give empty data: - assert df(EclFiles("FOO")).empty + assert df(ResdataFiles("FOO")).empty def test_df2eclsum_errors(): diff --git a/tests/test_trans.py b/tests/test_trans.py index 5bd819235..8f9f90d5c 100644 --- a/tests/test_trans.py +++ b/tests/test_trans.py @@ -14,7 +14,7 @@ import pandas as pd from res2df import res2csv, trans -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -23,8 +23,8 @@ def test_trans(): """Test that we can build a dataframe of transmissibilities""" - eclfiles = EclFiles(REEK) - trans_df = trans.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + trans_df = trans.df(resdatafiles) assert "TRAN" in trans_df assert "DIR" in trans_df assert set(trans_df["DIR"].unique()) == set(["I", "J", "K"]) @@ -33,45 +33,45 @@ def test_trans(): trans_full_length = len(trans_df) # Try including some vectors: - trans_df = trans.df(eclfiles, vectors="FIPNUM") + trans_df = trans.df(resdatafiles, vectors="FIPNUM") assert "FIPNUM" not in trans_df assert "FIPNUM1" in trans_df assert "EQLNUM2" not in trans_df - trans_df = trans.df(eclfiles, vectors=["FIPNUM", "EQLNUM"]) + trans_df = trans.df(resdatafiles, vectors=["FIPNUM", "EQLNUM"]) assert "FIPNUM1" in trans_df assert "EQLNUM2" in trans_df - trans_df = trans.df(eclfiles, vectors="BOGUS") + trans_df = trans.df(resdatafiles, vectors="BOGUS") assert "BOGUS1" not in trans_df assert "TRAN" in trans_df # (we should have gotten a warning only) 
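# (Requested INIT vectors are attached once per connection side, so asking
# for "FIPNUM" yields FIPNUM1 and FIPNUM2 columns, one per cell in the pair,
# as the assertions above and below show. A minimal sketch with the REEK
# fixture, e.g. to find region-crossing connections:
#
#     pairs = trans.df(ResdataFiles(REEK), vectors=["FIPNUM"])
#     crossing = pairs[pairs["FIPNUM1"] != pairs["FIPNUM2"]]
# )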
- assert "K" not in trans.df(eclfiles, onlyijdir=True)["DIR"] - assert "I" not in trans.df(eclfiles, onlykdir=True)["DIR"] + assert "K" not in trans.df(resdatafiles, onlyijdir=True)["DIR"] + assert "I" not in trans.df(resdatafiles, onlykdir=True)["DIR"] # A warning is logged, seems strange to filter on both, but # the answer (empty) makes sense given the instruction. Alternative # would be a ValueError. - assert trans.df(eclfiles, onlykdir=True, onlyijdir=True).empty + assert trans.df(resdatafiles, onlykdir=True, onlyijdir=True).empty - transnnc_df = trans.df(eclfiles, addnnc=True) + transnnc_df = trans.df(resdatafiles, addnnc=True) assert len(transnnc_df) > trans_full_length - trans_df = trans.df(eclfiles, vectors=["FIPNUM", "EQLNUM"], boundaryfilter=True) + trans_df = trans.df(resdatafiles, vectors=["FIPNUM", "EQLNUM"], boundaryfilter=True) assert trans_df.empty - trans_df = trans.df(eclfiles, vectors="FIPNUM", boundaryfilter=True) + trans_df = trans.df(resdatafiles, vectors="FIPNUM", boundaryfilter=True) assert len(trans_df) < trans_full_length - trans_df = trans.df(eclfiles, coords=True) + trans_df = trans.df(resdatafiles, coords=True) assert "X" in trans_df assert "Y" in trans_df def test_grouptrans(): """Test grouping of transmissibilities""" - eclfiles = EclFiles(REEK) - trans_df = trans.df(eclfiles, vectors="FIPNUM", group=True, coords=True) + resdatafiles = ResdataFiles(REEK) + trans_df = trans.df(resdatafiles, vectors="FIPNUM", group=True, coords=True) assert "FIPNUMPAIR" in trans_df assert "FIPNUM1" in trans_df assert "FIPNUM2" in trans_df @@ -80,14 +80,14 @@ def test_grouptrans(): assert "X" in trans_df # (average X coord for that FIPNUM interface) # This gives a logged error: - assert trans.df(eclfiles, vectors=["FIPNUM", "EQLNUM"], group=True).empty + assert trans.df(resdatafiles, vectors=["FIPNUM", "EQLNUM"], group=True).empty @pytest.mark.skipif(not HAVE_NETWORKX, reason="Requires networkx being installed") def test_nx(tmp_path): """Test graph generation""" - eclfiles = EclFiles(REEK) - network = trans.make_nx_graph(eclfiles, region="FIPNUM") + resdatafiles = ResdataFiles(REEK) + network = trans.make_nx_graph(resdatafiles, region="FIPNUM") assert network.number_of_nodes() == 6 networkx.write_gexf(network, tmp_path / "reek-fipnum-trans.gxf", prettyprint=True) assert (tmp_path / "reek-fipnum-trans.gxf").is_file() diff --git a/tests/test_userapi.py b/tests/test_userapi.py index deb6fbe77..7abd3e5d4 100644 --- a/tests/test_userapi.py +++ b/tests/test_userapi.py @@ -28,22 +28,22 @@ def test_userapi(): To the user reading the source: Skip all 'assert' lines, read the rest. 
""" - eclfiles = res2df.EclFiles(REEK) - - compdatdf = res2df.compdat.df(eclfiles) - equil = res2df.equil.df(eclfiles) - faults = res2df.faults.df(eclfiles) - fipreports = res2df.fipreports.df(eclfiles) - grid_df = res2df.grid.df(eclfiles) - grst_df = res2df.grid.df(eclfiles, rstdates="last") - gruptree = res2df.gruptree.df(eclfiles) - nnc = res2df.nnc.df(eclfiles) - pillars = res2df.pillars.df(eclfiles) - rft = res2df.rft.df(eclfiles) - satfunc = res2df.satfunc.df(eclfiles) - smry = res2df.summary.df(eclfiles, datetime=True) - trans = res2df.trans.df(eclfiles) - wcon = res2df.wcon.df(eclfiles) + resdatafiles = res2df.ResdataFiles(REEK) + + compdatdf = res2df.compdat.df(resdatafiles) + equil = res2df.equil.df(resdatafiles) + faults = res2df.faults.df(resdatafiles) + fipreports = res2df.fipreports.df(resdatafiles) + grid_df = res2df.grid.df(resdatafiles) + grst_df = res2df.grid.df(resdatafiles, rstdates="last") + gruptree = res2df.gruptree.df(resdatafiles) + nnc = res2df.nnc.df(resdatafiles) + pillars = res2df.pillars.df(resdatafiles) + rft = res2df.rft.df(resdatafiles) + satfunc = res2df.satfunc.df(resdatafiles) + smry = res2df.summary.df(resdatafiles, datetime=True) + trans = res2df.trans.df(resdatafiles) + wcon = res2df.wcon.df(resdatafiles) assert "PORV" in grid_df assert "SOIL" not in grid_df diff --git a/tests/test_vfp.py b/tests/test_vfp.py index b77cf3a0a..0392776c2 100644 --- a/tests/test_vfp.py +++ b/tests/test_vfp.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from res2df import EclFiles, vfp +from res2df import ResdataFiles, vfp try: import opm # noqa @@ -993,7 +993,7 @@ @pytest.mark.parametrize("test_input, expected", VFPPROD_CASES) def test_res2df_vfpprod(test_input, expected): """Test res2df for VFPPROD""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfpdf = vfp.df(deck, "VFPPROD") pd.testing.assert_frame_equal(vfpdf, expected) @@ -1002,7 +1002,7 @@ def test_res2df_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", VFPPROD_CASES) def test_ecl2pyarrow_vfpprod(test_input, expected): """Test ecl2pyarrow for VFPPROD""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) # Read first into pyarrow tables vfppa = vfp.pyarrow_tables(deck, "VFPPROD") # Convert pyarrow table to basic data types for VFPPROD @@ -1025,7 +1025,7 @@ def test_df2ecl_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPPROD_CASES[0]]) def test_pyarrow2ecl_vfpprod(test_input, expected): """Test pyarrow2ecl for VFPPROD (case without default values)""" - deck = EclFiles.str2deck(vfp.df2ecl(expected, "VFPPROD")) + deck = ResdataFiles.str2deck(vfp.df2ecl(expected, "VFPPROD")) vfpprod_df = vfp.df(deck, "VFPPROD") vfpprod_data = vfp.df2basic_data(vfpprod_df) vfpprod_pa = vfp.basic_data2pyarrow(vfpprod_data) @@ -1039,7 +1039,7 @@ def test_pyarrow2ecl_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", VFPINJ_CASES) def test_res2df_vfpinj(test_input, expected): """Test res2df for VFPINJ""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfpdf = vfp.df(deck, "VFPINJ") pd.testing.assert_frame_equal(vfpdf, expected) @@ -1056,7 +1056,7 @@ def test_df2ecl_vfpinj(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPINJ_CASES[0]]) def test_pyarrow2ecl_vfpinj(test_input, expected): """Test pyarrow2ecl for VFPPROD (case without default values)""" - deck = EclFiles.str2deck(vfp.df2ecl(expected, "VFPINJ")) + deck 
= ResdataFiles.str2deck(vfp.df2ecl(expected, "VFPINJ")) vfpinj_df = vfp.df(deck, "VFPINJ") vfpinj_data = vfp.df2basic_data(vfpinj_df) vfpinj_pa = vfp.basic_data2pyarrow(vfpinj_data) @@ -1070,7 +1070,7 @@ def test_pyarrow2ecl_vfpinj(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_res2df_vfpprods(test_input, expected): """Test res2df for files with multiple VFPPROD""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD") # Two VFPPROD curves in file corresponding to curves 0 and 1 @@ -1081,7 +1081,7 @@ def test_res2df_vfpprods(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_ecl2pyarrow_vfpprods(test_input, expected): """Test res2df with pyarrow for files with multiple VFPPROD""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPPROD") # Two VFPPROD curves in file corresponding to curves 0 and 1 @@ -1094,7 +1094,7 @@ def test_ecl2pyarrow_vfpprods(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_res2df_vfpinjs(test_input, expected): """Test res2df for files with multiple VFPINJ""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ") # Two VFPINJ curves in file corresponding to curves 2 and 3 @@ -1105,7 +1105,7 @@ def test_res2df_vfpinjs(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_eclpyarrow_vfpinjs(test_input, expected): """Test res2df for pyarrow for files with multiple VFPINJ""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ") # Two VFPINJ curves in file corresponding to curves 2 and 3 @@ -1118,7 +1118,7 @@ def test_eclpyarrow_vfpinjs(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_res2df_vfpprod_no(test_input, expected): """Test res2df for files with multiple VFPPROD with vfp number argument""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD", "2") # VFPPROD curve with VFP number 2 is curve 1 in file @@ -1130,7 +1130,7 @@ def test_ecl2pyarrow_vfpprod_no(test_input, expected): """Test res2df for pyarrow for files with multiple VFPPROD with vfp number argument """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPPROD", "2") vfpprod_data = vfp.pyarrow2basic_data(vfppas[0]) vfpdf = vfp.basic_data2df(vfpprod_data) @@ -1142,7 +1142,7 @@ def test_ecl2pyarrow_vfpprod_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_res2df_vfpinj_no(test_input, expected): """Test res2df for files with multiple VFPINJ with vfp number argument""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ", "4") # VFPINJ curve with VFP number 4 is curve 3 in file @@ -1152,7 +1152,7 @@ def test_res2df_vfpinj_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_ecl2pyarrow_vfpinj_no(test_input, expected): """Test res2df for pyarrow files with multiple VFPINJ with vfp number argument""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, 
"VFPINJ", "4") vfpinj_data = vfp.pyarrow2basic_data(vfppas[0]) @@ -1165,7 +1165,7 @@ def test_ecl2pyarrow_vfpinj_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_res2df_vfpprods_no(test_input, expected): """Test res2df for files with multiple VFPPROD with vfp number argument as range""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1178,7 +1178,7 @@ def test_ecl2pyarrow_vfpprods_no(test_input, expected): """Test res2df for pyarrow for files with multiple VFPPROD with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1193,7 +1193,7 @@ def test_res2df_vfpinjs_no(test_input, expected): """Test res2df for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1206,7 +1206,7 @@ def test_ecl2pyarrow_vfpinjs_no(test_input, expected): """Test res2df for pyararow for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1221,7 +1221,7 @@ def test_basic_data_vfpprods_no(test_input, expected): """Test res2df basic_data reading for files with multiple VFPPROD with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfps = vfp.basic_data(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1235,7 +1235,7 @@ def test_basic_data_vfpinjs_no(test_input, expected): """Test res2df basic_data reading for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfps = vfp.basic_data(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1249,7 +1249,7 @@ def test_pyarrow2basic_data_vfpprods_no(test_input, expected): """Test res2df pyarrow2basic_data for files with multiple VFPPROD with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) pyarrow_vfps = vfp.pyarrow_tables(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1264,7 +1264,7 @@ def test_pyarrow2basic_data_vfpinjs_no(test_input, expected): """Test res2df pyarrow2basic_data for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) pyarrow_vfps = vfp.pyarrow_tables(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1285,7 +1285,7 @@ def test_basic_data_key_exceptions_vfpprods(self, vfpprod_key, test_input, dummy """Test exceptions for basic data format (not containing all required keywords) for VFPPROD" """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfpprods = vfp.basic_data(deck, "VFPPROD") # Check if exception is raises if one key is missing @@ -1312,7 
+1312,7 @@ def test_basic_data_array_dim_exceptions_vfpprods( """Test exceptions for basic data format (inconsistency in array dimensions) for VFPPROD" """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfpprods = vfp.basic_data(deck, "VFPPROD") # Check if exception is raises if array dimension is wrong @@ -1329,7 +1329,7 @@ def test_basic_data_dims_vfpprods(test_input, expected): """Test exceptions for dimensions consistency for basic data format (not containing all required keywords) for VFPPROD" """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfpprods = vfp.basic_data(deck, "VFPPROD") # Check if exception is raised if dimensions are wrong @@ -1352,7 +1352,7 @@ def test_basic_data_key_exceptions_vfpinjs(self, vfpinj_key, test_input, dummy): """Test exceptions for basic data format (not containing all required keywords) for VFPINJ" """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfpinjs = vfp.basic_data(deck, "VFPINJ") # Check if exception is raises if one key is missing @@ -1379,7 +1379,7 @@ def test_basic_data_array_dim_exceptions_vfpinjs( """Test exceptions for basic data format (inconsistency in array dimensions) for VFPINJ" """ - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) basic_data_vfpinjs = vfp.basic_data(deck, "VFPINJ") # Check if exception is raises if array dimension if wrong diff --git a/tests/test_wcon.py b/tests/test_wcon.py index 0978a1dcd..b745e72fa 100644 --- a/tests/test_wcon.py +++ b/tests/test_wcon.py @@ -8,7 +8,7 @@ import pytest from res2df import res2csv, wcon -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -24,8 +24,8 @@ def test_wcon2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(EIGHTCELLS) - wcondf = wcon.df(eclfiles.get_ecldeck()) + resdatafiles = ResdataFiles(EIGHTCELLS) + wcondf = wcon.df(resdatafiles.get_ecldeck()) assert not wcondf.empty assert "DATE" in wcondf # for all data @@ -41,7 +41,7 @@ def test_wconhist(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResdataFiles.str2deck(wconstr) wconhist_df = wcon.df(deck) pd.testing.assert_frame_equal( wconhist_df, @@ -74,7 +74,7 @@ def test_wconinjh(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResdataFiles.str2deck(wconstr) wconinjh_df = wcon.df(deck) pd.testing.assert_frame_equal( wconinjh_df, @@ -108,7 +108,7 @@ def test_wconinje(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResdataFiles.str2deck(wconstr) wconinje_df = wcon.df(deck) pd.testing.assert_frame_equal( wconinje_df, @@ -145,7 +145,7 @@ def test_wconprod(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResdataFiles.str2deck(wconstr) wconprod_df = wcon.df(deck) pd.testing.assert_frame_equal( wconprod_df, @@ -207,7 +207,7 @@ def test_tstep(): 'OP1' 3000 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResdataFiles.str2deck(schstr) wcondf = wcon.df(deck) dates = [str(x) for x in wcondf["DATE"].unique()] assert len(dates) == 3 diff --git a/tests/test_wellcompletiondata.py b/tests/test_wellcompletiondata.py index aeaff2129..70892880a 100644 --- a/tests/test_wellcompletiondata.py +++ b/tests/test_wellcompletiondata.py @@ -6,7 +6,7 @@ import pytest from res2df import common, compdat, wellcompletiondata -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles from 
res2df.wellcompletiondata import ( _aggregate_layer_to_zone, _df2pyarrow, @@ -34,7 +34,7 @@ def test_eightcells_with_wellconnstatus(): """Test the Eightcells dataset with the well connection status option activated (connection status extracted from summary data) """ - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) expected_dframe = pd.DataFrame( [ { @@ -48,7 +48,7 @@ def test_eightcells_with_wellconnstatus(): ) pd.testing.assert_frame_equal( wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=True + resdatafiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=True ), expected_dframe, check_dtype=False, @@ -58,7 +58,7 @@ def test_eightcells_with_wellconnstatus(): def test_eightcells_without_wellconnstatus(): """Test the Eightcells dataset with only the compdat export data (connection status extracted from parsing the schedule file)""" - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) expected_dframe = pd.DataFrame( [ { @@ -72,7 +72,7 @@ def test_eightcells_without_wellconnstatus(): ) pd.testing.assert_frame_equal( wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False + resdatafiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False ), expected_dframe, check_dtype=False, @@ -81,9 +81,9 @@ def test_eightcells_without_wellconnstatus(): def test_df2pyarrow(): """Test that dataframe is conserved using _df2pyarrow""" - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) df = wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False + resdatafiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False ) df["KH"] = df["KH"].astype(np.int32) pd.testing.assert_frame_equal(df, _df2pyarrow(df).to_pandas(), check_like=True) @@ -91,9 +91,9 @@ def test_df2pyarrow(): def test_metadata(): """Test that the KH column has metadata and that unit is mDm""" - eclfiles = EclFiles(EIGHTCELLS) + resdatafiles = ResdataFiles(EIGHTCELLS) df = wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False + resdatafiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False ) assert df.attrs["meta"] == {"KH": {"unit": "mDm"}} @@ -107,21 +107,21 @@ def test_empty_zonemap(): """Test empty zonemap and zonemap with layers that doesn't exist in the compdat table. 
Both returns an empty dataframe """ - eclfiles = EclFiles(EIGHTCELLS) - df = wellcompletiondata.df(eclfiles, zonemap={}, use_wellconnstatus=False) + resdatafiles = ResdataFiles(EIGHTCELLS) + df = wellcompletiondata.df(resdatafiles, zonemap={}, use_wellconnstatus=False) assert df.empty zonemap = {1000: "ZONE1", -1: "ZONE1"} - df = wellcompletiondata.df(eclfiles, zonemap=zonemap, use_wellconnstatus=False) + df = wellcompletiondata.df(resdatafiles, zonemap=zonemap, use_wellconnstatus=False) assert df.empty def test_zonemap_with_some_undefined_layers(): """Layers in the zonemap that don't exist in the compdat output will be ignored.""" - eclfiles = EclFiles(REEK) + resdatafiles = ResdataFiles(REEK) zonemap = {1: "ZONE1", 2: "ZONE1"} - df = wellcompletiondata.df(eclfiles, zonemap=zonemap, use_wellconnstatus=False) - compdat_df = compdat.df(eclfiles) + df = wellcompletiondata.df(resdatafiles, zonemap=zonemap, use_wellconnstatus=False) + compdat_df = compdat.df(resdatafiles) # Filter compdat on layer 1 and 2 compdat_df = compdat_df[compdat_df["K1"] <= 2] diff --git a/tests/test_wellconnstatus.py b/tests/test_wellconnstatus.py index 037d76640..4a62fa0dc 100644 --- a/tests/test_wellconnstatus.py +++ b/tests/test_wellconnstatus.py @@ -4,7 +4,7 @@ import pytest from res2df import wellconnstatus -from res2df.eclfiles import EclFiles +from res2df.resdatafiles import ResdataFiles try: # pylint: disable=unused-import @@ -23,15 +23,15 @@ def test_reek_dataset(): """Test Reek dataset. It contains no CPI data and should return an empty dataframe. """ - eclfiles = EclFiles(REEK) - wellconnstatus_df = wellconnstatus.df(eclfiles) + resdatafiles = ResdataFiles(REEK) + wellconnstatus_df = wellconnstatus.df(resdatafiles) assert wellconnstatus_df.empty def test_eightcells_dataset(): """Test the Eightcells dataset which has CPI data""" - eclfiles = EclFiles(EIGHTCELLS) - wellconnstatus_df = wellconnstatus.df(eclfiles) + resdatafiles = ResdataFiles(EIGHTCELLS) + wellconnstatus_df = wellconnstatus.df(resdatafiles) expected_dframe = pd.DataFrame( [ { diff --git a/tests/test_welopen.py b/tests/test_welopen.py index f7a7d0540..9fce7eb07 100644 --- a/tests/test_welopen.py +++ b/tests/test_welopen.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from res2df import EclFiles, compdat +from res2df import ResdataFiles, compdat try: # pylint: disable=unused-import @@ -930,7 +930,7 @@ @pytest.mark.parametrize("test_input, expected", WELOPEN_CASES) def test_welopen(test_input, expected): """Test with WELOPEN present""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) compdf = compdat.deck2dfs(deck)["COMPDAT"] columns_to_check = ["WELL", "I", "J", "K1", "K2", "OP/SH", "DATE"] @@ -1131,7 +1131,7 @@ def test_welopen(test_input, expected): ) def test_welopen_wlist(test_input, expected): """Test that WELOPEN can be used on well lists determined by WLIST""" - deck = EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) dfs = compdat.deck2dfs(deck) pd.testing.assert_frame_equal(dfs["COMPDAT"][expected.columns], expected) @@ -1139,7 +1139,7 @@ def test_welopen_wlist(test_input, expected): def test_welopen_df(): """Test that we can obtain WELOPEN information when it applies on well state, not on connections.""" - deck = EclFiles.str2deck( + deck = ResdataFiles.str2deck( """ DATES 1 JAN 2000 / @@ -1482,7 +1482,7 @@ def test_welopen_df(): ) def test_welopen_complump(test_input, expected): """Test the welopen_complump functionality through Eclipse decks""" - deck = 
EclFiles.str2deck(test_input) + deck = ResdataFiles.str2deck(test_input) dfs = compdat.deck2dfs(deck) pd.testing.assert_frame_equal(dfs["COMPDAT"][expected.columns], expected) diff --git a/tests/test_wlist.py b/tests/test_wlist.py index 87c4e02c3..6f64aad96 100644 --- a/tests/test_wlist.py +++ b/tests/test_wlist.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from res2df import EclFiles, compdat +from res2df import ResdataFiles, compdat try: # pylint: disable=unused-import @@ -145,7 +145,7 @@ ) def test_parse_wlist(deckstr, expected_df): """Test basic parsing of WLIST keywords into a dataframe representation""" - deck = EclFiles.str2deck(deckstr) + deck = ResdataFiles.str2deck(deckstr) wlistdf = compdat.deck2dfs(deck)["WLIST"] pd.testing.assert_frame_equal(wlistdf, expected_df, check_like=True) diff --git a/tests/test_zonemap.py b/tests/test_zonemap.py index 650fa28cb..70a46ed2a 100644 --- a/tests/test_zonemap.py +++ b/tests/test_zonemap.py @@ -14,13 +14,13 @@ def test_stdzoneslyr(): """Test that we can read zones if the zonemap is in a standard location. - The eclfiles object defines what is the standard location for the file, while + The resdatafiles object defines what is the standard location for the file, while the actual parsing is done in res2df.common.parse_lyrfile() and converted to zonemap in common.convert_lyrlist_to_zonemap() """ - eclfiles = res2df.EclFiles(REEK) + resdatafiles = res2df.ResdataFiles(REEK) - zonemap = eclfiles.get_zonemap() + zonemap = resdatafiles.get_zonemap() assert isinstance(zonemap, dict) assert zonemap[3] == "UpperReek" assert zonemap[10] == "MidReek" @@ -37,8 +37,8 @@ def test_stdzoneslyr(): def test_nonexistingzones(): """Test an Eclipse case with non-existing zonemap (i.e. no zonemap file in the standard location)""" - eclfiles = res2df.EclFiles(REEK) - zonemap = eclfiles.get_zonemap("foobar") + resdatafiles = res2df.ResdataFiles(REEK) + zonemap = resdatafiles.get_zonemap("foobar") # (we got a warning and an empty dict) assert not zonemap @@ -74,7 +74,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResdataFiles(REEK).get_zonemap(str(lyrfile)) is None assert "From_layer higher than to_layer" in caplog.text lyrfile = tmp_path / "formations.lyr" @@ -85,7 +85,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResdataFiles(REEK).get_zonemap(str(lyrfile)) is None assert "Failed on content: foo 3- 4 #FFGGHH" in caplog.text lyrfile = tmp_path / "formations.lyr" @@ -96,7 +96,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResdataFiles(REEK).get_zonemap(str(lyrfile)) is None assert "Failed on content: foo 3- 4 bluez" in caplog.text lyrfile.write_text( @@ -105,7 +105,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResdataFiles(REEK).get_zonemap(str(lyrfile)) is None def test_lyrlist_format(tmp_path): From d7f8924599fbcce0413e95a6cd90429faf20181b Mon Sep 17 00:00:00 2001 From: "Yngve S. 
Kristiansen" Date: Tue, 14 Nov 2023 12:22:47 +0100 Subject: [PATCH 05/68] Eclipse deck -> input deck --- docs/csv2res.rst | 6 +++--- docs/introduction.rst | 12 ++++++------ docs/res2csv.rst | 2 +- docs/usage/compdat.rst | 4 ++-- docs/usage/gruptree.rst | 7 +++---- docs/usage/pvt.rst | 4 ++-- docs/usage/satfunc.rst | 2 +- docs/usage/trans.rst | 2 +- docs/usage/wcon.rst | 2 +- res2df/compdat.py | 2 +- res2df/equil.py | 4 ++-- res2df/faults.py | 2 +- res2df/fipreports.py | 2 +- res2df/grid.py | 2 +- res2df/gruptree.py | 2 +- res2df/inferdims.py | 2 +- res2df/pvt.py | 4 ++-- res2df/res2csv.py | 4 ++-- res2df/resdatafiles.py | 2 +- res2df/satfunc.py | 6 +++--- res2df/summary.py | 2 +- res2df/vfp/_vfp.py | 10 +++++----- res2df/vfp/_vfpcommon.py | 5 ++--- res2df/vfp/_vfpinj.py | 12 ++++++------ res2df/vfp/_vfpprod.py | 12 ++++++------ res2df/wcon.py | 2 +- res2df/wellcompletiondata.py | 2 +- setup.py | 2 +- tests/test_gruptree.py | 2 +- tests/test_satfunc.py | 2 +- tests/test_welopen.py | 2 +- 31 files changed, 62 insertions(+), 64 deletions(-) diff --git a/docs/csv2res.rst b/docs/csv2res.rst index f89291aeb..5207373db 100644 --- a/docs/csv2res.rst +++ b/docs/csv2res.rst @@ -1,10 +1,10 @@ csv2res ======= -Some of the modules inside res2df is able to write Eclipse include files +Some of the modules inside res2df is able to write reservoir files from dataframes (in the format dumped by res2df). This makes it possible -to produce Eclipse input data in any application that can write CSV files, -and use this tool to convert it into Eclipse include files, or it can +to produce reservoir input data in any application that can write CSV files, +and use this tool to convert it into reservoir files, or it can facilitate operations/manipulations of an existing deck using any tool that can work on CSV files, by first running res2csv on an input file, transforming it, and writing back using csv2res. diff --git a/docs/introduction.rst b/docs/introduction.rst index 79a55b611..b4dae3b80 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -4,7 +4,7 @@ Introduction *res2df* is a `Pandas DataFrame `_ wrapper around `libecl `_ and `opm.io `_, which are used to access -binary files outputted by the reservoir simulator Eclipse, or its +binary files outputted by reservoir simulators such as Eclipse, or its input files --- or any other tool outputting to the same data format, f.ex. `flow `_. @@ -56,7 +56,7 @@ More documentation on :doc:`usage/summary`. Extracts grid data from `.INIT` and `.EGRID` and `.UNRST` files. Restart file are optional to extract, and dates must be picked (or all). Data is merged into one DataFrame by the `i`, `j` and `k` indices. Bulk cell -volume is included. Cells are indexed as in Eclipse, starting with 1. +volume is included. Cells are indexed starting with 1. More documentation on :doc:`usage/grid`. @@ -106,7 +106,7 @@ More documentation on :doc:`usage/rft`. ``fipreports`` ^^^^^^^^^^^^^^ -Parses the PRT file from Eclipse looking for region reports (starting +Parses the PRT file looking for region reports (starting with " ... FIPNUM REPORT REGION". It will extract all the data in the ASCII table in the PRT file and organize into a dataframe, currently-in-place, outflow to wells, outflows to regions, etc. It also @@ -156,7 +156,7 @@ More documentation on :doc:`usage/gruptree`. ``pvt`` ^^^^^^^ -Extracts PVT data from an Eclipse deck, from the keywords `PVTO`, `PVDG`, +Extracts PVT data from an input deck, from the keywords `PVTO`, `PVDG`, `DENSITY`, `ROCK` etc. 
Can write data back to Eclipse include files. More documentation on :doc:`usage/pvt`. @@ -173,7 +173,7 @@ More documentation on :doc:`usage/wcon`. ^^^^^^^^^^^^^^^^ This is an internal helper module in order to represent finished or -unfinished Eclipse decks and runs. The class ResdataFiles can cache binary +unfinished input decks and runs. The class ResdataFiles can cache binary files that are recently read, and is able to locate the various output files based on the basename or the `.DATA` filename. @@ -183,7 +183,7 @@ Metadata support parameters.txt ^^^^^^^^^^^^^^ -Metadata for each Eclipse deck are sometimes added in a text file named +Metadata for each input deck are sometimes added in a text file named ``parameters.txt``, alongside the Eclipse DATA file or one or two directory levels above it. diff --git a/docs/res2csv.rst b/docs/res2csv.rst index 0dff56ddd..c7c2bc724 100644 --- a/docs/res2csv.rst +++ b/docs/res2csv.rst @@ -4,7 +4,7 @@ res2csv Most of the functionality in res2df is exposed to the command line through the script *res2csv*. The first argument to this script is always the submodule (subcommand) from which you want functionality. Mandatory argument is -always an Eclipse deck or sometimes individual Eclipse include files, and +always an input deck or sometimes individual Eclipse include files, and there is usually an ``--output`` option to specify which file to dump the CSV to. If you want output to your terminal, use ``-`` as the output filename. diff --git a/docs/usage/compdat.rst b/docs/usage/compdat.rst index 7cbe1ebcc..23a8d72ab 100644 --- a/docs/usage/compdat.rst +++ b/docs/usage/compdat.rst @@ -1,7 +1,7 @@ compdat ^^^^^^^ -This module extracts COMPDAT, WELSEGS and COMPSEGS from an Eclipse deck. +This module extracts COMPDAT, WELSEGS and COMPSEGS from an input deck. Additionally, it will parse WELOPEN statements and emit new COMPDAT statements from the actions in WELOPEN. @@ -26,7 +26,7 @@ for each of COMPDAT, and the segmentation keywords. .. warning:: When WELOPEN is in use, the dataframe can differ from Eclipse behaviour in certain circumstances. The dataframe representation from ``compdat`` does not separate from a "shut" well and the open-ness of its - connections. So in an Eclipse deck it is possible to shut a well, and then + connections. So in an input deck it is possible to shut a well, and then reopen it, and get back the original open/shut state of individual connections prior to well shut. The dataframe format will display `all` connections as open if a well is opened with defaulted indices. diff --git a/docs/usage/gruptree.rst b/docs/usage/gruptree.rst index 11ae29596..14f0db96f 100644 --- a/docs/usage/gruptree.rst +++ b/docs/usage/gruptree.rst @@ -1,13 +1,12 @@ gruptree -------- -Extracts data from the GRUPTREE, GRUPNET and WELSPECS keywords from an Eclipse -deck and presents the production network either as pretty-printed ASCII or in a +Extracts data from the GRUPTREE, GRUPNET and WELSPECS keywords from an input deck and presents the production network either as pretty-printed ASCII or in a dataframe-representation. -The GRUPTREE section of your Eclipse deck defines the production network +The GRUPTREE section of your input deck defines the production network from wells and up to the platform (and possibly also to a field having -many platforms). In the Eclipse deck it be as simple as this:: +many platforms). 
In the input deck it can be as simple as this:: START 01 'JAN' 2000 / diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index 198308288..2e81ed1df 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -1,7 +1,7 @@ pvt --- -Extracts PVT related keyword data from the PROPS section in an Eclipse deck, +Extracts PVT related keyword data from the PROPS section in an input deck, typically the keywords ``PVTO``, ``PVDG``, ``DENSITY`` and ``ROCK``. Data from all keywords will be merged into one common dataframe. @@ -77,7 +77,7 @@ Eclipse from your modified data by issuing pvt.df2ecl(dframe, filename="pvt.inc") -When injecting this produced ``pvt.inc`` into any new Eclipse deck, ensure you +When injecting this produced ``pvt.inc`` into any new input deck, ensure you check which keywords have been written out, compared to what you gave in to `res2df.pvt` above. Any non-supported keywords will get lost in the import phase and need to be catered for outside res2df. diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index 19223d913..f4b2ad493 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -1,7 +1,7 @@ satfunc ------- -satfunc will extract saturation functions from Eclipse decks or from Eclipse +satfunc will extract saturation functions from input decks or from Eclipse include files, these are the keywords ``SWOF``, ``SGOF``, ``SGWFN``, ``SWFN``, ``SOF2``, ``SGFN``, ``SOF3`` and ``SLGOF``. diff --git a/docs/usage/trans.rst b/docs/usage/trans.rst index f0c3f743f..4d4377220 100644 --- a/docs/usage/trans.rst +++ b/docs/usage/trans.rst @@ -6,7 +6,7 @@ from a simulation grid. Python API: :func:`res2df.trans.df` -Applied on an Eclipse deck, the *trans* module will give out a dataframe of neighbour +Applied on an input deck, the *trans* module will give out a dataframe of neighbour connections .. code-block:: python diff --git a/docs/usage/wcon.rst b/docs/usage/wcon.rst index e4b09e90c..05855ffe2 100644 --- a/docs/usage/wcon.rst +++ b/docs/usage/wcon.rst @@ -2,7 +2,7 @@ wcon ^^^^ This module extracts information from WCONHIST, WCONINJE, WCONINJH and -WCONPROD from an Eclipse deck. +WCONPROD from an input deck. .. wcon.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/wcon.csv', index=False) diff --git a/res2df/compdat.py b/res2df/compdat.py index 94a3cbc73..d8540cae4 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -82,7 +82,7 @@ def deck2dfs( Args: deck: A deck representing the schedule - Does not have to be a full Eclipse deck, an include file is sufficient + Does not have to be a full input deck, an include file is sufficient start_date: The default date to use for events where the DATE or START keyword is not found in advance. Default: None diff --git a/res2df/equil.py b/res2df/equil.py index 8ae4ba98f..e856b31d7 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -1,5 +1,5 @@ """ -Extract EQUIL from an Eclipse deck as Pandas DataFrame +Extract EQUIL from an input deck as Pandas DataFrame """ import argparse @@ -92,7 +92,7 @@ def df( are possibly already removed by the OPM parser in resdatafiles.str2deck(). Arguments: - deck: Eclipse deck or string with deck. If + deck: input deck or string with deck. If not string, EQLDIMS must be present in the deck. keywords: Requested keywords for which to extract data. ntequl: If not None, should state the NTEQUL in EQLDIMS.
If diff --git a/res2df/faults.py b/res2df/faults.py index c62a66cce..d7df5d530 100644 --- a/res2df/faults.py +++ b/res2df/faults.py @@ -36,7 +36,7 @@ def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFram All data for the keyword FAULTS will be returned. Args: - deck: Eclipse deck + deck: input deck """ if isinstance(deck, ResdataFiles): deck = deck.get_ecldeck() diff --git a/res2df/fipreports.py b/res2df/fipreports.py index 5aec98573..493a3073d 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -109,7 +109,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra prtfile: filename (PRT) or an ResdataFiles object fipname: The name of the regport regions, FIPNUM, FIPZON or whatever Max length of the string is 8, the first three characters must be FIP, - and the next 3 characters must be unique for a given Eclipse deck. + and the next 3 characters must be unique for a given input deck. """ if isinstance(prtfile, ResdataFiles): prtfile = prtfile.get_prtfilename() diff --git a/res2df/grid.py b/res2df/grid.py index 11de9d0be..5c82f7fb8 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -625,7 +625,7 @@ def df2ecl( ) -> str: """ Write an include file with grid data keyword, like PERMX, PORO, - FIPNUM etc, for the GRID section of the Eclipse deck. + FIPNUM etc, for the GRID section of the input deck. Output (returned as string and optionally written to file) will then contain f.ex:: diff --git a/res2df/gruptree.py b/res2df/gruptree.py index fdafb661e..a5bfcf42f 100644 --- a/res2df/gruptree.py +++ b/res2df/gruptree.py @@ -1,4 +1,4 @@ -"""Extract GRUPTREE information from an Eclipse deck""" +"""Extract GRUPTREE information from an input deck""" import argparse import collections diff --git a/res2df/inferdims.py b/res2df/inferdims.py index ebb4b42cd..59a889985 100644 --- a/res2df/inferdims.py +++ b/res2df/inferdims.py @@ -31,7 +31,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: stricter mode, to detect the correct table dimensionality Arguments: - deck: String containing an Eclipse deck or only a few Eclipse keywords + deck: String containing an input deck or only a few Eclipse keywords dimkeyword: Either TABDIMS or EQLDIMS dimitem: The element number in TABDIMS/EQLDIMS to modify Returns: diff --git a/res2df/pvt.py b/res2df/pvt.py index 2afc33fc6..873b4a3b0 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -1,7 +1,7 @@ """ Extract the PVT data from an Eclipse (input) deck as Pandas Dataframes -Data can be extracted from a full Eclipse deck or from individual files. +Data can be extracted from a full input deck or from individual files. """ import argparse @@ -239,7 +239,7 @@ def df( def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Set up sys.argv parsers for parsing Eclipse deck or + """Set up sys.argv parsers for parsing input deck or include files into dataframes Arguments: diff --git a/res2df/res2csv.py b/res2df/res2csv.py index 2a62ec544..a7c312bec 100644 --- a/res2df/res2csv.py +++ b/res2df/res2csv.py @@ -24,7 +24,7 @@ CATEGORY: str = "utility.eclipse" EXAMPLES: str = """ -Outputting the EQUIL data from an Eclipse deck. The ECLBASE variable from your +Outputting the EQUIL data from an input deck. 
The ECLBASE variable from your ERT config is supplied implicitly:: FORWARD_MODEL res2csv(=equil, =equil.csv) @@ -130,7 +130,7 @@ def get_parser() -> argparse.ArgumentParser: "pvt", help="Extract PVT data", description=( - "Extract data for the PVT keywords in an Eclipse deck " + "Extract data for the PVT keywords in an input deck " "and merge all data into a single dataframe. " "Supported keywords are PVTO, PVDO, PVTG, PVDG, PVTW, " "ROCK and DENSITY. Gas phase pressure and oil phase " diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index bb3834923..6207ded37 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -41,7 +41,7 @@ class ResdataFiles(object): """ - Class for holding an Eclipse deck with result files + Class for holding an input deck with result files Exists only for convenience, so that loading of ResdataFile/Summary objects is easy for users, and with diff --git a/res2df/satfunc.py b/res2df/satfunc.py index 07c101dd6..c87d3225f 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -1,8 +1,8 @@ """ Extract saturation function data (SWOF, SGOF, SWFN, etc.) -from an Eclipse deck as Pandas DataFrame. +from an input deck as Pandas DataFrame. -Data can be extracted from a full Eclipse deck (`*.DATA`) +Data can be extracted from a full input deck (`*.DATA`) or from individual files. Note that when parsing from individual files, it is @@ -129,7 +129,7 @@ def df( def interpolate_defaults(dframe: pd.DataFrame) -> pd.DataFrame: """Interpolate NaN's linearly in saturation. - Saturation function tables in Eclipse decks can have certain values defaulted. + Saturation function tables in input decks can have certain values defaulted. When parsed by common.res2df, these values are returned as np.nan. The incoming dataframe must be associated to one keyword only, but can consist of multiple SATNUMs. diff --git a/res2df/summary.py b/res2df/summary.py index 7a19ec6a9..bc69487a6 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -321,7 +321,7 @@ def df( is always named "DATE". Arguments: - resdatafiles: ResdataFiles object representing the Eclipse deck. Alternatively + resdatafiles: ResdataFiles object representing the input deck. Alternatively an Summary object. time_index: string indicating a resampling frequency, 'yearly', 'monthly', 'daily', 'last' or 'raw', the latter will diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index badfb3fe5..8cf35d817 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -1,6 +1,6 @@ """Extract the VFPPROD/VFPINJ data from an Eclipse (input) deck as Pandas Dataframes -Data can be extracted from a full Eclipse deck or from individual files. Supports +Data can be extracted from a full input deck or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table """ @@ -45,7 +45,7 @@ def basic_data( BASIC_DATA_KEYS in _vfpprod and _vfpinj. Args: - deck: Eclipse deck or string with deck + deck: input deck or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -250,7 +250,7 @@ def dfs( Data for the keyword VFPPROD or VFPINJ will be returned as separate item in list Args: - deck: Eclipse deck or string with deck + deck: input deck or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. 
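# A hedged sketch of the dfs() call documented above; the include file name
# and the VFP table numbers are illustrative assumptions:
from pathlib import Path

from res2df import vfp
from res2df.resdatafiles import ResdataFiles

deck = ResdataFiles.str2deck(Path("vfp_tables.inc").read_text())
# "[1:2]" selects the VFP tables numbered 1 and 2, per the syntax note above:
vfpprod_dfs = vfp.dfs(deck, "VFPPROD", "[1:2]")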
@@ -293,7 +293,7 @@ def pyarrow_tables( Data for the keyword VFPPROD or VFPINJ will be returned as separate item in list Args: - deck: Eclipse deck or string with deck + deck: input deck or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -418,7 +418,7 @@ def df( All data for the keywords VFPPROD/VFPINJ will be returned. Args: - deck: Eclipse deck or string wit deck + deck: input deck or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: str with list of VFP table numbers to extract """ diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py index 4eb2ce41c..551f00598 100755 --- a/res2df/vfp/_vfpcommon.py +++ b/res2df/vfp/_vfpcommon.py @@ -1,8 +1,7 @@ -"""Common functionality for vfp module to extract VFPPROD/VFPINJ data from Eclipse -deck to extract the VFPPROD/VFPINJ data from an Eclipse (input) deck as Pandas +"""Common functionality for the vfp module to extract VFPPROD/VFPINJ data from an input deck as Pandas Dataframes -Data can be extracted from a full Eclipse deck or from individual files. Supports +Data can be extracted from a full input deck or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table """ diff --git a/res2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py index 33f4bc462..48c6fed3a 100755 --- a/res2df/vfp/_vfpinj.py +++ b/res2df/vfp/_vfpinj.py @@ -3,7 +3,7 @@ basic_data (dictionary with basic data types), df (pandas DataFrame) or pyarrow_tables (pyarrow.Tables). -Data can be extracted from a full Eclipse deck or from individual files. +Data can be extracted from a full input deck or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow a pyarrow.Table. Also functionality to write pandas DataFrame and pyarrow.Table to file as Eclipse .Ecl format @@ -74,7 +74,7 @@ def basic_data( Empty string returned if vfp table number does not match any number in list Args: - keyword: Eclipse deck keyword + keyword: input deck keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds """ @@ -473,12 +473,12 @@ def df( keyword: "opm.libopmcommon_python.DeckKeyword", vfpnumbers_str: Optional[str] = None, ) -> Union[pd.DataFrame, None]: - """Return a dataframes of a single VFPINJ table from an Eclipse deck + """Return a dataframe of a single VFPINJ table from an input deck Data from the VFPINJ keyword are stacked into a Pandas Dataframe Args: - keyword: Eclipse deck keyword + keyword: input deck keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -510,11 +510,11 @@ def pyarrow( keyword: "opm.libopmcommon_python.DeckKeyword", vfpnumbers_str: Optional[str] = None, ) -> Union[pa.Table, None]: - """Return a pyarrow Table of a single VFPINJ table from an Eclipse deck + """Return a pyarrow Table of a single VFPINJ table from an input deck If no VFPINJ table found, return None Args: - keyword: Eclipse deck keyword + keyword: input deck keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11].
""" diff --git a/res2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py index fc9a9262f..201767f52 100755 --- a/res2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -3,7 +3,7 @@ basic_data (dictionary with basic data types), df (pandas DataFrame) or pyarrow_tables (pyarrow.Tables). -Data can be extracted from a full Eclipse deck or from individual files. +Data can be extracted from a full input deck or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow as pyarrow.Table. Also functionality to write pandas DataFrame and pyarrow.Table to file as Eclipse .Ecl format. @@ -86,7 +86,7 @@ def basic_data( Empty string returned if vfp table number does not match any number in list Args: - keyword: Eclipse deck keyword + keyword: input deck keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds """ @@ -720,10 +720,10 @@ def df( vfpnumbers_str: Optional[str] = None, ) -> Union[pd.DataFrame, None]: """Return a dataframe or pyarrow Table of a single VFPPROD table - from an Eclipse deck. + from an input deck. Args: - keyword: Eclipse deck keyword + keyword: input deck keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ @@ -764,11 +764,11 @@ def pyarrow( keyword: "opm.libopmcommon_python.DeckKeyword", vfpnumbers_str: Optional[str] = None, ) -> Union[pa.Table, None]: - """Return a pyarrow Table of a single VFPPROD table from an Eclipse deck. + """Return a pyarrow Table of a single VFPPROD table from an input deck. If no VFPPROD curve found, return None Args: - keyword: Eclipse deck keyword + keyword: input deck keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ diff --git a/res2df/wcon.py b/res2df/wcon.py index 4f88b257f..0034e9976 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -1,4 +1,4 @@ -"""Extract WCON* from an Eclipse deck""" +"""Extract WCON* from an input deck""" import argparse import datetime diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index 461306ed4..571c48847 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -91,7 +91,7 @@ def df( def _get_ecl_unit_system(resdatafiles: ResdataFiles) -> EclipseUnitSystem: - """Returns the unit system of an eclipse deck. The options are \ + """Returns the unit system of an input deck. The options are \ METRIC, FIELD, LAB and PVT-M. 
If none of these are found, the function returns METRIC which is the diff --git a/setup.py b/setup.py index ceed4c504..6b2569a45 100644 --- a/setup.py +++ b/setup.py @@ -47,7 +47,7 @@ name="res2df", use_scm_version={"write_to": "res2df/version.py"}, cmdclass=cmdclass, - description="Convert Eclipse 100 input and output to DataFrames", + description="Convert reservoir simulator input and output to DataFrames", long_description=LONG_DESCRIPTION, long_description_content_type="text/markdown", url="http://github.com/equinor/res2df", diff --git a/tests/test_gruptree.py b/tests/test_gruptree.py index 1163d027f..7af8a3749 100644 --- a/tests/test_gruptree.py +++ b/tests/test_gruptree.py @@ -427,7 +427,7 @@ def test_emptytree_strdeck(): def test_emptytree_commandlinetool(tmp_path, mocker, caplog): - """Test the command line tool on an Eclipse deck which is empty""" + """Test the command line tool on an input deck which is empty""" os.chdir(tmp_path) Path("EMPTY.DATA").write_text("", encoding="utf8") mocker.patch("sys.argv", ["res2csv", "gruptree", "--prettyprint", "EMPTY.DATA"]) diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index d20c3539c..90579ce32 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -27,7 +27,7 @@ def test_ecldeck_to_satfunc_dframe(): - """Test that dataframes can be produced from a full Eclipse deck (the + """Test that dataframes can be produced from a full input deck (the example Reek case)""" resdatafiles = ResdataFiles(REEK) satdf = satfunc.df(resdatafiles.get_ecldeck()) diff --git a/tests/test_welopen.py b/tests/test_welopen.py index 9fce7eb07..e0e574e6a 100644 --- a/tests/test_welopen.py +++ b/tests/test_welopen.py @@ -1481,7 +1481,7 @@ def test_welopen_df(): ], ) def test_welopen_complump(test_input, expected): - """Test the welopen_complump functionality through Eclipse decks""" + """Test the welopen_complump functionality through input decks""" deck = ResdataFiles.str2deck(test_input) dfs = compdat.deck2dfs(deck) pd.testing.assert_frame_equal(dfs["COMPDAT"][expected.columns], expected) From 1dad475c0b3bd26b4f0764e1384872f167bd197e Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 14 Nov 2023 12:26:55 +0100 Subject: [PATCH 06/68] ecl2arrow->res2arrow --- setup.py | 2 +- tests/test_summary.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 6b2569a45..866f09358 100644 --- a/setup.py +++ b/setup.py @@ -69,7 +69,7 @@ "console_scripts": [ "csv2res=res2df.csv2res:main", "res2csv=res2df.res2csv:main", - "ecl2arrow=res2df.res2csv:main", + "res2arrow=res2df.res2csv:main", ], "ert": ["res2df_jobs = res2df.hook_implementations.jobs"], }, diff --git a/tests/test_summary.py b/tests/test_summary.py index 98d3a6324..2fc3f4a05 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -319,7 +319,7 @@ def test_main_subparser(tmp_path, mocker): # Alternative and equivalent command line syntax for arrow output: tmparrowfile_alt = tmp_path / "sum2.arrow" mocker.patch( - "sys.argv", ["ecl2arrow", "summary", EIGHTCELLS, "-o", str(tmparrowfile_alt)] + "sys.argv", ["res2arrow", "summary", EIGHTCELLS, "-o", str(tmparrowfile_alt)] ) res2csv.main() pd.testing.assert_frame_equal( @@ -327,7 +327,7 @@ def test_main_subparser(tmp_path, mocker): ) # Not possible (yet?) 
to write arrow to stdout: - mocker.patch("sys.argv", ["ecl2arrow", "summary", EIGHTCELLS, "-o", "-"]) + mocker.patch("sys.argv", ["res2arrow", "summary", EIGHTCELLS, "-o", "-"]) with pytest.raises(SystemExit): res2csv.main() From 0114cc9740a6b73e6da5449295b95d6cbc37b6c2 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 14 Nov 2023 12:27:45 +0100 Subject: [PATCH 07/68] ecl2->res2 --- docs/usage/nnc.rst | 2 +- tests/test_rft.py | 2 +- tests/test_vfp.py | 14 +++++++------- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index b368899b7..21ed2aa60 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -49,7 +49,7 @@ to an Eclipse include file: .. code-block:: python - from ecl2f import nnc, ResdataFiles + from res2df import nnc, ResdataFiles resdatafiles = ResdataFiles("MYDATADECK.DATA") nnc_df = nnc.df(resdatafiles) diff --git a/tests/test_rft.py b/tests/test_rft.py index de33e2b2e..d73cd37a2 100644 --- a/tests/test_rft.py +++ b/tests/test_rft.py @@ -501,7 +501,7 @@ def test_main_subparsers(tmp_path, mocker): mocker.patch( "sys.argv", [ - "ecl2cvsv", + "res2csv", "rft", "-v", REEK.replace(".DATA", ".RFT"), diff --git a/tests/test_vfp.py b/tests/test_vfp.py index 0392776c2..c3f94c7b2 100644 --- a/tests/test_vfp.py +++ b/tests/test_vfp.py @@ -1000,8 +1000,8 @@ def test_res2df_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", VFPPROD_CASES) -def test_ecl2pyarrow_vfpprod(test_input, expected): - """Test ecl2pyarrow for VFPPROD""" +def test_res2pyarrow_vfpprod(test_input, expected): + """Test res2pyarrow for VFPPROD""" deck = ResdataFiles.str2deck(test_input) # Read first into pyarrow tables vfppa = vfp.pyarrow_tables(deck, "VFPPROD") # Convert pyarrow table to basic data types for VFPPROD @@ -1079,7 +1079,7 @@ def test_res2df_vfpprods(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2pyarrow_vfpprods(test_input, expected): +def test_res2pyarrow_vfpprods(test_input, expected): """Test res2df with pyarrow for files with multiple VFPPROD""" deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPPROD") @@ -1126,7 +1126,7 @@ def test_res2df_vfpprod_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2pyarrow_vfpprod_no(test_input, expected): +def test_res2pyarrow_vfpprod_no(test_input, expected): """Test res2df for pyarrow for files with multiple VFPPROD with vfp number argument """ @@ -1150,7 +1150,7 @@ def test_res2df_vfpinj_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2pyarrow_vfpinj_no(test_input, expected): +def test_res2pyarrow_vfpinj_no(test_input, expected): """Test res2df for pyarrow files with multiple VFPINJ with vfp number argument""" deck = ResdataFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ", "4") @@ -1174,7 +1174,7 @@ def test_res2df_vfpprods_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2pyarrow_vfpprods_no(test_input, expected): +def test_res2pyarrow_vfpprods_no(test_input, expected): """Test res2df for pyarrow for files with multiple VFPPROD with vfp number argument as range """ @@ -1202,7 +1202,7 @@ def test_res2df_vfpinjs_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) -def test_ecl2pyarrow_vfpinjs_no(test_input, expected): +def test_res2pyarrow_vfpinjs_no(test_input, expected): """Test res2df for pyararow
for files with multiple VFPINJ with vfp number argument as range """ From 5f8572bdcbc6aad6e46ba76d401400e539816ab0 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 14 Nov 2023 12:52:20 +0100 Subject: [PATCH 08/68] an Eclipse record -> a resdata record --- res2df/vfp/_vfpcommon.py | 2 +- res2df/vfp/_vfpinj.py | 4 ++-- res2df/vfp/_vfpprod.py | 4 ++-- res2df/wellcompletiondata.py | 2 +- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py index 551f00598..10d4434fb 100755 --- a/res2df/vfp/_vfpcommon.py +++ b/res2df/vfp/_vfpcommon.py @@ -180,7 +180,7 @@ def _write_vfp_range( format: str = "%10.6g", values_per_line: int = 5, ) -> str: - """Produce a string representing an Eclipse record for a given table range + """Produce a string representing a resdata record for a given table range Args: values: List/array with the range sorted diff --git a/res2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py index 48c6fed3a..76e75af88 100755 --- a/res2df/vfp/_vfpinj.py +++ b/res2df/vfp/_vfpinj.py @@ -580,7 +580,7 @@ def _write_table( format: str = "%10.6g", values_per_line: int = 5, ) -> str: - """Produce a string representing an Eclipse record for a VFPINJ table (BHP part) + """Produce a string representing a resdata record for a VFPINJ table (BHP part) Args: table: DataFrame with multiindex for table ranges and colums @@ -614,7 +614,7 @@ def _write_table_records( format: str = "%10.6g", values_per_line: int = 5, ) -> str: - """Produce a string representing an Eclipse record for a VFPINJ table (BHP part) + """Produce a string representing a resdata record for a VFPINJ table (BHP part) Args: thp_indices: array of int representing index for THP value for record diff --git a/res2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py index 201767f52..4e2573127 100755 --- a/res2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -855,7 +855,7 @@ def _write_table( format: str = "%10.3", values_per_line: int = 5, ) -> str: - """Produce a string representing an Eclipse record for a VFPPROD table (BHP part) + """Produce a string representing a resdata record for a VFPPROD table (BHP part) Args: table: DataFrame with multiindex for table ranges and colums @@ -892,7 +892,7 @@ def _write_table_records( format: str = "%10.3", values_per_line: int = 5, ) -> str: - """Produce a string representing an Eclipse record for a VFPPROD table (BHP part) + """Produce a string representing a resdata record for a VFPPROD table (BHP part) Args: thp_indices: array of int representing index for THP value for record diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index 571c48847..76ad99da4 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -91,7 +91,7 @@ def df( def _get_ecl_unit_system(resdatafiles: ResdataFiles) -> EclipseUnitSystem: - """Returns the unit system of an input deck. The options are \ + """Returns the unit system of an Eclipse input deck. The options are \ METRIC, FIELD, LAB and PVT-M. If none of these are found, the function returns METRIC which is the From c52758036b33873be87d85d2738d9ca058e714b9 Mon Sep 17 00:00:00 2001 From: "Yngve S. 
Kristiansen" Date: Tue, 14 Nov 2023 12:53:25 +0100 Subject: [PATCH 09/68] Eclipse keyword -> resdata keyword --- res2df/common.py | 6 +++--- res2df/compdat.py | 2 +- res2df/inferdims.py | 2 +- res2df/res2csv.py | 2 +- res2df/vfp/_vfpcommon.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index a59e2590a..aa6842ec7 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -34,7 +34,7 @@ from .constants import MAGIC_STDOUT # Parse named JSON files, this exposes a dict of dictionary describing the contents -# of supported Eclipse keyword data +# of supported resdata keyword data OPMKEYWORDS: Dict[str, dict] = {} for keyw in [ "BRANPROP", @@ -194,7 +194,7 @@ def ecl_keyworddata_to_df( recordcountername: Optional[str] = None, emptyrecordcountername: Optional[str] = None, ) -> pd.DataFrame: - """Extract data associated to an Eclipse keyword into a tabular form. + """Extract data associated to a resdata keyword into a tabular form. Two modes of enumeration of tables in the keyworddata is supported, you will have to find out which one fits your particular keyword. Activate @@ -277,7 +277,7 @@ def parse_opmio_deckrecord( Args: record: Record be parsed - keyword: Which Eclipse keyword this belongs to + keyword: Which resdata keyword this belongs to itemlistname: The key in the json dict that describes the items, typically 'items' or 'records' recordindex: For keywords where itemlistname is 'records', this is a diff --git a/res2df/compdat.py b/res2df/compdat.py index d8540cae4..91a684af8 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -1,4 +1,4 @@ -"""Parser and dataframe generator for the Eclipse keywords: +"""Parser and dataframe generator for the resdata keywords: * COMPDAT * COMPLUMP * COMPSEGS diff --git a/res2df/inferdims.py b/res2df/inferdims.py index 59a889985..87d975522 100644 --- a/res2df/inferdims.py +++ b/res2df/inferdims.py @@ -31,7 +31,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: stricter mode, to detect the correct table dimensionality Arguments: - deck: String containing an input deck or only a few Eclipse keywords + deck: String containing an input deck or only a few resdata keywords dimkeyword: Either TABDIMS or EQLDIMS dimitem: The element number in TABDIMS/EQLDIMS to modify Returns: diff --git a/res2df/res2csv.py b/res2df/res2csv.py index a7c312bec..3d7bd1f86 100644 --- a/res2df/res2csv.py +++ b/res2df/res2csv.py @@ -151,7 +151,7 @@ def get_parser() -> argparse.ArgumentParser: "particular well at a particular time. " "If multisegment wells are found, associated data " "to a connection is merged onto the same row as additional columns. " - "You need the Eclipse keyword WRFTPLT present in your DATA-file to get " + "You need the resdata keyword WRFTPLT present in your DATA-file to get " "the data outputted." ), ) diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py index 10d4434fb..a26f8faeb 100755 --- a/res2df/vfp/_vfpcommon.py +++ b/res2df/vfp/_vfpcommon.py @@ -67,7 +67,7 @@ def _deckrecord2list( Args: record: Record be parsed - keyword: Which Eclipse keyword this belongs to + keyword: Which resdata keyword this belongs to recordindex: For keywords where itemlistname is 'records', this is a list index to the "record". recordname: Name of the record From 8c35b594cd417f3523c13aa92a810fedeb693111 Mon Sep 17 00:00:00 2001 From: "Yngve S.
Kristiansen" Date: Tue, 14 Nov 2023 13:05:33 +0100 Subject: [PATCH 10/68] Eclipse include file -> resdata include file --- docs/introduction.rst | 4 ++-- docs/res2csv.rst | 2 +- docs/usage/equil.rst | 5 ++--- docs/usage/grid.rst | 2 +- docs/usage/nnc.rst | 2 +- docs/usage/pvt.rst | 5 ++--- docs/usage/satfunc.rst | 6 +++--- res2df/common.py | 6 +++--- res2df/csv2res.py | 8 ++++---- res2df/equil.py | 4 ++-- res2df/pvt.py | 6 +++--- res2df/satfunc.py | 6 +++--- res2df/vfp/_vfp.py | 2 +- res2df/wcon.py | 2 +- 14 files changed, 29 insertions(+), 31 deletions(-) diff --git a/docs/introduction.rst b/docs/introduction.rst index b4dae3b80..4ca2998f4 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -127,7 +127,7 @@ More documentation on :doc:`usage/satfunc`. ^^^^^^^^^ Extracts the information in the `EQUIL` table, `RSVD` and `RVVD` in the -input deck. Can write back to Eclipse include files. +input deck. Can write back to include files. More documentation on :doc:`usage/equil`. @@ -157,7 +157,7 @@ More documentation on :doc:`usage/gruptree`. ^^^^^^^ Extracts PVT data from an input deck, from the keywords `PVTO`, `PVDG`, -`DENSITY`, `ROCK` etc. Can write data back to Eclipse include files. +`DENSITY`, `ROCK` etc. Can write data back to include files. More documentation on :doc:`usage/pvt`. diff --git a/docs/res2csv.rst b/docs/res2csv.rst index c7c2bc724..7980788f6 100644 --- a/docs/res2csv.rst +++ b/docs/res2csv.rst @@ -4,7 +4,7 @@ res2csv Most of the functionality in res2df is exposed to the command line through the script *res2csv*. The first argument to this script is always the submodule (subcommand) from which you want functionality. Mandatory argument is -always an input deck or sometimes individual Eclipse include files, and +always an input deck or sometimes individual include files, and there is usually an ``--output`` option to specify which file to dump the CSV to. If you want output to your terminal, use ``-`` as the output filename. diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index eebacb641..ab046a47e 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -51,11 +51,10 @@ one meter for compatibility, which you could do by the statements: dframe.loc[rsvd_rows, "Z"] = dframe.loc[rsvd_rows, "Z"] + 1 -Re-exporting tables to Eclipse include files +Re-exporting tables to include files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When you are done with the table, you can generate new include files for -Eclipse from your modified data by issuing +When you are done with the table, you can generate new include files from your modified data by issuing .. code-block:: python diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst index 633ef27a9..0ed6ddd3e 100644 --- a/docs/usage/grid.rst +++ b/docs/usage/grid.rst @@ -142,7 +142,7 @@ Calculating volumes of dynamic data (pr. some region parameter) can be obtained from that module as a by-product of the pillar computations. -Generating Eclipse include files from grid data +Generating include files from grid data ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you have loaded grid data into a Pandas frame, some operations are easily performed, diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index 21ed2aa60..4b10deaba 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -45,7 +45,7 @@ Data for the ``EDITNNC`` keyword can be dumped, in order to scale the NNC connec using Pandas operations. 
Select the connections you want to scale by slicing the nnc dataframe (either from the nnc module, or from the trans module), and fill transmissibility multipliers in a new column ``TRANM``, then this can be exported -to an Eclipse include file: +to an include file: .. code-block:: python diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index 2e81ed1df..b025bf2ff 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -67,11 +67,10 @@ Possibly, different viscosity scaling pr. PVTNUM is needed Density values are easier to scale up or down to whatever is needed. -Re-exporting tables to Eclipse include files +Re-exporting tables to include files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When you are done with the table, you can generate new include files for -Eclipse from your modified data by issuing +When you are done with the table, you can generate new include files from your modified data by issuing .. code-block:: python diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index f4b2ad493..ef429986d 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -36,7 +36,7 @@ line option. Instead of Eclipse data decks, individual include files may also be parsed, but only one at a time. -Generating Eclipse include files from dataframes +Generating include files from dataframes ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When a dataframe of saturation function data is loaded into Python, any operation @@ -81,7 +81,7 @@ because you need to avoid SOWCR + SWCR overshooting 1, you can write a code from res2df import satfunc - # Read an Eclipse include file directly into a DataFrame + # Read an include file directly into a DataFrame with open("relperm.inc") as f_handle: sat_df = satfunc.df(f_handle.read()) @@ -113,7 +113,7 @@ to do directly on the dataframes. Before doing manipulations of dataframes in through the `pyscal `_ library. Pyscal can create curves from parametrizations, and interpolate between curves. -Pyscal can create initialize its relperm objects from Eclipse include files +Pyscal can initialize its relperm objects from include files though the parsing capabilities of res2df.satfunc. The function ``pyscal.pyscallist.df()`` is analogous to ``res2df.satfunc.df()`` in diff --git a/res2df/common.py b/res2df/common.py index aa6842ec7..e3ac901d5 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -473,7 +473,7 @@ def fill_reverse_parser( parser: argparse.ArgumentParser, modulename: str, defaultoutputfile: str ): """A standardized submodule parser for the command line utility - to produce Eclipse include files from a CSV file. + to produce resdata include files from a CSV file. Arguments: parser: parser to fill with arguments @@ -488,7 +488,7 @@ def fill_reverse_parser( "--output", type=str, help=( - "Name of output Eclipse include file file, default " + "Name of output resdata include file, default " + defaultoutputfile + ". " "Use '-' for stdout." @@ -516,7 +516,7 @@ def df2ecl( consecutive: Optional[str] = None, filename: Optional[str] = None, ) -> str: - """Generate Eclipse include strings from dataframes in res2df format. + """Generate resdata include strings from dataframes in res2df format. This function hands over the actual text generation pr. keyword to functions named df2ecl_ in the calling module.
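The dispatch mechanism named in the ``common.df2ecl`` docstring above — each keyword is handed over to a ``df2ecl_<keyword>`` writer looked up by name in the calling module — can be sketched as below. This is a minimal illustration, not the res2df implementation: the EQUIL writer here is a made-up stand-in, and only the ``getattr``-based lookup mirrors the mechanism the docstring describes.

.. code-block:: python

    import sys

    import pandas as pd

    def df2ecl_equil(dframe, comment=None):
        # Hypothetical per-keyword writer: render each row as one record
        rows = dframe.astype(str).agg("  ".join, axis=1)
        return "EQUIL\n" + "".join("  " + row + " /\n" for row in rows)

    def df2ecl(dframe, keywords):
        # Look up df2ecl_<keyword> by name, as res2df.common does with its
        # calling module, and concatenate the generated include strings
        this_module = sys.modules[__name__]
        return "".join(
            getattr(this_module, "df2ecl_" + keyword.lower())(dframe)
            for keyword in keywords
        )

    print(df2ecl(pd.DataFrame([{"DATUM": 2469.0, "PRESSURE": 382.4}]), ["EQUIL"]))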
diff --git a/res2df/csv2res.py b/res2df/csv2res.py index 6c3b40904..dcd040c7a 100644 --- a/res2df/csv2res.py +++ b/res2df/csv2res.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -Convert dataframes (in res2df format) to Eclipse include files, +Convert dataframes (in res2df format) to resdata include files, for selected keywords """ @@ -10,7 +10,7 @@ from res2df import __version__, equil, pvt, satfunc, summary, vfp # String constants in use for generating ERT forward model documentation: -DESCRIPTION: str = """Convert CSV files into Eclipse include files. Uses the command +DESCRIPTION: str = """Convert CSV files into resdata include files. Uses the command line utility ``csv2res``. Run ``csv2res --help`` to see which subcommands are supported. No options other than the output file is possible when used directly as a forward model. When writing synthetic summary files, the ECLBASE with no filename suffix is expected @@ -62,7 +62,7 @@ def get_parser() -> argparse.ArgumentParser: help="Write SOLUTION include files", description=( "Write SOLUTION keywords (EQUIL, RSVD, RVVD) " - "to Eclipse include files from CSV in res2df format." + "to resdata include files from CSV in res2df format." ), ) equil.fill_reverse_parser(equil_parser) @@ -72,7 +72,7 @@ def get_parser() -> argparse.ArgumentParser: "pvt", help="Write PVT include files", description=( - "Write Eclipse include files from CSV files on the res2df format." + "Write resdata include files from CSV files on the res2df format." ), ) pvt.fill_reverse_parser(pvt_parser) diff --git a/res2df/equil.py b/res2df/equil.py index e856b31d7..c1f62b696 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -305,7 +305,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Fill a parser for the operation dataframe -> eclipse include file""" + """Fill a parser for the operation dataframe -> resdata include file""" return common.fill_reverse_parser(parser, "EQUIL, RSVD++", "solution.inc") @@ -359,7 +359,7 @@ def df2ecl( withphases: bool = False, filename: Optional[str] = None, ) -> str: - """Generate Eclipse include strings from dataframes with + """Generate resdata include strings from dataframes with solution (EQUIL, RSVD++) data. Args: diff --git a/res2df/pvt.py b/res2df/pvt.py index 873b4a3b0..41707e9f5 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -199,7 +199,7 @@ def df( ) -> pd.DataFrame: """Extract all (most) PVT data from a deck. - If you want to call this function on Eclipse include files, + If you want to call this function on resdata include files, read them in to strings as in this example: > pvt_df = pvt.df(open("pvt.inc").read()) @@ -269,7 +269,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Set up sys.argv parsers for writing Eclipse include files from + """Set up sys.argv parsers for writing resdata include files from dataframes (as CSV files) Arguments: @@ -329,7 +329,7 @@ def df2ecl( comments: Optional[Dict[str, str]] = None, filename: Optional[str] = None, ) -> str: - """Generate Eclipse include strings from PVT dataframes + """Generate resdata include strings from PVT dataframes Args: pvt_df: Dataframe with PVT data on res2df format. 
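The PVT round trip these docstrings describe — parse include-file text with ``pvt.df``, manipulate the dataframe, write it back with ``pvt.df2ecl`` (renamed to ``df2res`` later in this series) — looks roughly like this sketch. ``pvt.inc`` is a hypothetical input file, and the ``KEYWORD``/``VISCOSITY`` column names are assumed from the res2df dataframe format:

.. code-block:: python

    from res2df import pvt

    # Parse a hypothetical include file with PVT keywords into a dataframe
    with open("pvt.inc") as f_handle:
        pvt_df = pvt.df(f_handle.read())

    # Example manipulation: scale water viscosity by ten percent
    pvtw_rows = pvt_df["KEYWORD"] == "PVTW"
    pvt_df.loc[pvtw_rows, "VISCOSITY"] = pvt_df.loc[pvtw_rows, "VISCOSITY"] * 1.1

    # Write the modified tables back to a new include file
    pvt.df2ecl(pvt_df, filename="pvt_scaled.inc")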
diff --git a/res2df/satfunc.py b/res2df/satfunc.py index c87d3225f..f8c6e08f9 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -183,7 +183,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Fill a parser for the operation dataframe -> eclipse include file""" + """Fill a parser for the operation dataframe -> resdata include file""" return common.fill_reverse_parser(parser, "SWOF, SGOF++", "relperm.inc") @@ -237,7 +237,7 @@ def df2ecl( comments: Optional[Dict[str, str]] = None, filename: Optional[str] = None, ) -> str: - """Generate Eclipse include strings from dataframes with + """Generate resdata include strings from dataframes with saturation functions (SWOF, SGOF, ...) Args: @@ -252,7 +252,7 @@ def df2ecl( to file. Returns: - Generated Eclipse include string + Generated resdata include string """ string = "" diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 8cf35d817..b7102bd49 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -476,7 +476,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Fill a parser for the operation dataframe -> eclipse include file""" + """Fill a parser for the operation dataframe -> resdata include file""" return common.fill_reverse_parser(parser, "VFPPROD, VFPINJ", "vfp.inc") diff --git a/res2df/wcon.py b/res2df/wcon.py index 0034e9976..003b92b67 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -80,7 +80,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: fill with arguments """ parser.add_argument( - "DATAFILE", help="Name of Eclipse DATA file or Eclipse include file." + "DATAFILE", help="Name of Eclipse DATA file or resdata include file." ) parser.add_argument( "-o", "--output", type=str, help="Name of output csv file.", default="wcon.csv" From 674721b1cfc50f00b3f31cf52b1d0be9e9895af2 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 14 Nov 2023 13:07:51 +0100 Subject: [PATCH 11/68] Eclipse DATA -> Reservoir DATA --- res2df/common.py | 2 +- res2df/compdat.py | 2 +- res2df/equil.py | 2 +- res2df/faults.py | 2 +- res2df/grid.py | 3 ++- res2df/gruptree.py | 2 +- res2df/nnc.py | 3 ++- res2df/pillars.py | 4 +++- res2df/pvt.py | 2 +- res2df/rft.py | 2 +- res2df/satfunc.py | 3 ++- res2df/summary.py | 4 ++-- res2df/trans.py | 3 ++- res2df/vfp/_vfp.py | 2 +- res2df/wcon.py | 2 +- res2df/wellcompletiondata.py | 2 +- res2df/wellconnstatus.py | 2 +- 17 files changed, 24 insertions(+), 18 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index e3ac901d5..122ddd273 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -525,7 +525,7 @@ def df2ecl( for the actual string construction. Args: - dataframe: Dataframe with Eclipse data on res2df format. + dataframe: Dataframe with reservoir DATA on res2df format. keywords: List of keywords to include. 
Will be reduced to the set of keywords available in dataframe and to those supported comments: Dictionary indexed by keyword with comments to be diff --git a/res2df/compdat.py b/res2df/compdat.py index 91a684af8..316f7cdaf 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -950,7 +950,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: Arguments: parser: parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of Eclipse DATA file.") + parser.add_argument("DATAFILE", help="Name of reservoir DATA file.") parser.add_argument( "-o", "--output", diff --git a/res2df/equil.py b/res2df/equil.py index c1f62b696..fc3df876e 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -283,7 +283,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (argparse.ArgumentParser or argparse.subparser): parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of Eclipse DATA file.") + parser.add_argument("DATAFILE", help="Name of reservoir DATA file.") parser.add_argument( "-o", "--output", diff --git a/res2df/faults.py b/res2df/faults.py index d7df5d530..fe8632695 100644 --- a/res2df/faults.py +++ b/res2df/faults.py @@ -69,7 +69,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: Arguments: parser: argparse.ArgumentParser or argparse.subparser """ - parser.add_argument("DATAFILE", help="Name of Eclipse DATA file.") + parser.add_argument("DATAFILE", help="Name of reservoir DATA file.") parser.add_argument( "-o", "--output", diff --git a/res2df/grid.py b/res2df/grid.py index 5c82f7fb8..2153d2f3b 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -540,7 +540,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of Eclipse DATA file. " + "INIT and EGRID file must lie alongside.", + help="Name of reservoir DATA file. " + + "INIT and EGRID file must lie alongside.", ) parser.add_argument( "--vectors", diff --git a/res2df/gruptree.py b/res2df/gruptree.py index a5bfcf42f..772ce5917 100644 --- a/res2df/gruptree.py +++ b/res2df/gruptree.py @@ -392,7 +392,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (argparse.ArgumentParser or argparse.subparser): parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of Eclipse DATA file.") + parser.add_argument("DATAFILE", help="Name of reservoir DATA file.") parser.add_argument( "-o", "--output", diff --git a/res2df/nnc.py b/res2df/nnc.py index d8b17fe2d..a798bc78e 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -177,7 +177,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of Eclipse DATA file. " + "INIT and EGRID file must lie alongside.", + help="Name of reservoir DATA file. " + + "INIT and EGRID file must lie alongside.", ) parser.add_argument( "-c", diff --git a/res2df/pillars.py b/res2df/pillars.py index 9814f7686..35f6805ce 100644 --- a/res2df/pillars.py +++ b/res2df/pillars.py @@ -332,7 +332,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help=("Name of Eclipse DATA file. " "INIT and EGRID file must lie alongside."), + help=( + "Name of reservoir DATA file. " "INIT and EGRID file must lie alongside." 
+ ), ) parser.add_argument( "--region", diff --git a/res2df/pvt.py b/res2df/pvt.py index 41707e9f5..864c1c536 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -246,7 +246,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (ArgumentParser or subparser): parser to fill with arguments """ parser.add_argument( - "DATAFILE", help="Name of Eclipse DATA file or file with PVT keywords." + "DATAFILE", help="Name of reservoir DATA file or file with PVT keywords." ) parser.add_argument( "-o", diff --git a/res2df/rft.py b/res2df/rft.py index cbba20c42..3b8658b40 100644 --- a/res2df/rft.py +++ b/res2df/rft.py @@ -654,7 +654,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", help=( - "Name of Eclipse DATA file or RFT file. " + "Name of reservoir DATA file or RFT file. " "If DATA file is provided, it will look for" " the associated DATA file" ), diff --git a/res2df/satfunc.py b/res2df/satfunc.py index f8c6e08f9..a7ba81b56 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -160,7 +160,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (ArgumentParser or subparser): parser to fill with arguments """ parser.add_argument( - "DATAFILE", help="Name of Eclipse DATA file or file with saturation functions." + "DATAFILE", + help="Name of reservoir DATA file or file with saturation functions.", ) parser.add_argument( "-o", diff --git a/res2df/summary.py b/res2df/summary.py index bc69487a6..008a09808 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -803,7 +803,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of Eclipse DATA file. " + "UNSMRY file must lie alongside.", + help="Name of reservoir DATA file. " + "UNSMRY file must lie alongside.", ) parser.add_argument( "--time_index", @@ -855,7 +855,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: type=str, help=( "Filename of key-value parameter file to look for if -p is set, " - "relative to Eclipse DATA file or an absolute filename. " + "relative to reservoir DATA file or an absolute filename. " "If not supplied, parameters.{json,yml,txt} in " "{., .. and ../..} will be merged in." ), diff --git a/res2df/trans.py b/res2df/trans.py index 77c644507..457b5d836 100644 --- a/res2df/trans.py +++ b/res2df/trans.py @@ -261,7 +261,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of Eclipse DATA file. " + "INIT and EGRID file must lie alongside.", + help="Name of reservoir DATA file. 
" + + "INIT and EGRID file must lie alongside.", ) parser.add_argument("--vectors", nargs="+", help="Extra INIT vectors to be added") parser.add_argument( diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index b7102bd49..048f9a54e 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -448,7 +448,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (argparse.ArgumentParser or argparse.subparser): parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of Eclipse DATA file.") + parser.add_argument("DATAFILE", help="Name of reservoir DATA file.") parser.add_argument( "-o", "--output", diff --git a/res2df/wcon.py b/res2df/wcon.py index 003b92b67..7edbd91dc 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -80,7 +80,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: fill with arguments """ parser.add_argument( - "DATAFILE", help="Name of Eclipse DATA file or resdata include file." + "DATAFILE", help="Name of reservoir DATA file or resdata include file." ) parser.add_argument( "-o", "--output", type=str, help="Name of output csv file.", default="wcon.csv" diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index 76ad99da4..d6678e024 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -250,7 +250,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", type=str, - help="Name of Eclipse DATA file. " + "UNSMRY file must lie alongside.", + help="Name of reservoir DATA file. " + "UNSMRY file must lie alongside.", ) parser.add_argument( "--zonemap", diff --git a/res2df/wellconnstatus.py b/res2df/wellconnstatus.py index 765b4ee01..7860dd589 100644 --- a/res2df/wellconnstatus.py +++ b/res2df/wellconnstatus.py @@ -96,7 +96,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", type=str, - help="Name of Eclipse DATA file. " + "UNSMRY file must lie alongside.", + help="Name of reservoir DATA file. " + "UNSMRY file must lie alongside.", ) parser.add_argument( "-o", From 2f04e4f234301674a36f98ed72960f879b0f6eb6 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 14 Nov 2023 14:51:23 +0100 Subject: [PATCH 12/68] df2ecl->df2res --- docs/usage/equil.rst | 2 +- docs/usage/grid.rst | 6 +- docs/usage/nnc.rst | 4 +- docs/usage/pvt.rst | 2 +- docs/usage/satfunc.rst | 4 +- res2df/common.py | 6 +- res2df/equil.py | 32 +++++----- res2df/grid.py | 4 +- res2df/nnc.py | 2 +- res2df/pvt.py | 20 +++---- res2df/satfunc.py | 60 +++++++++---------- res2df/summary.py | 4 +- res2df/vfp/__init__.py | 4 +- res2df/vfp/_vfp.py | 16 ++--- res2df/vfp/_vfpinj.py | 2 +- res2df/vfp/_vfpprod.py | 2 +- tests/test_common.py | 36 ++++++------ tests/test_equil.py | 60 +++++++++---------- tests/test_grid.py | 36 ++++++------ tests/test_nnc.py | 12 ++-- tests/test_pvt.py | 130 ++++++++++++++++++++--------------------- tests/test_satfunc.py | 18 +++--- tests/test_summary.py | 32 +++++----- tests/test_vfp.py | 20 +++---- 24 files changed, 257 insertions(+), 257 deletions(-) diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index ab046a47e..53589e2c4 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -58,7 +58,7 @@ When you are done with the table, you can generate new include files from your m .. 
code-block:: python - equil.df2ecl(dframe, filename="solution.inc") + equil.df2res(dframe, filename="solution.inc") The last step can also be done using the ``csv2res`` command line utility if you dump to CSV from your Python code instead. diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst index 0ed6ddd3e..63e94ec6b 100644 --- a/docs/usage/grid.rst +++ b/docs/usage/grid.rst @@ -147,7 +147,7 @@ Generating include files from grid data If you have loaded grid data into a Pandas frame, some operations are easily performed, scaling porosity, permeability etc. Or remapping some region parameters. Using the -:func:`res2df.grid.df2ecl()` function these manipulated vectors can be written back as +:func:`res2df.grid.df2res()` function these manipulated vectors can be written back as include files to Eclipse. Say you want to change the FIPNUM, and that FIPNUM 6 should be removed, and set @@ -165,11 +165,11 @@ it to FIPNUM 5. This can be accomplished using dframe.loc[rows_to_touch, "FIPNUM"] = 5 # Write back to new include file, ensure datatype is integer. - grid.df2ecl(dframe, "FIPNUM", dtype=int, filename="fipnum.inc", resdatafiles=resdatafiles) + grid.df2res(dframe, "FIPNUM", dtype=int, filename="fipnum.inc", resdatafiles=resdatafiles) This will produce the file `fipnum.inc` with the contents: .. literalinclude:: fipnum.inc -It is recommended to supply the ``resdatafiles`` object to ``df2ecl``, if not, correct grid +It is recommended to supply the ``resdatafiles`` object to ``df2res``, if not, correct grid size can not be ensured. diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index 4b10deaba..472cd1700 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -55,12 +55,12 @@ to an include file: nnc_df = nnc.df(resdatafiles) nnc_df["TRANM"] = 0.1 # Reduce all NNC transmissibilities - nnc.df2ecl_editnnc(nnc_df, filename="editnnc.inc") + nnc.df2res_editnnc(nnc_df, filename="editnnc.inc") and the contents of the exported file can be: .. - print(nnc.df2ecl_editnnc(nnc.df(resdatafiles).head(4).assign(TRANM=0.1))) + print(nnc.df2res_editnnc(nnc.df(resdatafiles).head(4).assign(TRANM=0.1))) .. code-block:: console diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index b025bf2ff..86f07ee68 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -74,7 +74,7 @@ When you are done with the table, you can generate new include files from your m .. code-block:: python - pvt.df2ecl(dframe, filename="pvt.inc") + pvt.df2res(dframe, filename="pvt.inc") When injecting this produced ``pvt.inc`` into any new input deck, ensure you check which keywords have been written out, compared to what you gave in to diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index ef429986d..295b91407 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -57,7 +57,7 @@ the command For a dataframe or a CSV file in the format provided by this module, an Eclipse include file can be generated either with the Python API -:func:`res2df.satfunc.df2ecl` function or the command +:func:`res2df.satfunc.df2res` function or the command .. code-block:: console @@ -117,5 +117,5 @@ Pyscal can create initialize its relperm objects from include files though the parsing capabilities of res2df.satfunc. The function ``pyscal.pyscallist.df()`` is analogous to ``res2df.satfunc.df()`` in -what it produces, and the :func:`res2df.satfunc.df2ecl()` can be used on both +what it produces, and the :func:`res2df.satfunc.df2res()` can be used on both (potentially with some filtering needed.). 
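As a rough sketch of the satfunc round trip and the filtering mentioned above (hypothetical filenames; ``KEYWORD`` is the column res2df uses to tag each saturation table):

.. code-block:: python

    from res2df import satfunc

    # Parse a hypothetical include file containing e.g. SWOF/SGOF tables
    with open("relperm.inc") as f_handle:
        sat_df = satfunc.df(f_handle.read())

    # Filter to one keyword family before writing back, as the note on
    # filtering above suggests
    swof_df = sat_df[sat_df["KEYWORD"] == "SWOF"]
    satfunc.df2res(swof_df, filename="swof.inc")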
diff --git a/res2df/common.py b/res2df/common.py index 122ddd273..164cbcb86 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -508,7 +508,7 @@ def fill_reverse_parser( return parser -def df2ecl( +def df2res( dataframe: pd.DataFrame, keywords: Optional[Union[str, List[str], List[Optional[str]]]] = None, comments: Optional[Dict[str, str]] = None, @@ -519,7 +519,7 @@ def df2ecl( """Generate resdata include strings from dataframes in res2df format. This function hands over the actual text generation pr. keyword - to functions named df2ecl_ in the calling module. + to functions named df2res_ in the calling module. These functions may again use generic_ecltable() from this module for the actual string construction. @@ -611,7 +611,7 @@ def df2ecl( string += comment_formatter(comments["master"]) for keyword in keywords: # Construct the associated function names - function_name = "df2ecl_" + keyword.lower() + function_name = "df2res_" + keyword.lower() function = getattr(calling_module, function_name) if keyword in comments: string += function(dataframe, comments[keyword]) diff --git a/res2df/equil.py b/res2df/equil.py index fc3df876e..a7f47ce33 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -348,11 +348,11 @@ def equil_reverse_main(args) -> None: ) equil_df = pd.read_csv(args.csvfile) logger.info("Parsed %s", args.csvfile) - inc_string = df2ecl(equil_df, keywords=args.keywords) + inc_string = df2res(equil_df, keywords=args.keywords) common.write_inc_stdout_file(inc_string, args.output) -def df2ecl( +def df2res( equil_df: pd.DataFrame, keywords: Optional[List[str]] = None, comments: Optional[Dict[str, str]] = None, @@ -381,7 +381,7 @@ def df2ecl( string += ( phases_from_columns(equil_df.columns).upper().replace("-", "\n") + "\n\n" ) - string += common.df2ecl( + string += common.df2res( equil_df, keywords=keywords, comments=comments, @@ -392,7 +392,7 @@ def df2ecl( return string -def df2ecl_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print EQUIL keyword with data Args: @@ -427,7 +427,7 @@ def df2ecl_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: ) -def df2ecl_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print RSVD keyword with data This data consists of one table (rs as a function @@ -437,10 +437,10 @@ def df2ecl_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: dframe: Containing RSVD data comment Text that will be included as a comment """ - return _df2ecl_equilfuncs("RSVD", dframe, comment) + return _df2res_equilfuncs("RSVD", dframe, comment) -def df2ecl_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print RVVD keyword with data This data consists of one table (rv as a function @@ -450,10 +450,10 @@ def df2ecl_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: dframe: Containing RVVD data comment: Text that will be included as a comment """ - return _df2ecl_equilfuncs("RVVD", dframe, comment) + return _df2res_equilfuncs("RVVD", dframe, comment) -def df2ecl_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PBVD keyword with data Bubble-point versus depth @@ -465,10 +465,10 @@ def df2ecl_pbvd(dframe: pd.DataFrame, comment: 
Optional[str] = None) -> str: dframe: Containing PBVD data comment: Text that will be included as a comment """ - return _df2ecl_equilfuncs("PBVD", dframe, comment) + return _df2res_equilfuncs("PBVD", dframe, comment) -def df2ecl_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None): +def df2res_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None): """Print PDVD keyword with data. Dew-point versus depth. @@ -480,13 +480,13 @@ def df2ecl_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None): dframe: Containing PDVD data comment: Text that will be included as a comment """ - return _df2ecl_equilfuncs("PDVD", dframe, comment) + return _df2res_equilfuncs("PDVD", dframe, comment) -def _df2ecl_equilfuncs( +def _df2res_equilfuncs( keyword: str, dframe: pd.DataFrame, comment: Optional[str] = None ) -> str: - """Internal function to be used by df2ecl_() functions""" + """Internal function to be used by df2res_() functions""" if dframe.empty: return "-- No data!" string = f"{keyword}\n" @@ -500,7 +500,7 @@ def _df2ecl_equilfuncs( else: subset = dframe[dframe["KEYWORD"] == keyword] - def _df2ecl_equilfuncs_eqlnum(dframe: pd.DataFrame) -> str: + def _df2res_equilfuncs_eqlnum(dframe: pd.DataFrame) -> str: """Print one equilibriation function table for a specific EQLNUM @@ -519,5 +519,5 @@ def _df2ecl_equilfuncs_eqlnum(dframe: pd.DataFrame) -> str: subset = subset.set_index("EQLNUM").sort_index() for eqlnum in subset.index.unique(): string += f"-- EQLNUM: {eqlnum}\n" - string += _df2ecl_equilfuncs_eqlnum(subset[subset.index == eqlnum]) + string += _df2res_equilfuncs_eqlnum(subset[subset.index == eqlnum]) return string + "\n" diff --git a/res2df/grid.py b/res2df/grid.py index 2153d2f3b..5f72aa7f3 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -616,7 +616,7 @@ def drop_constant_columns( return dframe.drop(columnstodelete, axis=1) -def df2ecl( +def df2res( grid_df: pd.DataFrame, keywords: Union[str, List[str]], resdatafiles: Optional[ResdataFiles] = None, @@ -730,7 +730,7 @@ def df2ecl( logger.warning( ( "Mismatch between dumped vector length " - "%d from df2ecl and assumed grid size %d" + "%d from df2res and assumed grid size %d" ), len(vector), global_size, diff --git a/res2df/nnc.py b/res2df/nnc.py index a798bc78e..015c28cf2 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -200,7 +200,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: return parser -def df2ecl_editnnc( +def df2res_editnnc( nnc_df: pd.DataFrame, filename: Optional[str] = None, nocomments: bool = False ) -> str: """Write an EDITNNC keyword diff --git a/res2df/pvt.py b/res2df/pvt.py index 864c1c536..c10fc6355 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -319,11 +319,11 @@ def pvt_reverse_main(args) -> None: ) pvt_df = pd.read_csv(args.csvfile) logger.info("Parsed %s", args.csvfile) - inc_string = df2ecl(pvt_df, keywords=args.keywords) + inc_string = df2res(pvt_df, keywords=args.keywords) common.write_inc_stdout_file(inc_string, args.output) -def df2ecl( +def df2res( pvt_df: pd.DataFrame, keywords: Optional[Union[str, List[str]]] = None, comments: Optional[Dict[str, str]] = None, @@ -341,7 +341,7 @@ def df2ecl( filename: If supplied, the generated text will also be dumped to file. 
""" - return common.df2ecl( + return common.df2res( pvt_df, keywords, comments, @@ -351,7 +351,7 @@ def df2ecl( ) -def df2ecl_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print ROCK keyword with data Args: @@ -379,7 +379,7 @@ def df2ecl_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: return string + "\n" -def df2ecl_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print DENSITY keyword with data Args: @@ -408,7 +408,7 @@ def df2ecl_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: return string + "\n" -def df2ecl_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PVTW keyword with data PVTW is one line/record with data for a reference pressure @@ -444,7 +444,7 @@ def df2ecl_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: return string + "\n" -def df2ecl_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PVTG keyword with data Args: @@ -503,7 +503,7 @@ def _pvtg_pvtnum_pg(dframe): return string + "\n" -def df2ecl_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PVDG keyword with data This data consists of one table (volumefactor and visosity @@ -553,7 +553,7 @@ def _pvdg_pvtnum(dframe): return string + "\n" -def df2ecl_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PVDO keyword with data Args: @@ -600,7 +600,7 @@ def _pvdo_pvtnum(dframe: pd.DataFrame) -> str: return string + "\n" -def df2ecl_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PVTO-data from a dataframe Args: diff --git a/res2df/satfunc.py b/res2df/satfunc.py index a7ba81b56..849661a23 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -228,11 +228,11 @@ def satfunc_reverse_main(args) -> None: ) satfunc_df = pd.read_csv(args.csvfile) logger.info("Parsed %s", args.csvfile) - inc_string = df2ecl(satfunc_df, keywords=args.keywords) + inc_string = df2res(satfunc_df, keywords=args.keywords) common.write_inc_stdout_file(inc_string, args.output) -def df2ecl( +def df2res( satfunc_df: pd.DataFrame, keywords: Optional[List[str]] = None, comments: Optional[Dict[str, str]] = None, @@ -257,7 +257,7 @@ def df2ecl( """ string = "" - string += common.df2ecl( + string += common.df2res( satfunc_df, keywords=keywords, comments=comments, @@ -268,87 +268,87 @@ def df2ecl( return string -def df2ecl_swof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SWOF data. Used by df2ecl(). +def df2res_swof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Print SWOF data. Used by df2res(). Args: dframe: Containing SWOF data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SWOF", dframe, comment) + return _df2res_satfuncs("SWOF", dframe, comment) -def df2ecl_sgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SGOF data. Used by df2ecl(). 
+def df2res_sgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Print SGOF data. Used by df2res(). Args: dframe: Containing SGOF data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SGOF", dframe, comment) + return _df2res_satfuncs("SGOF", dframe, comment) -def df2ecl_sgfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SGFN data. Used by df2ecl(). +def df2res_sgfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Print SGFN data. Used by df2res(). Args: dframe: Containing SGFN data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SGFN", dframe, comment) + return _df2res_satfuncs("SGFN", dframe, comment) -def df2ecl_sgwfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SGWFN data. Used by df2ecl(). +def df2res_sgwfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Print SGWFN data. Used by df2res(). Args: dframe: Containing SGWFN data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SGWFN", dframe, comment) + return _df2res_satfuncs("SGWFN", dframe, comment) -def df2ecl_swfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SWFN data. Used by df2ecl(). +def df2res_swfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Print SWFN data. Used by df2res(). Args: dframe: Containing SWFN data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SWFN", dframe, comment) + return _df2res_satfuncs("SWFN", dframe, comment) -def df2ecl_slgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SLGOF data. Used by df2ecl(). +def df2res_slgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Print SLGOF data. Used by df2res(). Args: dframe: Containing SLGOF data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SLGOF", dframe, comment) + return _df2res_satfuncs("SLGOF", dframe, comment) -def df2ecl_sof2(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SOF2 data. Used by df2ecl(). +def df2res_sof2(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Print SOF2 data. Used by df2res(). Args: dframe: Containing SOF2 data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SOF2", dframe, comment) + return _df2res_satfuncs("SOF2", dframe, comment) -def df2ecl_sof3(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SOF3 data. Used by df2ecl(). +def df2res_sof3(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: + """Print SOF3 data. Used by df2res(). 
Args: dframe: Containing SOF3 data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SOF3", dframe, comment) + return _df2res_satfuncs("SOF3", dframe, comment) -def _df2ecl_satfuncs( +def _df2res_satfuncs( keyword: str, dframe: pd.DataFrame, comment: Optional[str] = None ) -> str: if dframe.empty: @@ -366,7 +366,7 @@ def _df2ecl_satfuncs( subset = subset.set_index("SATNUM").sort_index() # Make a function that is to be called for each SATNUM - def _df2ecl_satfuncs_satnum(keyword, dframe): + def _df2res_satfuncs_satnum(keyword, dframe): """Print one saturation function for one specific SATNUM""" col_headers = RENAMERS[keyword]["DATA"] string = ( @@ -380,5 +380,5 @@ def _df2ecl_satfuncs_satnum(keyword, dframe): # Loop over every SATNUM for satnum in subset.index.unique(): string += f"-- SATNUM: {satnum}\n" - string += _df2ecl_satfuncs_satnum(keyword, subset[subset.index == satnum]) + string += _df2res_satfuncs_satnum(keyword, subset[subset.index == satnum]) return string + "\n" diff --git a/res2df/summary.py b/res2df/summary.py index 008a09808..bcf228a46 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -679,7 +679,7 @@ def _fix_dframe_for_libecl(dframe: pd.DataFrame) -> pd.DataFrame: return dframe -def df2eclsum( +def df2ressum( dframe: pd.DataFrame, casename: str = "SYNTHETIC", ) -> Summary: @@ -938,7 +938,7 @@ def summary_reverse_main(args) -> None: # Summary.fwrite() can only write to current directory: cwd = os.getcwd() - eclsum = df2eclsum(summary_df, eclbase) + eclsum = df2ressum(summary_df, eclbase) try: os.chdir(outputdir) Summary.fwrite(eclsum) diff --git a/res2df/vfp/__init__.py b/res2df/vfp/__init__.py index 6f3a7dd8e..4fd957cbf 100644 --- a/res2df/vfp/__init__.py +++ b/res2df/vfp/__init__.py @@ -7,8 +7,8 @@ basic_data2pyarrow, df, df2basic_data, - df2ecl, - df2ecls, + df2res, + df2ress, dfs, fill_parser, fill_reverse_parser, diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 048f9a54e..a5c3b0664 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -326,7 +326,7 @@ def pyarrow_tables( return pyarrow_tables_vfp -def df2ecls( +def df2ress( dframe: pd.DataFrame, keyword: str = "VFPPROD", comments: Optional[Dict[str, str]] = None, @@ -355,14 +355,14 @@ def df2ecls( if np.all(df_vfp["VFP_TYPE"] == keyword): if comments and keyword in comments.keys(): if keyword == "VFPPROD": - vfp_strs.append(vfpprod.df2ecl(df_vfp, comments["VFPPROD"])) + vfp_strs.append(vfpprod.df2res(df_vfp, comments["VFPPROD"])) elif keyword == "VFPINJ": - vfp_strs.append(vfpinj.df2ecl(df_vfp, comments["VFPINJ"])) + vfp_strs.append(vfpinj.df2res(df_vfp, comments["VFPINJ"])) else: if keyword == "VFPPROD": - vfp_strs.append(vfpprod.df2ecl(df_vfp)) + vfp_strs.append(vfpprod.df2res(df_vfp)) elif keyword == "VFPINJ": - vfp_strs.append(vfpinj.df2ecl(df_vfp)) + vfp_strs.append(vfpinj.df2res(df_vfp)) else: raise ValueError( f"VFP number {vfpno} does not have consistent " @@ -372,7 +372,7 @@ def df2ecls( return vfp_strs -def df2ecl( +def df2res( dframe: pd.DataFrame, keyword: str = "VFPPROD", comments: Optional[Dict[str, str]] = None, @@ -392,7 +392,7 @@ def df2ecl( to file. 
""" - strs_vfp = df2ecls(dframe, keyword=keyword, comments=comments) + strs_vfp = df2ress(dframe, keyword=keyword, comments=comments) str_vfps = "" if comments and "master" in comments.keys(): @@ -529,6 +529,6 @@ def vfp_reverse_main(args) -> None: ) vfp_df = pd.read_csv(args.csvfile) logger.info("Parsed {args.csvfile}") - inc_string = df2ecl(vfp_df, args.keyword) + inc_string = df2res(vfp_df, args.keyword) if args.output: common.write_inc_stdout_file(inc_string, args.output) diff --git a/res2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py index 76e75af88..7771e5bc2 100755 --- a/res2df/vfp/_vfpinj.py +++ b/res2df/vfp/_vfpinj.py @@ -651,7 +651,7 @@ def _write_table_records( return ecl_str -def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Produce a string defining single VFPINJ Eclipse input from a dataframe All data for the keywords VFPINJ will be returned. diff --git a/res2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py index 4e2573127..0a0d62393 100755 --- a/res2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -935,7 +935,7 @@ def _write_table_records( return ecl_str -def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Produce a string defining single VFPPROD Eclipse input from a dataframe All data for the keywords VFPPROD will be returned. diff --git a/tests/test_common.py b/tests/test_common.py index 284c3525b..f09d0f681 100644 --- a/tests/test_common.py +++ b/tests/test_common.py @@ -151,15 +151,15 @@ def test_handle_wanted_keywords(wanted, deckstr, supported, expected): assert common.handle_wanted_keywords(wanted, deck, supported) == expected -def df2ecl_equil(dframe, comment: str = None): - """Wrapper function to be able to test df2ecl +def df2res_equil(dframe, comment: str = None): + """Wrapper function to be able to test df2res (it asks for a function in the calling module)""" - return equil.df2ecl_equil(dframe, comment) + return equil.df2res_equil(dframe, comment) -def test_df2ecl(): - """Test general properties of df2ecl. +def test_df2res(): + """Test general properties of df2res. 
This function is mainly tested in each submodule.""" dframe = pd.DataFrame( @@ -177,33 +177,33 @@ def test_df2ecl(): ) with pytest.raises(AssertionError): # supported keywords are not supplied - common.df2ecl(dframe) + common.df2res(dframe) with pytest.raises(AssertionError): - common.df2ecl(dframe, supported=None) + common.df2res(dframe, supported=None) with pytest.raises(ValueError, match="KEYWORD must be in the dataframe"): - common.df2ecl( + common.df2res( dframe.drop("KEYWORD", axis=1), keywords=["EQUIL"], supported=["EQUIL"] ) - string = common.df2ecl(dframe, supported=["EQUIL"]) + string = common.df2res(dframe, supported=["EQUIL"]) # The next calls differ only in timestamp: assert len(string) == len( - common.df2ecl(dframe, keywords="EQUIL", supported=["EQUIL"]) + common.df2res(dframe, keywords="EQUIL", supported=["EQUIL"]) ) assert len(string) == len( - common.df2ecl(dframe, keywords=["EQUIL"], supported=["EQUIL"]) + common.df2res(dframe, keywords=["EQUIL"], supported=["EQUIL"]) ) assert "EQUIL\n" in string assert "2469" in string assert "-- Output file printed by tests.test_common" in string - assert common.df2ecl(dframe, supported=["PORO"]) == "" + assert common.df2res(dframe, supported=["PORO"]) == "" - assert "EQUIL\n-- foobar" in common.df2ecl( + assert "EQUIL\n-- foobar" in common.df2res( dframe, comments={"EQUIL": "foobar"}, supported=["EQUIL"] ) - assert "\n\n-- masterfoobar\nEQUIL" in common.df2ecl( + assert "\n\n-- masterfoobar\nEQUIL" in common.df2res( dframe, comments={"master": "masterfoobar"}, supported=["EQUIL"] ) @@ -211,16 +211,16 @@ def test_df2ecl(): tworows["EQLNUM"] = [3, 1] tworows["PRESSURE"] = [3456, 1234] with pytest.raises(ValueError): - common.df2ecl(tworows, supported=["EQUIL"], consecutive="EQLNUM") + common.df2res(tworows, supported=["EQUIL"], consecutive="EQLNUM") # This would be a bug if client code did this, because the wrong # consecutive column is set: - assert "3456" in common.df2ecl(tworows, supported=["EQUIL"], consecutive="PVTNUM") + assert "3456" in common.df2res(tworows, supported=["EQUIL"], consecutive="PVTNUM") tworows["EQLNUM"] = [1, 3] with pytest.raises(ValueError): - common.df2ecl(tworows, supported=["EQUIL"], consecutive="EQLNUM") + common.df2res(tworows, supported=["EQUIL"], consecutive="EQLNUM") tworows["EQLNUM"] = [2, 1] # Passes because the frame is sorted on EQLNUM: - string = common.df2ecl(tworows, supported=["EQUIL"], consecutive="EQLNUM") + string = common.df2res(tworows, supported=["EQUIL"], consecutive="EQLNUM") assert "EQUIL" in string assert string.find("3456") > string.find("1234") diff --git a/tests/test_equil.py b/tests/test_equil.py index 45bb4e54d..698d3c9a4 100644 --- a/tests/test_equil.py +++ b/tests/test_equil.py @@ -77,25 +77,25 @@ def test_equil2df(): # Check that we can dump from dataframe to include file # and reparse to the same dataframe: - inc = equil.df2ecl(equildf, withphases=True) + inc = equil.df2res(equildf, withphases=True) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(equildf, df_from_inc, check_dtype=False) -def test_df2ecl(tmp_path): +def test_df2res(tmp_path): """Test that we can write include files to disk""" os.chdir(tmp_path) resdatafiles = ResdataFiles(EIGHTCELLS) equildf = equil.df(resdatafiles) - equil.df2ecl(equildf, filename="equil.inc") + equil.df2res(equildf, filename="equil.inc") assert Path("equil.inc").is_file() # Test automatic directory creation: - equil.df2ecl(equildf, filename="eclipse/include/equil.inc") + equil.df2res(equildf, filename="eclipse/include/equil.inc") 
assert Path("eclipse/include/equil.inc").is_file() -def test_df2ecl_equil(): +def test_df2res_equil(): """Test the underlying function directly""" dframe = pd.DataFrame( [ @@ -111,18 +111,18 @@ def test_df2ecl_equil(): ] ) # Check that we don't need the KEYWORD in the underlying function - assert equil.df2ecl_equil(dframe) == equil.df2ecl_equil( + assert equil.df2res_equil(dframe) == equil.df2res_equil( dframe.drop("KEYWORD", axis="columns") ) # Can also drop EQLNUM since we have only one row: - assert equil.df2ecl_equil(dframe) == equil.df2ecl_equil( + assert equil.df2res_equil(dframe) == equil.df2res_equil( dframe.drop("EQLNUM", axis="columns") ) # Problem if we have two rows, nothing is returned and a critical error is logged assert ( - equil.df2ecl_equil(pd.concat([dframe, dframe]).drop("EQLNUM", axis="columns")) + equil.df2res_equil(pd.concat([dframe, dframe]).drop("EQLNUM", axis="columns")) == "" ) @@ -142,22 +142,22 @@ def test_decks(): assert len(df) == 1 assert "IGNORE1" not in df assert df["EQLNUM"].unique()[0] == 1 - inc = equil.df2ecl(df, withphases=True) + inc = equil.df2res(df, withphases=True) df_from_inc = equil.df(inc) # 0 columns can be both integers and floats. pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False) # Test empty data: - inc = equil.df2ecl_equil(equil.df("")) + inc = equil.df2res_equil(equil.df("")) assert "No data" in inc assert equil.df(inc).empty # Test more empty data: - assert "No data" in equil.df2ecl_equil(equil.df("")) - assert "No data" in equil.df2ecl_rsvd(equil.df("")) - assert "No data" in equil.df2ecl_rvvd(equil.df("")) - assert "No data" in equil.df2ecl_pbvd(equil.df("")) - assert "No data" in equil.df2ecl_pdvd(equil.df("")) + assert "No data" in equil.df2res_equil(equil.df("")) + assert "No data" in equil.df2res_rsvd(equil.df("")) + assert "No data" in equil.df2res_rvvd(equil.df("")) + assert "No data" in equil.df2res_pbvd(equil.df("")) + assert "No data" in equil.df2res_pdvd(equil.df("")) deckstr = """ OIL @@ -170,7 +170,7 @@ def test_decks(): assert df["OWC"].values == 2200 assert len(df) == 1 assert "IGNORE1" not in df - inc = equil.df2ecl(df, withphases=True) + inc = equil.df2res(df, withphases=True) df_from_inc = equil.df(inc) # 0 columns can be both integers and floats. pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False) @@ -187,7 +187,7 @@ def test_decks(): assert "OWC" not in df assert len(df) == 1 assert "IGNORE2" not in df - inc = equil.df2ecl(df, withphases=True) + inc = equil.df2res(df, withphases=True) df_from_inc = equil.df(inc) # 0 columns can be both integers and floats. pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False) @@ -205,7 +205,7 @@ def test_decks(): assert "OWC" not in df assert len(df) == 1 assert "IGNORE2" not in df - inc = equil.df2ecl(df, withphases=True) + inc = equil.df2res(df, withphases=True) df_from_inc = equil.df(inc) # 0 columns can be both integers and floats. pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False) @@ -229,7 +229,7 @@ def test_decks(): assert "OWC" in df assert len(df) == 2 assert "IGNORE2" not in df - inc = equil.df2ecl(df, withphases=True) + inc = equil.df2res(df, withphases=True) df_from_inc = equil.df(inc) # 0 columns can be both integers and floats. 
pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False) @@ -294,7 +294,7 @@ def test_rsvd(): assert max(rsvd_df["EQLNUM"]) == 3 assert set(rsvd_df["Z"].values) == {10, 30, 50} assert set(rsvd_df["RS"].values) == {100, 400} - inc = equil.df2ecl(rsvd_df) + inc = equil.df2res(rsvd_df) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(rsvd_df, df_from_inc) @@ -317,7 +317,7 @@ def test_rsvd(): assert max(rsvd_df["EQLNUM"]) == 2 assert set(rsvd_df["Z"].values) == {10, 30, 50, 60} assert set(rsvd_df["RS"].values) == {100, 400, 1000} - inc = equil.df2ecl(rsvd_df) + inc = equil.df2res(rsvd_df) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(rsvd_df, df_from_inc) @@ -340,7 +340,7 @@ def test_rvvd(): assert set(rvvd_df["Z"].values) == {10, 30, 50} assert set(rvvd_df["RV"].values) == {100, 400} - inc = equil.df2ecl(rvvd_df) + inc = equil.df2res(rvvd_df) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(rvvd_df, df_from_inc) @@ -364,7 +364,7 @@ def test_rvvd(): assert set(rvvd_df["Z"].values) == {10, 30, 50, 60} assert set(rvvd_df["RV"].values) == {100, 400, 1000} - inc = equil.df2ecl(rvvd_df) + inc = equil.df2res(rvvd_df) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(rvvd_df, df_from_inc) @@ -383,7 +383,7 @@ def test_pbvd(): assert set(pbvd_df["Z"].values) == {10, 30, 50} assert set(pbvd_df["PB"].values) == {100, 400} - inc = equil.df2ecl(pbvd_df) + inc = equil.df2res(pbvd_df) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(pbvd_df, df_from_inc) @@ -394,14 +394,14 @@ def test_pbvd(): pd.testing.assert_frame_equal(pbvd_df.drop("KEYWORD", axis="columns"), pbvd_df2) # Check that we don't need the KEYWORD column for the underlying function: - assert equil.df2ecl_pbvd(pbvd_df) == equil.df2ecl_pbvd( + assert equil.df2res_pbvd(pbvd_df) == equil.df2res_pbvd( pbvd_df.drop("KEYWORD", axis="columns") ) # If EQLNUM column is dropped it is not possible to guess the # correct include file, so the code must fail: with pytest.raises(KeyError): - equil.df2ecl_pbvd(pbvd_df.drop("EQLNUM", axis="columns")) + equil.df2res_pbvd(pbvd_df.drop("EQLNUM", axis="columns")) def test_pdvd(): @@ -418,7 +418,7 @@ def test_pdvd(): assert set(pdvd_df["Z"].values) == {10, 30, 50} assert set(pdvd_df["PD"].values) == {100, 400} - inc = equil.df2ecl(pdvd_df) + inc = equil.df2res(pdvd_df) df_from_inc = equil.df(inc) pdvd_df2 = equil.pdvd_fromdeck(deckstr) pd.testing.assert_frame_equal(pdvd_df, df_from_inc) @@ -467,7 +467,7 @@ def test_ntequl(): df = equil.df(deckstr, ntequl=2) assert len(df) == 2 - inc = equil.df2ecl(df, withphases=True) + inc = equil.df2res(df, withphases=True) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False) @@ -494,7 +494,7 @@ def test_ntequl(): assert set(df["GOC"].values) == set([2100, 2100]) assert len(df) == 2 - inc = equil.df2ecl(df, withphases=True) + inc = equil.df2res(df, withphases=True) df_from_inc = equil.df(inc) pd.testing.assert_frame_equal(df, df_from_inc, check_dtype=False) @@ -536,7 +536,7 @@ def test_eclipse_rounding(somefloat, expected): } ] ) - assert expected in equil.df2ecl(dframe, withphases=False) + assert expected in equil.df2res(dframe, withphases=False) def test_main_subparser(tmp_path, mocker): diff --git a/tests/test_grid.py b/tests/test_grid.py index 400782997..7116e917e 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -188,17 +188,17 @@ def test_grid_df(): ) -def test_df2ecl(tmp_path): +def test_df2res(tmp_path): """Test if we are able to output include files for grid data""" 
resdatafiles = ResdataFiles(REEK) grid_df = grid.df(resdatafiles) - fipnum_str = grid.df2ecl(grid_df, "FIPNUM", dtype=int) - assert grid.df2ecl(grid_df, "FIPNUM", dtype="int", nocomments=True) == grid.df2ecl( + fipnum_str = grid.df2res(grid_df, "FIPNUM", dtype=int) + assert grid.df2res(grid_df, "FIPNUM", dtype="int", nocomments=True) == grid.df2res( grid_df, "FIPNUM", dtype=int, nocomments=True ) with pytest.raises(ValueError, match="Wrong dtype argument foo"): - grid.df2ecl(grid_df, "FIPNUM", dtype="foo") + grid.df2res(grid_df, "FIPNUM", dtype="foo") assert "FIPNUM" in fipnum_str assert "-- Output file printed by res2df.grid" in fipnum_str @@ -206,51 +206,51 @@ def test_df2ecl(tmp_path): assert "35840 total cell count" in fipnum_str # (comment at the end) assert len(fipnum_str) > 100 - fipnum_str_nocomment = grid.df2ecl(grid_df, "FIPNUM", dtype=int, nocomments=True) + fipnum_str_nocomment = grid.df2res(grid_df, "FIPNUM", dtype=int, nocomments=True) assert "--" not in fipnum_str_nocomment - fipnum2_str = grid.df2ecl( + fipnum2_str = grid.df2res( grid_df, "FIPNUM", dtype=int, resdatafiles=resdatafiles, nocomments=True ) # This would mean that we guessed the correct global size in the first run assert fipnum_str_nocomment == fipnum2_str - float_fipnum_str = grid.df2ecl(grid_df, "FIPNUM", dtype=float) + float_fipnum_str = grid.df2res(grid_df, "FIPNUM", dtype=float) assert len(float_fipnum_str) > len(fipnum_str) # lots of .0 in the string. - fipsatnum_str = grid.df2ecl(grid_df, ["FIPNUM", "SATNUM"], dtype=int) + fipsatnum_str = grid.df2res(grid_df, ["FIPNUM", "SATNUM"], dtype=int) assert "FIPNUM" in fipsatnum_str assert "SATNUM" in fipsatnum_str grid_df["FIPNUM"] = grid_df["FIPNUM"] * 3333 - fipnum_big_str = grid.df2ecl(grid_df, "FIPNUM", dtype=int) + fipnum_big_str = grid.df2res(grid_df, "FIPNUM", dtype=int) assert "3333" in fipnum_big_str assert len(fipnum_big_str) > len(fipnum_str) os.chdir(tmp_path) - grid.df2ecl(grid_df, ["PERMX", "PERMY", "PERMZ"], dtype=float, filename="perm.inc") + grid.df2res(grid_df, ["PERMX", "PERMY", "PERMZ"], dtype=float, filename="perm.inc") assert Path("perm.inc").is_file() incstring = Path("perm.inc").read_text(encoding="utf8").splitlines() assert sum([1 for line in incstring if "PERM" in line]) == 6 - assert grid.df2ecl(grid_df, ["PERMX"], dtype=float, nocomments=True) == grid.df2ecl( + assert grid.df2res(grid_df, ["PERMX"], dtype=float, nocomments=True) == grid.df2res( grid_df, ["PERMX"], dtype="float", nocomments=True ) # with pytest.raises(ValueError, match="Wrong dtype argument"): - grid.df2ecl(grid_df, ["PERMX"], dtype=dict) + grid.df2res(grid_df, ["PERMX"], dtype=dict) with pytest.raises(ValueError): - grid.df2ecl(grid_df, ["PERMRR"]) + grid.df2res(grid_df, ["PERMRR"]) # Check when we have restart info included: gr_rst = grid.df(resdatafiles, rstdates="all") - fipnum_str_rst = grid.df2ecl(gr_rst, "FIPNUM", dtype=int, nocomments=True) + fipnum_str_rst = grid.df2res(gr_rst, "FIPNUM", dtype=int, nocomments=True) assert fipnum_str_rst == fipnum_str_nocomment # When dates are stacked, there are NaN's in the FIPNUM column, # which should be gracefully ignored. 
gr_rst_stacked = grid.df(resdatafiles, rstdates="all", stackdates=True) - fipnum_str_rst = grid.df2ecl(gr_rst_stacked, "FIPNUM", dtype=int, nocomments=True) + fipnum_str_rst = grid.df2res(gr_rst_stacked, "FIPNUM", dtype=int, nocomments=True) assert fipnum_str_rst == fipnum_str_nocomment # dateinheaders here will be ignored due to stackdates: @@ -260,10 +260,10 @@ def test_df2ecl(tmp_path): ) -def test_df2ecl_mock(): - """Test that we can use df2ecl for mocked minimal dataframes""" +def test_df2res_mock(): + """Test that we can use df2res for mocked minimal dataframes""" a_grid = pd.DataFrame(columns=["FIPNUM"], data=[[1], [2], [3]]) - simple_fipnum_inc = grid.df2ecl( + simple_fipnum_inc = grid.df2res( a_grid, keywords="FIPNUM", dtype=int, nocomments=True ) # (A warning is printed, that warning is warranted) diff --git a/tests/test_nnc.py b/tests/test_nnc.py index 48a64b94c..c6fa545a8 100644 --- a/tests/test_nnc.py +++ b/tests/test_nnc.py @@ -87,14 +87,14 @@ def test_nnc2df_faultnames(): # Remove I_x, J_x, K_x (and _y) which is not needed -def test_df2ecl_editnnc(tmp_path): +def test_df2res_editnnc(tmp_path): """Test generation of EDITNNC keyword""" resdatafiles = ResdataFiles(REEK) nncdf = nnc.df(resdatafiles) os.chdir(tmp_path) nncdf["TRANM"] = 2 - editnnc = nnc.df2ecl_editnnc(nncdf, filename="editnnc.inc") + editnnc = nnc.df2res_editnnc(nncdf, filename="editnnc.inc") editnnc_fromfile = Path("editnnc.inc").read_text(encoding="utf8") assert editnnc == editnnc_fromfile assert "EDITNNC" in editnnc @@ -103,17 +103,17 @@ def test_df2ecl_editnnc(tmp_path): # Fails when columns are missing with pytest.raises((KeyError, ValueError)): - nnc.df2ecl_editnnc(nncdf[["I1", "I2"]]) + nnc.df2res_editnnc(nncdf[["I1", "I2"]]) - editnnc = nnc.df2ecl_editnnc(nncdf, nocomments=True) + editnnc = nnc.df2res_editnnc(nncdf, nocomments=True) assert "avg multiplier" not in editnnc # Test compatibility with trans module: trans_df = trans.df(resdatafiles, addnnc=True) - editnnc = nnc.df2ecl_editnnc(trans_df.assign(TRANM=0.3)) + editnnc = nnc.df2res_editnnc(trans_df.assign(TRANM=0.3)) assert "avg multiplier 0.3" in editnnc or "avg multiplier 0.29999" in editnnc - print(nnc.df2ecl_editnnc(nnc.df(resdatafiles).head(4).assign(TRANM=0.1))) + print(nnc.df2res_editnnc(nnc.df(resdatafiles).head(4).assign(TRANM=0.1))) @pytest.mark.skipif(not HAVE_OPM, reason="Requires OPM") diff --git a/tests/test_pvt.py b/tests/test_pvt.py index 6b77beda2..e31aa7495 100644 --- a/tests/test_pvt.py +++ b/tests/test_pvt.py @@ -47,7 +47,7 @@ def test_pvto_strings(): assert set(dframe["PVTNUM"].values) == {1} assert max(dframe["PRESSURE"]) == 200 - dframe_via_string = pvt.pvto_fromdeck(pvt.df2ecl_pvto(dframe)) + dframe_via_string = pvt.pvto_fromdeck(pvt.df2res_pvto(dframe)) pd.testing.assert_frame_equal(dframe_via_string, dframe) # Provide TABDIMS in first test.. 
Infer later @@ -72,7 +72,7 @@ def test_pvto_strings(): assert len(dframe["PRESSURE"].unique()) == 6 assert len(dframe["VOLUMEFACTOR"].unique()) == 3 - dframe_via_string = pvt.pvto_fromdeck(pvt.df2ecl_pvto(dframe)) + dframe_via_string = pvt.pvto_fromdeck(pvt.df2res_pvto(dframe)) pd.testing.assert_frame_equal(dframe_via_string, dframe) # Now test the same but without TABDIMS: @@ -94,11 +94,11 @@ def test_pvto_strings(): assert len(dframe["RS"].unique()) == 4 assert len(dframe["PRESSURE"].unique()) == 6 assert len(dframe["VOLUMEFACTOR"].unique()) == 3 - dframe_via_string = pvt.pvto_fromdeck(pvt.df2ecl_pvto(dframe)) + dframe_via_string = pvt.pvto_fromdeck(pvt.df2res_pvto(dframe)) pd.testing.assert_frame_equal(dframe_via_string, dframe) # Test emtpy data: - inc = pvt.df2ecl_pvto(pvt.df("")) + inc = pvt.df2res_pvto(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -123,7 +123,7 @@ def test_pvdg_string(): assert "VISCOSITY" in dframe # Test emtpy data: - inc = pvt.df2ecl_pvdg(pvt.df("")) + inc = pvt.df2res_pvdg(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -152,7 +152,7 @@ def test_pvdo_string(): ) # Test emtpy data: - inc = pvt.df2ecl_pvdo(pvt.df("")) + inc = pvt.df2res_pvdo(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -177,7 +177,7 @@ def test_pvt_reek(): assert pvto_df["VOLUMEFACTOR"].max() == 2.851 assert pvto_df["VISCOSITY"].max() == 1.0001 - dframe_via_string = pvt.pvto_fromdeck(pvt.df2ecl_pvto(pvto_df)) + dframe_via_string = pvt.pvto_fromdeck(pvt.df2res_pvto(pvto_df)) pd.testing.assert_frame_equal(dframe_via_string, pvto_df) density_df = pvt.density_fromdeck(resdatafiles.get_ecldeck()) @@ -189,7 +189,7 @@ def test_pvt_reek(): ), check_like=True, ) - dframe_via_string = pvt.density_fromdeck(pvt.df2ecl_density(density_df)) + dframe_via_string = pvt.density_fromdeck(pvt.df2res_density(density_df)) pd.testing.assert_frame_equal(dframe_via_string, density_df) rock_df = pvt.rock_fromdeck(resdatafiles.get_ecldeck()) @@ -263,7 +263,7 @@ def test_pvtg_string(): assert max(pvtg_df["VISCOSITY"]) == 0.0393 # Test empty data: - inc = pvt.df2ecl_pvtg(pvt.df("")) + inc = pvt.df2res_pvtg(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -300,7 +300,7 @@ def test_density(): assert "WATERDENSITY" in density_df assert "GASDENSITY" in density_df - dframe_via_string = pvt.density_fromdeck(pvt.df2ecl_density(density_df)) + dframe_via_string = pvt.density_fromdeck(pvt.df2res_density(density_df)) pd.testing.assert_frame_equal(dframe_via_string, density_df) two_pvtnum_deck = """DENSITY @@ -316,11 +316,11 @@ def test_density(): assert density_df["PVTNUM"].max() == 2 assert density_df["PVTNUM"].min() == 1 assert "OILDENSITY" in density_df - dframe_via_string = pvt.density_fromdeck(pvt.df2ecl_density(density_df)) + dframe_via_string = pvt.density_fromdeck(pvt.df2res_density(density_df)) pd.testing.assert_frame_equal(dframe_via_string, density_df) # Test emtpy data: - inc = pvt.df2ecl_density(pvt.df("")) + inc = pvt.df2res_density(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -353,7 +353,7 @@ def test_pvtw(): assert len(pvtw_df) == 2 # Test emtpy data: - inc = pvt.df2ecl_pvtw(pvt.df("")) + inc = pvt.df2res_pvtw(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -366,11 +366,11 @@ def test_rock(): assert len(rock_df) == 1 assert "PRESSURE" in rock_df assert "COMPRESSIBILITY" in rock_df - dframe_via_string = pvt.rock_fromdeck(pvt.df2ecl_rock(rock_df)) + dframe_via_string = pvt.rock_fromdeck(pvt.df2res_rock(rock_df)) 
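# The ROCK round-trip above, as a standalone sketch (assumes res2df is
# installed; the numbers are arbitrary and the dataframe mirrors the test's
# rock_df):

import pandas as pd
from res2df import pvt

rock = pd.DataFrame(
    columns=["PVTNUM", "KEYWORD", "PRESSURE", "COMPRESSIBILITY"],
    data=[[1, "ROCK", 100, 0.001]],
)
rock_string = pvt.df2res_rock(rock)  # include-file string containing ROCK
assert "ROCK" in rock_string
assert len(pvt.rock_fromdeck(rock_string)) == 1  # parses back to one table row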
pd.testing.assert_frame_equal(dframe_via_string, rock_df) # Test emtpy data: - inc = pvt.df2ecl_rock(pvt.df("")) + inc = pvt.df2res_rock(pvt.df("")) assert "No data" in inc assert pvt.df(inc).empty @@ -475,22 +475,22 @@ def test_magic_stdout(tmp_path): assert not dframe.empty -def test_df2ecl(): - """df2ecl is a wrapper around the df2ecl_* functions +def test_df2res(): + """df2res is a wrapper around the df2res_* functions The validity of produced dataframes is tested in other test functions herein, here we mainly test for the API and error handling""" with pytest.raises(ValueError): - pvt.df2ecl(pd.DataFrame()) + pvt.df2res(pd.DataFrame()) -def test_df2ecl_pvto(): +def test_df2res_pvto(): """Test that we can print a PVTO dataframe to E100 include file""" dframe = pd.DataFrame( columns=["PVTNUM", "RS", "PRESSURE", "VOLUMEFACTOR", "VISCOSITY"], data=[[1, 50, 100, 2, 1.04]], ) - pvto_string = pvt.df2ecl_pvto(dframe) + pvto_string = pvt.df2res_pvto(dframe) assert "PVTO" in pvto_string assert "1.04" in pvto_string assert "100" in pvto_string @@ -506,7 +506,7 @@ def test_df2ecl_pvto(): columns=["PVTNUM", "RS", "PRESSURE", "VOLUMEFACTOR", "VISCOSITY"], data=[[1, 50, 100, 2, 1.04], [1, 50, 120, 3, 1.05]], ) - pvto_string = pvt.df2ecl_pvto(dframe) + pvto_string = pvt.df2res_pvto(dframe) assert "PVTO" in pvto_string assert "1.05" in pvto_string assert "120" in pvto_string @@ -519,17 +519,17 @@ def test_df2ecl_pvto(): ) # If PVTNUM is missing, the code gives up if there are many rows. - assert "PVTO" not in pvt.df2ecl_pvto( + assert "PVTO" not in pvt.df2res_pvto( pd.concat([dframe, dframe]).drop("PVTNUM", axis="columns") ) # If only one row, this is accepted: - assert "PVTO" in pvt.df2ecl_pvto(dframe.head(1).drop("PVTNUM", axis="columns")) + assert "PVTO" in pvt.df2res_pvto(dframe.head(1).drop("PVTNUM", axis="columns")) # (the corner case with only one row is not very meaningful, but at # least it is well defined how to treat it) -def test_df2ecl_rock(tmp_path): +def test_df2res_rock(tmp_path): """Test generation of ROCK include files from dataframes""" os.chdir(tmp_path) @@ -538,14 +538,14 @@ def test_df2ecl_rock(tmp_path): data=[[1, "ROCK", 100, 0.001]], ) - rock_inc = pvt.df2ecl(rock_df) + rock_inc = pvt.df2res(rock_df) assert "ROCK" in rock_inc - rock_inc = pvt.df2ecl(rock_df, comments=dict(ROCK="foo")) + rock_inc = pvt.df2res(rock_df, comments=dict(ROCK="foo")) assert "foo" in rock_inc - rock_inc = pvt.df2ecl(rock_df, comments=dict(DENSITY="foo")) + rock_inc = pvt.df2res(rock_df, comments=dict(DENSITY="foo")) assert "foo" not in rock_inc - rock_inc = pvt.df2ecl(rock_df, comments=dict(ROCK="foo\nbar"), filename="foo.inc") + rock_inc = pvt.df2res(rock_df, comments=dict(ROCK="foo\nbar"), filename="foo.inc") assert Path("foo.inc").is_file() assert "foo" in rock_inc assert "bar" in rock_inc @@ -559,15 +559,15 @@ def test_df2ecl_rock(tmp_path): rock_df = rock_df_from_inc.reindex(sorted(rock_df.columns), axis=1) pd.testing.assert_frame_equal(rock_df_from_inc, rock_df) - rock_inc = pvt.df2ecl(rock_df, keywords=["DENSITY"]) + rock_inc = pvt.df2res(rock_df, keywords=["DENSITY"]) assert not rock_inc - rock_inc = pvt.df2ecl(rock_df, keywords="DENSITY") + rock_inc = pvt.df2res(rock_df, keywords="DENSITY") assert not rock_inc - rock_inc = pvt.df2ecl(rock_df, keywords=["ROCK", "DENSITY"]) + rock_inc = pvt.df2res(rock_df, keywords=["ROCK", "DENSITY"]) assert "ROCK" in rock_inc assert "DENSITY" not in rock_inc - rock_inc = pvt.df2ecl(rock_df, keywords="ROCK") + rock_inc = pvt.df2res(rock_df, keywords="ROCK") 
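# A sketch of writing the include file straight to disk instead of to a
# string, combining the keywords and filename arguments seen in this test
# (the filename is arbitrary):

import pandas as pd
from pathlib import Path
from res2df import pvt

rock = pd.DataFrame(
    columns=["PVTNUM", "KEYWORD", "PRESSURE", "COMPRESSIBILITY"],
    data=[[1, "ROCK", 100, 0.001]],
)
pvt.df2res(rock, keywords="ROCK", filename="rock.inc")
assert Path("rock.inc").is_file()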
assert "ROCK" in rock_inc # This dataframe is ignored, if we miss PVTNUM: @@ -575,40 +575,40 @@ def test_df2ecl_rock(tmp_path): columns=["KEYWORD", "PRESSURE", "COMPRESSIBILITY"], data=[["ROCK", 100, 0.001], ["ROCK", 200, 0.002]], ) - assert "ROCK" not in pvt.df2ecl_rock(ambig_rock_df) + assert "ROCK" not in pvt.df2res_rock(ambig_rock_df) # But if only one row, it is ok: - assert "ROCK" in pvt.df2ecl_rock(ambig_rock_df.head(1)) + assert "ROCK" in pvt.df2res_rock(ambig_rock_df.head(1)) # If we don't want the ROCK keyword, we won't get it: - nonrock_inc = pvt.df2ecl(rock_df, keywords=["PVTO"]) + nonrock_inc = pvt.df2res(rock_df, keywords=["PVTO"]) assert "ROCK" not in nonrock_inc -def test_df2ecl_density(): +def test_df2res_density(): """Test generation of PVT density include files from dataframes""" density_df = pd.DataFrame( columns=["PVTNUM", "OILDENSITY", "WATERDENSITY", "GASDENSITY"], data=[[1, 827.64, 999.04, 1.1427]], ) - dens_inc = pvt.df2ecl_density(density_df) + dens_inc = pvt.df2res_density(density_df) assert "DENSITY" in dens_inc # If PVTNUM is missing, the code gives up: - assert "DENSITY" not in pvt.df2ecl_density( + assert "DENSITY" not in pvt.df2res_density( pd.concat([density_df, density_df]).drop("PVTNUM", axis="columns") ) # Unless there is only one row: - assert "DENSITY" in pvt.df2ecl_density(density_df.drop("PVTNUM", axis="columns")) + assert "DENSITY" in pvt.df2res_density(density_df.drop("PVTNUM", axis="columns")) # Missing column: with pytest.raises(KeyError, match="OILDENSITY"): - pvt.df2ecl_density(density_df.drop("OILDENSITY", axis="columns")) + pvt.df2res_density(density_df.drop("OILDENSITY", axis="columns")) -def test_df2ecl_pvtw(): +def test_df2res_pvtw(): """Test generation of PVTW include statements""" pvtw_df = pd.DataFrame( columns=[ @@ -621,22 +621,22 @@ def test_df2ecl_pvtw(): ], data=[[327.3, 1.03, 4.51e-005, 0.25, 0.0, 1]], ) - assert "PVTW" in pvt.df2ecl_pvtw(pvtw_df) + assert "PVTW" in pvt.df2res_pvtw(pvtw_df) # If PVTNUM is missing, the code gives up: - assert "PVTW" not in pvt.df2ecl_pvtw( + assert "PVTW" not in pvt.df2res_pvtw( pd.concat([pvtw_df, pvtw_df]).drop("PVTNUM", axis="columns") ) # Unless there is only one row: - assert "PVTW" in pvt.df2ecl_pvtw(pvtw_df.drop("PVTNUM", axis="columns")) + assert "PVTW" in pvt.df2res_pvtw(pvtw_df.drop("PVTNUM", axis="columns")) # Missing column: with pytest.raises(KeyError, match="VOLUMEFACTOR"): - pvt.df2ecl_pvtw(pvtw_df.drop("VOLUMEFACTOR", axis="columns")) + pvt.df2res_pvtw(pvtw_df.drop("VOLUMEFACTOR", axis="columns")) -def test_df2ecl_pvtg(): +def test_df2res_pvtg(): """Test generation of PVTG include statements""" pvtg_df = pd.DataFrame( columns=["OGR", "VOLUMEFACTOR", "VISCOSITY", "PRESSURE", "PVTNUM"], @@ -646,26 +646,26 @@ def test_df2ecl_pvtg(): [0.00014, 0.0523, 0.0234, 60.0, 2], ], ) - assert "PVTG" in pvt.df2ecl_pvtg(pvtg_df) - assert "PVTG" in pvt.df2ecl_pvtg(pvtg_df.assign(KEYWORD="PVTG")) + assert "PVTG" in pvt.df2res_pvtg(pvtg_df) + assert "PVTG" in pvt.df2res_pvtg(pvtg_df.assign(KEYWORD="PVTG")) pd.testing.assert_frame_equal( - pvt.df(pvt.df2ecl_pvtg(pvtg_df)).drop("KEYWORD", axis="columns"), pvtg_df + pvt.df(pvt.df2res_pvtg(pvtg_df)).drop("KEYWORD", axis="columns"), pvtg_df ) # If PVTNUM is missing, the code gives up: - assert "PVTG" not in pvt.df2ecl_pvtg( + assert "PVTG" not in pvt.df2res_pvtg( pd.concat([pvtg_df, pvtg_df]).drop("PVTNUM", axis="columns") ) # Unless there is only one row: - assert "PVTG" in pvt.df2ecl_pvtg(pvtg_df.head(1).drop("PVTNUM", axis="columns")) + assert "PVTG" in 
pvt.df2res_pvtg(pvtg_df.head(1).drop("PVTNUM", axis="columns")) # Missing column: with pytest.raises(KeyError, match="VOLUMEFACTOR"): - pvt.df2ecl_pvtg(pvtg_df.drop("VOLUMEFACTOR", axis="columns")) + pvt.df2res_pvtg(pvtg_df.drop("VOLUMEFACTOR", axis="columns")) -def test_df2ecl_pvdo_pvdg(): +def test_df2res_pvdo_pvdg(): """Test construction of PVDO and PVDG statements from dataframe. The keyword data and code is similar enough to warrant one test @@ -680,33 +680,33 @@ def test_df2ecl_pvdo_pvdg(): ], ) - assert "PVDO" in pvt.df2ecl_pvdo(pvdog_df) - assert "PVDG" in pvt.df2ecl_pvdg(pvdog_df) + assert "PVDO" in pvt.df2res_pvdo(pvdog_df) + assert "PVDG" in pvt.df2res_pvdg(pvdog_df) - assert "PVDO" in pvt.df2ecl_pvdo(pvdog_df.assign(KEYWORD="PVDO")) - assert "PVDG" in pvt.df2ecl_pvdg(pvdog_df.assign(KEYWORD="PVDG")) + assert "PVDO" in pvt.df2res_pvdo(pvdog_df.assign(KEYWORD="PVDO")) + assert "PVDG" in pvt.df2res_pvdg(pvdog_df.assign(KEYWORD="PVDG")) pd.testing.assert_frame_equal( - pvt.df(pvt.df2ecl_pvdo(pvdog_df)).drop("KEYWORD", axis="columns"), pvdog_df + pvt.df(pvt.df2res_pvdo(pvdog_df)).drop("KEYWORD", axis="columns"), pvdog_df ) pd.testing.assert_frame_equal( - pvt.df(pvt.df2ecl_pvdg(pvdog_df)).drop("KEYWORD", axis="columns"), pvdog_df + pvt.df(pvt.df2res_pvdg(pvdog_df)).drop("KEYWORD", axis="columns"), pvdog_df ) # If PVTNUM is missing, the code gives up: - assert "PVDO" not in pvt.df2ecl_pvdo( + assert "PVDO" not in pvt.df2res_pvdo( pd.concat([pvdog_df, pvdog_df]).drop("PVTNUM", axis="columns") ) - assert "PVDG" not in pvt.df2ecl_pvdg( + assert "PVDG" not in pvt.df2res_pvdg( pd.concat([pvdog_df, pvdog_df]).drop("PVTNUM", axis="columns") ) # Unless there is only one row: - assert "PVDO" in pvt.df2ecl_pvdo(pvdog_df.head(1).drop("PVTNUM", axis="columns")) - assert "PVDG" in pvt.df2ecl_pvdg(pvdog_df.head(1).drop("PVTNUM", axis="columns")) + assert "PVDO" in pvt.df2res_pvdo(pvdog_df.head(1).drop("PVTNUM", axis="columns")) + assert "PVDG" in pvt.df2res_pvdg(pvdog_df.head(1).drop("PVTNUM", axis="columns")) # Missing column: with pytest.raises(KeyError, match="VOLUMEFACTOR"): - pvt.df2ecl_pvdo(pvdog_df.drop("VOLUMEFACTOR", axis="columns")) + pvt.df2res_pvdo(pvdog_df.drop("VOLUMEFACTOR", axis="columns")) with pytest.raises(KeyError, match="VOLUMEFACTOR"): - pvt.df2ecl_pvdg(pvdog_df.drop("VOLUMEFACTOR", axis="columns")) + pvt.df2res_pvdg(pvdog_df.drop("VOLUMEFACTOR", axis="columns")) diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index 90579ce32..583c007fb 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -58,7 +58,7 @@ def test_satfunc_roundtrip(): it back to an include file, and then reinterpret it to the same""" resdatafiles = ResdataFiles(EIGHTCELLS) satdf = satfunc.df(resdatafiles.get_ecldeck()) - inc = satfunc.df2ecl(satdf) + inc = satfunc.df2res(satdf) df_from_inc = satfunc.df(inc) pd.testing.assert_frame_equal( satdf.sort_values(["SATNUM", "KEYWORD"]), @@ -66,20 +66,20 @@ def test_satfunc_roundtrip(): ) -def test_df2ecl_order(): +def test_df2res_order(): """Test that we can control the keyword order in generated strings by the list supplied in keywords argument""" resdatafiles = ResdataFiles(REEK) satdf = satfunc.df(resdatafiles.get_ecldeck()) - swof_sgof = satfunc.df2ecl(satdf, keywords=["SWOF", "SGOF"]) + swof_sgof = satfunc.df2res(satdf, keywords=["SWOF", "SGOF"]) assert swof_sgof.find("SWOF") < swof_sgof.find("SGOF") - sgof_swof = satfunc.df2ecl(satdf, keywords=["SGOF", "SWOF"]) + sgof_swof = satfunc.df2res(satdf, keywords=["SGOF", "SWOF"]) assert 
sgof_swof.find("SGOF") < sgof_swof.find("SWOF") - only_swof = satfunc.df2ecl(satdf, keywords=["SWOF"]) + only_swof = satfunc.df2res(satdf, keywords=["SWOF"]) assert "SGOF" not in only_swof - only_sgof = satfunc.df2ecl(satdf, keywords="SGOF") + only_sgof = satfunc.df2res(satdf, keywords="SGOF") assert "SWOF" not in only_sgof @@ -90,7 +90,7 @@ def test_nodata(): satdf = satfunc.df(swofstr) assert len(satdf) == 0 - inc = satfunc.df2ecl_swof(satdf) + inc = satfunc.df2res_swof(satdf) assert "No data" in inc df_from_inc = satfunc.df(inc) assert df_from_inc.empty @@ -245,7 +245,7 @@ def test_str2df(string, expected_df): if expected_df.empty: return - inc = satfunc.df2ecl(satdf) + inc = satfunc.df2res(satdf) df_from_inc = satfunc.df(inc) pd.testing.assert_frame_equal(df_from_inc, expected_df) @@ -272,7 +272,7 @@ def test_sgof_satnuminferrer(tmp_path, mocker): assert "SATNUM" in sgofdf assert len(sgofdf["SATNUM"].unique()) == 3 assert len(sgofdf) == 8 - inc = satfunc.df2ecl(sgofdf) + inc = satfunc.df2res(sgofdf) df_from_inc = satfunc.df(inc) pd.testing.assert_frame_equal(sgofdf, df_from_inc) diff --git a/tests/test_summary.py b/tests/test_summary.py index 2fc3f4a05..80a0269f9 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -18,7 +18,7 @@ _fix_dframe_for_libecl, date_range, df, - df2eclsum, + df2ressum, resample_smry_dates, smry_meta, ) @@ -411,7 +411,7 @@ def test_foreseeable_future(tmp_path): {"DATE": "2500-01-01", "FPR": 180}, ] ) - eclsum = df2eclsum(src_dframe, casename="PLUGABANDON") + eclsum = df2ressum(src_dframe, casename="PLUGABANDON") dframe = summary.df(eclsum) assert ( @@ -437,7 +437,7 @@ def test_foreseeable_future(tmp_path): "FPR": range(70), } ) - eclsum = df2eclsum(src_dframe, casename="PLUGABANDON") + eclsum = df2ressum(src_dframe, casename="PLUGABANDON") dframe = summary.df(eclsum) # Still buggy: assert dframe.index[-1] == dt(2068, 12, 31, 23, 57, 52) @@ -449,7 +449,7 @@ def test_foreseeable_future(tmp_path): "FPR": range(69), } ) - eclsum = df2eclsum(src_dframe, casename="PLUGABANDON") + eclsum = df2ressum(src_dframe, casename="PLUGABANDON") dframe = summary.df(eclsum) # Works fine when stepping only 68 years: assert dframe.index[-1] == dt(2468, 1, 1, 0, 0, 0) @@ -850,7 +850,7 @@ def test_smry_meta_synthetic(): {"DATE": np.datetime64("2016-01-01"), "FOPT": 1000, "FOPR": 100}, ] ).set_index("DATE") - synt_meta = smry_meta(df2eclsum(dframe)) + synt_meta = smry_meta(df2ressum(dframe)) # Dummy unit provided by EclSum: assert synt_meta["FOPT"]["unit"] == "UNIT" @@ -944,7 +944,7 @@ def test_smry_meta_synthetic(): ], ) def test_fix_dframe_for_libecl(dframe, expected_dframe): - """Test the dataframe preprocessor/validator for df2eclsum works""" + """Test the dataframe preprocessor/validator for df2ressum works""" pd.testing.assert_frame_equal( _fix_dframe_for_libecl(dframe), expected_dframe, check_index_type=False ) @@ -1019,14 +1019,14 @@ def test_fix_dframe_for_libecl(dframe, expected_dframe): ), ], ) -def test_df2eclsum(dframe): +def test_df2ressum(dframe): """Test that a dataframe can be converted to an EclSum object, and then read back again""" # Massage the dframe first so we can assert on equivalence after. 
dframe = _fix_dframe_for_libecl(dframe) - eclsum = df2eclsum(dframe) + eclsum = df2ressum(dframe) if dframe.empty: assert eclsum is None return @@ -1039,7 +1039,7 @@ def test_df2eclsum(dframe): ) -def test_df2eclsum_datetimeindex(): +def test_df2ressum_datetimeindex(): """Test that providing a dataframe with a datetimeindex also works""" dframe = pd.DataFrame( [ @@ -1049,7 +1049,7 @@ def test_df2eclsum_datetimeindex(): dframe["DATE"] = pd.to_datetime(dframe["DATE"]) dframe.set_index("DATE") - roundtrip = df(df2eclsum(dframe)) + roundtrip = df(df2ressum(dframe)) assert isinstance(roundtrip.index, pd.DatetimeIndex) assert roundtrip["FOPR"].values == [100] assert roundtrip["FOPT"].values == [1000] @@ -1063,7 +1063,7 @@ def test_duplicated_summary_vectors(caplog): res2df.summary.df() should deduplicate this, and give a warning. """ - # res2df.df2eclsum() is not able to mock such a UNSMRY file. + # res2df.df2ressum() is not able to mock such a UNSMRY file. dupe_datafile = ( TESTDIR / "data" @@ -1182,7 +1182,7 @@ def test_res2df_errors(tmp_path): assert df(ResdataFiles("FOO")).empty -def test_df2eclsum_errors(): +def test_df2ressum_errors(): """Test various error conditions, checking that the correct error message is emitted""" dframe = pd.DataFrame( @@ -1191,18 +1191,18 @@ def test_df2eclsum_errors(): ] ) with pytest.raises(ValueError, match="casename foobar must be UPPER CASE"): - df2eclsum(dframe, casename="foobar") + df2ressum(dframe, casename="foobar") with pytest.raises(ValueError, match="Do not use dots in casename"): - df2eclsum(dframe, casename="FOOBAR.UNSMRY") # .UNSMRY should not be included + df2ressum(dframe, casename="FOOBAR.UNSMRY") # .UNSMRY should not be included # No date included: with pytest.raises(ValueError, match="dataframe must have a datetime index"): - df2eclsum(pd.DataFrame([{"FOPT": 1000}])) + df2ressum(pd.DataFrame([{"FOPT": 1000}])) @pytest.mark.integration def test_csv2res_summary(tmp_path, mocker): - """Check that we can call df2eclsum through the csv2res command line + """Check that we can call df2ressum through the csv2res command line utility""" dframe = pd.DataFrame( [ diff --git a/tests/test_vfp.py b/tests/test_vfp.py index c3f94c7b2..3d048a88a 100644 --- a/tests/test_vfp.py +++ b/tests/test_vfp.py @@ -1015,9 +1015,9 @@ def test_res2pyarrow_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPPROD_CASES[0]]) -def test_df2ecl_vfpprod(test_input, expected): - """Test df2ecl for VFPPROD (case without default values)""" - ecl_vfpprod = vfp.df2ecl(expected, "VFPPROD") +def test_df2res_vfpprod(test_input, expected): + """Test df2res for VFPPROD (case without default values)""" + ecl_vfpprod = vfp.df2res(expected, "VFPPROD") assert ecl_vfpprod.strip() == test_input.strip() @@ -1025,13 +1025,13 @@ def test_df2ecl_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPPROD_CASES[0]]) def test_pyarrow2ecl_vfpprod(test_input, expected): """Test pyarrow2ecl for VFPPROD (case without default values)""" - deck = ResdataFiles.str2deck(vfp.df2ecl(expected, "VFPPROD")) + deck = ResdataFiles.str2deck(vfp.df2res(expected, "VFPPROD")) vfpprod_df = vfp.df(deck, "VFPPROD") vfpprod_data = vfp.df2basic_data(vfpprod_df) vfpprod_pa = vfp.basic_data2pyarrow(vfpprod_data) vfpprod_data = vfp.pyarrow2basic_data(vfpprod_pa) vfpprod_df = vfp.basic_data2df(vfpprod_data) - vfpprod_ecl = vfp.df2ecl(vfpprod_df, "VFPPROD") + vfpprod_ecl = vfp.df2res(vfpprod_df, "VFPPROD") assert vfpprod_ecl.strip() == test_input.strip() @@ -1046,9 
+1046,9 @@ def test_res2df_vfpinj(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPINJ_CASES[0]]) -def test_df2ecl_vfpinj(test_input, expected): - """Test df2ecl for VFPINJ (case without default values)""" - ecl_vfpinj = vfp.df2ecl(expected, "VFPINJ") +def test_df2res_vfpinj(test_input, expected): + """Test df2res for VFPINJ (case without default values)""" + ecl_vfpinj = vfp.df2res(expected, "VFPINJ") assert ecl_vfpinj.strip() == test_input.strip() @@ -1056,13 +1056,13 @@ def test_df2ecl_vfpinj(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPINJ_CASES[0]]) def test_pyarrow2ecl_vfpinj(test_input, expected): """Test pyarrow2ecl for VFPPROD (case without default values)""" - deck = ResdataFiles.str2deck(vfp.df2ecl(expected, "VFPINJ")) + deck = ResdataFiles.str2deck(vfp.df2res(expected, "VFPINJ")) vfpinj_df = vfp.df(deck, "VFPINJ") vfpinj_data = vfp.df2basic_data(vfpinj_df) vfpinj_pa = vfp.basic_data2pyarrow(vfpinj_data) vfpinj_data = vfp.pyarrow2basic_data(vfpinj_pa) vfpinj_df = vfp.basic_data2df(vfpinj_data) - vfpinj_ecl = vfp.df2ecl(vfpinj_df, "VFPINJ") + vfpinj_ecl = vfp.df2res(vfpinj_df, "VFPINJ") assert vfpinj_ecl.strip() == test_input.strip() From aacd22da476263abdc6e9c4f7861d1e513271ae6 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 14 Nov 2023 16:09:20 +0100 Subject: [PATCH 13/68] Rename in common.py --- res2df/common.py | 32 +++++++++++++++----------------- res2df/equil.py | 12 ++++++------ res2df/fipreports.py | 4 ++-- res2df/grid.py | 2 +- res2df/pvt.py | 14 +++++++------- res2df/satfunc.py | 2 +- tests/test_common.py | 28 ++++++++++++++-------------- 7 files changed, 46 insertions(+), 48 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index 164cbcb86..dffd79460 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -94,7 +94,7 @@ .splitlines() ) ] -ECLMONTH2NUM = { +MONTH2NUM = { "JAN": 1, "FEB": 2, "MAR": 3, @@ -109,7 +109,7 @@ "NOV": 11, "DEC": 12, } -NUM2ECLMONTH = {num: month for month, num in ECLMONTH2NUM.items()} +NUM2MONTH = {num: month for month, num in MONTH2NUM.items()} logger: logging.Logger = logging.getLogger(__name__) @@ -165,14 +165,12 @@ def write_inc_stdout_file(string: str, outputfilename: str) -> None: print(f"Wrote to {outputfilename}") -def parse_ecl_month(eclmonth: str) -> int: - """Translate Eclipse month strings to integer months""" - return ECLMONTH2NUM[eclmonth] +def parse_month(rdmonth: str) -> int: + """Translate resdata month strings to integer months""" + return MONTH2NUM[rdmonth] -def datetime_to_eclipsedate( - timestamp: Union[str, datetime.datetime, datetime.date] -) -> str: +def datetime_to_ecldate(timestamp: Union[str, datetime.datetime, datetime.date]) -> str: """Convert a Python timestamp or date to the Eclipse DATE format""" if isinstance(timestamp, str): if list(map(len, timestamp.split(" ")[0].split("-"))) != [4, 2, 2]: @@ -181,13 +179,13 @@ def datetime_to_eclipsedate( timestamp = dateutil.parser.parse(timestamp) # noqa (py36 flake8 bug) if not isinstance(timestamp, (datetime.datetime, datetime.date)): raise TypeError("Require string or datetime") - string = f"{timestamp.day} '{NUM2ECLMONTH[timestamp.month]}' {timestamp.year}" + string = f"{timestamp.day} '{NUM2MONTH[timestamp.month]}' {timestamp.year}" if isinstance(timestamp, datetime.datetime): string += " " + timestamp.strftime("%H:%M:%S") return string.replace("00:00:00", "").strip() -def ecl_keyworddata_to_df( +def keyworddata_to_df( deck, keyword: str, renamer: Optional[Dict[str, Union[str, 
List[str]]]] = None, @@ -353,7 +351,7 @@ def parse_opmio_date_rec(record: "opm.io.DeckRecord") -> datetime.date: day = record[0].get_int(0) month = record[1].get_str(0) year = record[2].get_int(0) - return datetime.date(year=year, month=parse_ecl_month(month), day=day) + return datetime.date(year=year, month=parse_month(month), day=day) def parse_opmio_tstep_rec(record: "opm.io.DeckRecord") -> List[Union[float, int]]: @@ -521,7 +519,7 @@ def df2res( This function hands over the actual text generation pr. keyword to functions named df2res_ in the calling module. - These functions may again use generic_ecltable() from this module + These functions may again use generic_deck_table() from this module for the actual string construction. Args: @@ -539,7 +537,7 @@ def df2res( to file. Returns: - string that can be used as an include file for Eclipse. + string that can be used as an include file for resdata. """ from_module = inspect.stack()[1] calling_module = inspect.getmodule(from_module[0]) @@ -624,15 +622,15 @@ def df2res( return string -def generic_ecltable( +def generic_deck_table( dframe: pd.DataFrame, keyword: str, comment: Optional[str] = None, renamer: Optional[Dict[str, str]] = None, drop_trailing_columns: bool = True, ) -> str: - """Construct a typical Eclipse table for data following - a keyword. Each row (record in Eclipse terms) ends with a slash. + """Construct a deck table for data following + a keyword. Each row ends with a slash. This function will *not* add a final slash after all rows, as this is keyword dependent. Some keywords require it, some keywords @@ -747,7 +745,7 @@ def generic_ecltable( return string + tablestring + "\n" -def runlength_eclcompress(string: str, sep: str = " ") -> str: +def runlength_compress(string: str, sep: str = " ") -> str: """Compress a string of space-separated elements so that 2 2 2 2 2 3 3 4 diff --git a/res2df/equil.py b/res2df/equil.py index a7f47ce33..05e6f2eb4 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -141,7 +141,7 @@ def rsvd_fromdeck( """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "RSVD", renamer=RENAMERS["RSVD"], recordcountername="EQLNUM" ) @@ -158,7 +158,7 @@ def rvvd_fromdeck( """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "RVVD", renamer=RENAMERS["RVVD"], recordcountername="EQLNUM" ) @@ -175,7 +175,7 @@ def pbvd_fromdeck( """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "PBVD", renamer=RENAMERS["PBVD"], recordcountername="EQLNUM" ) @@ -192,7 +192,7 @@ def pdvd_fromdeck( """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "PDVD", renamer=RENAMERS["PDVD"], recordcountername="EQLNUM" ) @@ -264,7 +264,7 @@ def equil_fromdeck( raise ValueError(f"Could not determine phase configuration, got '{phases}'") columnrenamer = RENAMERS[phases_from_deck(deck)] - dataframe = common.ecl_keyworddata_to_df( + dataframe = common.keyworddata_to_df( deck, "EQUIL", renamer=columnrenamer, recordcountername="EQLNUM" ) @@ -418,7 +418,7 @@ def df2res_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: 
phases = phases_from_columns(subset.columns) - return common.generic_ecltable( + return common.generic_deck_table( subset, "EQUIL", renamer=RENAMERS[phases], # type: ignore diff --git a/res2df/fipreports.py b/res2df/fipreports.py index 493a3073d..7274a05f3 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -11,7 +11,7 @@ import pandas as pd from res2df import ResdataFiles, getLogger_res2csv -from res2df.common import parse_ecl_month, write_dframe_stdout_file +from res2df.common import parse_month, write_dframe_stdout_file logger = logging.getLogger(__name__) @@ -152,7 +152,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra if matcheddate is not None: newdate = datetime.date( year=int(matcheddate.group(3)), - month=parse_ecl_month(matcheddate.group(2).upper()), + month=parse_month(matcheddate.group(2).upper()), day=int(matcheddate.group(1)), ) if newdate != date: diff --git a/res2df/grid.py b/res2df/grid.py index 5f72aa7f3..e9e87d560 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -737,7 +737,7 @@ def df2res( ) logger.warning("Data will be dumped, but may error in simulator") strvector = " ".join([str(x) for x in vector]) - strvector = common.runlength_eclcompress(strvector) + strvector = common.runlength_compress(strvector) string += keyword + "\n" indent = " " * 5 diff --git a/res2df/pvt.py b/res2df/pvt.py index c10fc6355..904299449 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -81,7 +81,7 @@ def pvtw_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "PVTW", renamer=RENAMERS["PVTW"], recordcountername="PVTNUM" ) @@ -98,7 +98,7 @@ def density_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "DENSITY", renamer=RENAMERS["DENSITY"], recordcountername="PVTNUM" ) @@ -115,7 +115,7 @@ def rock_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - return common.ecl_keyworddata_to_df( + return common.keyworddata_to_df( deck, "ROCK", renamer=RENAMERS["ROCK"], recordcountername="PVTNUM" ) @@ -132,7 +132,7 @@ def pvto_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvto_df = common.ecl_keyworddata_to_df( + pvto_df = common.keyworddata_to_df( deck, "PVTO", renamer=RENAMERS["PVTO"], emptyrecordcountername="PVTNUM" ) return pvto_df @@ -150,7 +150,7 @@ def pvdo_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvdg_df = common.ecl_keyworddata_to_df( + pvdg_df = common.keyworddata_to_df( deck, "PVDO", renamer=RENAMERS["PVDO"], recordcountername="PVTNUM" ) return pvdg_df @@ -168,7 +168,7 @@ def pvdg_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvdg_df = common.ecl_keyworddata_to_df( + pvdg_df = common.keyworddata_to_df( deck, "PVDG", renamer=RENAMERS["PVDG"], recordcountername="PVTNUM" ) return pvdg_df @@ -186,7 +186,7 @@ def pvtg_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvtg_df = common.ecl_keyworddata_to_df( + pvtg_df = common.keyworddata_to_df( deck, "PVTG", renamer=RENAMERS["PVTG"], emptyrecordcountername="PVTNUM" ) return pvtg_df diff --git 
a/res2df/satfunc.py b/res2df/satfunc.py index 849661a23..5fbbefb2c 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -104,7 +104,7 @@ def df( for keyword in wanted_keywords: frames.append( interpolate_defaults( - common.ecl_keyworddata_to_df( + common.keyworddata_to_df( deck, keyword, renamer=RENAMERS[keyword], recordcountername="SATNUM" ).assign(KEYWORD=keyword) ) diff --git a/tests/test_common.py b/tests/test_common.py index f09d0f681..65d6e63d5 100644 --- a/tests/test_common.py +++ b/tests/test_common.py @@ -250,24 +250,24 @@ def test_df2res(): ), ], ) -def test_datetime_to_eclipsedate(somedate, expected): +def test_datetime_to_ecldate(somedate, expected): """Test conversion of datetime to Eclipse date or datetime syntax""" - assert common.datetime_to_eclipsedate(somedate) == expected + assert common.datetime_to_ecldate(somedate) == expected def test_eclcompress(): """Test that we can compress string using Eclipse style run-length encoding""" - assert common.runlength_eclcompress("") == "" - assert common.runlength_eclcompress(" ") == "" - assert common.runlength_eclcompress("1 2") == "1 2" - assert common.runlength_eclcompress("1 2", sep=" ") == "1 2" - assert common.runlength_eclcompress("1 2", sep=" ") == "1 2" - assert common.runlength_eclcompress("1") == "1" - assert common.runlength_eclcompress("1 1") == "2*1" - assert common.runlength_eclcompress("1 1 1") == "3*1" - assert common.runlength_eclcompress("1 1 1") == "3*1" - assert common.runlength_eclcompress("1 \n 1 1 2") == "3*1 2" + assert common.runlength_compress("") == "" + assert common.runlength_compress(" ") == "" + assert common.runlength_compress("1 2") == "1 2" + assert common.runlength_compress("1 2", sep=" ") == "1 2" + assert common.runlength_compress("1 2", sep=" ") == "1 2" + assert common.runlength_compress("1") == "1" + assert common.runlength_compress("1 1") == "2*1" + assert common.runlength_compress("1 1 1") == "3*1" + assert common.runlength_compress("1 1 1") == "3*1" + assert common.runlength_compress("1 \n 1 1 2") == "3*1 2" @pytest.mark.parametrize( @@ -446,10 +446,10 @@ def test_well_matching_template(template, wells, output): ), ], ) -def test_generic_ecltable( +def test_generic_deck_table( dframe, keyword, comment, renamer, drop_trailing_columns, expected ): - stringtable = common.generic_ecltable( + stringtable = common.generic_deck_table( dframe, keyword, comment=comment, From e80d4a2b0cb7b92c24ca7e9e9e4837300f7a4519 Mon Sep 17 00:00:00 2001 From: "Yngve S. 
Kristiansen" Date: Wed, 15 Nov 2023 11:45:21 +0100 Subject: [PATCH 14/68] eclsum->summary --- res2df/csv2res.py | 2 +- res2df/resdatafiles.py | 12 +++--- res2df/summary.py | 93 +++++++++++++++++++++--------------------- tests/test_eclfiles.py | 2 +- tests/test_summary.py | 32 +++++++-------- 5 files changed, 71 insertions(+), 70 deletions(-) diff --git a/res2df/csv2res.py b/res2df/csv2res.py index dcd040c7a..cdeec3199 100644 --- a/res2df/csv2res.py +++ b/res2df/csv2res.py @@ -51,7 +51,7 @@ def get_parser() -> argparse.ArgumentParser: summary_parser = subparsers.add_parser( "summary", - help="Write EclSum UNSMRY files", + help="Write summary UNSMRY files", description=("Write Eclipse UNSMRY files from CSV files."), ) summary.fill_reverse_parser(summary_parser) diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index 6207ded37..ab65da265 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -70,7 +70,7 @@ def __init__(self, eclbase): # Set class variables to None self._egridfile = None # Should be ResdataFile self._initfile = None # Should be ResdataFile - self._eclsum = None # Should be Summary + self._summary = None # Should be Summary self._egrid = None # Should be Grid @@ -138,7 +138,7 @@ def get_egridfile(self) -> ResdataFile: return self._egridfile - def get_eclsum(self, include_restart: bool = True) -> Summary: + def get_summary(self, include_restart: bool = True) -> Summary: """Find and return the summary file and return as Summary object @@ -146,15 +146,15 @@ def get_eclsum(self, include_restart: bool = True) -> Summary: include_restart: Sent to libecl for whether restart files should be traversed. """ - if not self._eclsum: + if not self._summary: smryfilename = self._eclbase + ".UNSMRY" if not Path(smryfilename).is_file(): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), smryfilename ) logger.info("Opening UNSMRY file: %s", smryfilename) - self._eclsum = Summary(smryfilename, include_restart=include_restart) - return self._eclsum + self._summary = Summary(smryfilename, include_restart=include_restart) + return self._summary def get_initfile(self) -> ResdataFile: """Find and return the INIT file as an ResdataFile object""" @@ -207,7 +207,7 @@ def close(self) -> None: self._egridfile = None self._initfile = None # This is necessary for garbage collection to close the Summary file: - self._eclsum = None + self._summary = None self._rstfile = None self._rftfile = None diff --git a/res2df/summary.py b/res2df/summary.py index bcf228a46..afbc6cb63 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -84,7 +84,7 @@ def _ensure_date_or_none(some_date: Optional[Union[str, dt.date]]) -> Optional[d def _crop_datelist( - eclsumsdates: List[dt.datetime], + summarydates: List[dt.datetime], freq: Union[dt.date, dt.datetime, str], start_date: Optional[dt.date] = None, end_date: Optional[dt.date] = None, @@ -94,7 +94,7 @@ def _crop_datelist( only cropped or returned as is. Args: - eclsumsdates: list of datetimes, typically coming from Summary.dates + summarydates: list of datetimes, typically coming from Summary.dates freq: Either a date or datetime, or a frequency string "raw", "first" or "last". start_date: Dates prior to this date will be cropped. 
@@ -105,7 +105,7 @@ def _crop_datelist( """ datetimes: Union[List[dt.date], List[dt.datetime]] = [] # type: ignore if freq == FREQ_RAW: - datetimes = eclsumsdates + datetimes = summarydates datetimes.sort() if start_date: # Convert to datetime (at 00:00:00) @@ -117,9 +117,9 @@ def _crop_datelist( datetimes = [x for x in datetimes if x < end_date] datetimes = datetimes + [end_date] elif freq == FREQ_FIRST: - datetimes = [min(eclsumsdates).date()] + datetimes = [min(summarydates).date()] elif freq == FREQ_LAST: - datetimes = [max(eclsumsdates).date()] + datetimes = [max(summarydates).date()] elif isinstance(freq, (dt.date, dt.datetime)): datetimes = [freq] return datetimes @@ -193,7 +193,7 @@ def _fallback_date_range(start: dt.date, end: dt.date, freq: str) -> List[dt.dat def resample_smry_dates( - eclsumsdates: List[dt.datetime], + summarydates: List[dt.datetime], freq: str = FREQ_RAW, normalize: bool = True, start_date: Optional[Union[str, dt.date]] = None, @@ -206,7 +206,7 @@ def resample_smry_dates( can be returned, on the same date range. Incoming dates can also be cropped. Args: - eclsumsdates: list of datetimes, typically coming from Summary.dates + summarydates: list of datetimes, typically coming from Summary.dates freq: string denoting requested frequency for the returned list of datetime. 'raw' will return the input datetimes (no resampling). @@ -233,7 +233,7 @@ def resample_smry_dates( if freq in [FREQ_RAW, FREQ_FIRST, FREQ_LAST] or isinstance( freq, (dt.date, dt.datetime) ): - return _crop_datelist(eclsumsdates, freq, start_date, end_date) + return _crop_datelist(summarydates, freq, start_date, end_date) # In case freq is an ISO-date(time)-string, interpret as such: try: @@ -244,8 +244,8 @@ def resample_smry_dates( pass # These are datetime.datetime, not datetime.date - start_smry = min(eclsumsdates) - end_smry = max(eclsumsdates) + start_smry = min(summarydates) + end_smry = max(summarydates) # Normalize start and end date according to frequency by extending the time range. 
# [1997-11-05, 2020-03-02] and monthly frequecy @@ -355,10 +355,10 @@ def df( column_keys = [column_keys] if isinstance(resdatafiles, Summary): - eclsum = resdatafiles + summary = resdatafiles else: try: - eclsum = resdatafiles.get_eclsum(include_restart=include_restart) + summary = resdatafiles.get_summary(include_restart=include_restart) except OSError: logger.warning("Error reading summary instance, returning empty dataframe") return pd.DataFrame() @@ -366,7 +366,7 @@ def df( time_index_arg: Optional[Union[List[dt.date], List[dt.datetime]]] if isinstance(time_index, str) and time_index == "raw": time_index_arg = resample_smry_dates( - eclsum.dates, + summary.dates, "raw", False, start_date, @@ -374,7 +374,7 @@ def df( ) elif isinstance(time_index, str): time_index_arg = resample_smry_dates( - eclsum.dates, + summary.dates, time_index, True, start_date, @@ -402,8 +402,7 @@ def df( time_index_str or "raw", ) - # dframe = eclsum.pandas_frame(time_index_arg, column_keys) - dframe = _libecl_eclsum_pandas_frame(eclsum, time_index_arg, column_keys) + dframe = _summary_pandas_frame(summary, time_index_arg, column_keys) logger.info( "Dataframe with smry data ready, %d columns and %d rows", @@ -415,7 +414,7 @@ def df( dframe = _merge_params(dframe, paramfile, resdatafiles) # Add metadata as an attribute the dataframe, using experimental Pandas features: - meta = smry_meta(eclsum) + meta = smry_meta(summary) # Slice meta to dataframe columns: dframe.attrs["meta"] = { column_key: meta[column_key] for column_key in dframe if column_key in meta @@ -592,20 +591,20 @@ def smry_meta(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]: * wgname (str or None) """ if isinstance(resdatafiles, Summary): - eclsum = resdatafiles + summary = resdatafiles else: - eclsum = resdatafiles.get_eclsum() + summary = resdatafiles.get_summary() meta: Dict[str, Dict[str, Any]] = {} - for col in eclsum.keys(): + for col in summary.keys(): meta[col] = {} - meta[col]["unit"] = eclsum.unit(col) - meta[col]["is_total"] = eclsum.is_total(col) - meta[col]["is_rate"] = eclsum.is_rate(col) - meta[col]["is_historical"] = eclsum.smspec_node(col).is_historical() - meta[col]["keyword"] = eclsum.smspec_node(col).keyword - meta[col]["wgname"] = eclsum.smspec_node(col).wgname - num = eclsum.smspec_node(col).get_num() + meta[col]["unit"] = summary.unit(col) + meta[col]["is_total"] = summary.is_total(col) + meta[col]["is_rate"] = summary.is_rate(col) + meta[col]["is_historical"] = summary.smspec_node(col).is_historical() + meta[col]["keyword"] = summary.smspec_node(col).keyword + meta[col]["wgname"] = summary.smspec_node(col).wgname + num = summary.smspec_node(col).get_num() if num is not None: meta[col]["get_num"] = num return meta @@ -701,40 +700,40 @@ def df2ressum( raise ValueError(f"Do not use dots in casename {casename}") dframe = _fix_dframe_for_libecl(dframe) - return _libecl_eclsum_from_pandas(casename, dframe) + return resdata_summary_from_pandas(casename, dframe) # return Summary.from_pandas(casename, dframe) -def _libecl_eclsum_pandas_frame( - eclsum: Summary, +def _summary_pandas_frame( + summary: Summary, time_index: Optional[Union[List[dt.date], List[dt.datetime]]] = None, column_keys: Optional[List[str]] = None, ) -> pd.DataFrame: """Build a Pandas dataframe from an Summary object. 
- Temporarily copied from libecl to circumvent bug + Temporarily copied from resdata to circumvent bug - https://github.com/equinor/ecl/issues/802 + https://github.com/equinor/resdata/issues/802 """ if column_keys is None: - keywords = SummaryKeyWordVector(eclsum, add_keywords=True) + keywords = SummaryKeyWordVector(summary, add_keywords=True) else: - keywords = SummaryKeyWordVector(eclsum) + keywords = SummaryKeyWordVector(summary) for key in column_keys: keywords.add_keywords(key) # pylint: disable=protected-access if time_index is None: - time_index = eclsum.dates # Changed from libecl + time_index = summary.dates # Changed from libecl data = np.zeros([len(time_index), len(keywords)]) Summary._init_pandas_frame( - eclsum, keywords, data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) + summary, keywords, data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) ) else: - time_points = eclsum._make_time_vector(time_index) + time_points = summary._make_time_vector(time_index) data = np.zeros([len(time_points), len(keywords)]) Summary._init_pandas_frame_interp( - eclsum, + summary, keywords, time_points, data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)), @@ -754,7 +753,7 @@ def _libecl_eclsum_pandas_frame( return frame -def _libecl_eclsum_from_pandas( +def resdata_summary_from_pandas( case: str, frame: pd.DataFrame, dims: Optional[List[int]] = None, @@ -781,18 +780,20 @@ def _libecl_eclsum_from_pandas( header_list = Summary._compile_headers_list(headers, dims) if dims is None: dims = [1, 1, 1] - ecl_sum = Summary.writer(case, start_time, dims[0], dims[1], dims[2]) + the_summary = Summary.writer(case, start_time, dims[0], dims[1], dims[2]) for keyword, wgname, num, unit in header_list: var_list.append( - ecl_sum.add_variable(keyword, wgname=wgname, num=num, unit=unit).getKey1() + the_summary.add_variable( + keyword, wgname=wgname, num=num, unit=unit + ).getKey1() ) for idx, time in enumerate(frame.index): days = (time - start_time).days - t_step = ecl_sum.add_t_step(idx + 1, days) + t_step = the_summary.add_t_step(idx + 1, days) for var in var_list: t_step[var] = frame.iloc[idx][var] - return ecl_sum + return the_summary def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: @@ -882,7 +883,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Fill a parser for the operation: dataframe -> eclsum files""" + """Fill a parser for the operation: dataframe -> summary files""" parser.add_argument( "-o", @@ -938,10 +939,10 @@ def summary_reverse_main(args) -> None: # Summary.fwrite() can only write to current directory: cwd = os.getcwd() - eclsum = df2ressum(summary_df, eclbase) + summary = df2ressum(summary_df, eclbase) try: os.chdir(outputdir) - Summary.fwrite(eclsum) + Summary.fwrite(summary) finally: os.chdir(cwd) diff --git a/tests/test_eclfiles.py b/tests/test_eclfiles.py index d9f219e69..bcbe9b147 100644 --- a/tests/test_eclfiles.py +++ b/tests/test_eclfiles.py @@ -56,7 +56,7 @@ def test_filedescriptors(): assert len(list(fd_dir.glob("*"))) == pre_fd_count assert resdatafiles._rstfile is None - resdatafiles.get_eclsum() + resdatafiles.get_summary() assert len(list(fd_dir.glob("*"))) == pre_fd_count + 1 resdatafiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count diff --git a/tests/test_summary.py b/tests/test_summary.py index 80a0269f9..37e7c256e 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -411,9 +411,9 @@ def 
test_foreseeable_future(tmp_path):
{"DATE": "2500-01-01", "FPR": 180},
]
)
- eclsum = df2ressum(src_dframe, casename="PLUGABANDON")
+ res_summary = df2ressum(src_dframe, casename="PLUGABANDON")
- dframe = summary.df(eclsum)
+ dframe = summary.df(res_summary)
assert (
dframe.index
== [
@@ -426,7 +426,7 @@ def test_foreseeable_future(tmp_path):
).all()
# Try with time interpolation involved:
- dframe = summary.df(eclsum, time_index="yearly")
+ dframe = summary.df(res_summary, time_index="yearly")
assert len(dframe) == 501
assert dframe.index.max() == datetime.date(year=2500, month=1, day=1)
@@ -437,8 +437,8 @@
"FPR": range(70),
}
)
- eclsum = df2ressum(src_dframe, casename="PLUGABANDON")
- dframe = summary.df(eclsum)
+ res_summary = df2ressum(src_dframe, casename="PLUGABANDON")
+ dframe = summary.df(res_summary)
# Still buggy:
assert dframe.index[-1] == dt(2068, 12, 31, 23, 57, 52)
@@ -449,8 +449,8 @@
"FPR": range(69),
}
)
- eclsum = df2ressum(src_dframe, casename="PLUGABANDON")
- dframe = summary.df(eclsum)
+ res_summary = df2ressum(src_dframe, casename="PLUGABANDON")
+ dframe = summary.df(res_summary)
# Works fine when stepping only 68 years:
assert dframe.index[-1] == dt(2468, 1, 1, 0, 0, 0)
@@ -635,7 +635,7 @@ def test_resample_smry_dates():
resdatafiles = ResdataFiles(REEK)
- ecldates = resdatafiles.get_eclsum().dates
+ ecldates = resdatafiles.get_summary().dates
assert isinstance(resample_smry_dates(ecldates), list)
assert isinstance(resample_smry_dates(ecldates, freq="last"), list)
@@ -812,7 +812,7 @@ def test_unique_datetime_retain_index_name(filepath):
def test_smry_meta():
- """Test obtaining metadata dictionary for summary vectors from an EclSum object"""
+ """Test obtaining metadata dictionary for summary vectors from a summary object"""
meta = smry_meta(ResdataFiles(REEK))
assert isinstance(meta, dict)
@@ -852,7 +852,7 @@ def test_smry_meta_synthetic():
).set_index("DATE")
synt_meta = smry_meta(df2ressum(dframe))
- # Dummy unit provided by EclSum:
+ # Dummy unit provided by summary:
assert synt_meta["FOPT"]["unit"] == "UNIT"
@@ -1020,18 +1020,18 @@ def test_fix_dframe_for_libecl(dframe, expected_dframe):
],
)
def test_df2ressum(dframe):
- """Test that a dataframe can be converted to an EclSum object, and then read
+ """Test that a dataframe can be converted to a summary object, and then read
back again"""
# Massage the dframe first so we can assert on equivalence after.
dframe = _fix_dframe_for_libecl(dframe) - eclsum = df2ressum(dframe) + summary = df2ressum(dframe) if dframe.empty: - assert eclsum is None + assert summary is None return - dframe_roundtrip = df(eclsum) + dframe_roundtrip = df(summary) pd.testing.assert_frame_equal( dframe.sort_index(axis=1), dframe_roundtrip.sort_index(axis=1), @@ -1056,7 +1056,7 @@ def test_df2ressum_datetimeindex(): def test_duplicated_summary_vectors(caplog): - """EclSum files on disk may contain repeated vectors + """summary files on disk may contain repeated vectors if the user has inserted a vector name twice in the SUMMARY section @@ -1176,7 +1176,7 @@ def test_res2df_errors(tmp_path): Path("FOO.DATA").write_text("RUNSPEC", encoding="utf8") assert str(ResdataFiles("FOO").get_ecldeck()).strip() == "RUNSPEC" with pytest.raises(OSError): - ResdataFiles("FOO").get_eclsum() + ResdataFiles("FOO").get_summary() # Getting a dataframe from bogus data should give empty data: assert df(ResdataFiles("FOO")).empty From 34a274f7666da44f491149e69a2e595c80e4ab8d Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Wed, 15 Nov 2023 12:23:44 +0100 Subject: [PATCH 15/68] ecl_unit_system->unit_system --- docs/usage/equil.rst | 2 +- res2df/wellcompletiondata.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index 53589e2c4..5a8064d8c 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -2,7 +2,7 @@ equil ----- This is the res2df module for processing the ``SOLUTION`` section of -the Eclipse input deck. +the input deck. Supported keywords are ``EQUIL``, ``RSVD``, ``RVVD``, ``PBVD`` and ``PDVD``. Typical usage is diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index d6678e024..25d4647e3 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -90,8 +90,8 @@ def df( return compdat_df -def _get_ecl_unit_system(resdatafiles: ResdataFiles) -> EclipseUnitSystem: - """Returns the unit system of an Eclipse input deck. The options are \ +def _get_unit_system(resdatafiles: ResdataFiles) -> EclipseUnitSystem: + """Returns the unit system of an input deck. The options are \ METRIC, FIELD, LAB and PVT-M. If none of these are found, the function returns METRIC which is the @@ -107,7 +107,7 @@ def _get_ecl_unit_system(resdatafiles: ResdataFiles) -> EclipseUnitSystem: def _get_metadata(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]: """Provide metadata for the well completion data export""" meta: Dict[str, Dict[str, str]] = {} - unitsystem = _get_ecl_unit_system(resdatafiles) + unitsystem = _get_unit_system(resdatafiles) kh_units = { EclipseUnitSystem.METRIC: KHUnit.METRIC, EclipseUnitSystem.FIELD: KHUnit.FIELD, From d2f9862108ba5a63668ea5c55c3732c8146d7d37 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Wed, 15 Nov 2023 12:51:00 +0100 Subject: [PATCH 16/68] Reservoir files -> Reservoir simulator input decks --- README.md | 2 +- docs/csv2res.rst | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 7658f2e30..5cff34d76 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ res2df is a Pandas DataFrame wrapper around libecl and opm.io, which are used to access binary files outputted by reservoir simulators, or its input files --- or any other tool outputting to the same data format. 
-The reverse operation, from a Pandas DataFrame to reservoir include files,
+The reverse operation, from a Pandas DataFrame to reservoir simulator include files,
 is provided for some of the modules.
 
 The package consists of a module pr. datatype, e.g. one module for summary
diff --git a/docs/csv2res.rst b/docs/csv2res.rst
index 5207373db..4f36d5acb 100644
--- a/docs/csv2res.rst
+++ b/docs/csv2res.rst
@@ -1,10 +1,10 @@
 csv2res
 =======
 
-Some of the modules inside res2df is able to write reservoir files
+Some of the modules inside res2df are able to write reservoir simulator input decks
 from dataframes (in the format dumped by res2df). This makes it possible
-to produce reservoir input data in any application that can write CSV files,
-and use this tool to convert it into reservoir files, or it can
+to produce reservoir input decks in any application that can write CSV files,
+and use this tool to convert them into reservoir simulator files, or it can
 facilitate operations/manipulations of an existing deck using any tool
 that can work on CSV files, by first running res2csv on an input file,
 transforming it, and writing back using csv2res.

From a87291ecf29581c37ac79dae727f6d9f5dc6aaf3 Mon Sep 17 00:00:00 2001
From: "Yngve S. Kristiansen"
Date: Wed, 15 Nov 2023 13:03:48 +0100
Subject: [PATCH 17/68] ecl_str -> deck_str

---
 res2df/vfp/_vfpcommon.py | 12 +++---
 res2df/vfp/_vfpinj.py    | 70 ++++++++++++++++----------------
 res2df/vfp/_vfpprod.py   | 86 ++++++++++++++++++++--------------------
 3 files changed, 84 insertions(+), 84 deletions(-)

diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py
index a26f8faeb..a03bff959 100755
--- a/res2df/vfp/_vfpcommon.py
+++ b/res2df/vfp/_vfpcommon.py
@@ -194,12 +194,12 @@ def _write_vfp_range(
     if var_type != "UNDEFINED":
         var_type_str = var_type
 
-    ecl_str = f"-- {var_type_str} units - {unit_type} ( {len(values)} values )\n"
+    deck_str = f"-- {var_type_str} units - {unit_type} ( {len(values)} values )\n"
     for i, value in enumerate(values):
-        ecl_str += format % value
+        deck_str += format % value
         if (i + 1) % values_per_line == 0 and i < len(values) - 1:
-            ecl_str += "\n"
-    ecl_str += " /\n"
-    ecl_str += "\n"
+            deck_str += "\n"
+    deck_str += " /\n"
+    deck_str += "\n"
 
-    return ecl_str
+    return deck_str
diff --git a/res2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py
index 7771e5bc2..4c4d83b39 100755
--- a/res2df/vfp/_vfpinj.py
+++ b/res2df/vfp/_vfpinj.py
@@ -564,15 +564,15 @@ def _write_basic_record(
     if unit_type != "DEFAULT":
         unit_type_str = unit_type
 
-    ecl_str = "-- Table Datum Depth Rate Type THP Type UNITS TAB Type\n"
-    ecl_str += "-- ----- ----------- --------- -------- -------- --------\n"
-    ecl_str += f" {tableno:5d}"
-    ecl_str += f" {datum:11.1f}"
-    ecl_str += f" {flo_type:>9s}"
-    ecl_str += f" {pressure_type:>8s}"
-    ecl_str += f" {unit_type_str:>8s}"
-    ecl_str += f" {tab_type:>8s} /\n\n"
-    return ecl_str
+    deck_str = "-- Table Datum Depth Rate Type THP Type UNITS TAB Type\n"
+    deck_str += "-- ----- ----------- --------- -------- -------- --------\n"
+    deck_str += f" {tableno:5d}"
+    deck_str += f" {datum:11.1f}"
+    deck_str += f" {flo_type:>9s}"
+    deck_str += f" {pressure_type:>8s}"
+    deck_str += f" {unit_type_str:>8s}"
+    deck_str += f" {tab_type:>8s} /\n\n"
+    return deck_str
 
 
 def _write_table(
@@ -589,23 +589,23 @@ def _write_table(
     values_per_line: Number of values per line in output
     """
 
-    ecl_str = ""
+    deck_str = ""
     for idx, row in table.iterrows():
-        ecl_str += f"{idx:2d}"
+        deck_str += f"{idx:2d}"
         no_flo = len(table.loc[idx].to_list())
         for n, value in
enumerate(table.loc[idx].to_list()): - ecl_str += format % value + deck_str += format % value if (n + 1) % values_per_line == 0: if n < no_flo - 1: - ecl_str += "\n" - ecl_str += " " * 2 + deck_str += "\n" + deck_str += " " * 2 else: - ecl_str += "\n" + deck_str += "\n" elif n == no_flo - 1: - ecl_str += "\n" - ecl_str += "/\n" + deck_str += "\n" + deck_str += "/\n" - return ecl_str + return deck_str def _write_table_records( @@ -624,7 +624,7 @@ def _write_table_records( values_per_line: Number of values per line in output """ - ecl_str = "" + deck_str = "" no_records = len(thp_indices) no_flow_values = table.size // no_records if table.size % no_records > 0: @@ -634,21 +634,21 @@ def _write_table_records( for row in range(0, no_records): thp = thp_indices[row] - ecl_str += f"{thp:2d}" + deck_str += f"{thp:2d}" for n, value in enumerate(table[row, :]): - ecl_str += format % value + deck_str += format % value if (n + 1) % values_per_line == 0: if n < no_flow_values - 1: - ecl_str += "\n" - ecl_str += " " * 2 + deck_str += "\n" + deck_str += " " * 2 else: - ecl_str += "\n" + deck_str += "\n" elif n == no_flow_values - 1: - ecl_str += "\n" + deck_str += "\n" - ecl_str += "/\n" + deck_str += "/\n" - return ecl_str + return deck_str def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: @@ -670,16 +670,16 @@ def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: unit_type = vfpinj_data["UNIT_TYPE"] # Write dataframe to string with Eclipse format for VFPINJ - ecl_str = "VFPINJ\n" + deck_str = "VFPINJ\n" if comment: - ecl_str += common.comment_formatter(comment) + deck_str += common.comment_formatter(comment) else: - ecl_str += "\n" + deck_str += "\n" unit_value = vfpinj_data["UNIT_TYPE"].value if vfpinj_data["UNIT_TYPE"] == UNITTYPE.DEFAULT: unit_value = "1*" - ecl_str += _write_basic_record( + deck_str += _write_basic_record( vfpinj_data["TABLE_NUMBER"], vfpinj_data["DATUM"], vfpinj_data["RATE_TYPE"].value, @@ -687,22 +687,22 @@ def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: unit_value, vfpinj_data["TAB_TYPE"].value, ) - ecl_str += _write_vfp_range( + deck_str += _write_vfp_range( vfpinj_data["FLOW_VALUES"], rate_type.value, VFPINJ_UNITS[unit_type.value]["FLO"][rate_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + deck_str += _write_vfp_range( vfpinj_data["THP_VALUES"], thp_type.value, VFPINJ_UNITS[unit_type.value]["THP"][thp_type.value], "%10.6g", ) - ecl_str += _write_table_records( + deck_str += _write_table_records( vfpinj_data["THP_INDICES"], vfpinj_data["BHP_TABLE"], "%10.6g", ) - return ecl_str + return deck_str diff --git a/res2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py index 0a0d62393..bc55e65ae 100755 --- a/res2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -834,20 +834,20 @@ def _write_basic_record( if alq_type != "UNDEFINED": alq_type_str = alq_type - ecl_str = "-- Table Datum Depth Rate Type WFR Type " - ecl_str += "GFR Type THP Type ALQ Type UNITS TAB Type\n" - ecl_str += "-- ----- ----------- --------- -------- " - ecl_str += "-------- -------- -------- ------ --------\n" - ecl_str += f" {tableno:5d}" - ecl_str += f" {datum:11.1f}" - ecl_str += f" {flo_type:>8s}" - ecl_str += f" {wfr_type:>8s}" - ecl_str += f" {gfr_type:>8s}" - ecl_str += f" {pressure_type:>8s}" - ecl_str += f" {alq_type_str:>8s}" - ecl_str += f" {unit_type:>6s}" - ecl_str += f" {tab_type:>8s} /\n\n" - return ecl_str + deck_str = "-- Table Datum Depth Rate Type WFR Type " + deck_str += "GFR Type THP Type ALQ Type UNITS TAB Type\n" + deck_str 
+= "-- ----- ----------- --------- -------- " + deck_str += "-------- -------- -------- ------ --------\n" + deck_str += f" {tableno:5d}" + deck_str += f" {datum:11.1f}" + deck_str += f" {flo_type:>8s}" + deck_str += f" {wfr_type:>8s}" + deck_str += f" {gfr_type:>8s}" + deck_str += f" {pressure_type:>8s}" + deck_str += f" {alq_type_str:>8s}" + deck_str += f" {unit_type:>6s}" + deck_str += f" {tab_type:>8s} /\n\n" + return deck_str def _write_table( @@ -864,23 +864,23 @@ def _write_table( values_per_line: Number of values per line in output """ - ecl_str = "" + deck_str = "" for idx, row in table.iterrows(): - ecl_str += f"{idx[0]:2d} {idx[1]:2d} {idx[2]:2d} {idx[3]:2d}" + deck_str += f"{idx[0]:2d} {idx[1]:2d} {idx[2]:2d} {idx[3]:2d}" no_flo = len(table.loc[idx].to_list()) for n, value in enumerate(table.loc[idx].to_list()): - ecl_str += format % value + deck_str += format % value if (n + 1) % values_per_line == 0: if n < no_flo - 1: - ecl_str += "\n" - ecl_str += " " * 11 + deck_str += "\n" + deck_str += " " * 11 else: - ecl_str += "\n" + deck_str += "\n" elif n == no_flo - 1: - ecl_str += "\n" - ecl_str += "/\n" + deck_str += "\n" + deck_str += "/\n" - return ecl_str + return deck_str def _write_table_records( @@ -905,7 +905,7 @@ def _write_table_records( values_per_line: Number of values per line in output """ - ecl_str = "" + deck_str = "" no_records = len(thp_indices) no_flow_values = table.size // no_records if table.size % no_records > 0: @@ -918,21 +918,21 @@ def _write_table_records( wfr = wfr_indices[row] gfr = gfr_indices[row] alq = alq_indices[row] - ecl_str += f"{thp:2d} {wfr:2d} {gfr:2d} {alq:2d}" + deck_str += f"{thp:2d} {wfr:2d} {gfr:2d} {alq:2d}" for n, value in enumerate(table[row, :]): - ecl_str += format % value + deck_str += format % value if (n + 1) % values_per_line == 0: if n < no_flow_values - 1: - ecl_str += "\n" - ecl_str += " " * 11 + deck_str += "\n" + deck_str += " " * 11 else: - ecl_str += "\n" + deck_str += "\n" elif n == no_flow_values - 1: - ecl_str += "\n" + deck_str += "\n" - ecl_str += "/\n" + deck_str += "/\n" - return ecl_str + return deck_str def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: @@ -957,16 +957,16 @@ def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: unit_type = vfpprod_data["UNIT_TYPE"] # Write dataframe to string with Eclipse format for VFPPROD - ecl_str = "VFPPROD\n" + deck_str = "VFPPROD\n" if comment: - ecl_str += common.comment_formatter(comment) + deck_str += common.comment_formatter(comment) else: - ecl_str += "\n" + deck_str += "\n" unit_value = vfpprod_data["UNIT_TYPE"].value if vfpprod_data["UNIT_TYPE"] == UNITTYPE.DEFAULT: unit_value = "1*" - ecl_str += _write_basic_record( + deck_str += _write_basic_record( vfpprod_data["TABLE_NUMBER"], vfpprod_data["DATUM"], vfpprod_data["RATE_TYPE"].value, @@ -977,37 +977,37 @@ def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: unit_value, vfpprod_data["TAB_TYPE"].value, ) - ecl_str += _write_vfp_range( + deck_str += _write_vfp_range( vfpprod_data["FLOW_VALUES"], rate_type.value, VFPPROD_UNITS[unit_type.value]["FLO"][rate_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + deck_str += _write_vfp_range( vfpprod_data["THP_VALUES"], thp_type.value, VFPPROD_UNITS[unit_type.value]["THP"][thp_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + deck_str += _write_vfp_range( vfpprod_data["WFR_VALUES"], wfr_type.value, VFPPROD_UNITS[unit_type.value]["WFR"][wfr_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + 
deck_str += _write_vfp_range( vfpprod_data["GFR_VALUES"], gfr_type.value, VFPPROD_UNITS[unit_type.value]["GFR"][gfr_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + deck_str += _write_vfp_range( vfpprod_data["ALQ_VALUES"], alq_type.value, VFPPROD_UNITS[unit_type.value]["ALQ"][alq_type.value], "%10.6g", ) - ecl_str += _write_table_records( + deck_str += _write_table_records( vfpprod_data["THP_INDICES"], vfpprod_data["WFR_INDICES"], vfpprod_data["GFR_INDICES"], @@ -1016,4 +1016,4 @@ def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: "%10.6g", ) - return ecl_str + return deck_str From 74397c3137945782d251268ad967b0066adc8e80 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Wed, 15 Nov 2023 13:12:31 +0100 Subject: [PATCH 18/68] Update glossary --- docs/glossary.rst | 17 +++++++++++++++++ docs/index.rst | 5 +++-- docs/introduction.rst | 2 +- 3 files changed, 21 insertions(+), 3 deletions(-) create mode 100644 docs/glossary.rst diff --git a/docs/glossary.rst b/docs/glossary.rst new file mode 100644 index 000000000..a2a784481 --- /dev/null +++ b/docs/glossary.rst @@ -0,0 +1,17 @@ +Glossary +======== + +.. glossary:: + + deck/input deck + Inputs provided to reservoir simulators such as Eclipse or OPM-flow. + Usually a .DATA file pointing to other include files. One deck points + to multiple include files. + + include files + Files that provide inputs to reservoir simulators by using the INCLUDE statement + in input decks. By convention, these files often have the extension .INC (generally) + or .GRDECL (for files included into the grid section). + + reservoir simulator + Reservoir simulators such as OPM-flow or Eclipse. diff --git a/docs/index.rst b/docs/index.rst index c80403a24..a3d36622c 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -2,8 +2,8 @@ res2df ====== res2df is a Pandas DataFrame wrapper around libecl and opm.io, which -are used to access binary files outputted by the reservoir simulator -Eclipse, or its input files --- or any other tool outputting to the same +are used to access binary files outputted by the reservoir simulators or +their input files --- or any other tool outputting to the same data format. .. toctree:: @@ -16,6 +16,7 @@ data format. installation contribution history + glossary .. toctree:: :hidden: diff --git a/docs/introduction.rst b/docs/introduction.rst index 4ca2998f4..6367af83c 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -119,7 +119,7 @@ More documentation on :doc:`usage/fipreports`. ^^^^^^^^^^^ Extracts saturation functions (SWOF, SGOF, etc) from the deck and merges -into one DataFrame. Can write back to Eclipse include files. +into one DataFrame. Can write back to include files. More documentation on :doc:`usage/satfunc`. From a8e1f594df410200706b5408f9bd098374592e4c Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Wed, 15 Nov 2023 13:22:35 +0100 Subject: [PATCH 19/68] Eclipse PRT -> PRT --- docs/usage/fipreports.rst | 2 +- res2df/fipreports.py | 4 ++-- res2df/res2csv.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/usage/fipreports.rst b/docs/usage/fipreports.rst index 2e06dc1ec..afe2fee2c 100644 --- a/docs/usage/fipreports.rst +++ b/docs/usage/fipreports.rst @@ -1,7 +1,7 @@ fipreports ---------- -fipreports is a parser for the Eclipse PRT output file, extracting data +fipreports is a parser for the PRT output file, extracting data from these tables: .. 
literalinclude:: fipreports-example.txt
diff --git a/res2df/fipreports.py b/res2df/fipreports.py
index 7274a05f3..dd2740461 100644
--- a/res2df/fipreports.py
+++ b/res2df/fipreports.py
@@ -1,5 +1,5 @@
 # pylint: disable=c0301
-"""Extract FIP region reports from Eclipse PRT file"""
+"""Extract FIP region reports from a PRT file"""
 
 import argparse
 import datetime
@@ -194,7 +194,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra
 
 def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser:
     """Fill parser with command line arguments"""
-    parser.add_argument("PRTFILE", type=str, help="Eclipse PRT file (or DATA file)")
+    parser.add_argument("PRTFILE", type=str, help="PRT file (or DATA file)")
     parser.add_argument(
         "--fipname",
         type=str,
diff --git a/res2df/res2csv.py b/res2df/res2csv.py
index 3d7bd1f86..1c83e6dee 100644
--- a/res2df/res2csv.py
+++ b/res2df/res2csv.py
@@ -157,7 +157,7 @@ def get_parser() -> argparse.ArgumentParser:
     )
     subparsers_dict["fipreports"] = subparsers.add_parser(
         "fipreports",
-        help=("Extract FIPxxxxx REPORT REGION data from Eclipse PRT output file."),
+        help=("Extract FIPxxxxx REPORT REGION data from PRT output file."),
         formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         description=(
             "Extract FIPxxxxx REPORT REGION data from PRT file. "
@@ -178,7 +178,7 @@ def get_parser() -> argparse.ArgumentParser:
     )
     subparsers_dict["fipreports"] = subparsers.add_parser(
         "fipreports",
-        help=("Extract FIPxxxxx REPORT REGION data from Eclipse PRT output file."),
+        help=("Extract FIPxxxxx REPORT REGION data from PRT output file."),
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
         description=(
             "Extract FIPxxxxx REPORT REGION data from PRT file. "

From 21512f7d3f82f553ba72ce6d5be6dec9545a958a Mon Sep 17 00:00:00 2001
From: "Yngve S. Kristiansen"
Date: Wed, 15 Nov 2023 13:35:46 +0100
Subject: [PATCH 20/68] Remove Eclipse from grid doc

---
 docs/usage/grid.rst      | 4 ++--
 res2df/vfp/_vfpcommon.py | 3 ++-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst
index 63e94ec6b..675f022ce 100644
--- a/docs/usage/grid.rst
+++ b/docs/usage/grid.rst
@@ -2,7 +2,7 @@ grid
 ----
 
 The grid module will extract static and dynamic cell properties from
-an Eclipse grid (from the binary output files from Eclipse). Each row
+a grid (from the binary output files from reservoir simulators). Each row
 in a returned dataframe represents one cell.
 
 Typical usage
@@ -148,7 +148,7 @@ Generating include files from grid data
 If you have loaded grid data into a Pandas frame, some operations are easily
 performed, scaling porosity, permeability etc. Or remapping some region parameters.
 Using the :func:`res2df.grid.df2res()` function these manipulated vectors can be written back as
-include files to Eclipse.
+include files.
 
 Say you want to change the FIPNUM, and that FIPNUM 6 should be removed, and set
 it to FIPNUM 5. This can be accomplished using
diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py
index a03bff959..af9feaedc 100755
--- a/res2df/vfp/_vfpcommon.py
+++ b/res2df/vfp/_vfpcommon.py
@@ -1,4 +1,5 @@
-"""Common functionality for vfp module to extract VFPPROD/VFPINJ data from input deck to extract the VFPPROD/VFPINJ data from an Eclipse (input) deck as Pandas
+"""Common functionality for the vfp module to extract VFPPROD/VFPINJ data
+from an Eclipse (input) deck as Pandas
 Dataframes
 
 Data can be extracted from a full input deck or from individual files. 
Supports From e9fa1be9d4d5956c56a0b12721d1727780cbad25 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Wed, 15 Nov 2023 13:49:10 +0100 Subject: [PATCH 21/68] Update some more .rst files --- docs/usage/nnc.rst | 2 +- docs/usage/pillars.rst | 4 ++-- docs/usage/satfunc.rst | 6 +++--- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index 472cd1700..5b587ff94 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -1,7 +1,7 @@ nnc --- -nnc will extract Non-Neighbour-Connections from your Eclipse grid as pairs +nnc will extract Non-Neighbour-Connections from your grid as pairs of *ijk* indices together with their associated transmissibilities. See also the :doc:`trans` module, which can extract all transmissibilities, not only diff --git a/docs/usage/pillars.rst b/docs/usage/pillars.rst index 33ed94e9d..7ec04f119 100644 --- a/docs/usage/pillars.rst +++ b/docs/usage/pillars.rst @@ -3,8 +3,8 @@ pillars ------- -pillars is a module to compute data on "pillars" in the grid from an -Eclipse simulation, including both static and dynamic data from the grid. +pillars is a module to compute data on "pillars" in the grid from a +simulation, including both static and dynamic data from the grid. Static data ^^^^^^^^^^^ diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index 295b91407..53dbaff00 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -33,7 +33,7 @@ Alternatively, the same data can be produced as a CSV file using the command lin It is possible to extract keywords one at a time using the ``--keywords`` command line option. -Instead of Eclipse data decks, individual include files may also be parsed, but +Instead of data decks, individual include files may also be parsed, but only one at a time. Generating include files from dataframes @@ -55,7 +55,7 @@ the command # Multiplicate these rows by 0.5 dframe.loc[rows_to_touch, "KRW"] *= 0.5 -For a dataframe or a CSV file in the format provided by this module, an Eclipse +For a dataframe or a CSV file in the format provided by this module, an include file can be generated either with the Python API :func:`res2df.satfunc.df2res` function or the command @@ -63,7 +63,7 @@ include file can be generated either with the Python API csv2res satfunc satfunc.csv --output relperm.inc --keywords SWOF SGOF --verbose -which should give a file ``relperm.inc`` that can be parsed by Eclipse. The command +which should give a file ``relperm.inc`` that can be parsed by reservoir simulators. The command above will only pick the keywords ``SWOF`` and ``SGOF`` (in the case there are data for more keywords in the dataframe). From 52f301096f63d95900f6133ff4720f3b79fd00a7 Mon Sep 17 00:00:00 2001 From: "Yngve S. 
Kristiansen"
Date: Wed, 15 Nov 2023 14:46:38 +0100
Subject: [PATCH 22/68] Remove/replace unnecessary eclipse occurrences

---
 docs/usage/rft.rst           |  2 +-
 docs/usage/trans.rst         |  2 +-
 res2df/csv2res.py            |  2 +-
 res2df/equil.py              |  2 +-
 res2df/fipreports.py         |  2 +-
 res2df/grid.py               |  8 ++++----
 res2df/inferdims.py          |  2 +-
 res2df/nnc.py                |  3 ++-
 res2df/parameters.py         |  4 ++--
 res2df/pillars.py            |  2 +-
 res2df/pvt.py                |  4 ++--
 res2df/res2csv.py            |  8 ++++----
 res2df/resdatafiles.py       |  4 ++--
 res2df/rft.py                |  2 +-
 res2df/satfunc.py            |  2 +-
 res2df/summary.py            |  4 ++--
 res2df/trans.py              |  6 +++---
 res2df/wellcompletiondata.py | 18 +++++++++---------
 tests/test_ert_hooks.py      |  2 +-
 19 files changed, 40 insertions(+), 39 deletions(-)

diff --git a/docs/usage/rft.rst b/docs/usage/rft.rst
index 03b1f3310..e77f90046 100644
--- a/docs/usage/rft.rst
+++ b/docs/usage/rft.rst
@@ -1,7 +1,7 @@
 rft
 ---
 
-rft will convert the binary RFT files from Eclipse to dataframes or CSV files,
+rft will convert the binary RFT files from reservoir simulators to dataframes or CSV files,
 facilitating analysis of inflow and pressure for each connection
 the well has to the reservoir grid.
 
diff --git a/docs/usage/trans.rst b/docs/usage/trans.rst
index 4d4377220..a0e069417 100644
--- a/docs/usage/trans.rst
+++ b/docs/usage/trans.rst
@@ -26,7 +26,7 @@ connections
 
 The last column ``DIR`` is the direction of the connection in i-j-k, space,
 and can take on the values ``I``, ``J``, and ``K``. The ``TRAN`` column has values from the
-``TRANX``, ``TRANY`` or ``TRANZ`` in the Eclipse output files.
+``TRANX``, ``TRANY`` or ``TRANZ`` in the simulator output files.
 
 You can obtain this dataframe as a CSV file by writing this command
 on the command line:
diff --git a/res2df/csv2res.py b/res2df/csv2res.py
index cdeec3199..1b88bae6b 100644
--- a/res2df/csv2res.py
+++ b/res2df/csv2res.py
@@ -52,7 +52,7 @@ def get_parser() -> argparse.ArgumentParser:
     summary_parser = subparsers.add_parser(
         "summary",
         help="Write summary UNSMRY files",
-        description=("Write Eclipse UNSMRY files from CSV files."),
+        description=("Write UNSMRY files from CSV files."),
     )
     summary.fill_reverse_parser(summary_parser)
     summary_parser.set_defaults(func=summary.summary_reverse_main)
diff --git a/res2df/equil.py b/res2df/equil.py
index 05e6f2eb4..909e8ee9f 100644
--- a/res2df/equil.py
+++ b/res2df/equil.py
@@ -342,7 +342,7 @@ def equil_main(args) -> None:
 
 
 def equil_reverse_main(args) -> None:
-    """Entry-point for module, for command line utility for CSV to Eclipse"""
+    """Entry-point for module, for command line utility for CSV to resdata"""
     logger = getLogger_res2csv(  # pylint: disable=redefined-outer-name
         __name__, vars(args)
     )
diff --git a/res2df/fipreports.py b/res2df/fipreports.py
index dd2740461..7521682a8 100644
--- a/res2df/fipreports.py
+++ b/res2df/fipreports.py
@@ -99,7 +99,7 @@ def float_or_nan(string: str) -> float:
 def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFrame:
     """
-    Parses a PRT file from Eclipse and finds FIPXXXX REGION REPORT blocks and
+    Parses a PRT file and finds FIPXXXX REGION REPORT blocks and
     organizes those numbers into a dataframe
 
     Each row in the dataframe represents one parsed line in the PRT file, with
diff --git a/res2df/grid.py b/res2df/grid.py
index e9e87d560..d70224fc1 100644
--- a/res2df/grid.py
+++ b/res2df/grid.py
@@ -1,6 +1,6 @@
 #!/usr/bin/env python
 """
-Extract grid information from Eclipse output files as Dataframes.
+Extract grid information from grid files as Dataframes.
 
 Each cell in the grid correspond to one row. 
@@ -282,7 +282,7 @@ def rst2df( def gridgeometry2df( resdatafiles: ResdataFiles, zonemap: Optional[Dict[int, str]] = None ) -> pd.DataFrame: - """Produce a Pandas Dataframe with Eclipse gridgeometry + """Produce a Pandas Dataframe with Eclipse grid geometry Order is significant, and is determined by the order from libecl, and used when merging with other dataframes with cell-data. @@ -369,7 +369,7 @@ def merge_initvectors( for API users to only use the df() function. Args: - resdatafiles: Object representing the Eclipse output files + resdatafiles: Object representing the output files dframe: Table data to merge with initvectors: Names of INIT vectors to merge in. ijknames: Three strings that determine the I, J and K columns to use @@ -487,7 +487,7 @@ def df( any time dependent data from Restart files. Args: - resdatafiles: Handle to an Eclipse case + resdatafiles: Handle to a simulator case vectors: Vectors to include, wildcards supported. Used to match both INIT vectors and RESTART vectors. diff --git a/res2df/inferdims.py b/res2df/inferdims.py index 87d975522..f6196b16e 100644 --- a/res2df/inferdims.py +++ b/res2df/inferdims.py @@ -1,6 +1,6 @@ """ Support module for inferring EQLDIMS and TABDIMS from incomplete -Eclipse 100 decks (typically single include-files) +reservoir simulator decks (typically single include-files) """ import logging diff --git a/res2df/nnc.py b/res2df/nnc.py index 015c28cf2..4137de606 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -1,5 +1,6 @@ """ -Extract non-neighbour connection (NNC) information from Eclipse output files. +Extract non-neighbour connection (NNC) information from reservoir +simulator output files. """ import argparse import datetime diff --git a/res2df/parameters.py b/res2df/parameters.py index 601d04e8c..e7b28d5ab 100644 --- a/res2df/parameters.py +++ b/res2df/parameters.py @@ -1,5 +1,5 @@ """Support module for extra files with key-value information -related to Eclipse runs""" +related to simulator runs""" import json import logging @@ -25,7 +25,7 @@ def find_parameter_files( Args: ecldeck_or_eclpath: Either an ResdataFiles object of - an Eclipse output set (only the corresponding path will be used), + a simulator output set (only the corresponding path will be used), or path to a file or directory, that will be used as a starting point for locating parameter files filebase: the base of filenames to look for. diff --git a/res2df/pillars.py b/res2df/pillars.py index 35f6805ce..08f6cfa80 100644 --- a/res2df/pillars.py +++ b/res2df/pillars.py @@ -339,7 +339,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "--region", help=( - "Name of Eclipse region parameter for which to separate the computations. " + "Name of region parameter for which to separate the computations. " "Set to empty string to have no grouping (only by pillar)." ), type=str, diff --git a/res2df/pvt.py b/res2df/pvt.py index 904299449..e33c1f481 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -1,5 +1,5 @@ """ -Extract the PVT data from an Eclipse (input) deck as Pandas Dataframes +Extract the PVT data from an input deck as Pandas Dataframes Data can be extracted from a full input deck or from individual files. 
""" @@ -313,7 +313,7 @@ def pvt_main(args) -> None: def pvt_reverse_main(args) -> None: - """Entry-point for module, for command line utility for CSV to Eclipse""" + """Entry-point for module, for command line utility for CSV to simulator deck""" logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) diff --git a/res2df/res2csv.py b/res2df/res2csv.py index 1c83e6dee..46e3c905f 100644 --- a/res2df/res2csv.py +++ b/res2df/res2csv.py @@ -12,7 +12,7 @@ from res2df import __version__ # String constants in use for generating ERT forward model documentation: -DESCRIPTION: str = """Convert Eclipse input and output files into CSV files, +DESCRIPTION: str = """Convert reservoir simulator input and output files into CSV files, with the command line utility ``res2csv``. Run ``res2csv --help`` to see which subcommands are supported. @@ -108,7 +108,7 @@ def get_parser() -> argparse.ArgumentParser: help="Extract transmissibilities from EGRID file", formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( - "Extract transmissibilities (TRANX, TRANY, TRANZ) from Eclipse " + "Extract transmissibilities (TRANX, TRANY, TRANZ) from simulator " "binary output files. Each row represent a connection between a cell pair " "(I1, J1, K1) and (I2, J2, K2). It is possible to add INIT vectors for " "each of the cell in the cell pair, e.g. FIPNUM can be added as FIPNUM1 " @@ -143,10 +143,10 @@ def get_parser() -> argparse.ArgumentParser: ) subparsers_dict["rft"] = subparsers.add_parser( "rft", - help=("Extract RFT data from Eclipse binary output files."), + help=("Extract RFT data from simulator binary output files."), formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=( - "Extract RFT data from Eclipse binary output files to CSV. " + "Extract RFT data from simulator binary output files to CSV. " "Each row in the resulting table represents one point in a " "particular well at a particular time. " "If multisegment wells are found, associated data " diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index ab65da265..ebd5eb62a 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -1,4 +1,4 @@ -"""Module to hold Eclipse input and output filenames""" +"""Module to hold simulator input and output filenames""" import errno import logging @@ -47,7 +47,7 @@ class ResdataFiles(object): ResdataFile/Summary objects is easy for users, and with caching if wanted. - Various functions that needs some of the Eclipse output + Various functions that needs some of the simulator output (or input file) should be able to ask this class, and it should be loaded or served from cache. 
""" diff --git a/res2df/rft.py b/res2df/rft.py index 3b8658b40..3979ef637 100644 --- a/res2df/rft.py +++ b/res2df/rft.py @@ -1,4 +1,4 @@ -"""Converter module for Eclipse RFT output files to Pandas Dataframes +"""Converter module for simulator RFT output files to Pandas Dataframes If MULTISEG wells are found, the segment data associated to a connection is merged onto the same row as additional columns, diff --git a/res2df/satfunc.py b/res2df/satfunc.py index 5fbbefb2c..461df6a75 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -222,7 +222,7 @@ def satfunc_main(args) -> None: def satfunc_reverse_main(args) -> None: - """For command line utility for CSV to Eclipse""" + """For command line utility for CSV to resdata""" logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) diff --git a/res2df/summary.py b/res2df/summary.py index afbc6cb63..4777db54d 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -687,7 +687,7 @@ def df2ressum( Args: dframe: Dataframe with a DATE colum (or with the dates/datetimes in the index). - casename: Name of Eclipse casename/basename to be used for the Summary object + casename: Name of casename/basename to be used for the Summary object If the Summary object is later written to disk, this will be used to construct the filenames. """ @@ -889,7 +889,7 @@ def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentPar "-o", "--output", type=str, - help="Basename for Eclipse output files", + help="Basename for output files", default="SYNTSMRY", ) parser.add_argument("csvfile", help="Name of CSV file with summary data.") diff --git a/res2df/trans.py b/res2df/trans.py index 457b5d836..873271dd2 100644 --- a/res2df/trans.py +++ b/res2df/trans.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -Extract transmissibility information from Eclipse output files as Dataframes. +Extract transmissibility information from output files as Dataframes. """ import argparse import logging @@ -57,8 +57,8 @@ def df( you will get a corresponding FIPNUM1 and FIPNUM2 added. Args: - resdatafiles: An object representing your Eclipse run - vectors: Eclipse INIT vectors that you want to include + resdatafiles: An object representing your simulator run + vectors: simulator INIT vectors that you want to include boundaryfilter: Set to true if you want to filter where one INIT vector change. Only use for integer INIT vectors. group: Set to true if you want to sum transmissibilities over diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index 25d4647e3..51a24d671 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -18,7 +18,7 @@ logger = logging.getLogger(__name__) -class EclipseUnitSystem(str, Enum): +class UnitSystem(str, Enum): METRIC = "METRIC" FIELD = "FIELD" LAB = "LAB" @@ -90,18 +90,18 @@ def df( return compdat_df -def _get_unit_system(resdatafiles: ResdataFiles) -> EclipseUnitSystem: +def _get_unit_system(resdatafiles: ResdataFiles) -> UnitSystem: """Returns the unit system of an input deck. The options are \ METRIC, FIELD, LAB and PVT-M. If none of these are found, the function returns METRIC which is the default unit system in Eclipse. 
""" - unit_systems = [unitsystem.value for unitsystem in EclipseUnitSystem] + unit_systems = [unitsystem.value for unitsystem in UnitSystem] for keyword in resdatafiles.get_ecldeck(): if keyword.name in unit_systems: - return EclipseUnitSystem(keyword.name) - return EclipseUnitSystem.METRIC + return UnitSystem(keyword.name) + return UnitSystem.METRIC def _get_metadata(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]: @@ -109,10 +109,10 @@ def _get_metadata(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]: meta: Dict[str, Dict[str, str]] = {} unitsystem = _get_unit_system(resdatafiles) kh_units = { - EclipseUnitSystem.METRIC: KHUnit.METRIC, - EclipseUnitSystem.FIELD: KHUnit.FIELD, - EclipseUnitSystem.LAB: KHUnit.LAB, - EclipseUnitSystem.PVTM: KHUnit.PVTM, + UnitSystem.METRIC: KHUnit.METRIC, + UnitSystem.FIELD: KHUnit.FIELD, + UnitSystem.LAB: KHUnit.LAB, + UnitSystem.PVTM: KHUnit.PVTM, } meta["KH"] = {} meta["KH"]["unit"] = kh_units[unitsystem].value diff --git a/tests/test_ert_hooks.py b/tests/test_ert_hooks.py index db2999e0b..391858315 100644 --- a/tests/test_ert_hooks.py +++ b/tests/test_ert_hooks.py @@ -138,7 +138,7 @@ def test_get_module_variable(): assert jobs._get_module_variable_if_exists("foo", "bar") == "" assert jobs._get_module_variable_if_exists( "res2df.res2csv", "DESCRIPTION" - ).startswith("Convert Eclipse input and output") + ).startswith("Convert reservoir simulator input and output") assert jobs._get_module_variable_if_exists("res2df.res2csv", "NOPE") == "" From 2325fe8ba5ca012ca670abc1671a5048910f5d85 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Wed, 15 Nov 2023 15:18:36 +0100 Subject: [PATCH 23/68] libecl->resdata --- README.md | 2 +- docs/contribution.rst | 2 +- docs/index.rst | 2 +- docs/introduction.rst | 2 +- res2df/grid.py | 8 ++++---- res2df/nnc.py | 2 +- res2df/resdatafiles.py | 2 +- res2df/summary.py | 12 ++++++------ tests/test_eclfiles.py | 2 +- tests/test_grid.py | 2 +- tests/test_summary.py | 12 ++++++------ 11 files changed, 24 insertions(+), 24 deletions(-) diff --git a/README.md b/README.md index 5cff34d76..216314737 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,7 @@ # res2df -res2df is a Pandas DataFrame wrapper around libecl and opm.io, which +res2df is a Pandas DataFrame wrapper around resdata and opm.io, which are used to access binary files outputted by reservoir simulators, or its input files --- or any other tool outputting to the same data format. diff --git a/docs/contribution.rst b/docs/contribution.rst index f6a4e8d4d..8d13419e2 100644 --- a/docs/contribution.rst +++ b/docs/contribution.rst @@ -77,7 +77,7 @@ Using res2df without OPM ------------------------ OPM is only pip-installable on Linux. To use the non-OPM dependent res2df -modules on something else than Linux (but with libecl installed), you should +modules on something else than Linux (but with resdata installed), you should install all the dependencies (except OPM) using ``pip`` (see ``setup.py`` for list of dependencies), and then install res2df with the ``--no-deps`` option to ``pip``. 
After this, the non-OPM dependent modules should work, and others will diff --git a/docs/index.rst b/docs/index.rst index a3d36622c..6f3af9138 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -1,7 +1,7 @@ res2df ====== -res2df is a Pandas DataFrame wrapper around libecl and opm.io, which +res2df is a Pandas DataFrame wrapper around resdata and opm.io, which are used to access binary files outputted by the reservoir simulators or their input files --- or any other tool outputting to the same data format. diff --git a/docs/introduction.rst b/docs/introduction.rst index 6367af83c..276f24742 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -2,7 +2,7 @@ Introduction ============ *res2df* is a `Pandas DataFrame `_ wrapper -around `libecl `_ and `opm.io +around `resdata `_ and `opm.io `_, which are used to access binary files outputted by reservoir simulators such as Eclipse, or its input files --- or any other tool outputting to the same data format, diff --git a/res2df/grid.py b/res2df/grid.py index d70224fc1..30d4f5727 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -257,7 +257,7 @@ def rst2df( if dateinheaders or len(rstindices) > 1 and not stackdates: rst_df.columns = [colname + "@" + datestr for colname in rst_df.columns] - # libecl emits a number around -1.0000000200408773e+20 which + # resdata emits a number around -1.0000000200408773e+20 which # should be considered Not-a-number rst_df = rst_df.where(rst_df > -1e20 + 1e13) # some trial and error @@ -284,7 +284,7 @@ def gridgeometry2df( ) -> pd.DataFrame: """Produce a Pandas Dataframe with Eclipse grid geometry - Order is significant, and is determined by the order from libecl, and used + Order is significant, and is determined by the order from resdata, and used when merging with other dataframes with cell-data. Args: @@ -447,7 +447,7 @@ def init2df( ] ), ) - # libecl emits a number around -1.0000000200408773e+20 which + # resdata emits a number around -1.0000000200408773e+20 which # should be considered Not-a-number init_df = init_df.where(init_df > -1e20 + 1e13) # some trial and error @@ -483,7 +483,7 @@ def df( Grid information (center coordinates x, y, z), cell indices (i, j, k) (indices follow the Eclipse convention starting - at 1, not zero as in libecl), properties from INIT, and optionally + at 1, not zero as in resdata), properties from INIT, and optionally any time dependent data from Restart files. Args: diff --git a/res2df/nnc.py b/res2df/nnc.py index 4137de606..8a9d1cbdf 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -54,7 +54,7 @@ def df( return pd.DataFrame() # Grid indices for first cell in cell pairs, into a vertical - # vector. The indices are "global" in libecl terms, and are + # vector. The indices are "global" in resdata terms, and are # 1-based (FORTRAN). Convert to zero-based before sending to get_ijk() nnc1 = egrid_file["NNC1"][0].numpy_view().reshape(-1, 1) logger.info( diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index ebd5eb62a..3ccda2fdc 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -143,7 +143,7 @@ def get_summary(self, include_restart: bool = True) -> Summary: return as Summary object Args: - include_restart: Sent to libecl for whether restart files + include_restart: Sent to resdata for whether restart files should be traversed. 
""" if not self._summary: diff --git a/res2df/summary.py b/res2df/summary.py index 4777db54d..69ba5797e 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -336,7 +336,7 @@ def df( Dates past this date will be dropped, supplied end_date will always be included. Overriden if time_index is 'last'. - include_restart: boolean sent to libecl for whether restart + include_restart: boolean sent to resdata for whether restart files should be traversed params: If set, parameters.txt will be attempted loaded and merged with the summary data. @@ -420,7 +420,7 @@ def df( column_key: meta[column_key] for column_key in dframe if column_key in meta } - # Remove duplicated column names. These will occur from libecl + # Remove duplicated column names. These will occur from resdata # when the user has repeated vector names in the summary SECTION dupes = dframe.columns.duplicated() if dupes.any(): @@ -610,7 +610,7 @@ def smry_meta(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]: return meta -def _fix_dframe_for_libecl(dframe: pd.DataFrame) -> pd.DataFrame: +def _fix_dframe_for_resdata(dframe: pd.DataFrame) -> pd.DataFrame: """Fix a dataframe making it ready for Summary.from_pandas() * Ensures that the index is always datetime, and sorted. @@ -699,7 +699,7 @@ def df2ressum( if "." in casename: raise ValueError(f"Do not use dots in casename {casename}") - dframe = _fix_dframe_for_libecl(dframe) + dframe = _fix_dframe_for_resdata(dframe) return resdata_summary_from_pandas(casename, dframe) # return Summary.from_pandas(casename, dframe) @@ -724,7 +724,7 @@ def _summary_pandas_frame( # pylint: disable=protected-access if time_index is None: - time_index = summary.dates # Changed from libecl + time_index = summary.dates # Changed from resdata data = np.zeros([len(time_index), len(keywords)]) Summary._init_pandas_frame( summary, keywords, data.ctypes.data_as(ctypes.POINTER(ctypes.c_double)) @@ -761,7 +761,7 @@ def resdata_summary_from_pandas( ) -> Summary: """Build an Summary object from a Pandas dataframe. - Temporarily copied from libecl to circumvent bug + Temporarily copied from resdata to circumvent bug https://github.com/equinor/ecl/issues/802 """ diff --git a/tests/test_eclfiles.py b/tests/test_eclfiles.py index bcbe9b147..3d5830dbb 100644 --- a/tests/test_eclfiles.py +++ b/tests/test_eclfiles.py @@ -49,7 +49,7 @@ def test_filedescriptors(): assert resdatafiles._initfile is None resdatafiles.get_rstfile() - # Automatically closed by libecl + # Automatically closed by resdata assert len(list(fd_dir.glob("*"))) == pre_fd_count assert resdatafiles._rstfile is not None resdatafiles.close() diff --git a/tests/test_grid.py b/tests/test_grid.py index 7116e917e..7e5dc6497 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -155,7 +155,7 @@ def test_init2df(): assert "PORV" in init_df # The KRO data from the INIT file in Reek contains only NaN's, - # but libecl gives out a large negative integer/float. + # but resdata gives out a large negative integer/float. 
# res2df should ensure this comes out as a NaN (but it # should be allowed later to drop columns which have only NaNs)) if "KRO" in init_df: diff --git a/tests/test_summary.py b/tests/test_summary.py index 37e7c256e..42cf67b13 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -15,7 +15,7 @@ from res2df.summary import ( _df2pyarrow, _fallback_date_roll, - _fix_dframe_for_libecl, + _fix_dframe_for_resdata, date_range, df, df2ressum, @@ -394,7 +394,7 @@ def test_extrapolation(): datetime=True, ) - # But without datetime, we can get it extrapolated by libecl: + # But without datetime, we can get it extrapolated by resdata: assert summary.df( resdatafiles, column_keys=["FOPT"], time_index=[datetime.date(2300, 1, 1)] )["FOPT"].values == [lastfopt] @@ -943,10 +943,10 @@ def test_smry_meta_synthetic(): ), ], ) -def test_fix_dframe_for_libecl(dframe, expected_dframe): +def test_fix_dframe_for_resdata(dframe, expected_dframe): """Test the dataframe preprocessor/validator for df2ressum works""" pd.testing.assert_frame_equal( - _fix_dframe_for_libecl(dframe), expected_dframe, check_index_type=False + _fix_dframe_for_resdata(dframe), expected_dframe, check_index_type=False ) @@ -1024,7 +1024,7 @@ def test_df2ressum(dframe): back again""" # Massage the dframe first so we can assert on equivalence after. - dframe = _fix_dframe_for_libecl(dframe) + dframe = _fix_dframe_for_resdata(dframe) summary = df2ressum(dframe) if dframe.empty: @@ -1168,7 +1168,7 @@ def test_res2df_errors(tmp_path): Path("FOO.UNSMRY").write_bytes(os.urandom(100)) Path("FOO.SMSPEC").write_bytes(os.urandom(100)) with pytest.raises(OSError, match="Failed to create summary instance"): - # This is how libecl reacts to bogus binary data + # This is how resdata reacts to bogus binary data Summary("FOO.UNSMRY") # But ResdataFiles should be more tolerant, as it should be possible From f2649fa08fb1f237426852bf0541ec411fe0ba25 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Wed, 15 Nov 2023 15:22:33 +0100 Subject: [PATCH 24/68] resdata include files -> include files --- res2df/common.py | 2 +- res2df/csv2res.py | 10 ++++------ res2df/pvt.py | 4 ++-- 3 files changed, 7 insertions(+), 9 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index dffd79460..025b19915 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -471,7 +471,7 @@ def fill_reverse_parser( parser: argparse.ArgumentParser, modulename: str, defaultoutputfile: str ): """A standardized submodule parser for the command line utility - to produce resdata include files from a CSV file. + to produce include files from a CSV file. Arguments: parser: parser to fill with arguments diff --git a/res2df/csv2res.py b/res2df/csv2res.py index 1b88bae6b..8684b4aa6 100644 --- a/res2df/csv2res.py +++ b/res2df/csv2res.py @@ -1,6 +1,6 @@ #!/usr/bin/env python """ -Convert dataframes (in res2df format) to resdata include files, +Convert dataframes (in res2df format) to include files, for selected keywords """ @@ -10,7 +10,7 @@ from res2df import __version__, equil, pvt, satfunc, summary, vfp # String constants in use for generating ERT forward model documentation: -DESCRIPTION: str = """Convert CSV files into resdata include files. Uses the command +DESCRIPTION: str = """Convert CSV files into include files. Uses the command line utility ``csv2res``. Run ``csv2res --help`` to see which subcommands are supported. No options other than the output file is possible when used directly as a forward model. 
When writing synthetic summary files, the ECLBASE with no filename suffix is expected @@ -62,7 +62,7 @@ def get_parser() -> argparse.ArgumentParser: help="Write SOLUTION include files", description=( "Write SOLUTION keywords (EQUIL, RSVD, RVVD) " - "to resdata include files from CSV in res2df format." + "to include files from CSV in res2df format." ), ) equil.fill_reverse_parser(equil_parser) @@ -71,9 +71,7 @@ def get_parser() -> argparse.ArgumentParser: pvt_parser = subparsers.add_parser( "pvt", help="Write PVT include files", - description=( - "Write resdata include files from CSV files on the res2df format." - ), + description=("Write include files from CSV files on the res2df format."), ) pvt.fill_reverse_parser(pvt_parser) pvt_parser.set_defaults(func=pvt.pvt_reverse_main) diff --git a/res2df/pvt.py b/res2df/pvt.py index e33c1f481..cf0b02b46 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -199,7 +199,7 @@ def df( ) -> pd.DataFrame: """Extract all (most) PVT data from a deck. - If you want to call this function on resdata include files, + If you want to call this function on include files, read them in to strings as in this example: > pvt_df = pvt.df(open("pvt.inc").read()) @@ -269,7 +269,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Set up sys.argv parsers for writing resdata include files from + """Set up sys.argv parsers for writing include files from dataframes (as CSV files) Arguments: From 782854bc5a31c851f1a61244a6560000c376df6f Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 08:46:07 +0100 Subject: [PATCH 25/68] Use *with res2df format --- res2df/common.py | 5 +++-- res2df/csv2res.py | 8 ++++---- res2df/equil.py | 2 +- res2df/pvt.py | 2 +- res2df/satfunc.py | 2 +- 5 files changed, 10 insertions(+), 9 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index 025b19915..efa775d2b 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -479,7 +479,8 @@ def fill_reverse_parser( defaultoutputfile: Default output filename """ parser.add_argument( - "csvfile", help="Name of CSV file with " + modulename + " data on res2df format" + "csvfile", + help="Name of CSV file with " + modulename + " data with " "res2df format", ) parser.add_argument( "-o", @@ -523,7 +524,7 @@ def df2res( for the actual string construction. Args: - dataframe: Dataframe with reservoir DATA on res2df format. + dataframe: Dataframe with res2df format. keywords: List of keywords to include. Will be reduced to the set of keywords available in dataframe and to those supported comments: Dictionary indexed by keyword with comments to be diff --git a/res2df/csv2res.py b/res2df/csv2res.py index 8684b4aa6..a09f53043 100644 --- a/res2df/csv2res.py +++ b/res2df/csv2res.py @@ -71,7 +71,7 @@ def get_parser() -> argparse.ArgumentParser: pvt_parser = subparsers.add_parser( "pvt", help="Write PVT include files", - description=("Write include files from CSV files on the res2df format."), + description=("Write include files from CSV files with res2df format."), ) pvt.fill_reverse_parser(pvt_parser) pvt_parser.set_defaults(func=pvt.pvt_reverse_main) @@ -80,8 +80,8 @@ def get_parser() -> argparse.ArgumentParser: "satfunc", help="Write saturation function include files", description=( - "Write saturation function include files from CSV files on " - "the res2df format." + "Write saturation function include files from CSV files with " + "res2df format." 
), ) satfunc.fill_reverse_parser(satfunc_parser) @@ -91,7 +91,7 @@ def get_parser() -> argparse.ArgumentParser: "vfp", help="Write VFPPROD/VFPINJ include files", description=( - "Write VFPPROD/VFPINJ include files from CSV files on the res2df format." + "Write VFPPROD/VFPINJ include files from CSV files with res2df format." ), ) vfp.fill_reverse_parser(vfp_parser) diff --git a/res2df/equil.py b/res2df/equil.py index 909e8ee9f..317c105af 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -363,7 +363,7 @@ def df2res( solution (EQUIL, RSVD++) data. Args: - equil_df: Dataframe with data on res2df format. + equil_df: Dataframe with res2df format. keywords: List of keywords to include. Must be supported and present in the incoming dataframe. comments: Dictionary indexed by keyword with comments to be diff --git a/res2df/pvt.py b/res2df/pvt.py index cf0b02b46..f0f19b5f3 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -332,7 +332,7 @@ def df2res( """Generate resdata include strings from PVT dataframes Args: - pvt_df: Dataframe with PVT data on res2df format. + pvt_df: Dataframe with PVT data in res2df format. keywords: List of keywords to include. Must be supported and present in the incoming dataframe. comments: Dictionary indexed by keyword with comments to be diff --git a/res2df/satfunc.py b/res2df/satfunc.py index 461df6a75..51f6313a3 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -242,7 +242,7 @@ def df2res( saturation functions (SWOF, SGOF, ...) Args: - satfunc_df: Dataframe with data on res2df format. + satfunc_df: Dataframe with res2df format. keywords: List of keywords to include. Must be supported and present in the incoming dataframe. Keywords are printed in the order defined by this list. From a6c6b3f577e5c3a8367d4fa9802720333c2ce5cf Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 08:48:42 +0100 Subject: [PATCH 26/68] ecldeck->deck --- docs/introduction.rst | 6 +++--- docs/usage/equil.rst | 2 +- res2df/compdat.py | 4 ++-- res2df/equil.py | 4 ++-- res2df/faults.py | 4 ++-- res2df/gruptree.py | 4 ++-- res2df/parameters.py | 12 ++++++------ res2df/pvt.py | 4 ++-- res2df/resdatafiles.py | 2 +- res2df/satfunc.py | 4 ++-- res2df/vfp/_vfp.py | 12 ++++++------ res2df/wcon.py | 4 ++-- res2df/wellcompletiondata.py | 2 +- tests/test_compdat.py | 2 +- tests/test_eclfiles.py | 2 +- tests/test_faults.py | 4 ++-- tests/test_gruptree.py | 6 +++--- tests/test_nnc.py | 2 +- tests/test_pvt.py | 12 ++++++------ tests/test_satfunc.py | 8 ++++---- tests/test_summary.py | 2 +- tests/test_wcon.py | 2 +- 22 files changed, 52 insertions(+), 52 deletions(-) diff --git a/docs/introduction.rst b/docs/introduction.rst index 276f24742..ea5822273 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -20,9 +20,9 @@ Examples > res2csv --help > res2csv summary --help - > res2csv summary --column_keys "F*" --time_index monthly --output output.csv MYECLDECK.DATA + > res2csv summary --column_keys "F*" --time_index monthly --output output.csv MYDECK.DATA > res2csv pillars --help - > res2csv pillars --rstdates all MYECLDECK.DATA + > res2csv pillars --rstdates all MYDECK.DATA If you access the module from within a Python script, for each submodule there is a function called ``df()`` which provides more or less the same @@ -33,7 +33,7 @@ a Pandas Dataframe. 
import res2df - resdatafiles = res2df.ResdataFiles("MYECLDECK.DATA") + resdatafiles = res2df.ResdataFiles("MYDECK.DATA") smry = res2df.summary.df(resdatafiles, column_keys="F*", time_index="monthly") hc_contacts = res2df.pillars.df(resdatafiles, rstdates="all") diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index 5a8064d8c..5f649f3f8 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -11,7 +11,7 @@ Supported keywords are ``EQUIL``, ``RSVD``, ``RVVD``, ``PBVD`` and from res2df import equil, ResdataFiles - dframe = equil.df(ResdataFiles('MYECLDECK.DATA')) + dframe = equil.df(ResdataFiles('MYDECK.DATA')) Which will provide a dataframe similar to the example below. Note that the column `Z` is used both for datum depth and the depth values in ``RSVD`` tables. The diff --git a/res2df/compdat.py b/res2df/compdat.py index 316f7cdaf..5c12a512c 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -950,7 +950,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: Arguments: parser: parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of reservoir DATA file.") + parser.add_argument("DATAFILE", help="Name of resdata .DATA file.") parser.add_argument( "-o", "--output", @@ -992,7 +992,7 @@ def df( Returns: pd.Dataframe with one row pr cell to well connection """ - compdat_df = deck2dfs(resdatafiles.get_ecldeck())["COMPDAT"] + compdat_df = deck2dfs(resdatafiles.get_deck())["COMPDAT"] compdat_df = unrolldf(compdat_df) if initvectors: diff --git a/res2df/equil.py b/res2df/equil.py index 317c105af..6046622a6 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -102,7 +102,7 @@ def df( pd.DataFrame, at least with columns KEYWORD and EQLNUM """ if isinstance(deck, ResdataFiles): - deck = deck.get_ecldeck() + deck = deck.get_deck() deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) ntequl = deck["EQLDIMS"][0][inferdims.DIMS_POS["NTEQUL"]].get_int(0) @@ -316,7 +316,7 @@ def equil_main(args) -> None: ) resdatafiles = ResdataFiles(args.DATAFILE) if resdatafiles: - deck = resdatafiles.get_ecldeck() + deck = resdatafiles.get_deck() if "EQLDIMS" in deck: # Things are easier when a full deck with (correct) EQLDIMS # is supplied: diff --git a/res2df/faults.py b/res2df/faults.py index fe8632695..91ac28985 100644 --- a/res2df/faults.py +++ b/res2df/faults.py @@ -39,7 +39,7 @@ def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFram deck: input deck """ if isinstance(deck, ResdataFiles): - deck = deck.get_ecldeck() + deck = deck.get_deck() # In[91]: list(deck['FAULTS'][0]) # Out[91]: [[u'F1'], [36], [36], [41], [42], [1], [14], [u'I']] @@ -88,7 +88,7 @@ def faults_main(args) -> None: ) resdatafiles = ResdataFiles(args.DATAFILE) if resdatafiles: - deck = resdatafiles.get_ecldeck() + deck = resdatafiles.get_deck() faults_df = df(deck) write_dframe_stdout_file( faults_df, diff --git a/res2df/gruptree.py b/res2df/gruptree.py index 772ce5917..032e99aeb 100644 --- a/res2df/gruptree.py +++ b/res2df/gruptree.py @@ -69,7 +69,7 @@ def df( date = None if isinstance(deck, ResdataFiles): - deck = deck.get_ecldeck() + deck = deck.get_deck() edgerecords = [] # list of dict of rows containing an edge. nodedatarecords = [] @@ -454,7 +454,7 @@ def gruptree_main(args) -> None: print("Nothing to do. 
Set --output or --prettyprint") sys.exit(0) resdatafiles = ResdataFiles(args.DATAFILE) - dframe = df(resdatafiles.get_ecldeck(), startdate=args.startdate) + dframe = df(resdatafiles.get_deck(), startdate=args.startdate) if args.prettyprint: if "DATE" in dframe: print(prettyprint(dframe)) diff --git a/res2df/parameters.py b/res2df/parameters.py index e7b28d5ab..f2ee86481 100644 --- a/res2df/parameters.py +++ b/res2df/parameters.py @@ -16,7 +16,7 @@ def find_parameter_files( - ecldeck_or_eclpath: Union[ResdataFiles, str, Path], filebase: str = "parameters" + deck_or_eclpath: Union[ResdataFiles, str, Path], filebase: str = "parameters" ) -> List[Path]: """Locate a default prioritized list of files to try to read as key-value @@ -24,7 +24,7 @@ def find_parameter_files( current dir, one directory up, and two directories up. Args: - ecldeck_or_eclpath: Either an ResdataFiles object of + deck_or_eclpath: Either an ResdataFiles object of a simulator output set (only the corresponding path will be used), or path to a file or directory, that will be used as a starting point for locating parameter files @@ -35,10 +35,10 @@ def find_parameter_files( """ eclbasepath: Path fname: str - if isinstance(ecldeck_or_eclpath, ResdataFiles): - eclbasepath = Path(ecldeck_or_eclpath.get_path()) - elif isinstance(ecldeck_or_eclpath, (str, Path)): - eclbasepath = Path(ecldeck_or_eclpath).parent.absolute() + if isinstance(deck_or_eclpath, ResdataFiles): + eclbasepath = Path(deck_or_eclpath.get_path()) + elif isinstance(deck_or_eclpath, (str, Path)): + eclbasepath = Path(deck_or_eclpath).parent.absolute() else: raise TypeError files_to_lookfor: List[str] = [ diff --git a/res2df/pvt.py b/res2df/pvt.py index f0f19b5f3..1100548d0 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -218,7 +218,7 @@ def df( pd.DataFrame """ if isinstance(deck, ResdataFiles): - deck = deck.get_ecldeck() + deck = deck.get_deck() deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) ntpvt = deck["TABDIMS"][0][inferdims.DIMS_POS["NTPVT"]].get_int(0) @@ -286,7 +286,7 @@ def pvt_main(args) -> None: resdatafiles = ResdataFiles(args.DATAFILE) logger.info("Parsed %s", args.DATAFILE) if resdatafiles: - deck = resdatafiles.get_ecldeck() + deck = resdatafiles.get_deck() if "TABDIMS" in deck: # Things are easier when a full deck with correct TABDIMS # is supplied: diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index 3ccda2fdc..da799dbfe 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -83,7 +83,7 @@ def get_path(self) -> Path: """Return the full path to the directory with the DATA file""" return Path(self._eclbase).absolute().parent - def get_ecldeck(self) -> "opm.libopmcommon_python.Deck": + def get_deck(self) -> "opm.libopmcommon_python.Deck": """Return a opm.io deck of the DATA file""" if not self._deck: if Path(self._eclbase + ".DATA").is_file(): diff --git a/res2df/satfunc.py b/res2df/satfunc.py index 51f6313a3..d551652eb 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -94,7 +94,7 @@ def df( if isinstance(deck, ResdataFiles): # NB: If this is done on include files and not on DATA files # we can loose data for SATNUM > 1 - deck = deck.get_ecldeck() + deck = deck.get_deck() deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTSFUN", deck, ntsfun) assert "TABDIMS" in deck @@ -195,7 +195,7 @@ def satfunc_main(args) -> None: ) resdatafiles = ResdataFiles(args.DATAFILE) if resdatafiles: - deck = resdatafiles.get_ecldeck() + deck = resdatafiles.get_deck() if "TABDIMS" in deck: # Things are easier 
when a full deck with (correct) TABDIMS # is supplied: diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index a5c3b0664..55280a3cb 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -52,7 +52,7 @@ def basic_data( """ if isinstance(deck, ResdataFiles): - deck = deck.get_ecldeck() + deck = deck.get_deck() elif isinstance(deck, str): deck = ResdataFiles.str2deck(deck) @@ -256,7 +256,7 @@ def dfs( Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ if isinstance(deck, ResdataFiles): - deck = deck.get_ecldeck() + deck = deck.get_deck() elif isinstance(deck, str): deck = ResdataFiles.str2deck(deck) @@ -299,7 +299,7 @@ def pyarrow_tables( Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ if isinstance(deck, ResdataFiles): - deck = deck.get_ecldeck() + deck = deck.get_deck() elif isinstance(deck, str): deck = ResdataFiles.str2deck(deck) @@ -428,7 +428,7 @@ def df( return pd.DataFrame() if isinstance(deck, ResdataFiles): - deck = deck.get_ecldeck() + deck = deck.get_deck() elif isinstance(deck, str): deck = ResdataFiles.str2deck(deck) @@ -500,7 +500,7 @@ def vfp_main(args) -> None: outputfile = args.output outputfile.replace(".arrow", "") vfp_arrow_tables = pyarrow_tables( - resdatafiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers + resdatafiles.get_deck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers ) for vfp_table in vfp_arrow_tables: table_number = int( @@ -513,7 +513,7 @@ def vfp_main(args) -> None: logger.info(f"Parsed file {args.DATAFILE} for vfp.dfs_arrow") else: dframe = df( - resdatafiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers + resdatafiles.get_deck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers ) if args.output: common.write_dframe_stdout_file( diff --git a/res2df/wcon.py b/res2df/wcon.py index 7edbd91dc..45ceb207f 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -35,7 +35,7 @@ def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFram """ if isinstance(deck, ResdataFiles): - deck = deck.get_ecldeck() + deck = deck.get_deck() wconrecords = [] # List of dicts of every line in input file date = None # DATE columns will always be there, but can contain NaN @@ -96,7 +96,7 @@ def wcon_main(args) -> None: ) resdatafiles = ResdataFiles(args.DATAFILE) if resdatafiles: - deck = resdatafiles.get_ecldeck() + deck = resdatafiles.get_deck() wcon_df = df(deck) write_dframe_stdout_file( wcon_df, diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index 51a24d671..da454c471 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -98,7 +98,7 @@ def _get_unit_system(resdatafiles: ResdataFiles) -> UnitSystem: default unit system in Eclipse. 
""" unit_systems = [unitsystem.value for unitsystem in UnitSystem] - for keyword in resdatafiles.get_ecldeck(): + for keyword in resdatafiles.get_deck(): if keyword.name in unit_systems: return UnitSystem(keyword.name) return UnitSystem.METRIC diff --git a/tests/test_compdat.py b/tests/test_compdat.py index a2738da25..608f3a394 100644 --- a/tests/test_compdat.py +++ b/tests/test_compdat.py @@ -43,7 +43,7 @@ def test_df(): def test_comp2df(): """Test that dataframes are produced""" resdatafiles = ResdataFiles(EIGHTCELLS) - compdfs = compdat.deck2dfs(resdatafiles.get_ecldeck()) + compdfs = compdat.deck2dfs(resdatafiles.get_deck()) assert not compdfs["COMPDAT"].empty assert not compdfs["WELSEGS"].empty diff --git a/tests/test_eclfiles.py b/tests/test_eclfiles.py index 3d5830dbb..2b01fbf71 100644 --- a/tests/test_eclfiles.py +++ b/tests/test_eclfiles.py @@ -75,6 +75,6 @@ def test_filedescriptors(): assert len(list(fd_dir.glob("*"))) == pre_fd_count assert resdatafiles._rftfile is None - resdatafiles.get_ecldeck() + resdatafiles.get_deck() # This should not leave any file descriptor open assert len(list(fd_dir.glob("*"))) == pre_fd_count diff --git a/tests/test_faults.py b/tests/test_faults.py index 0ac482ce1..c93817313 100644 --- a/tests/test_faults.py +++ b/tests/test_faults.py @@ -27,7 +27,7 @@ def test_faults2df(): """Test that dataframes are produced""" resdatafiles = ResdataFiles(REEK) - faultsdf = faults.df(resdatafiles.get_ecldeck()) + faultsdf = faults.df(resdatafiles.get_deck()) assert "NAME" in faultsdf assert "I" in faultsdf @@ -55,7 +55,7 @@ def test_str2df(): def test_nofaults(): """Test on a dataset with no faults""" resdatafiles = ResdataFiles(EIGHTCELLS) - faultsdf = faults.df(resdatafiles.get_ecldeck()) + faultsdf = faults.df(resdatafiles.get_deck()) assert faultsdf.empty diff --git a/tests/test_gruptree.py b/tests/test_gruptree.py index 7af8a3749..9d84ee9b8 100644 --- a/tests/test_gruptree.py +++ b/tests/test_gruptree.py @@ -28,7 +28,7 @@ def test_eightcells_dataset(): """Test Eightcells dataset""" resdatafiles = ResdataFiles(EIGHTCELLS) - gruptree_df = gruptree.df(resdatafiles.get_ecldeck()) + gruptree_df = gruptree.df(resdatafiles.get_deck()) expected_dframe = pd.DataFrame( [ @@ -45,7 +45,7 @@ def test_eightcells_dataset(): def test_gruptree2df(): """Test that dataframes are produced""" resdatafiles = ResdataFiles(REEK) - grupdf = gruptree.df(resdatafiles.get_ecldeck()) + grupdf = gruptree.df(resdatafiles.get_deck()) assert not grupdf.empty assert len(grupdf["DATE"].unique()) == 5 @@ -53,7 +53,7 @@ def test_gruptree2df(): assert len(grupdf["PARENT"].dropna().unique()) == 3 assert set(grupdf["KEYWORD"].unique()) == set(["GRUPTREE", "WELSPECS"]) - grupdfnowells = gruptree.df(resdatafiles.get_ecldeck(), welspecs=False) + grupdfnowells = gruptree.df(resdatafiles.get_deck(), welspecs=False) assert len(grupdfnowells["KEYWORD"].unique()) == 1 assert grupdf["PARENT"].dropna().unique()[0] == "FIELD" diff --git a/tests/test_nnc.py b/tests/test_nnc.py index c6fa545a8..1be3befce 100644 --- a/tests/test_nnc.py +++ b/tests/test_nnc.py @@ -67,7 +67,7 @@ def test_nnc2df_faultnames(): """Add faultnames from FAULTS keyword to connections""" resdatafiles = ResdataFiles(REEK) nncdf = nnc.df(resdatafiles) - faultsdf = faults.df(resdatafiles.get_ecldeck()) + faultsdf = faults.df(resdatafiles.get_deck()) merged = pd.merge( nncdf, diff --git a/tests/test_pvt.py b/tests/test_pvt.py index e31aa7495..fa3313510 100644 --- a/tests/test_pvt.py +++ b/tests/test_pvt.py @@ -161,7 +161,7 @@ def 
test_pvt_reek(): """Test that the Reek PVT input can be parsed individually""" resdatafiles = ResdataFiles(REEK) - pvto_df = pvt.pvto_fromdeck(resdatafiles.get_ecldeck()) + pvto_df = pvt.pvto_fromdeck(resdatafiles.get_deck()) assert "PVTNUM" in pvto_df assert "PRESSURE" in pvto_df assert "VOLUMEFACTOR" in pvto_df @@ -180,7 +180,7 @@ def test_pvt_reek(): dframe_via_string = pvt.pvto_fromdeck(pvt.df2res_pvto(pvto_df)) pd.testing.assert_frame_equal(dframe_via_string, pvto_df) - density_df = pvt.density_fromdeck(resdatafiles.get_ecldeck()) + density_df = pvt.density_fromdeck(resdatafiles.get_deck()) pd.testing.assert_frame_equal( density_df, pd.DataFrame( @@ -192,14 +192,14 @@ def test_pvt_reek(): dframe_via_string = pvt.density_fromdeck(pvt.df2res_density(density_df)) pd.testing.assert_frame_equal(dframe_via_string, density_df) - rock_df = pvt.rock_fromdeck(resdatafiles.get_ecldeck()) + rock_df = pvt.rock_fromdeck(resdatafiles.get_deck()) assert "PVTNUM" in rock_df assert len(rock_df) == 1 assert "PRESSURE" in rock_df assert "COMPRESSIBILITY" in rock_df assert rock_df["PRESSURE"].values[0] == 327.3 - pvtw_df = pvt.pvtw_fromdeck(resdatafiles.get_ecldeck()) + pvtw_df = pvt.pvtw_fromdeck(resdatafiles.get_deck()) assert "PVTNUM" in pvtw_df assert pvtw_df["PVTNUM"].values[0] == 1 assert len(pvtw_df) == 1 @@ -210,7 +210,7 @@ def test_pvt_reek(): assert "VISCOSIBILITY" in pvtw_df assert pvtw_df["VISCOSITY"].values[0] == 0.25 - pvdg_df = pvt.pvdg_fromdeck(resdatafiles.get_ecldeck()) + pvdg_df = pvt.pvdg_fromdeck(resdatafiles.get_deck()) assert "PVTNUM" in pvdg_df assert "PRESSURE" in pvdg_df assert "VOLUMEFACTOR" in pvdg_df @@ -293,7 +293,7 @@ def test_pvtg_string(): def test_density(): """Test that DENSITY can be parsed from files and from strings""" resdatafiles = ResdataFiles(REEK) - density_df = pvt.density_fromdeck(resdatafiles.get_ecldeck()) + density_df = pvt.density_fromdeck(resdatafiles.get_deck()) assert len(density_df) == 1 assert "PVTNUM" in density_df assert "OILDENSITY" in density_df diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index 583c007fb..9bd4aa92a 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -26,11 +26,11 @@ EIGHTCELLS = str(TESTDIR / "data/eightcells/EIGHTCELLS.DATA") -def test_ecldeck_to_satfunc_dframe(): +def test_deck_to_satfunc_dframe(): """Test that dataframes can be produced from a full input deck (the example Reek case)""" resdatafiles = ResdataFiles(REEK) - satdf = satfunc.df(resdatafiles.get_ecldeck()) + satdf = satfunc.df(resdatafiles.get_deck()) assert set(satdf["KEYWORD"]) == {"SWOF", "SGOF"} assert set(satdf["SATNUM"]) == {1} @@ -57,7 +57,7 @@ def test_satfunc_roundtrip(): """Test that we can produce a SATNUM dataframe from the Reek case, convert it back to an include file, and then reinterpret it to the same""" resdatafiles = ResdataFiles(EIGHTCELLS) - satdf = satfunc.df(resdatafiles.get_ecldeck()) + satdf = satfunc.df(resdatafiles.get_deck()) inc = satfunc.df2res(satdf) df_from_inc = satfunc.df(inc) pd.testing.assert_frame_equal( @@ -70,7 +70,7 @@ def test_df2res_order(): """Test that we can control the keyword order in generated strings by the list supplied in keywords argument""" resdatafiles = ResdataFiles(REEK) - satdf = satfunc.df(resdatafiles.get_ecldeck()) + satdf = satfunc.df(resdatafiles.get_deck()) swof_sgof = satfunc.df2res(satdf, keywords=["SWOF", "SGOF"]) assert swof_sgof.find("SWOF") < swof_sgof.find("SGOF") diff --git a/tests/test_summary.py b/tests/test_summary.py index 42cf67b13..ec90f3ad1 100644 --- 
a/tests/test_summary.py +++ b/tests/test_summary.py @@ -1174,7 +1174,7 @@ def test_res2df_errors(tmp_path): # But ResdataFiles should be more tolerant, as it should be possible # to extract other data if SMRY is corrupted Path("FOO.DATA").write_text("RUNSPEC", encoding="utf8") - assert str(ResdataFiles("FOO").get_ecldeck()).strip() == "RUNSPEC" + assert str(ResdataFiles("FOO").get_deck()).strip() == "RUNSPEC" with pytest.raises(OSError): ResdataFiles("FOO").get_summary() diff --git a/tests/test_wcon.py b/tests/test_wcon.py index b745e72fa..16a23dfe6 100644 --- a/tests/test_wcon.py +++ b/tests/test_wcon.py @@ -25,7 +25,7 @@ def test_wcon2df(): """Test that dataframes are produced""" resdatafiles = ResdataFiles(EIGHTCELLS) - wcondf = wcon.df(resdatafiles.get_ecldeck()) + wcondf = wcon.df(resdatafiles.get_deck()) assert not wcondf.empty assert "DATE" in wcondf # for all data From 1adc2eb20eadda508fdf6eb7b50fb8a19bcb3e15 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 09:03:51 +0100 Subject: [PATCH 27/68] use phrase "include file" --- res2df/common.py | 4 ++-- tests/test_satfunc.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index efa775d2b..98b9936b0 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -538,7 +538,7 @@ def df2res( to file. Returns: - string that can be used as an include file for resdata. + string that can be used as an include file. """ from_module = inspect.stack()[1] calling_module = inspect.getmodule(from_module[0]) @@ -630,7 +630,7 @@ def generic_deck_table( renamer: Optional[Dict[str, str]] = None, drop_trailing_columns: bool = True, ) -> str: - """Construct a deck table for data following + """Construct an input deck table for data following a keyword. Each row ends with a slash. This function will *not* add a final slash after all rows, as diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index 9bd4aa92a..811a07fb8 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -686,7 +686,7 @@ def test_main_subparsers(tmp_path, mocker): def test_csv2res(tmp_path, mocker): - """Test command line interface for csv to Eclipse include files""" + """Test command line interface for csv to include files""" os.chdir(tmp_path) tmpcsvfile = "satfunc.csv" From c06b4bef4a7003fb6cf336e992eaccd093859529 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 09:04:56 +0100 Subject: [PATCH 28/68] ecl2df.yml->res2df.yml --- .github/workflows/{ecl2df.yml => res2df.yml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/workflows/{ecl2df.yml => res2df.yml} (100%) diff --git a/.github/workflows/ecl2df.yml b/.github/workflows/res2df.yml similarity index 100% rename from .github/workflows/ecl2df.yml rename to .github/workflows/res2df.yml From fa8e365ef551733f6686e579053a768e9c46430a Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 09:17:05 +0100 Subject: [PATCH 29/68] Call OPM "OPM Flow" --- docs/glossary.rst | 8 ++++---- res2df/fipreports.py | 2 +- tests/test_fipreports.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index a2a784481..5b91031b2 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -4,9 +4,9 @@ Glossary .. glossary:: deck/input deck - Inputs provided to reservoir simulators such as Eclipse or OPM-flow. - Usually a .DATA file pointing to other include files. One deck points - to multiple include files. 
+ Inputs provided to reservoir simulators such as Eclipse or OPM Flow. + Usually a .DATA file pointing to other include files. One deck + typically points to multiple include files. include files Files that provide inputs to reservoir simulators by using the INCLUDE statement @@ -14,4 +14,4 @@ Glossary or .GRDECL (for files included into the grid section). reservoir simulator - Reservoir simulators such as OPM-flow or Eclipse. + Reservoir simulators such as OPM Flow or Eclipse. diff --git a/res2df/fipreports.py b/res2df/fipreports.py index 7521682a8..ebb50503c 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -134,7 +134,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra ".+" + fipname + r"\s+REPORT\s+REGION\s+(\d+)", re.IGNORECASE ) - # Flag for whether we are supposedly parsing a PRT file made by OPM flow: + # Flag for whether we are supposedly parsing a PRT file made by OPM Flow: opm = False with open(prtfile, encoding="utf-8") as prt_fh: diff --git a/tests/test_fipreports.py b/tests/test_fipreports.py index 5f4b0660c..52df874ca 100644 --- a/tests/test_fipreports.py +++ b/tests/test_fipreports.py @@ -346,7 +346,7 @@ def test_rogue_eclipse_output(tmp_path): def test_prtstring_opmflow(tmp_path): - """Test parsing the PRT output from OPM flow.""" + """Test parsing the PRT output from OPM Flow.""" prtstring = """ Starting time step 3, stepsize 19.6 days, at day 11.4/31, date = 12-Jan-2000 From 332a568b4c8a4df6d1fb0b6b9ab9b8f62a38a969 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 09:18:17 +0100 Subject: [PATCH 30/68] Include lower case include file notations --- docs/glossary.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index 5b91031b2..043a84fbe 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -10,8 +10,8 @@ Glossary include files Files that provide inputs to reservoir simulators by using the INCLUDE statement - in input decks. By convention, these files often have the extension .INC (generally) - or .GRDECL (for files included into the grid section). + in input decks. By convention, these files often have the extension .INC/.inc + (generally) or .GRDECL/.grdecl (for files included into the grid section). reservoir simulator Reservoir simulators such as OPM Flow or Eclipse. From f7c7959a22460e8e8e22cbdac5c3694a719c3a20 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 09:29:47 +0100 Subject: [PATCH 31/68] Fix smaller doc errors --- docs/usage/equil.rst | 7 ++++--- docs/usage/grid.rst | 2 +- docs/usage/gruptree.rst | 3 ++- docs/usage/pvt.rst | 2 +- docs/usage/rft.rst | 2 +- docs/usage/satfunc.rst | 6 +++--- 6 files changed, 12 insertions(+), 10 deletions(-) diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index 5f649f3f8..a640e1508 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -51,10 +51,11 @@ one meter for compatibility, which you could do by the statements: dframe.loc[rsvd_rows, "Z"] = dframe.loc[rsvd_rows, "Z"] + 1 -Re-exporting tables to include files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +Re-exporting tables to include-files +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When you are done with the table, you can generate new include files from your modified data by issuing +When you are done with the table, you can generate new include files from your modified +data by issuing .. 
code-block:: python diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst index 675f022ce..8797d2cf7 100644 --- a/docs/usage/grid.rst +++ b/docs/usage/grid.rst @@ -143,7 +143,7 @@ from that module as a by-product of the pillar computations. Generating include files from grid data -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ If you have loaded grid data into a Pandas frame, some operations are easily performed, scaling porosity, permeability etc. Or remapping some region parameters. Using the diff --git a/docs/usage/gruptree.rst b/docs/usage/gruptree.rst index 14f0db96f..4bf516f86 100644 --- a/docs/usage/gruptree.rst +++ b/docs/usage/gruptree.rst @@ -1,7 +1,8 @@ gruptree -------- -Extracts data from the GRUPTREE, GRUPNET and WELSPECS keywords from an input deck and presents the production network either as pretty-printed ASCII or in a +Extracts data from the GRUPTREE, GRUPNET and WELSPECS keywords from an input deck +and presents the production network either as pretty-printed ASCII or in a dataframe-representation. The GRUPTREE section of your input deck defines the production network diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index 86f07ee68..8730685a2 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -68,7 +68,7 @@ Possibly, different viscosity scaling pr. PVTNUM is needed Density values are easier to scale up or down to whatever is needed. Re-exporting tables to include files -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When you are done with the table, you can generate new include files from your modified data by issuing diff --git a/docs/usage/rft.rst b/docs/usage/rft.rst index e77f90046..eca5592e8 100644 --- a/docs/usage/rft.rst +++ b/docs/usage/rft.rst @@ -1,7 +1,7 @@ rft --- -rft will convert the binary RFT files from dataframes or CSV files, +rft will convert the binary RFT files to dataframes or CSV files, facilitating analysis of inflow and pressure for each connection the well has to the reservoir grid. diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index 53dbaff00..6d668152b 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -1,7 +1,7 @@ satfunc ------- -satfunc will extract saturation functions from input decks or from Eclipse +satfunc will extract saturation functions from input decks or from include files, these are the keywords ``SWOF``, ``SGOF``, ``SGWFN``, ``SWFN``, ``SOF2``, ``SGFN``, ``SOF3`` and ``SLGOF``. @@ -37,7 +37,7 @@ Instead of data decks, individual include files may also be parsed, but only one at a time. Generating include files from dataframes -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ When a dataframe of saturation function data is loaded into Python, any operation may be applied on the data. Simple operations would typically be scaling, perhaps @@ -113,7 +113,7 @@ to do directly on the dataframes. Before doing manipulations of dataframes in through the `pyscal `_ library. Pyscal can create curves from parametrizations, and interpolate between curves. -Pyscal can create initialize its relperm objects from include files +Pyscal can initialize its relperm objects from include files though the parsing capabilities of res2df.satfunc. The function ``pyscal.pyscallist.df()`` is analogous to ``res2df.satfunc.df()`` in From 44dd565051f99e8054d7b39ea85ce0fadb00ed2a Mon Sep 17 00:00:00 2001 From: "Yngve S. 
Kristiansen" Date: Thu, 16 Nov 2023 09:33:11 +0100 Subject: [PATCH 32/68] Bump date & version of fipnum.inc --- docs/usage/fipnum.inc | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/usage/fipnum.inc b/docs/usage/fipnum.inc index 456c130cf..2566db1d9 100644 --- a/docs/usage/fipnum.inc +++ b/docs/usage/fipnum.inc @@ -1,5 +1,5 @@ --- Output file printed by res2df.grid 0.6.0 --- at 2020-04-23 10:46:22.529558 +-- Output file printed by res2df.grid 0.17.2 +-- at 2023-11-16 9:31:23.318941 FIPNUM 21*2 19*1 20*2 20*1 20*2 20*1 19*2 21*1 19*2 21*1 18*2 From 570070338d43b9a156eda560e81b551ccd2f6d40 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 10:16:10 +0100 Subject: [PATCH 33/68] Use .DATA file consistently --- docs/csv2res.rst | 4 ++-- docs/glossary.rst | 9 +++++---- docs/introduction.rst | 12 ++++++------ docs/res2csv.rst | 2 +- docs/usage/compdat.rst | 4 ++-- docs/usage/equil.rst | 2 +- docs/usage/gruptree.rst | 6 +++--- docs/usage/pvt.rst | 4 ++-- docs/usage/satfunc.rst | 2 +- docs/usage/trans.rst | 2 +- docs/usage/wcon.rst | 2 +- res2df/common.py | 3 +-- res2df/compdat.py | 4 ++-- res2df/equil.py | 4 ++-- res2df/faults.py | 2 +- res2df/fipreports.py | 2 +- res2df/grid.py | 2 +- res2df/gruptree.py | 2 +- res2df/inferdims.py | 2 +- res2df/pvt.py | 6 +++--- res2df/res2csv.py | 4 ++-- res2df/resdatafiles.py | 2 +- res2df/satfunc.py | 6 +++--- res2df/summary.py | 2 +- res2df/vfp/_vfp.py | 10 +++++----- res2df/vfp/_vfpcommon.py | 2 +- res2df/vfp/_vfpinj.py | 12 ++++++------ res2df/vfp/_vfpprod.py | 12 ++++++------ res2df/wcon.py | 2 +- res2df/wellcompletiondata.py | 2 +- tests/test_gruptree.py | 2 +- tests/test_satfunc.py | 2 +- tests/test_welopen.py | 2 +- 33 files changed, 68 insertions(+), 68 deletions(-) diff --git a/docs/csv2res.rst b/docs/csv2res.rst index 4f36d5acb..6f1a4121e 100644 --- a/docs/csv2res.rst +++ b/docs/csv2res.rst @@ -1,9 +1,9 @@ csv2res ======= -Some of the modules inside res2df is able to write reservoir simulator input decks +Some of the modules inside res2df is able to write .DATA files from dataframes (in the format dumped by res2df). This makes it possible -to produce reservoir input decks in any application that can write CSV files, +to produce .DATA files in any application that can write CSV files, and use this tool to convert it into reservoir simulator files, or it can facilitate operations/manipulations of an existing deck using any tool that can work on CSV files, by first running res2csv on an input file, diff --git a/docs/glossary.rst b/docs/glossary.rst index 043a84fbe..56be125cc 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -3,14 +3,15 @@ Glossary .. glossary:: - deck/input deck + .DATA files Inputs provided to reservoir simulators such as Eclipse or OPM Flow. - Usually a .DATA file pointing to other include files. One deck - typically points to multiple include files. + Usually a .DATA file pointing to other include files. One .DATA file + typically points to multiple include files. A data file is defined as + a **full** data file if ?...TODO include files Files that provide inputs to reservoir simulators by using the INCLUDE statement - in input decks. By convention, these files often have the extension .INC/.inc + in .DATA files. By convention, these files often have the extension .INC/.inc (generally) or .GRDECL/.grdecl (for files included into the grid section). 
reservoir simulator diff --git a/docs/introduction.rst b/docs/introduction.rst index ea5822273..036915371 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -127,14 +127,14 @@ More documentation on :doc:`usage/satfunc`. ^^^^^^^^^ Extracts the information in the `EQUIL` table, `RSVD` and `RVVD` in the -input deck. Can write back to include files. +.DATA file. Can write back to include files. More documentation on :doc:`usage/equil`. ``compdat`` ^^^^^^^^^^^ -Extracts well connection data from the `COMPDAT` keyword in the input deck. +Extracts well connection data from the `COMPDAT` keyword in the .DATA file. For multi-segment wells, `WELSEGS` and `COMPSEGS` is also parsed. The data is available as three different dataframes, which can be merged. @@ -147,7 +147,7 @@ More documentation on :doc:`usage/compdat`. ^^^^^^^^^^^^ Extracts the information from the `GRUPTREE` and `WELSPECS` keyword, at -all timesteps, from the input deck. The tree structure at each relevant +all timesteps, from the .DATA file. The tree structure at each relevant date can be returned as a dataframe of the edges, as a nested dictionary or as a `treelib` tree. @@ -156,7 +156,7 @@ More documentation on :doc:`usage/gruptree`. ``pvt`` ^^^^^^^ -Extracts PVT data from an input deck, from the keywords `PVTO`, `PVDG`, +Extracts PVT data from a .DATA file, from the keywords `PVTO`, `PVDG`, `DENSITY`, `ROCK` etc. Can write data back to include files. More documentation on :doc:`usage/pvt`. @@ -173,7 +173,7 @@ More documentation on :doc:`usage/wcon`. ^^^^^^^^^^^^^^^^ This is an internal helper module in order to represent finished or -unfinished input decks and runs. The class ResdataFiles can cache binary +unfinished .DATA files and runs. The class ResdataFiles can cache binary files that are recently read, and is able to locate the various output files based on the basename or the `.DATA` filename. @@ -183,7 +183,7 @@ Metadata support parameters.txt ^^^^^^^^^^^^^^ -Metadata for each input deck are sometimes added in a text file named +Metadata for each .DATA file are sometimes added in a text file named ``parameters.txt``, alongside the Eclipse DATA file or one or two directory levels above it. diff --git a/docs/res2csv.rst b/docs/res2csv.rst index 7980788f6..2d035fbf3 100644 --- a/docs/res2csv.rst +++ b/docs/res2csv.rst @@ -4,7 +4,7 @@ res2csv Most of the functionality in res2df is exposed to the command line through the script *res2csv*. The first argument to this script is always the submodule (subcommand) from which you want functionality. Mandatory argument is -always an input deck or sometimes individual include files, and +always a .DATA file or sometimes individual include files, and there is usually an ``--output`` option to specify which file to dump the CSV to. If you want output to your terminal, use ``-`` as the output filename. diff --git a/docs/usage/compdat.rst b/docs/usage/compdat.rst index 23a8d72ab..d20f4d1eb 100644 --- a/docs/usage/compdat.rst +++ b/docs/usage/compdat.rst @@ -1,7 +1,7 @@ compdat ^^^^^^^ -This module extracts COMPDAT, WELSEGS and COMPSEGS from an input deck. +This module extracts COMPDAT, WELSEGS and COMPSEGS from a .DATA file. Additionally, it will parse WELOPEN statements and emit new COMPDAT statements from the actions in WELOPEN. @@ -26,7 +26,7 @@ for each of COMPDAT, and the segmentation keywords. .. warning:: When WELOPEN is in use, the dataframe can differ from Eclipse behaviour in certain circumstances. 
The dataframe representation from ``compdat`` does not separate from a "shut" well and the open-ness of its - connections. So in an input deck it is possible to shut a well, and then + connections. So in a .DATA FILE it is possible to shut a well, and then reopen it, and get back the original open/shut state of individual connections prior to well shut. The dataframe format will display `all` connections as open if a well is opened with defaulted indices. diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index a640e1508..78dcd3480 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -2,7 +2,7 @@ equil ----- This is the res2df module for processing the ``SOLUTION`` section of -the input deck. +the .DATA file. Supported keywords are ``EQUIL``, ``RSVD``, ``RVVD``, ``PBVD`` and ``PDVD``. Typical usage is diff --git a/docs/usage/gruptree.rst b/docs/usage/gruptree.rst index 4bf516f86..5b6623b8c 100644 --- a/docs/usage/gruptree.rst +++ b/docs/usage/gruptree.rst @@ -1,13 +1,13 @@ gruptree -------- -Extracts data from the GRUPTREE, GRUPNET and WELSPECS keywords from an input deck +Extracts data from the GRUPTREE, GRUPNET and WELSPECS keywords from a .DATA file and presents the production network either as pretty-printed ASCII or in a dataframe-representation. -The GRUPTREE section of your input deck defines the production network +The GRUPTREE section of your .DATA file defines the production network from wells and up to the platform (and possibly also to a field having -many platforms). In the input deck it be as simple as this:: +many platforms). In the .DATA file it be as simple as this:: START 01 'JAN' 2000 / diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index 8730685a2..fca105ed0 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -1,7 +1,7 @@ pvt --- -Extracts PVT related keyword data from the PROPS section in an input deck, +Extracts PVT related keyword data from the PROPS section in a .DATA file, typically the keywords ``PVTO``, ``PVDG``, ``DENSITY`` and ``ROCK``. Data from all keywords will be merged into one common dataframe. @@ -76,7 +76,7 @@ When you are done with the table, you can generate new include files from your m pvt.df2res(dframe, filename="pvt.inc") -When injecting this produced ``pvt.inc`` into any new input deck, ensure you +When injecting this produced ``pvt.inc`` into any new .DATA file, ensure you check which keywords have been written out, compared to what you gave in to `res2df.pvt` above. Any non-supported keywords will get lost in the import phase and need to be catered for outside res2df. diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index 6d668152b..ffa93033b 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -1,7 +1,7 @@ satfunc ------- -satfunc will extract saturation functions from input decks or from +satfunc will extract saturation functions from .DATA files or from include files, these are the keywords ``SWOF``, ``SGOF``, ``SGWFN``, ``SWFN``, ``SOF2``, ``SGFN``, ``SOF3`` and ``SLGOF``. diff --git a/docs/usage/trans.rst b/docs/usage/trans.rst index a0e069417..70b36a3c8 100644 --- a/docs/usage/trans.rst +++ b/docs/usage/trans.rst @@ -6,7 +6,7 @@ from a simulation grid. Python API: :func:`res2df.trans.df` -Applied on an input deck, the *trans* module will give out a dataframe of neighbour +Applied on a .DATA file, the *trans* module will give out a dataframe of neighbour connections .. 
code-block:: python diff --git a/docs/usage/wcon.rst b/docs/usage/wcon.rst index 05855ffe2..9071646f0 100644 --- a/docs/usage/wcon.rst +++ b/docs/usage/wcon.rst @@ -2,7 +2,7 @@ wcon ^^^^ This module extracts information from WCONHIST, WCONINJE, WCONINJH and -WCONPROD from an input deck. +WCONPROD from a .DATA file. .. wcon.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/wcon.csv', index=False) diff --git a/res2df/common.py b/res2df/common.py index 98b9936b0..2925334a9 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -630,8 +630,7 @@ def generic_deck_table( renamer: Optional[Dict[str, str]] = None, drop_trailing_columns: bool = True, ) -> str: - """Construct an input deck table for data following - a keyword. Each row ends with a slash. + """Construct string contents of a .DATA file table. This function will *not* add a final slash after all rows, as this is keyword dependent. Some keywords require it, some keywords diff --git a/res2df/compdat.py b/res2df/compdat.py index 5c12a512c..f84b3f86a 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -82,7 +82,7 @@ def deck2dfs( Args: deck: A deck representing the schedule - Does not have to be a full input deck, an include file is sufficient + Does not have to be a full .DATA file, an include file is sufficient start_date: The default date to use for events where the DATE or START keyword is not found in advance. Default: None @@ -950,7 +950,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: Arguments: parser: parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of resdata .DATA file.") + parser.add_argument("DATAFILE", help="Name of .DATA file.") parser.add_argument( "-o", "--output", diff --git a/res2df/equil.py b/res2df/equil.py index 6046622a6..b5b032c8a 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -1,5 +1,5 @@ """ -Extract EQUIL from an input deck as Pandas DataFrame +Extract EQUIL from a .DATA file as Pandas DataFrame """ import argparse @@ -92,7 +92,7 @@ def df( are possibly already removed by the OPM parser in resdatafiles.str2deck(). Arguments: - deck: input deck or string with deck. If + deck: .DATA file or string with deck. If not string, EQLDIMS must be present in the deck. keywords: Requested keywords for which to extract data. ntequl: If not None, should state the NTEQUL in EQLDIMS. If diff --git a/res2df/faults.py b/res2df/faults.py index 91ac28985..193f66c29 100644 --- a/res2df/faults.py +++ b/res2df/faults.py @@ -36,7 +36,7 @@ def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFram All data for the keyword FAULTS will be returned. Args: - deck: input deck + deck: .DATA file """ if isinstance(deck, ResdataFiles): deck = deck.get_deck() diff --git a/res2df/fipreports.py b/res2df/fipreports.py index ebb50503c..6aede9cf0 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -109,7 +109,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra prtfile: filename (PRT) or an ResdataFiles object fipname: The name of the regport regions, FIPNUM, FIPZON or whatever Max length of the string is 8, the first three characters must be FIP, - and the next 3 characters must be unique for a given input deck. + and the next 3 characters must be unique for a given .DATA file. 
""" if isinstance(prtfile, ResdataFiles): prtfile = prtfile.get_prtfilename() diff --git a/res2df/grid.py b/res2df/grid.py index 30d4f5727..ffd2d3052 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -626,7 +626,7 @@ def df2res( ) -> str: """ Write an include file with grid data keyword, like PERMX, PORO, - FIPNUM etc, for the GRID section of the input deck. + FIPNUM etc, for the GRID section of the .DATA file. Output (returned as string and optionally written to file) will then contain f.ex:: diff --git a/res2df/gruptree.py b/res2df/gruptree.py index 032e99aeb..aee343787 100644 --- a/res2df/gruptree.py +++ b/res2df/gruptree.py @@ -1,4 +1,4 @@ -"""Extract GRUPTREE information from an input deck""" +"""Extract GRUPTREE information from a .DATA file""" import argparse import collections diff --git a/res2df/inferdims.py b/res2df/inferdims.py index f6196b16e..fb9fa8b00 100644 --- a/res2df/inferdims.py +++ b/res2df/inferdims.py @@ -31,7 +31,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: stricter mode, to detect the correct table dimensionality Arguments: - deck: String containing an input deck or only a few resdata keywords + deck: String containing a .DATA file or only a few resdata keywords dimkeyword: Either TABDIMS or EQLDIMS dimitem: The element number in TABDIMS/EQLDIMS to modify Returns: diff --git a/res2df/pvt.py b/res2df/pvt.py index 1100548d0..de95fd9dc 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -1,7 +1,7 @@ """ -Extract the PVT data from an input deck as Pandas Dataframes +Extract the PVT data from a .DATA file as Pandas Dataframes -Data can be extracted from a full input deck or from individual files. +Data can be extracted from a full .DATA file or from individual files. """ import argparse @@ -239,7 +239,7 @@ def df( def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Set up sys.argv parsers for parsing input deck or + """Set up sys.argv parsers for parsing .DATA file or include files into dataframes Arguments: diff --git a/res2df/res2csv.py b/res2df/res2csv.py index 46e3c905f..9fb763de0 100644 --- a/res2df/res2csv.py +++ b/res2df/res2csv.py @@ -24,7 +24,7 @@ CATEGORY: str = "utility.eclipse" EXAMPLES: str = """ -Outputting the EQUIL data from an input deck. The ECLBASE variable from your +Outputting the EQUIL data from a .DATA file. The ECLBASE variable from your ERT config is supplied implicitly:: FORWARD_MODEL res2csv(=equil, =equil.csv) @@ -130,7 +130,7 @@ def get_parser() -> argparse.ArgumentParser: "pvt", help="Extract PVT data", description=( - "Extract data for the PVT keywords in an input deck " + "Extract data for the PVT keywords in a .DATA file " "and merge all data into a single dataframe. " "Supported keywords are PVTO, PVDO, PVTG, PVDG, PVTW, " "ROCK and DENSITY. Gas phase pressure and oil phase " diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index da799dbfe..a5b6d7f81 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -41,7 +41,7 @@ class ResdataFiles(object): """ - Class for holding an input deck with result files + Class for holding .DATA result files Exists only for convenience, so that loading of ResdataFile/Summary objects is easy for users, and with diff --git a/res2df/satfunc.py b/res2df/satfunc.py index d551652eb..e87d93e50 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -1,8 +1,8 @@ """ Extract saturation function data (SWOF, SGOF, SWFN, etc.) -from an input deck as Pandas DataFrame. +from a .DATA file as Pandas DataFrame. 
-Data can be extracted from a full input deck (`*.DATA`) +Data can be extracted from a full .DATA file (`*.DATA`) or from individual files. Note that when parsing from individual files, it is @@ -129,7 +129,7 @@ def df( def interpolate_defaults(dframe: pd.DataFrame) -> pd.DataFrame: """Interpolate NaN's linearly in saturation. - Saturation function tables in input decks can have certain values defaulted. + Saturation function tables in .DATA files can have certain values defaulted. When parsed by common.res2df, these values are returned as np.nan. The incoming dataframe must be associated to one keyword only, but can consist of multiple SATNUMs. diff --git a/res2df/summary.py b/res2df/summary.py index 69ba5797e..77d2f2562 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -321,7 +321,7 @@ def df( is always named "DATE". Arguments: - resdatafiles: ResdataFiles object representing the input deck. Alternatively + resdatafiles: ResdataFiles object representing a .DATA file. Alternatively an Summary object. time_index: string indicating a resampling frequency, 'yearly', 'monthly', 'daily', 'last' or 'raw', the latter will diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 55280a3cb..bea8bb8c9 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -1,6 +1,6 @@ """Extract the VFPPROD/VFPINJ data from an Eclipse (input) deck as Pandas Dataframes -Data can be extracted from a full input deck or from individual files. Supports +Data can be extracted from a full .DATA file or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table """ @@ -45,7 +45,7 @@ def basic_data( BASIC_DATA_KEYS in _vfpprod and _vfpinj. Args: - deck: input deck or string with deck + deck: .DATA file or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -250,7 +250,7 @@ def dfs( Data for the keyword VFPPROD or VFPINJ will be returned as separate item in list Args: - deck: input deck or string with deck + deck: .DATA file or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -293,7 +293,7 @@ def pyarrow_tables( Data for the keyword VFPPROD or VFPINJ will be returned as separate item in list Args: - deck: input deck or string with deck + deck: .DATA file or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -418,7 +418,7 @@ def df( All data for the keywords VFPPROD/VFPINJ will be returned. Args: - deck: input deck or string wit deck + deck: .DATA file or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: str with list of VFP table numbers to extract """ diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py index af9feaedc..dc0030595 100755 --- a/res2df/vfp/_vfpcommon.py +++ b/res2df/vfp/_vfpcommon.py @@ -2,7 +2,7 @@ deck to extract the VFPPROD/VFPINJ data from an Eclipse (input) deck as Pandas Dataframes -Data can be extracted from a full input deck or from individual files. Supports +Data can be extracted from a full .DATA file or from individual files.
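The VFP docstrings patched here state that data can come from a full .DATA file or a lone include file, and that the parsers accept either a deck object or a raw string. A hedged sketch of the ``vfp.df`` call as used in the ``vfp_main`` hunk earlier in this series (``MODEL.DATA`` is a hypothetical path):

.. code-block:: python

    from res2df import vfp
    from res2df.resdatafiles import ResdataFiles

    resdatafiles = ResdataFiles("MODEL.DATA")  # hypothetical path
    # keyword and vfpnumbers_str are the arguments shown in these diffs
    vfpprod_df = vfp.df(resdatafiles.get_deck(), keyword="VFPPROD")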
Supports output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table """ is diff --git a/res2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py index 4c4d83b39..b3994371e 100755 --- a/res2df/vfp/_vfpinj.py +++ b/res2df/vfp/_vfpinj.py @@ -3,7 +3,7 @@ basic_data (dictionary with basic data types), df (pandas DataFrame) or pyarrow_tables (pyarrow.Tables). -Data can be extracted from a full input deck or from individual files. +Data can be extracted from a full .DATA file or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow a pyarrow.Table. Also functionality to write pandas DataFrame and pyarrow.Table to file as Eclipse .Ecl format @@ -74,7 +74,7 @@ def basic_data( Empty string returned if vfp table number does not match any number in list Args: - keyword: input deck keyword + keyword: .DATA file keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds """ @@ -473,12 +473,12 @@ def df( keyword: "opm.libopmcommon_python.DeckKeyword", vfpnumbers_str: Optional[str] = None, ) -> Union[pd.DataFrame, None]: - """Return a dataframes of a single VFPINJ table from an input deck + """Return a dataframe of a single VFPINJ table from a .DATA file Data from the VFPINJ keyword are stacked into a Pandas Dataframe Args: - keyword: input deck keyword + keyword: .DATA file keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ @@ -510,11 +510,11 @@ def pyarrow( keyword: "opm.libopmcommon_python.DeckKeyword", vfpnumbers_str: Optional[str] = None, ) -> Union[pa.Table, None]: - """Return a pyarrow Table of a single VFPINJ table from an input deck + """Return a pyarrow Table of a single VFPINJ table from a .DATA file If no VFPINJ table found, return None Args: - keyword: input deck keyword + keyword: .DATA file keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ diff --git a/res2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py index bc55e65ae..e6e254ab6 100755 --- a/res2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -3,7 +3,7 @@ basic_data (dictionary with basic data types), df (pandas DataFrame) or pyarrow_tables (pyarrow.Tables). -Data can be extracted from a full input deck or from individual files. +Data can be extracted from a full .DATA file or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow as pyarrow.Table. Also functionality to write pandas DataFrame and pyarrow.Table to file as Eclipse .Ecl format. @@ -86,7 +86,7 @@ def basic_data( Empty string returned if vfp table number does not match any number in list Args: - keyword: input deck keyword + keyword: .DATA file keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds """ @@ -720,10 +720,10 @@ def df( vfpnumbers_str: Optional[str] = None, ) -> Union[pd.DataFrame, None]: """Return a dataframe or pyarrow Table of a single VFPPROD table - from an input deck. + from a .DATA file. Args: - keyword: input deck keyword + keyword: .DATA file keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -764,11 +764,11 @@ def pyarrow( keyword: "opm.libopmcommon_python.DeckKeyword", vfpnumbers_str: Optional[str] = None, ) -> Union[pa.Table, None]: - """Return a pyarrow Table of a single VFPPROD table from an input deck.
+ """Return a pyarrow Table of a single VFPPROD table from a .DATA file. If no VFPPROD curve found, return None Args: - keyword: input deck keyword + keyword: .DATA file keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ diff --git a/res2df/wcon.py b/res2df/wcon.py index 45ceb207f..563929c10 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -1,4 +1,4 @@ -"""Extract WCON* from an input deck""" +"""Extract WCON* from a .DATA file""" import argparse import datetime diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index da454c471..f4eca1984 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -91,7 +91,7 @@ def df( def _get_unit_system(resdatafiles: ResdataFiles) -> UnitSystem: - """Returns the unit system of an input deck. The options are \ + """Returns the unit system of a .DATA file. The options are \ METRIC, FIELD, LAB and PVT-M. If none of these are found, the function returns METRIC which is the diff --git a/tests/test_gruptree.py b/tests/test_gruptree.py index 9d84ee9b8..a090d2639 100644 --- a/tests/test_gruptree.py +++ b/tests/test_gruptree.py @@ -427,7 +427,7 @@ def test_emptytree_strdeck(): def test_emptytree_commandlinetool(tmp_path, mocker, caplog): - """Test the command line tool on an input deck which is empty""" + """Test the command line tool on a .DATA file which is empty""" os.chdir(tmp_path) Path("EMPTY.DATA").write_text("", encoding="utf8") mocker.patch("sys.argv", ["res2csv", "gruptree", "--prettyprint", "EMPTY.DATA"]) diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index 811a07fb8..2b76f8df9 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -27,7 +27,7 @@ def test_deck_to_satfunc_dframe(): - """Test that dataframes can be produced from a full input deck (the + """Test that dataframes can be produced from a full .DATA file (the example Reek case)""" resdatafiles = ResdataFiles(REEK) satdf = satfunc.df(resdatafiles.get_deck()) diff --git a/tests/test_welopen.py b/tests/test_welopen.py index e0e574e6a..314677ed3 100644 --- a/tests/test_welopen.py +++ b/tests/test_welopen.py @@ -1481,7 +1481,7 @@ def test_welopen_df(): ], ) def test_welopen_complump(test_input, expected): - """Test the welopen_complump functionality through input decks""" + """Test the welopen_complump functionality through .DATA files""" deck = ResdataFiles.str2deck(test_input) dfs = compdat.deck2dfs(deck) pd.testing.assert_frame_equal(dfs["COMPDAT"][expected.columns], expected) From ae41aadc0cbb822d733fc0c28eb124c7c3f0d7e6 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 10:20:34 +0100 Subject: [PATCH 34/68] Add reservoir simulator output file to glossary Fixup glossary --- docs/glossary.rst | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index 56be125cc..99122ed70 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -3,16 +3,21 @@ Glossary .. glossary:: - .DATA files + reservoir simulator + Reservoir simulators such as OPM Flow or Eclipse. + + .DATA file Inputs provided to reservoir simulators such as Eclipse or OPM Flow. Usually a .DATA file pointing to other include files. One .DATA file typically points to multiple include files. A data file is defined as a **full** data file if ?...TODO - include files + include file Files that provide inputs to reservoir simulators by using the INCLUDE statement in .DATA files. 
By convention, these files often have the extension .INC/.inc (generally) or .GRDECL/.grdecl (for files included into the grid section). - reservoir simulator - Reservoir simulators such as OPM Flow or Eclipse. + reservoir simulator output file + When a reservoir simulator runs, several files will be generated. + These will have extensions such as .EGRID, .FEGRID, .UNSMRY, .GRID, .INIT, etc. + See the opm flow manual Appendix D (https://opm-project.org/wp-content/uploads/2023/06/OPM_Flow_Reference_Manual_2023-04_Rev-0_Reduced.pdf) From 668312ef786696649f054bd607ce5c756b37bade Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 10:33:45 +0100 Subject: [PATCH 35/68] EclFile/EclGrid -> res sim output files --- res2df/nnc.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/res2df/nnc.py b/res2df/nnc.py index 8a9d1cbdf..4b5b2e033 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -31,8 +31,8 @@ def df( between the two cells) Args: - resdatafiles: object that can serve EclFile and EclGrid - on demand + resdatafiles: object that can serve reservoir simulator + output files on demand. coords: Set to True if you want the midpoint of the two connected cells to be computed and added to the columns X, Y and Z. From 2c9d028c68e7570b7ee83b38a663ec566f828b76 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 11:33:00 +0100 Subject: [PATCH 36/68] Remove ECLBASE mention in res2csv EXAMPLES str --- res2df/res2csv.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/res2df/res2csv.py b/res2df/res2csv.py index 9fb763de0..8845d5bd1 100644 --- a/res2df/res2csv.py +++ b/res2df/res2csv.py @@ -24,8 +24,8 @@ CATEGORY: str = "utility.eclipse" EXAMPLES: str = """ -Outputting the EQUIL data from a .DATA file. The ECLBASE variable from your -ERT config is supplied implicitly:: +Outputting the EQUIL data from a .DATA file. This is implicitly +supplied in ERT configs:: FORWARD_MODEL res2csv(=equil, =equil.csv) From bc1acf87353d6d0eb5c4899205cda169bc471335 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 11:43:11 +0100 Subject: [PATCH 37/68] result files to output files --- res2df/resdatafiles.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index a5b6d7f81..14773e26d 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -41,7 +41,7 @@ class ResdataFiles(object): """ - Class for holding .DATA result files + Class for holding .DATA output files Exists only for convenience, so that loading of ResdataFile/Summary objects is easy for users, and with From 971331a23457f67808f3efb750369f2165a03a9b Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 12:24:25 +0100 Subject: [PATCH 38/68] deck_or_eclpath->deckpath --- res2df/parameters.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/res2df/parameters.py b/res2df/parameters.py index f2ee86481..706734bf9 100644 --- a/res2df/parameters.py +++ b/res2df/parameters.py @@ -16,7 +16,7 @@ def find_parameter_files( - deck_or_eclpath: Union[ResdataFiles, str, Path], filebase: str = "parameters" + deckpath: Union[ResdataFiles, str, Path], filebase: str = "parameters" ) -> List[Path]: """Locate a default prioritized list of files to try to read as key-value @@ -24,7 +24,7 @@ def find_parameter_files( current dir, one directory up, and two directories up. 
Args: - deck_or_eclpath: Either an ResdataFiles object of + deckpath: Either a ResdataFiles object of a simulator output set (only the corresponding path will be used), or path to a file or directory, that will be used as a starting point for locating parameter files @@ -35,10 +35,10 @@ """ eclbasepath: Path fname: str - if isinstance(deck_or_eclpath, ResdataFiles): - eclbasepath = Path(deck_or_eclpath.get_path()) - elif isinstance(deck_or_eclpath, (str, Path)): - eclbasepath = Path(deck_or_eclpath).parent.absolute() + if isinstance(deckpath, ResdataFiles): + eclbasepath = Path(deckpath.get_path()) + elif isinstance(deckpath, (str, Path)): + eclbasepath = Path(deckpath).parent.absolute() else: raise TypeError files_to_lookfor: List[str] = [ From ad1398795ee1ae85c77622b188f694d06e623f90 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 14:08:56 +0100 Subject: [PATCH 39/68] rst link .DATA_FILE --- docs/csv2res.rst | 4 ++-- docs/introduction.rst | 12 ++++++------ docs/res2csv.rst | 2 +- docs/usage/compdat.rst | 4 ++-- docs/usage/equil.rst | 2 +- docs/usage/gruptree.rst | 6 +++--- docs/usage/pvt.rst | 4 ++-- docs/usage/satfunc.rst | 2 +- docs/usage/trans.rst | 2 +- docs/usage/wcon.rst | 2 +- 10 files changed, 20 insertions(+), 20 deletions(-) diff --git a/docs/csv2res.rst b/docs/csv2res.rst index 6f1a4121e..9a5283e8a 100644 --- a/docs/csv2res.rst +++ b/docs/csv2res.rst @@ -1,9 +1,9 @@ csv2res ======= -Some of the modules inside res2df is able to write .DATA files +Some of the modules inside res2df are able to write :term:`.DATA files<.DATA file>` from dataframes (in the format dumped by res2df). This makes it possible -to produce .DATA files in any application that can write CSV files, +to produce :term:`.DATA files<.DATA file>` in any application that can write CSV files, and use this tool to convert it into reservoir simulator files, or it can facilitate operations/manipulations of an existing deck using any tool that can work on CSV files, by first running res2csv on an input file, diff --git a/docs/introduction.rst b/docs/introduction.rst index 036915371..794fc67a0 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -127,14 +127,14 @@ More documentation on :doc:`usage/satfunc`. ^^^^^^^^^ Extracts the information in the `EQUIL` table, `RSVD` and `RVVD` in the -.DATA file. Can write back to include files. +:term:`.DATA file`. Can write back to include files. More documentation on :doc:`usage/equil`. ``compdat`` ^^^^^^^^^^^ -Extracts well connection data from the `COMPDAT` keyword in the .DATA file. +Extracts well connection data from the `COMPDAT` keyword in the :term:`.DATA file`. For multi-segment wells, `WELSEGS` and `COMPSEGS` is also parsed. The data is available as three different dataframes, which can be merged. @@ -147,7 +147,7 @@ More documentation on :doc:`usage/compdat`. ^^^^^^^^^^^^ Extracts the information from the `GRUPTREE` and `WELSPECS` keyword, at -all timesteps, from the .DATA file. The tree structure at each relevant +all timesteps, from the :term:`.DATA file`. The tree structure at each relevant date can be returned as a dataframe of the edges, as a nested dictionary or as a `treelib` tree. @@ -156,7 +156,7 @@ More documentation on :doc:`usage/gruptree`. ``pvt`` ^^^^^^^ -Extracts PVT data from a .DATA file, from the keywords `PVTO`, `PVDG`, +Extracts PVT data from a :term:`.DATA file`, from the keywords `PVTO`, `PVDG`, `DENSITY`, `ROCK` etc. Can write data back to include files.
More documentation on :doc:`usage/pvt`. @@ -173,7 +173,7 @@ More documentation on :doc:`usage/wcon`. ^^^^^^^^^^^^^^^^ This is an internal helper module in order to represent finished or -unfinished .DATA files and runs. The class ResdataFiles can cache binary +unfinished :term:`.DATA files <.DATA file>` and runs. The class ResdataFiles can cache binary files that are recently read, and is able to locate the various output files based on the basename or the `.DATA` filename. @@ -183,7 +183,7 @@ Metadata support parameters.txt ^^^^^^^^^^^^^^ -Metadata for each .DATA file are sometimes added in a text file named +Metadata for each :term:`.DATA file` are sometimes added in a text file named ``parameters.txt``, alongside the Eclipse DATA file or one or two directory levels above it. diff --git a/docs/res2csv.rst b/docs/res2csv.rst index 2d035fbf3..f21e38409 100644 --- a/docs/res2csv.rst +++ b/docs/res2csv.rst @@ -4,7 +4,7 @@ res2csv Most of the functionality in res2df is exposed to the command line through the script *res2csv*. The first argument to this script is always the submodule (subcommand) from which you want functionality. Mandatory argument is -always a .DATA file or sometimes individual include files, and +always a :term:`.DATA file` or sometimes individual include files, and there is usually an ``--output`` option to specify which file to dump the CSV to. If you want output to your terminal, use ``-`` as the output filename. diff --git a/docs/usage/compdat.rst b/docs/usage/compdat.rst index d20f4d1eb..12ef0ecbf 100644 --- a/docs/usage/compdat.rst +++ b/docs/usage/compdat.rst @@ -1,7 +1,7 @@ compdat ^^^^^^^ -This module extracts COMPDAT, WELSEGS and COMPSEGS from a .DATA file. +This module extracts COMPDAT, WELSEGS and COMPSEGS from a :term:`.DATA file`. Additionally, it will parse WELOPEN statements and emit new COMPDAT statements from the actions in WELOPEN. @@ -26,7 +26,7 @@ for each of COMPDAT, and the segmentation keywords. .. warning:: When WELOPEN is in use, the dataframe can differ from Eclipse behaviour in certain circumstances. The dataframe representation from ``compdat`` does not separate from a "shut" well and the open-ness of its - connections. So in a .DATA FILE it is possible to shut a well, and then + connections. So in a :term:`.DATA file` it is possible to shut a well, and then reopen it, and get back the original open/shut state of individual connections prior to well shut. The dataframe format will display `all` connections as open if a well is opened with defaulted indices. diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index 78dcd3480..038f5205d 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -2,7 +2,7 @@ equil ----- This is the res2df module for processing the ``SOLUTION`` section of -the .DATA file. +the :term:`.DATA file`. Supported keywords are ``EQUIL``, ``RSVD``, ``RVVD``, ``PBVD`` and ``PDVD``. Typical usage is diff --git a/docs/usage/gruptree.rst b/docs/usage/gruptree.rst index 5b6623b8c..ecc561e3d 100644 --- a/docs/usage/gruptree.rst +++ b/docs/usage/gruptree.rst @@ -1,13 +1,13 @@ gruptree -------- -Extracts data from the GRUPTREE, GRUPNET and WELSPECS keywords from a .DATA file +Extracts data from the GRUPTREE, GRUPNET and WELSPECS keywords from a :term:`.DATA file` and presents the production network either as pretty-printed ASCII or in a dataframe-representation. 
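Before the gruptree hunk continues below, a short sketch of the dataframe representation that paragraph describes (the path is hypothetical; the DATE, PARENT and KEYWORD columns appear in this series' test diffs, while the CHILD column is assumed from the same API):

.. code-block:: python

    from res2df import gruptree
    from res2df.resdatafiles import ResdataFiles

    resdatafiles = ResdataFiles("MODEL.DATA")  # hypothetical path
    grupdf = gruptree.df(resdatafiles.get_deck())
    # One row per production network edge per report date
    print(grupdf[["DATE", "CHILD", "PARENT", "KEYWORD"]].head())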
-The GRUPTREE section of your .DATA file defines the production network +The GRUPTREE section of your :term:`.DATA file` defines the production network from wells and up to the platform (and possibly also to a field having -many platforms). In the .DATA file it be as simple as this:: +many platforms). In the :term:`.DATA file` it can be as simple as this:: START 01 'JAN' 2000 / diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index fca105ed0..3334c8876 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -1,7 +1,7 @@ pvt --- -Extracts PVT related keyword data from the PROPS section in a .DATA file, +Extracts PVT related keyword data from the PROPS section in a :term:`.DATA file`, typically the keywords ``PVTO``, ``PVDG``, ``DENSITY`` and ``ROCK``. Data from all keywords will be merged into one common dataframe. @@ -76,7 +76,7 @@ When you are done with the table, you can generate new include files from your m pvt.df2res(dframe, filename="pvt.inc") -When injecting this produced ``pvt.inc`` into any new .DATA file, ensure you +When injecting the produced ``pvt.inc`` into any new :term:`.DATA file`, ensure you check which keywords have been written out, compared to what you gave in to `res2df.pvt` above. Any non-supported keywords will get lost in the import phase and need to be catered for outside res2df. diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index ffa93033b..1d0974c71 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -1,7 +1,7 @@ satfunc ------- -satfunc will extract saturation functions from .DATA files or from +satfunc will extract saturation functions from :term:`.DATA files <.DATA file>` or from include files, these are the keywords ``SWOF``, ``SGOF``, ``SGWFN``, ``SWFN``, ``SOF2``, ``SGFN``, ``SOF3`` and ``SLGOF``. diff --git a/docs/usage/trans.rst b/docs/usage/trans.rst index 70b36a3c8..83577e70c 100644 --- a/docs/usage/trans.rst +++ b/docs/usage/trans.rst @@ -6,7 +6,7 @@ from a simulation grid. Python API: :func:`res2df.trans.df` -Applied on a .DATA file, the *trans* module will give out a dataframe of neighbour +Applied on a :term:`.DATA file`, the *trans* module will produce a dataframe of neighbour connections .. code-block:: python diff --git a/docs/usage/wcon.rst b/docs/usage/wcon.rst index 9071646f0..910c62281 100644 --- a/docs/usage/wcon.rst +++ b/docs/usage/wcon.rst @@ -2,7 +2,7 @@ wcon ^^^^ This module extracts information from WCONHIST, WCONINJE, WCONINJH and -WCONPROD from a .DATA file. +WCONPROD from a :term:`.DATA file`. .. wcon.df(ResdataFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/wcon.csv', index=False) From a2167a2a7beee69091aa04f38d026021aee0116c Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 14:44:53 +0100 Subject: [PATCH 40/68] Describe deck in glossary --- docs/glossary.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index 99122ed70..59bb51e6f 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -17,7 +17,10 @@ Glossary in .DATA files. By convention, these files often have the extension .INC/.inc (generally) or .GRDECL/.grdecl (for files included into the grid section). - reservoir simulator output file + deck + Refers to a .DATA file and the include files it points to + + output file When a reservoir simulator runs, several files will be generated. These will have extensions such as .EGRID, .FEGRID, .UNSMRY, .GRID, .INIT, etc.
See the opm flow manual Appendix D (https://opm-project.org/wp-content/uploads/2023/06/OPM_Flow_Reference_Manual_2023-04_Rev-0_Reduced.pdf) From 262947ea04ddefea3491ee3e48fadf75554a1316 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 14:45:06 +0100 Subject: [PATCH 41/68] Add links to csv2res.rst --- docs/csv2res.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/csv2res.rst b/docs/csv2res.rst index 9a5283e8a..7ad27649a 100644 --- a/docs/csv2res.rst +++ b/docs/csv2res.rst @@ -5,8 +5,8 @@ Some of the modules inside res2df are able to write :term:`.DATA files<.DATA file from dataframes (in the format dumped by res2df). This makes it possible to produce :term:`.DATA files<.DATA file>` in any application that can write CSV files, and use this tool to convert it into reservoir simulator files, or it can -facilitate operations/manipulations of an existing deck using any tool -that can work on CSV files, by first running res2csv on an input file, +facilitate operations/manipulations of an existing :term:`deck` using any tool +that can work on CSV files, by first running res2csv on an :term:`include file`, transforming it, and writing back using csv2res. Mandatory argument for csv2res is @@ -15,7 +15,7 @@ an ``--output`` option to specify which include file to write to. If you want output to your terminal, use ``-`` as the output filename. Unless you also specify the ``--keywords`` argument with a list of wanted keywords, all supported keywords for a submodule which is also found in the CSV file provided, -will be dumped to output file. +will be dumped to an :term:`output file`. .. argparse:: :ref: res2df.csv2res.get_parser From dea0081d9a41d0f34363e74dda38b1e013cb1e10 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 14:52:49 +0100 Subject: [PATCH 42/68] Add links to index.rst --- docs/index.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/index.rst b/docs/index.rst index 6f3af9138..187fcaa1a 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -2,8 +2,8 @@ res2df ====== res2df is a Pandas DataFrame wrapper around resdata and opm.io, which -are used to access binary files outputted by the reservoir simulators or -their input files --- or any other tool outputting to the same +are used to access :term:`binary files outputted by the reservoir simulators <output file>` or +their :term:`input files <deck>` --- or any other tool outputting to the same data format. .. toctree:: From 49af8cfb8872743a663b2033ec3bbd666059e1f3 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 14:55:25 +0100 Subject: [PATCH 43/68] Add links to introduction.rst --- docs/introduction.rst | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/docs/introduction.rst b/docs/introduction.rst index 794fc67a0..f9bd54f70 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -4,8 +4,8 @@ Introduction *res2df* is a `Pandas DataFrame `_ wrapper around `resdata `_ and `opm.io `_, which are used to access -binary files outputted by reservoir simulators such as Eclipse, or its -input files --- or any other tool outputting to the same data format, +:term:`binary files outputted by reservoir simulators <output file>` such as Eclipse, or its +:term:`input files <deck>` --- or any other tool outputting to the same data format, f.ex. `flow `_.
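What that wrapper layer looks like in practice can be sketched with the summary submodule; the file name and vector pattern below are placeholders:

.. code-block:: python

    from res2df import summary, ResdataFiles

    resdatafiles = ResdataFiles("MYDATADECK.DATA")
    # Field-level summary vectors, resampled to yearly values:
    smry = summary.df(resdatafiles, column_keys="F*", time_index="yearly")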
Most of the features can be reached from the command line, through the @@ -45,7 +45,7 @@ Short description of each submodule ``summary`` ^^^^^^^^^^^^^^ -Extracts summary data from `.UNSMRY` files, at requested time sampling and +Extracts summary data from :term:`.UNSMRY <output file>` files, at requested time sampling and for requested vectors. More documentation on :doc:`usage/summary`. @@ -53,7 +53,8 @@ More documentation on :doc:`usage/summary`. ``grid`` ^^^^^^^^ -Extracts grid data from `.INIT` and `.EGRID` and `.UNRST` files. Restart file +Extracts grid data from :term:`.INIT <output file>`, :term:`.EGRID <output file>`, +and :term:`.UNRST <output file>` files. Restart files are optional to extract, and dates must be picked (or all). Data is merged into one DataFrame by the `i`, `j` and `k` indices. Bulk cell volume is included. Cells are indexed starting with 1. @@ -94,7 +95,8 @@ More documentation on :doc:`usage/trans`. ``rft`` ^^^^^^^ -Reads the `.RFT` files which are outputted by the simulator when +Reads the `.RFT` files which are outputted by the +:term:`simulator <reservoir simulator>` when the `WRFTPLT` keyword is used, with details along wellbores. For multisegment wells, the well topology is calculated and data @@ -107,7 +109,7 @@ More documentation on :doc:`usage/rft`. ^^^^^^^^^^^^^^ Parses the PRT file looking for region reports (starting -with " ... FIPNUM REPORT REGION". It will extract all the data +with " ... FIPNUM REPORT REGION"). It will extract all the data in the ASCII table in the PRT file and organize into a dataframe, currently-in-place, outflow to wells, outflows to regions, etc. It also supports custom FIPxxxxx names. @@ -118,8 +120,8 @@ More documentation on :doc:`usage/fipreports`. ``satfunc`` ^^^^^^^^^^^ -Extracts saturation functions (SWOF, SGOF, etc) from the deck and merges -into one DataFrame. Can write back to include files. +Extracts saturation functions (SWOF, SGOF, etc) from the :term:`deck` and merges +into one DataFrame. Can write back to :term:`include files <include file>`. More documentation on :doc:`usage/satfunc`. @@ -127,7 +129,7 @@ More documentation on :doc:`usage/satfunc`. ^^^^^^^^^ Extracts the information in the `EQUIL` table, `RSVD` and `RVVD` in the -:term:`.DATA file`. Can write back to include files. +:term:`.DATA file`. Can write back to :term:`include files <include file>`. More documentation on :doc:`usage/equil`. @@ -138,8 +140,8 @@ Extracts well connection data from the `COMPDAT` keyword in the :term:`.DATA fil For multi-segment wells, `WELSEGS` and `COMPSEGS` are also parsed. The data is available as three different dataframes, which can be merged. -It is also possible to parse individual "include" files, not only a -finished working deck. +It is also possible to parse individual :term:`"include files" <include file>`, not only a +finished working :term:`deck`. More documentation on :doc:`usage/compdat`. @@ -157,7 +159,7 @@ More documentation on :doc:`usage/gruptree`. ^^^^^^^ Extracts PVT data from a :term:`.DATA file`, from the keywords `PVTO`, `PVDG`, -`DENSITY`, `ROCK` etc. Can write data back to include files. +`DENSITY`, `ROCK` etc. Can write data back to :term:`include files <include file>`. More documentation on :doc:`usage/pvt`. @@ -174,8 +176,8 @@ More documentation on :doc:`usage/wcon`. This is an internal helper module in order to represent finished or unfinished :term:`.DATA files <.DATA file>` and runs. The class ResdataFiles can cache binary -files that are recently read, and is able to locate the various output -files based on the basename or the `.DATA` filename.
+files that are recently read, and is able to locate the various +:term:`output files <output file>` based on the basename or the `.DATA` filename. Metadata support ---------------- @@ -202,9 +204,9 @@ have to be merged with pandas.merge(). Zone names ^^^^^^^^^^ -If a text file with zone names are found alongside the Eclipse DATA file, some of the modules -will add that information to rows where appropriate. The zone or layer file should contains -lines like:: +If a text file with zone names is found alongside :term:`.DATA files <.DATA file>`, +some of the modules will add that information to rows where appropriate. +The zone or layer file should contain lines like:: 'ZoneA' 1-4 'ZoneB' 5-10 From 429ffdc950d3fa7d766cd560fe0b82e3208600ac Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 14:56:37 +0100 Subject: [PATCH 44/68] Add links to res2csv.rst --- docs/res2csv.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/docs/res2csv.rst b/docs/res2csv.rst index f21e38409..a5e26e16c 100644 --- a/docs/res2csv.rst +++ b/docs/res2csv.rst @@ -4,9 +4,10 @@ res2csv Most of the functionality in res2df is exposed to the command line through the script *res2csv*. The first argument to this script is always the submodule (subcommand) from which you want functionality. Mandatory argument is -always a :term:`.DATA file` or sometimes individual include files, and -there is usually an ``--output`` option to specify which file to dump -the CSV to. If you want output to your terminal, use ``-`` as the output filename. +always a :term:`.DATA file` or sometimes individual +:term:`include files <include file>`, and there is usually an ``--output`` +option to specify which file to dump the CSV to. +If you want output to your terminal, use ``-`` as the output filename. .. argparse:: :ref: res2df.res2csv.get_parser From d457a55bf6bec0d32f9cc32c4873c40c35a82375 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Thu, 16 Nov 2023 15:25:16 +0100 Subject: [PATCH 45/68] Add links to usage/*.rst --- docs/glossary.rst | 2 +- docs/usage/equil.rst | 3 ++- docs/usage/grid.rst | 7 ++++--- docs/usage/gruptree.rst | 6 +++--- docs/usage/nnc.rst | 2 +- docs/usage/pvt.rst | 7 ++++--- docs/usage/rft.rst | 2 +- docs/usage/satfunc.rst | 14 +++++++------- docs/usage/summary.rst | 2 +- 9 files changed, 24 insertions(+), 21 deletions(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index 59bb51e6f..3d4b814a0 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -18,7 +18,7 @@ Glossary (generally) or .GRDECL/.grdecl (for files included into the grid section). deck - Refers to a .DATA file and the include files it points to + Refers to a .DATA file and the include files it points to. output file When a reservoir simulator runs, several files will be generated. diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index 038f5205d..fea85f6a0 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -54,7 +54,8 @@ one meter for compatibility, which you could do by the statements: Re-exporting tables to include-files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When you are done with the table, you can generate new include files from your modified +When you are done with the table, you can generate new +:term:`include files <include file>` from your modified data by issuing ..
code-block:: python diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst index 8797d2cf7..94fb7f77f 100644 --- a/docs/usage/grid.rst +++ b/docs/usage/grid.rst @@ -2,8 +2,9 @@ grid ---- The grid module will extract static and dynamic cell properties from -a grid (from the binary output files from reservoir simulators). Each row -in a returned dataframe represents one cell. +a grid +(from the :term:`binary output files from reservoir simulators <output file>`). +Each row in a returned dataframe represents one cell. Typical usage @@ -148,7 +149,7 @@ Generating include files from grid data If you have loaded grid data into a Pandas frame, some operations are easily performed, scaling porosity, permeability etc. Or remapping some region parameters. Using the :func:`res2df.grid.df2res()` function these manipulated vectors can be written back as -include files. +:term:`include files <include file>`. Say you want to change the FIPNUM, and that FIPNUM 6 should be removed, and set it to FIPNUM 5. This can be accomplished using diff --git a/docs/usage/gruptree.rst b/docs/usage/gruptree.rst index ecc561e3d..5be75f44f 100644 --- a/docs/usage/gruptree.rst +++ b/docs/usage/gruptree.rst @@ -49,7 +49,7 @@ available (here also wells from WELSPECS is included): └── INJEAST └── INJ1 -In your deck, the table will be repeated for every new occurence of the +In your :term:`deck`, the table will be repeated for every new occurrence of the GRUPTREE keyword in the Schedule section. GRUPNET and WELSPECS @@ -57,10 +57,10 @@ By default, the module will also pick up information from GRUPNET (typical terminal pressure values for the network nodes) and WELSPECS (well -specifications), so for a full deck, your dataframe will contain more +specifications), so for a full :term:`deck`, your dataframe will contain more information than in the example above. -If our deck also contains:: +If our :term:`deck` also contains:: GRUPNET 'FIELD' 90 / diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index 5b587ff94..641e7d263 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -45,7 +45,7 @@ Data for the ``EDITNNC`` keyword can be dumped, in order to scale the NNC connec using Pandas operations. Select the connections you want to scale by slicing the nnc dataframe (either from the nnc module, or from the trans module), and fill transmissibility multipliers in a new column ``TRANM``, then this can be exported -to an include file: +to an :term:`include file <include file>`: .. code-block:: python diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index 3334c8876..8774a081b 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -14,7 +14,7 @@ Example usage: resdatafiles = ResdataFiles("MYDATADECK.DATA") dframe = pvt.df(resdatafiles) -Alternatively, we may also read directly from an include file +Alternatively, we may also read directly from an :term:`include file` if we read the contents of the file and supply it as a string: .. code-block:: python @@ -30,7 +30,7 @@ if we read the contents of the file and supply it as a string: :header-rows: 1 If your PVT data resides in multiple include files, but you can't import -the entire deck, you have to merge the dataframes in Python like this: +the entire :term:`deck`, you have to merge the dataframes in Python like this: .. code-block:: python @@ -70,7 +70,8 @@ Density values are easier to scale up or down to whatever is needed.
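The merging of PVT data from several include files referred to in this hunk can be sketched roughly like this (the file names are hypothetical):

.. code-block:: python

    import pandas as pd

    from res2df import pvt

    # Parse each include file separately by passing its contents as a
    # string, then stack the resulting dataframes:
    pvto = pvt.df(open("pvto.inc").read())
    density = pvt.df(open("density.inc").read())
    dframe = pd.concat([pvto, density], ignore_index=True)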
Re-exporting tables to include files ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -When you are done with the table, you can generate new include files from your modified data by issuing +When you are done with the table, you can generate new +:term:`include files <include file>` from your modified data by issuing .. code-block:: python diff --git a/docs/usage/rft.rst b/docs/usage/rft.rst index eca5592e8..81a2642e3 100644 --- a/docs/usage/rft.rst +++ b/docs/usage/rft.rst @@ -20,7 +20,7 @@ Eclipse usage ^^^^^^^^^^^^^ In order to get RFT files emitted from Eclipse, you need the ``WRFTPLT`` keyword -in your DATA-file, example: +in your :term:`.DATA file`, for example: .. code-block:: console diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index 1d0974c71..8c6bba0a8 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -2,8 +2,8 @@ satfunc ------- satfunc will extract saturation functions from :term:`.DATA files <.DATA file>` or from -include files, these are the keywords ``SWOF``, ``SGOF``, ``SGWFN``, ``SWFN``, -``SOF2``, ``SGFN``, ``SOF3`` and ``SLGOF``. +:term:`include files <include file>`; these are the keywords ``SWOF``, ``SGOF``, +``SGWFN``, ``SWFN``, ``SOF2``, ``SGFN``, ``SOF3`` and ``SLGOF``. The data obtained from one invocation of the satfunc module will be put in one dataframe, where data from different keywords are separated by the ``KEYWORD`` @@ -56,7 +56,7 @@ the command dframe.loc[rows_to_touch, "KRW"] *= 0.5 For a dataframe or a CSV file in the format provided by this module, an -include file can be generated either with the Python API +:term:`include file` can be generated either with the Python API :func:`res2df.satfunc.df2res` function or the command .. code-block:: console @@ -67,12 +67,12 @@ which should give a file ``relperm.inc`` that can be parsed by reservoir simulat above will only pick the keywords ``SWOF`` and ``SGOF`` (in the case there are data for more keywords in the dataframe). -There are no automated checks for validity of the dumped include files. +There are no automated checks for validity of the dumped :term:`include files <include file>`. Extracting properties pr. SATNUM ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -If you have an include file prepared (from any source), you might need to +If you have an :term:`include file` prepared (from any source), you might need to determine certain properties like endpoint. If you need to determine for example "SOWCR" - the largest oil saturation for which oil is immobile, because you need to avoid SOWCR + SWCR overshooting 1, you can write a code @@ -94,7 +94,7 @@ because you need to avoid SOWCR + SWCR overshooting 1, you can write a code # Apply that function individually on each SATNUM: sat_df.groupby("SATNUM").apply(sowcr) -for an example include file, this could result in +For an example :term:`include file`, this could result in .. code-block:: console @@ -113,7 +113,7 @@ to do directly on the dataframes. Before doing manipulations of dataframes in through the `pyscal `_ library. Pyscal can create curves from parametrizations, and interpolate between curves. -Pyscal can initialize its relperm objects from include files +Pyscal can initialize its relperm objects from :term:`include files <include file>` through the parsing capabilities of res2df.satfunc.
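The KRW manipulation discussed in the satfunc hunks above can be sketched end to end as follows; ``relperm.inc`` is a placeholder, and the SATNUM/KEYWORD/KRW column names follow the documented dataframe layout:

.. code-block:: python

    from res2df import satfunc

    # Parse an include file by passing its contents as a string:
    dframe = satfunc.df(open("relperm.inc").read())

    # Halve the water relative permeability for SATNUM 1:
    rows = (dframe["SATNUM"] == 1) & (dframe["KEYWORD"] == "SWOF")
    dframe.loc[rows, "KRW"] *= 0.5

    # Dump back to a new include file (validity is not checked automatically):
    satfunc.df2res(dframe, filename="relperm_modified.inc")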
The function ``pyscal.pyscallist.df()`` is analogous to ``res2df.satfunc.df()`` in diff --git a/docs/usage/summary.rst b/docs/usage/summary.rst index 47abf0c08..e9020d535 100644 --- a/docs/usage/summary.rst +++ b/docs/usage/summary.rst @@ -1,7 +1,7 @@ summary ^^^^^^^ -This module extracts summary information from UNSMRY-files into +This module extracts summary information from :term:`UNSMRY-files <output file>` into Pandas Dataframes. .. From 9728ab2679fd2171b01bd0da7203b2f611c7a004 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Fri, 17 Nov 2023 09:01:24 +0100 Subject: [PATCH 46/68] Try term in py code .DATA file --- res2df/common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/res2df/common.py b/res2df/common.py index 2925334a9..348eb1fd5 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -630,7 +630,7 @@ def generic_deck_table( renamer: Optional[Dict[str, str]] = None, drop_trailing_columns: bool = True, ) -> str: - """Construct string contents of a .DATA file table. + """Construct string contents of a :term:`.DATA file` table. This function will *not* add a final slash after all rows, as this is keyword dependent. Some keywords require it, some keywords From 0918a8c10cb1ad26926e23a1dfaf493bae43dc76 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Fri, 17 Nov 2023 09:08:57 +0100 Subject: [PATCH 47/68] .DATA file -> :term:`.DATA file` --- res2df/compdat.py | 2 +- res2df/equil.py | 4 ++-- res2df/faults.py | 2 +- res2df/fipreports.py | 2 +- res2df/grid.py | 2 +- res2df/gruptree.py | 2 +- res2df/inferdims.py | 2 +- res2df/pvt.py | 2 +- res2df/satfunc.py | 3 ++- res2df/summary.py | 4 ++-- res2df/vfp/_vfp.py | 8 ++++---- res2df/vfp/_vfpinj.py | 10 +++++----- res2df/vfp/_vfpprod.py | 10 +++++----- res2df/wellcompletiondata.py | 2 +- 14 files changed, 28 insertions(+), 27 deletions(-) diff --git a/res2df/compdat.py b/res2df/compdat.py index f84b3f86a..4d0a1f145 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -82,7 +82,7 @@ def deck2dfs( Args: deck: A deck representing the schedule - Does not have to be a full .DATA file, an include file is sufficient + Does not have to be a full :term:`.DATA file`, an include file is sufficient start_date: The default date to use for events where the DATE or START keyword is not found in advance. Default: None
Args: - deck: .DATA file + deck: :term:`.DATA file` """ if isinstance(deck, ResdataFiles): deck = deck.get_deck() diff --git a/res2df/fipreports.py b/res2df/fipreports.py index 6aede9cf0..4f4c896e8 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -109,7 +109,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra prtfile: filename (PRT) or a ResdataFiles object fipname: The name of the report regions, FIPNUM, FIPZON or whatever Max length of the string is 8, the first three characters must be FIP, - and the next 3 characters must be unique for a given .DATA file. + and the next 3 characters must be unique for a given :term:`.DATA file`. diff --git a/res2df/grid.py b/res2df/grid.py index ffd2d3052..3d33c3b96 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -626,7 +626,7 @@ def df2res( ) -> str: """ Write an include file with grid data keyword, like PERMX, PORO, - FIPNUM etc, for the GRID section of the .DATA file. + FIPNUM etc, for the GRID section of the :term:`.DATA file`. Output (returned as string and optionally written to file) will then contain f.ex:: diff --git a/res2df/gruptree.py b/res2df/gruptree.py index aee343787..697d0fa57 100644 --- a/res2df/gruptree.py +++ b/res2df/gruptree.py @@ -1,4 +1,4 @@ -"""Extract GRUPTREE information from a .DATA file""" +"""Extract GRUPTREE information from a :term:`.DATA file`""" import argparse import collections diff --git a/res2df/inferdims.py b/res2df/inferdims.py index fb9fa8b00..39fffbd14 100644 --- a/res2df/inferdims.py +++ b/res2df/inferdims.py @@ -31,7 +31,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: stricter mode, to detect the correct table dimensionality Arguments: - deck: String containing a .DATA file or only a few resdata keywords + deck: String containing a :term:`.DATA file` or only a few resdata keywords dimkeyword: Either TABDIMS or EQLDIMS dimitem: The element number in TABDIMS/EQLDIMS to modify Returns: diff --git a/res2df/pvt.py b/res2df/pvt.py index de95fd9dc..44c85824e 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -239,7 +239,7 @@ def df( def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Set up sys.argv parsers for parsing .DATA file or + """Set up sys.argv parsers for parsing :term:`.DATA file` or include files into dataframes Arguments: diff --git a/res2df/satfunc.py b/res2df/satfunc.py index e87d93e50..af607520d 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -129,7 +129,8 @@ def df( def interpolate_defaults(dframe: pd.DataFrame) -> pd.DataFrame: """Interpolate NaN's linearly in saturation. - Saturation function tables in .DATA files can have certain values defaulted. + Saturation function tables in :term:`.DATA files <.DATA file>` + can have certain values defaulted. When parsed by common.res2df, these values are returned as np.nan. The incoming dataframe must be associated to one keyword only, but can consist of multiple SATNUMs. diff --git a/res2df/summary.py b/res2df/summary.py index 77d2f2562..605b1dd0c 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -321,8 +321,8 @@ def df( is always named "DATE". Arguments: - resdatafiles: ResdataFiles object representing a .DATA file. Alternatively - an Summary object. + resdatafiles: ResdataFiles object representing a + :term:`.DATA file`. Alternatively a Summary object.
time_index: string indicating a resampling frequency, 'yearly', 'monthly', 'daily', 'last' or 'raw', the latter will return the simulated report steps (also default). diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index bea8bb8c9..91fe4d081 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -45,7 +45,7 @@ def basic_data( BASIC_DATA_KEYS in _vfpprod and _vfpinj. Args: - deck: .DATA file or string with deck + deck: :term:`.DATA file` or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -250,7 +250,7 @@ def dfs( Data for the keyword VFPPROD or VFPINJ will be returned as separate item in list Args: - deck: .DATA file or string with deck + deck: :term:`.DATA file` or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -293,7 +293,7 @@ def pyarrow_tables( Data for the keyword VFPPROD or VFPINJ will be returned as separate item in list Args: - deck: .DATA file or string with deck + deck: :term:`.DATA file` or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -418,7 +418,7 @@ def df( All data for the keywords VFPPROD/VFPINJ will be returned. Args: - deck: .DATA file or string wit deck + deck: :term:`.DATA file` or string with deck keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: str with list of VFP table numbers to extract """ diff --git a/res2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py index b3994371e..aa02bf4a5 100755 --- a/res2df/vfp/_vfpinj.py +++ b/res2df/vfp/_vfpinj.py @@ -74,7 +74,7 @@ def basic_data( Empty string returned if vfp table number does not match any number in list Args: - keyword: .DATA file keyword + keyword: :term:`.DATA file` keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds """ @@ -473,12 +473,12 @@ def df( keyword: "opm.libopmcommon_python.DeckKeyword", vfpnumbers_str: Optional[str] = None, ) -> Union[pd.DataFrame, None]: - """Return a dataframes of a single VFPINJ table from a .DATA file + """Return a dataframe of a single VFPINJ table from a :term:`.DATA file` Data from the VFPINJ keyword are stacked into a Pandas Dataframe Args: - keyword: .DATA file keyword + keyword: :term:`.DATA file` keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -510,11 +510,11 @@ def pyarrow( keyword: "opm.libopmcommon_python.DeckKeyword", vfpnumbers_str: Optional[str] = None, ) -> Union[pa.Table, None]: - """Return a pyarrow Table of a single VFPINJ table from a .DATA file + """Return a pyarrow Table of a single VFPINJ table from a :term:`.DATA file` If no VFPINJ table found, return None Args: - keyword: .DATA file keyword + keyword: :term:`.DATA file` keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11].
""" diff --git a/res2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py index e6e254ab6..62909e9c0 100755 --- a/res2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -86,7 +86,7 @@ def basic_data( Empty string returned if vfp table number does not match any number in list Args: - keyword: .DATA file keyword + keyword: :term:`.DATA file` keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds """ @@ -720,10 +720,10 @@ def df( vfpnumbers_str: Optional[str] = None, ) -> Union[pd.DataFrame, None]: """Return a dataframe or pyarrow Table of a single VFPPROD table - from a .DATA file. + from a :term:`.DATA file`. Args: - keyword: .DATA file keyword + keyword: :term:`.DATA file` keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ @@ -764,11 +764,11 @@ def pyarrow( keyword: "opm.libopmcommon_python.DeckKeyword", vfpnumbers_str: Optional[str] = None, ) -> Union[pa.Table, None]: - """Return a pyarrow Table of a single VFPPROD table from a .DATA file. + """Return a pyarrow Table of a single VFPPROD table from a :term:`.DATA file`. If no VFPPROD curve found, return None Args: - keyword: .DATA file keyword + keyword: :term:`.DATA file` keyword vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index f4eca1984..983d27c78 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -91,7 +91,7 @@ def df( def _get_unit_system(resdatafiles: ResdataFiles) -> UnitSystem: - """Returns the unit system of a .DATA file. The options are \ + """Returns the unit system of a :term:`.DATA file`. The options are \ METRIC, FIELD, LAB and PVT-M. If none of these are found, the function returns METRIC which is the From 23bd2f09e6c7e670a495057afd8d16ca02a187b8 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Fri, 17 Nov 2023 09:26:17 +0100 Subject: [PATCH 48/68] output files -> :term`output files` --- res2df/common.py | 2 +- res2df/grid.py | 6 +++--- res2df/nnc.py | 4 ++-- res2df/resdatafiles.py | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index 348eb1fd5..b7d40a883 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -476,7 +476,7 @@ def fill_reverse_parser( Arguments: parser: parser to fill with arguments modulename: Will be included in the help text - defaultoutputfile: Default output filename + defaultoutputfile: Default :term:`output file`name """ parser.add_argument( "csvfile", diff --git a/res2df/grid.py b/res2df/grid.py index 3d33c3b96..af0cc7769 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -282,13 +282,13 @@ def rst2df( def gridgeometry2df( resdatafiles: ResdataFiles, zonemap: Optional[Dict[int, str]] = None ) -> pd.DataFrame: - """Produce a Pandas Dataframe with Eclipse grid geometry + """Produce a Pandas Dataframe with grid geometry Order is significant, and is determined by the order from resdata, and used when merging with other dataframes with cell-data. Args: - resdatafiles: object holding the Eclipse output files. + resdatafiles: object holding the :term:`output files `. zonemap: A zonemap dictionary mapping every K index to a string, which will be put in a column ZONE. If none is provided, a zonemap from a default file will be looked for. Provide an empty @@ -369,7 +369,7 @@ def merge_initvectors( for API users to only use the df() function. 
Args: - resdatafiles: Object representing the output files + resdatafiles: Object representing the :term:`output files <output file>` dframe: Table data to merge with initvectors: Names of INIT vectors to merge in. ijknames: Three strings that determine the I, J and K columns to use diff --git a/res2df/nnc.py b/res2df/nnc.py index 4b5b2e033..f7d6d1d74 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -31,8 +31,8 @@ def df( between the two cells) Args: - resdatafiles: object that can serve reservoir simulator - output files on demand. + resdatafiles: object that can serve + :term:`reservoir simulator output files <output file>` on demand. coords: Set to True if you want the midpoint of the two connected cells to be computed and added to the columns X, Y and Z. diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index 14773e26d..ea77d6491 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -41,7 +41,7 @@ class ResdataFiles(object): """ - Class for holding .DATA output files + Class for holding reservoir simulator :term:`output files <output file>` Exists only for convenience, so that loading of ResdataFile/Summary objects is easy for users, and with From 838bcc29087394329a26188f25f43260f0c00a98 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Fri, 17 Nov 2023 09:30:01 +0100 Subject: [PATCH 49/68] include file -> :term:`include file` --- res2df/common.py | 8 ++++---- res2df/compdat.py | 3 ++- res2df/equil.py | 2 +- res2df/grid.py | 2 +- res2df/pvt.py | 6 +++--- res2df/satfunc.py | 2 +- res2df/vfp/_vfp.py | 2 +- 7 files changed, 13 insertions(+), 12 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index b7d40a883..93e0fdaad 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -154,7 +154,7 @@ def write_dframe_stdout_file( def write_inc_stdout_file(string: str, outputfilename: str) -> None: - """Write a string (typically an include file string) to stdout + """Write a string (typically an :term:`include file` string) to stdout or to a named file""" if outputfilename == MAGIC_STDOUT: # Ignore pipe errors when writing to stdout: @@ -471,12 +471,12 @@ def fill_reverse_parser( parser: argparse.ArgumentParser, modulename: str, defaultoutputfile: str ): """A standardized submodule parser for the command line utility - to produce include files from a CSV file. + to produce :term:`include files <include file>` from a CSV file. Arguments: parser: parser to fill with arguments modulename: Will be included in the help text - defaultoutputfile: Default :term:`output file`name + defaultoutputfile: Default :term:`output file` name """ parser.add_argument( "csvfile",
Default: None diff --git a/res2df/equil.py b/res2df/equil.py index 91c7aa09d..2e5323cc9 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -305,7 +305,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Fill a parser for the operation dataframe -> resdata include file""" + """Fill a parser for the operation dataframe -> resdata :term:`include file`""" return common.fill_reverse_parser(parser, "EQUIL, RSVD++", "solution.inc") diff --git a/res2df/grid.py b/res2df/grid.py index af0cc7769..539bc32db 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -625,7 +625,7 @@ def df2res( nocomments: bool = False, ) -> str: """ - Write an include file with grid data keyword, like PERMX, PORO, + Write an :term:`include file` with grid data keyword, like PERMX, PORO, FIPNUM etc, for the GRID section of the :term:`.DATA file`. Output (returned as string and optionally written to file) will then diff --git a/res2df/pvt.py b/res2df/pvt.py index 44c85824e..195a67107 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -199,7 +199,7 @@ def df( ) -> pd.DataFrame: """Extract all (most) PVT data from a deck. - If you want to call this function on include files, + If you want to call this function on :term:`include files `, read them in to strings as in this example: > pvt_df = pvt.df(open("pvt.inc").read()) @@ -240,7 +240,7 @@ def df( def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """Set up sys.argv parsers for parsing :term:`.DATA file` or - include files into dataframes + :term:`include files ` into dataframes Arguments: parser (ArgumentParser or subparser): parser to fill with arguments @@ -269,7 +269,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Set up sys.argv parsers for writing include files from + """Set up sys.argv parsers for writing :term:`include files ` from dataframes (as CSV files) Arguments: diff --git a/res2df/satfunc.py b/res2df/satfunc.py index af607520d..3e24db1be 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -185,7 +185,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Fill a parser for the operation dataframe -> resdata include file""" + """Fill a parser for the operation dataframe -> resdata :term:`include file`""" return common.fill_reverse_parser(parser, "SWOF, SGOF++", "relperm.inc") diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 91fe4d081..3ccbabc65 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -476,7 +476,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def fill_reverse_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: - """Fill a parser for the operation dataframe -> resdata include file""" + """Fill a parser for the operation dataframe -> resdata :term:`include file`""" return common.fill_reverse_parser(parser, "VFPPROD, VFPINJ", "vfp.inc") From 1920f361b4771b6df7aded75aa9750d3b4de03f8 Mon Sep 17 00:00:00 2001 From: "Yngve S. 
Kristiansen" Date: Fri, 17 Nov 2023 09:54:39 +0100 Subject: [PATCH 50/68] Full deck/.DATA file -> Complete deck --- docs/glossary.rst | 6 +++++- res2df/compdat.py | 2 -- res2df/pvt.py | 2 +- res2df/satfunc.py | 2 +- res2df/vfp/_vfp.py | 2 +- res2df/vfp/_vfpcommon.py | 2 +- res2df/vfp/_vfpinj.py | 2 +- res2df/vfp/_vfpprod.py | 2 +- tests/test_satfunc.py | 2 +- 9 files changed, 12 insertions(+), 10 deletions(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index 3d4b814a0..67a0b9777 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -18,7 +18,11 @@ Glossary (generally) or .GRDECL/.grdecl (for files included into the grid section). deck - Refers to a .DATA file and the include files it points to. + Refers to inputs passed to reservoir simulators. It may be a .DATA file and the + include files it points to, or it may be a single or several include files. + If a deck contains all the information (i.e., keywords) the simulator needs + to run the requested simulation, it is defined as complete. If it is missing + needed information, it is incomplete. output file When a reservoir simulator runs, several files will be generated. diff --git a/res2df/compdat.py b/res2df/compdat.py index f04dadde4..ecd40fb64 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -82,8 +82,6 @@ def deck2dfs( Args: deck: A deck representing the schedule - Does not have to be a full :term:`.DATA file`, - a single :term:`include file` is sufficient start_date: The default date to use for events where the DATE or START keyword is not found in advance. Default: None diff --git a/res2df/pvt.py b/res2df/pvt.py index 195a67107..1f89578ed 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -1,7 +1,7 @@ """ Extract the PVT data from a .DATA file as Pandas Dataframes -Data can be extracted from a full .DATA file or from individual files. +Data can be extracted from a complete deck or from individual files. """ import argparse diff --git a/res2df/satfunc.py b/res2df/satfunc.py index 3e24db1be..647b9664d 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -2,7 +2,7 @@ Extract saturation function data (SWOF, SGOF, SWFN, etc.) from a .DATA file as Pandas DataFrame. -Data can be extracted from a full .DATA file (`*.DATA`) +Data can be extracted from a complete deck (`*.DATA`) or from individual files. Note that when parsing from individual files, it is diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 3ccbabc65..eac954f63 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -1,6 +1,6 @@ """Extract the VFPPROD/VFPINJ data from an Eclipse (input) deck as Pandas Dataframes -Data can be extracted from a full .DATA file or from individual files. Supports +Data can be extracted from a complete deck or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table """ diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py index dc0030595..54384edde 100755 --- a/res2df/vfp/_vfpcommon.py +++ b/res2df/vfp/_vfpcommon.py @@ -2,7 +2,7 @@ deck to extract the VFPPROD/VFPINJ data from an Eclipse (input) deck as Pandas Dataframes -Data can be extracted from a full .DATA file or from individual files. Supports +Data can be extracted from a complete deck or from individual files. 
Supports output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table """ diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py index dc0030595..54384edde 100755 --- a/res2df/vfp/_vfpcommon.py +++ b/res2df/vfp/_vfpcommon.py @@ -2,7 +2,7 @@ deck to extract the VFPPROD/VFPINJ data from an Eclipse (input) deck as Pandas Dataframes -Data can be extracted from a full .DATA file or from individual files. Supports +Data can be extracted from a complete deck or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow and pyarrow.table """ diff --git a/res2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py index aa02bf4a5..ac7643bb3 100755 --- a/res2df/vfp/_vfpinj.py +++ b/res2df/vfp/_vfpinj.py @@ -3,7 +3,7 @@ basic_data (dictionary with basic data types), df (pandas DataFrame) or pyarrow_tables (pyarrow.Tables). -Data can be extracted from a full .DATA file or from individual files. +Data can be extracted from a complete deck or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow as a pyarrow.Table. Also functionality to write pandas DataFrame and pyarrow.Table to file as Eclipse .Ecl format diff --git a/res2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py index 62909e9c0..2e7c2f97f 100755 --- a/res2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -3,7 +3,7 @@ basic_data (dictionary with basic data types), df (pandas DataFrame) or pyarrow_tables (pyarrow.Tables). -Data can be extracted from a full .DATA file or from individual files. +Data can be extracted from a complete deck or from individual files. Supports output both in csv format as a pandas DataFrame or in pyarrow as pyarrow.Table. Also functionality to write pandas DataFrame and pyarrow.Table to file as Eclipse .Ecl format. diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index 2b76f8df9..a58520de1 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -27,7 +27,7 @@ def test_deck_to_satfunc_dframe(): - """Test that dataframes can be produced from a full .DATA file (the + """Test that dataframes can be produced from a complete deck (the example Reek case)""" resdatafiles = ResdataFiles(REEK) satdf = satfunc.df(resdatafiles.get_deck()) From 07feab4edab770f4324066cee29acd3fbf5817aa Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Fri, 17 Nov 2023 10:18:29 +0100 Subject: [PATCH 51/68] Link to deck --- res2df/common.py | 2 +- res2df/compdat.py | 27 ++++++++++++----------- res2df/equil.py | 44 ++++++++++++++++++------------------- res2df/faults.py | 4 ++-- res2df/gruptree.py | 6 +++--- res2df/inferdims.py | 26 +++++++++++----------- res2df/pvt.py | 49 +++++++++++++++++++++--------------------- res2df/resdatafiles.py | 4 ++-- res2df/satfunc.py | 10 ++++----- res2df/vfp/_vfp.py | 12 +++++------ res2df/vfp/_vfpprod.py | 2 +- res2df/wcon.py | 4 ++-- 12 files changed, 96 insertions(+), 94 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index 93e0fdaad..cbede098a 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -200,7 +200,7 @@ def keyworddata_to_df( will be the name of your enumeration, e.g. PVTNUM, EQLNUM or SATNUM. Arguments: - deck: Parsed deck + deck: Parsed :term:`deck` keyword: Name of the keyword for which to extract data. renamer: Mapping of names present in OPM json files for the keyword to desired column names in returned dataframe diff --git a/res2df/compdat.py b/res2df/compdat.py index ecd40fb64..5e6450050 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -75,13 +75,13 @@ def deck2dfs( start_date: Optional[Union[str, datetime.date]] = None, unroll: bool = True, ) -> Dict[str, pd.DataFrame]: - """Loop through the deck and pick up information found + """Loop through the :term:`deck` and pick up information found - The loop over the deck is a state machine, as it has to pick up dates and + The loop over the :term:`deck` is a state machine, as it has to pick up dates and potential information from the WELSPECS keyword.
Args: - deck: A deck representing the schedule + deck: A :term:`deck` representing the schedule start_date: The default date to use for events where the DATE or START keyword is not found in advance. Default: None @@ -829,16 +829,17 @@ def applywelopen( 'OP2' SHUT 66 44 10 / / - This deck would define two wells where OP1 and OP2 have two connected grid cells - each. The first welopen statment acts on the whole well, closing both the well and - the connections. If this statement used STOP instead of SHUT, the connections would - be left open. The second welopen statement acts on a single connection. Here SHUT - and STOP would give the same result. This behavior has been proven to be correct - in the simulator. The Eclipse manual states that 'If items 3 - 7 are all defaulted, - the Open/Shut/Stop command applies to the well, leaving the connections unchanged', - but this has been proven to be wrong. The state of the connection can be tested - by looking at the CPI summary vectors. The connection is SHUT if CPI==0 and OPEN - if CPI>0. + This :term:`deck` would define two wells where OP1 and OP2 have two + connected grid cells each. The first welopen statement acts on the whole + well, closing both the well and the connections. If this statement used STOP + instead of SHUT, the connections would be left open. The second welopen + statement acts on a single connection. Here SHUT and STOP would give the + same result. This behavior has been proven to be correct in the simulator. + The Eclipse manual states that 'If items 3 - 7 are all defaulted, the + Open/Shut/Stop command applies to the well, leaving the connections + unchanged', but this has been proven to be wrong. The state of the + connection can be tested by looking at the CPI summary vectors. The + connection is SHUT if CPI==0 and OPEN if CPI>0. WELOPEN can also be used at different dates and changes therefore the state of connections without explicit use of the COMPDAT keyword. This function translates diff --git a/res2df/equil.py b/res2df/equil.py index 2e5323cc9..8ec16c68b 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -80,20 +80,20 @@ def df( PBVD and PDVD. How each data value in the EQUIL records is to be interpreted - depends on the phase configuration in the deck, which means + depends on the phase configuration in the :term:`deck`, which means that we need more than the EQUIL section alone to determine the dataframe. - If ntequl is not supplied and EQLDIMS is not in the deck, the + If ntequl is not supplied and EQLDIMS is not in the :term:`deck`, the equil data is not well defined in terms of OPM. This means that we have to infer the correct number of EQUIL lines from what gives us successful parsing from OPM. In those cases, the - deck must be supplied as a string, if not, extra EQUIL lines + :term:`deck` must be supplied as a string, if not, extra EQUIL lines are possibly already removed by the OPM parser in resdatafiles.str2deck(). Arguments: - deck: :term:`.DATA file` or string with deck. If - not string, EQLDIMS must be present in the deck. + deck: :term:`.DATA file` or string with :term:`deck`. If + not string, EQLDIMS must be present in the :term:`deck`. keywords: Requested keywords for which to extract data. ntequl: If not None, should state the NTEQUL in EQLDIMS. If None and EQLDIMS is not present, it will be inferred.
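A sketch of what the string-deck behaviour described in this docstring means in practice; the phase keywords, EQUIL records and region count below are made up for illustration:

.. code-block:: python

    from res2df import equil

    # EQLDIMS is absent, so the deck must be supplied as a string;
    # the number of EQUIL records (NTEQUL) is given here, but could
    # also be left as None and inferred:
    deckstr = """
    OIL
    WATER
    GAS

    EQUIL
      2000 200 2100 /
      2000 200 2100 /
    """
    dframe = equil.df(deckstr, keywords=["EQUIL"], ntequl=2)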
@@ -132,12 +132,12 @@ def df( def rsvd_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None ) -> pd.DataFrame: - """Extract RSVD data from a deck + """Extract RSVD data from a :term:`deck` Args: deck - ntequl: Number of EQLNUM regions in deck. Will - be inferred if not present in deck + ntequl: Number of EQLNUM regions in :term:`deck`. Will + be inferred if not present in :term:`deck` """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) @@ -149,12 +149,12 @@ def rsvd_fromdeck( def rvvd_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None ) -> pd.DataFrame: - """Extract RVVD data from a deck + """Extract RVVD data from a :term:`deck` Args: deck - ntequl: Number of EQLNUM regions in deck. Will - be inferred if not present in deck + ntequl: Number of EQLNUM regions in :term:`deck`. Will + be inferred if not present in :term:`deck` """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) @@ -166,12 +166,12 @@ def pbvd_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None ) -> pd.DataFrame: - """Extract PBVD data from a deck + """Extract PBVD data from a :term:`deck` Args: deck - ntequl: Number of EQLNUM regions in deck. Will - be inferred if not present in deck + ntequl: Number of EQLNUM regions in :term:`deck`. Will + be inferred if not present in :term:`deck` """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) @@ -183,12 +183,12 @@ def pdvd_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None ) -> pd.DataFrame: - """Extract PDVD data from a deck + """Extract PDVD data from a :term:`deck` Args: deck - ntequl: Number of EQLNUM regions in deck. Will - be inferred if not present in deck + ntequl: Number of EQLNUM regions in :term:`deck`. Will + be inferred if not present in :term:`deck` """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) @@ -198,12 +198,12 @@ def pdvd_fromdeck( def phases_from_deck(deck: Union[str, "opm.libopmcommon_python.Deck"]) -> str: - """Determined the set of phases from a deck, as + """Determine the set of phases from a :term:`deck`, as a string with values "oil-water-gas", "gas-water", "oil-water", or "oil-gas" Args: - deck: A parsed deck or DATA-file as a string + deck: A parsed :term:`deck` or DATA-file as a string Returns: String with phase configuration. Empty string if inconclusive. @@ -247,14 +247,14 @@ def phases_from_columns(columns: List[str]) -> str: def equil_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntequl: Optional[int] = None ) -> pd.DataFrame: - """Extract EQUIL data from a deck + """Extract EQUIL data from a :term:`deck` - If the deck is supplied as a string object, the number + If the :term:`deck` is supplied as a string object, the number of EQLNUM regions will be inferred if needed. Args: deck - ntequl: Number of EQLNUM regions in deck. + ntequl: Number of EQLNUM regions in :term:`deck`.
""" if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) diff --git a/res2df/faults.py b/res2df/faults.py index 8727b0e1f..6564df62f 100644 --- a/res2df/faults.py +++ b/res2df/faults.py @@ -31,12 +31,12 @@ def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: - """Produce a dataframe of fault data from a deck + """Produce a dataframe of fault data from a :term:`deck` All data for the keyword FAULTS will be returned. Args: - deck: :term:`.DATA file` + deck: A :term:`deck` """ if isinstance(deck, ResdataFiles): deck = deck.get_deck() diff --git a/res2df/gruptree.py b/res2df/gruptree.py index 697d0fa57..8b467b36b 100644 --- a/res2df/gruptree.py +++ b/res2df/gruptree.py @@ -35,7 +35,7 @@ def df( startdate: Optional[datetime.date] = None, welspecs: bool = True, ) -> pd.DataFrame: - """Extract all group information from a deck + """Extract all group information from a :term:`deck` and present as a Pandas Dataframe of all edges. Properties for nodes given in GRUPNET/NODEPROP will @@ -52,14 +52,14 @@ def df( previous tree is copied and a new complete tree is added to the dataframe tagged with the new date. - startdate is only relevant when START is not in the deck. + startdate is only relevant when START is not in the :term:`deck`. Args: deck: opm.io Deck object or ResdataFiles Returns: pd.DataFrame with one row pr edge. Empty dataframe if no - information is found in deck. + information is found in :term:`deck`. """ date: Optional[datetime.date] diff --git a/res2df/inferdims.py b/res2df/inferdims.py index 39fffbd14..e9ed7974c 100644 --- a/res2df/inferdims.py +++ b/res2df/inferdims.py @@ -21,9 +21,9 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: - """Guess the correct dimension count for an incoming deck (string) + """Guess the correct dimension count for an incoming :term:`deck` (string) - The incoming deck must in string form, if not, extra data is most + The incoming :term:`deck` must in string form, if not, extra data is most likely already removed by the opm.io parser. TABDIMS or EQLDIMS must not be present @@ -94,15 +94,15 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: def inject_dimcount( deckstr: str, dimkeyword: str, dimitem: int, dimvalue: int, nowarn: bool = False ) -> str: - """Insert a TABDIMS with NTSFUN into a deck + """Insert a TABDIMS with NTSFUN into a :term:`deck` string This is simple string manipulation, not opm.io - deck manipulation (which might be possible to do). + :term:`deck` manipulation (which might be possible to do). This function is to be wrapped by inject_xxxdims_ntxxx() Arguments: - deckstr: A string containing a partial deck (f.ex only + deckstr: A string containing a partial :term:`deck` (f.ex only the SWOF keyword). dimkeyword: Either TABDIMS or EQLDIMS dimitem: Item 0 (NTSSFUN) or 1 (NTPVT) of TABDIMS, only 0 for EQLDIMS. @@ -112,7 +112,7 @@ def inject_dimcount( nowarn: By default it will warn if this function is run on a deckstr with TABDIMS/EQLDIMS present. Mute this if True. Returns: - New deck with TABDIMS/EQLDIMS prepended. + New :term:`deck` string with TABDIMS/EQLDIMS prepended. """ assert dimvalue > 0, "dimvalue must be larger than zero" if dimkeyword not in ["TABDIMS", "EQLDIMS"]: @@ -146,19 +146,19 @@ def inject_xxxdims_ntxxx( deck: Union[str, "opm.libopmcommon_python.Deck"], ntxxx_value: Optional[int] = None, ) -> "opm.libopmcommon_python.Deck": - """Ensures TABDIMS/EQLDIMS is present in a deck. 
+ """Ensures TABDIMS/EQLDIMS is present in a :term:`deck`. - If ntxxx_value=None and ntxxx_name not in the deck, ntxxx_name will - be inferred through trial-and-error parsing of the deck, and then injected - into the deck. + If ntxxx_value=None and ntxxx_name not in the :term:`deck`, ntxxx_name will + be inferred through trial-and-error parsing of the :term:`deck`, and then injected + into the :term:`deck`. Args: xxxdims: TABDIMS or EQLDIMS ntxxx_name: NTPVT, NTEQUL or NTSFUN - deck: A data deck. If ntxxx_name is to be - estimated this *must* be a string and not a fully parsed deck. + deck: A data :term:`deck`. If ntxxx_name is to be + estimated this *must* be a string and not a fully parsed :term:`deck`. npxxx_value: Supply this if ntxxx_name is known, but not present in the - deck, this will override any guessing. If the deck already + deck, this will override any guessing. If the :term:`deck` already contains XXXDIMS, this will be ignored. Returns: diff --git a/res2df/pvt.py b/res2df/pvt.py index 1f89578ed..2e1825a9a 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -72,12 +72,12 @@ def pvtw_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract PVTW from a deck + """Extract PVTW from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) @@ -89,12 +89,12 @@ def pvtw_fromdeck( def density_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract DENSITY from a deck + """Extract DENSITY from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) @@ -106,12 +106,12 @@ def density_fromdeck( def rock_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract ROCK from a deck + """Extract ROCK from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) @@ -123,12 +123,12 @@ def rock_fromdeck( def pvto_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract PVTO from a deck + """Extract PVTO from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. 
""" if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) @@ -141,7 +141,7 @@ def pvto_fromdeck( def pvdo_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract PVDO from a deck + """Extract PVDO from a :term:`deck` Args: deck @@ -159,12 +159,12 @@ def pvdo_fromdeck( def pvdg_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract PVDG from a deck + """Extract PVDG from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) @@ -177,12 +177,12 @@ def pvdg_fromdeck( def pvtg_fromdeck( deck: Union[str, "opm.libopmcommon_python.Deck"], ntpvt: Optional[int] = None ) -> pd.DataFrame: - """Extract PVTG from a deck + """Extract PVTG from a :term:`deck` Args: deck - ntpvt: Number of PVT regions in deck. Will - be inferred if not present in deck. + ntpvt: Number of PVT regions in :term:`deck`. Will + be inferred if not present in :term:`deck`. """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) @@ -197,7 +197,7 @@ def df( keywords: Optional[List[str]] = None, ntpvt: Optional[int] = None, ) -> pd.DataFrame: - """Extract all (most) PVT data from a deck. + """Extract all (most) PVT data from a :term:`deck`. If you want to call this function on :term:`include files `, read them in to strings as in this example: @@ -205,12 +205,12 @@ def df( > pvt_df = pvt.df(open("pvt.inc").read()) Arguments: - deck: Incoming data deck. Always + deck: Incoming data :term:`deck`. Always supply as a string if you don't know TABDIMS-NTSFUN. keywords: List of keywords for which data is wanted. All data will be merged into one dataframe. - pvtnumcount: Number of PVTNUMs defined in the deck, only - needed if TABDIMS with NTPVT is not found in the deck. + pvtnumcount: Number of PVTNUMs defined in the :term:`deck`, only + needed if TABDIMS with NTPVT is not found in the :term:`deck`. If not supplied (or None) and NTPVT is not defined, it will be attempted inferred. 
@@ -313,7 +313,8 @@ def pvt_main(args) -> None: def pvt_reverse_main(args) -> None: - """Entry-point for module, for command line utility for CSV to simulator deck""" + """Entry-point for module, for command line utility for CSV to simulator + :term:`deck`""" logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index ea77d6491..868b149d7 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -84,7 +84,7 @@ def get_path(self) -> Path: return Path(self._eclbase).absolute().parent def get_deck(self) -> "opm.libopmcommon_python.Deck": - """Return a opm.io deck of the DATA file""" + """Return a opm.io :term:`deck` of the DATA file""" if not self._deck: if Path(self._eclbase + ".DATA").is_file(): deckfile = self._eclbase + ".DATA" @@ -100,7 +100,7 @@ def get_deck(self) -> "opm.libopmcommon_python.Deck": def str2deck( string: str, parsecontext: Optional[List[Tuple[str, Any]]] = None ) -> "opm.libopmcommon_python.Deck": - """Produce a opm.io deck from a string, using permissive + """Produce an opm.io :term:`deck` from a string, using permissive parsing by default""" if parsecontext is None: parsecontext = opm.io.ParseContext(OPMIOPARSER_RECOVERY) diff --git a/res2df/satfunc.py b/res2df/satfunc.py index 647b9664d..6a559742e 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -69,22 +69,22 @@ def df( The two first columns in the dataframe are 'KEYWORD' (which can be SWOF, SGOF, etc.), and then SATNUM which is an index counter from 1 and onwards. Then follows the data for each individual keyword that - is found in the deck. + is found in the :term:`deck`. SATNUM data can only be parsed correctly if TABDIMS is present and stating how many saturation functions there should be. If you have a string with TABDIMS missing, you must supply - this as a string to this function, and not a parsed deck, as + this as a string to this function, and not a parsed :term:`deck`, as the default parser in ResdataFiles is very permissive (and only returning the first function by default). Arguments: - deck: Incoming data deck. Always + deck: Incoming data :term:`deck`. Always supply as a string if you don't know TABDIMS-NTSFUN. keywords: Requested keywords for which to extract data. - ntsfun: Number of SATNUMs defined in the deck, only - needed if TABDIMS with NTSFUN is not found in the deck. + ntsfun: Number of SATNUMs defined in the :term:`deck`, only + needed if TABDIMS with NTSFUN is not found in the :term:`deck`. If not supplied (or None) and NTSFUN is not defined, an attempt will be made to infer it. diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index eac954f63..2b4498695 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -45,7 +45,7 @@ def basic_data( BASIC_DATA_KEYS in _vfpprod and _vfpinj. Args: - deck: :term:`.DATA file` or string with deck + deck: :term:`.DATA file` or string with :term:`deck` keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11].
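A sketch of the vfpnumbers_str syntax described above, assuming that basic_data is re-exported by the res2df.vfp subpackage and that deck already holds a parsed deck (both assumptions, not shown in this patch):

    from res2df import vfp

    # "[0,1,8:11]" selects VFPPROD tables 0, 1, 8, 9, 10 and 11:
    vfp_data = vfp.basic_data(deck, keyword="VFPPROD", vfpnumbers_str="[0,1,8:11]")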
@@ -245,12 +245,12 @@ def dfs( keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[pd.DataFrame]: - """Produce a list of dataframes of vfp tables from a deck + """Produce a list of dataframes of vfp tables from a :term:`deck` Data for the keyword VFPPROD or VFPINJ will be returned as separate item in list Args: - deck: :term:`.DATA file` or string with deck + deck: :term:`.DATA file` or string with :term:`deck` keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -288,12 +288,12 @@ def pyarrow_tables( keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[pa.Table]: - """Produce a list of pyarrow.Table of vfp tables from a deck + """Produce a list of pyarrow.Table of vfp tables from a :term:`deck` Data for the keyword VFPPROD or VFPINJ will be returned as separate item in list Args: - deck: :term:`.DATA file` or string with deck + deck: :term:`.DATA file` or string with :term:`deck` keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. @@ -418,7 +418,7 @@ def df( All data for the keywords VFPPROD/VFPINJ will be returned. Args: - deck: :term:`.DATA file` or string wit deck + deck: :term:`.DATA file` or string with :term:`deck` keyword: VFP table type, i.e. 'VFPPROD' or 'VFPINJ' vfpnumbers_str: str with list of VFP table numbers to extract """ diff --git a/res2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py index 2e7c2f97f..04239698d 100755 --- a/res2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -655,7 +655,7 @@ def pyarrow2basic_data(pa_table: pa.Table) -> Dict[str, Any]: def _check_basic_data(vfp_data: Dict[str, Any]) -> bool: """Perform a check of the VFPPROD data contained in the dictionary. Checks if all data is present and if the dimensions of the arrays - are consisitent. + are consistent. Args: vfp_data: Dictionary containing all data for a VFPPROD keyword in Eclipse diff --git a/res2df/wcon.py b/res2df/wcon.py index 563929c10..3957109de 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -29,9 +29,9 @@ def df(deck: Union[ResdataFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: - """Loop through the deck and pick up information found + """Loop through the :term:`deck` and pick up information found - The loop over the deck is a state machine, as it has to pick up dates + The loop over the :term:`deck` is a state machine, as it has to pick up dates """ if isinstance(deck, ResdataFiles): From 49ed0e1d45c07386d54ab2c60061c0cfd8dd9b60 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Fri, 17 Nov 2023 11:54:38 +0100 Subject: [PATCH 52/68] Fix RES2CSV casing --- res2df/res2csv.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/res2df/res2csv.py b/res2df/res2csv.py index 8845d5bd1..d0ebdc848 100644 --- a/res2df/res2csv.py +++ b/res2df/res2csv.py @@ -27,12 +27,12 @@ Outputting the EQUIL data from a .DATA file. This is implicitly supplied in ERT configs:: - FORWARD_MODEL res2csv(=equil, =equil.csv) + FORWARD_MODEL RES2CSV(=equil, =equil.csv) For a yearly summary export of the realization, options have to be supplied with the XARG options:: - FORWARD_MODEL res2csv(=summary, =yearly.csv, ="--time_index", ="yearly") + FORWARD_MODEL RES2CSV(=summary, =yearly.csv, ="--time_index", ="yearly") The quotes around double-dashed options are critical to avoid ERT taking for a comment.
For more options, use ```` etc. From cae4acd8aae065ff1a3a053819f580e06e9d8615 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Fri, 17 Nov 2023 12:11:41 +0100 Subject: [PATCH 53/68] resdata keyword -> keyword --- res2df/common.py | 6 +++--- res2df/compdat.py | 2 +- res2df/inferdims.py | 2 +- res2df/res2csv.py | 2 +- res2df/vfp/_vfpcommon.py | 2 +- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index cbede098a..7876b7b3b 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -34,7 +34,7 @@ from .constants import MAGIC_STDOUT # Parse named JSON files, this exposes a dict of dictionary describing the contents -# of supported resdata keyword data +# of supported keyword data OPMKEYWORDS: Dict[str, dict] = {} for keyw in [ "BRANPROP", @@ -192,7 +192,7 @@ def keyworddata_to_df( recordcountername: Optional[str] = None, emptyrecordcountername: Optional[str] = None, ) -> pd.DataFrame: - """Extract data associated to an resdata keyword into a tabular form. + """Extract data associated with a keyword into tabular form. Two modes of enumeration of tables in the keyworddata is supported, you will have to find out which one fits your particular keyword. Activate @@ -275,7 +275,7 @@ def parse_opmio_deckrecord( Args: record: Record to be parsed - keyword: Which resdata keyword this belongs to + keyword: Which keyword this belongs to itemlistname: The key in the json dict that describes the items, typically 'items' or 'records' recordindex: For keywords where itemlistname is 'records', this is a diff --git a/res2df/compdat.py b/res2df/compdat.py index 5e6450050..6a2da3b2c 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -1,4 +1,4 @@ -"""Parser and dataframe generator for the resdata keywords: +"""Parser and dataframe generator for the keywords: * COMPDAT * COMPLUMP * COMPSEGS diff --git a/res2df/inferdims.py b/res2df/inferdims.py index e9ed7974c..3b8506b34 100644 --- a/res2df/inferdims.py +++ b/res2df/inferdims.py @@ -31,7 +31,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: stricter mode, to detect the correct table dimensionality Arguments: - deck: String containing a :term:`.DATA file` or only a few resdata keywords + deck: String containing a :term:`.DATA file` or only a few keywords dimkeyword: Either TABDIMS or EQLDIMS dimitem: The element number in TABDIMS/EQLDIMS to modify Returns: diff --git a/res2df/res2csv.py b/res2df/res2csv.py index d0ebdc848..abd7d40fc 100644 --- a/res2df/res2csv.py +++ b/res2df/res2csv.py @@ -151,7 +151,7 @@ def get_parser() -> argparse.ArgumentParser: "particular well at a particular time. " "If multisegment wells are found, associated data " "to a connection is merged onto the same row as additional columns. " - "You need the resdata keyword WRFTPLT present in your DATA-file to get " + "You need the keyword WRFTPLT present in your DATA-file to get " "the data outputted." ), ) diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py index 54384edde..956b26530 100755 --- a/res2df/vfp/_vfpcommon.py +++ b/res2df/vfp/_vfpcommon.py @@ -68,7 +68,7 @@ def _deckrecord2list( Args: record: Record to be parsed - keyword: Which resdata keyword this belongs to + keyword: Which keyword this belongs to recordindex: For keywords where itemlistname is 'records', this is a list index to the "record". recordname: Name of the record From 1f08f18c43d87b0e18b21cd2bd856c7bef584ab3 Mon Sep 17 00:00:00 2001 From: "Yngve S.
Kristiansen" Date: Fri, 17 Nov 2023 12:17:29 +0100 Subject: [PATCH 54/68] Remove TODO (full/not full described in deck) --- docs/glossary.rst | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index 67a0b9777..b0634afd7 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -9,8 +9,7 @@ Glossary .DATA file Inputs provided to reservoir simulators such as Eclipse or OPM Flow. Usually a .DATA file pointing to other include files. One .DATA file - typically points to multiple include files. A data file is defined as - a **full** data file if ?...TODO + typically points to multiple include files. include file Files that provide inputs to reservoir simulators by using the INCLUDE statement From 5bcb38f5c9501c91240de53640d6fe41923ef9f5 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Fri, 17 Nov 2023 12:19:58 +0100 Subject: [PATCH 55/68] Fix some typos --- docs/usage/satfunc.rst | 2 +- res2df/fipreports.py | 2 +- res2df/resdatafiles.py | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index 8c6bba0a8..271894b8c 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -114,7 +114,7 @@ through the `pyscal `_ library. Pyscal can create curves from parametrizations, and interpolate between curves. Pyscal can initialize its relperm objects from :term:`include files` -though the parsing capabilities of res2df.satfunc. +through the parsing capabilities of res2df.satfunc. The function ``pyscal.pyscallist.df()`` is analogous to ``res2df.satfunc.df()`` in what it produces, and the :func:`res2df.satfunc.df2res()` can be used on both diff --git a/res2df/fipreports.py b/res2df/fipreports.py index 4f4c896e8..2ce32dcfc 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -106,7 +106,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra DATE and region index added. Args: - prtfile: filename (PRT) or an ResdataFiles object + prtfile: filename (PRT) or a resdataFiles object fipname: The name of the regport regions, FIPNUM, FIPZON or whatever Max length of the string is 8, the first three characters must be FIP, and the next 3 characters must be unique for a given :term:`.DATA file`. 
diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index 868b149d7..e82ffe80b 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -157,7 +157,7 @@ def get_summary(self, include_restart: bool = True) -> Summary: return self._summary def get_initfile(self) -> ResdataFile: - """Find and return the INIT file as an ResdataFile object""" + """Find and return the INIT file as a ResdataFile object""" if not self._initfile: initfilename = self._eclbase + ".INIT" if not Path(initfilename).is_file(): @@ -169,7 +169,7 @@ def get_initfile(self) -> ResdataFile: return self._initfile def get_rftfile(self) -> ResdataFile: - """Find and return the RFT file as an ResdataFile object""" + """Find and return the RFT file as a ResdataFile object""" if not self._rftfile: rftfilename = self._eclbase + ".RFT" if not Path(rftfilename).is_file(): @@ -181,7 +181,7 @@ def get_rftfile(self) -> ResdataFile: return self._rftfile def get_rstfile(self) -> ResdataFile: - """Find and return the UNRST file as an ResdataFile object""" + """Find and return the UNRST file as a ResdataFile object""" if not self._rstfile: rstfilename = self._eclbase + ".UNRST" if not Path(rstfilename).is_file(): From b70ee4893b40c34bab0e18061f0612ce6405b536 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Fri, 17 Nov 2023 12:23:08 +0100 Subject: [PATCH 56/68] fixup an->a grammar --- res2df/common.py | 2 +- res2df/resdatafiles.py | 2 +- res2df/summary.py | 6 +++--- tests/test_grid.py | 2 +- tests/test_summary.py | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/res2df/common.py b/res2df/common.py index 7876b7b3b..0d433a280 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -842,7 +842,7 @@ def stack_on_colnames( def is_color(input_string: str) -> bool: """Checks if the input string is a valid color. That is six-digit hexadecimal, three-digit hexadecimal or - given as an SVG color keyword name + given as an SVG color keyword name """ if input_string.lower() in SVG_COLOR_NAMES: return True diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index e82ffe80b..2f9bf33a2 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -112,7 +112,7 @@ def file2deck(filename: Union[str, Path]) -> "opm.libopmcommon_python.Deck": return ResdataFiles.str2deck(Path(filename).read_text(encoding="utf-8")) def get_egrid(self) -> Grid: - """Find and return EGRID file as an Grid object""" + """Find and return EGRID file as a Grid object""" if not self._egrid: egridfilename = self._eclbase + ".EGRID" if not Path(egridfilename).is_file(): diff --git a/res2df/summary.py b/res2df/summary.py index 605b1dd0c..5a2e271e2 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -682,7 +682,7 @@ def df2ressum( dframe: pd.DataFrame, casename: str = "SYNTHETIC", ) -> Summary: - """Convert a dataframe to an Summary object + """Convert a dataframe to a Summary object Args: dframe: Dataframe with a DATE column (or with the @@ -709,7 +709,7 @@ def _summary_pandas_frame( time_index: Optional[Union[List[dt.date], List[dt.datetime]]] = None, column_keys: Optional[List[str]] = None, ) -> pd.DataFrame: - """Build a Pandas dataframe from an Summary object. + """Build a Pandas dataframe from a Summary object. Temporarily copied from resdata to circumvent bug @@ -759,7 +759,7 @@ def resdata_summary_from_pandas( dims: Optional[List[int]] = None, headers: Optional[List[tuple]] = None, ) -> Summary: - """Build an Summary object from a Pandas dataframe. + """Build a Summary object from a Pandas dataframe.
Temporarily copied from resdata to circumvent bug diff --git a/tests/test_grid.py b/tests/test_grid.py index 7e5dc6497..456d646b3 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -95,7 +95,7 @@ def test_gridzonemap(): ) assert pd.isnull(df_bogus_zones["ZONE"]).all() - # Test a custom "subzone" map via direct usage of merge_zone on an dataframe + # Test a custom "subzone" map via direct usage of merge_zone on a dataframe # where ZONE already exists: dframe = grid.df(resdatafiles) diff --git a/tests/test_summary.py b/tests/test_summary.py index ec90f3ad1..cf97ff8a4 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -812,7 +812,7 @@ def test_unique_datetime_retain_index_name(filepath): def test_smry_meta(): - """Test obtaining metadata dictionary for summary vectors from an summary object""" + """Test obtaining metadata dictionary for summary vectors from a summary object""" meta = smry_meta(ResdataFiles(REEK)) assert isinstance(meta, dict) @@ -1020,7 +1020,7 @@ def test_fix_dframe_for_resdata(dframe, expected_dframe): ], ) def test_df2ressum(dframe): - """Test that a dataframe can be converted to an summary object, and then read + """Test that a dataframe can be converted to a summary object, and then read back again""" # Massage the dframe first so we can assert on equivalence after. From 56ae3d41a0b5ff58f31cbb21893e33673750de95 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Fri, 17 Nov 2023 13:51:12 +0100 Subject: [PATCH 57/68] Give some include file examples --- README.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 216314737..9a06b3332 100644 --- a/README.md +++ b/README.md @@ -12,8 +12,9 @@ res2df is a Pandas DataFrame wrapper around resdata and opm.io, which are used to access binary files outputted by reservoir simulators, or its input files --- or any other tool outputting to the same data format. -The reverse operation, from a Pandas DataFrame to reservoir simulator include files, -is provided for some of the modules. +The reverse operation, from a Pandas DataFrame to reservoir simulator include files +(commonly given extensions such as ".inc" or ".grdecl") is provided for some of the +modules. The package consists of a module pr. datatype, e.g. one module for summary files (.UNSMRY), one for completion data etc. From 67cf99e00a087df49977e3470df9739f3ff7dff5 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Mon, 20 Nov 2023 15:26:27 +0100 Subject: [PATCH 58/68] Use consistent notation for .DATA file --- docs/glossary.rst | 6 +++--- docs/introduction.rst | 2 +- docs/usage/compdat.rst | 2 +- docs/usage/grid.rst | 4 ++-- res2df/compdat.py | 4 +++- res2df/equil.py | 6 ++++-- res2df/faults.py | 4 +++- res2df/fipreports.py | 2 +- res2df/grid.py | 2 +- res2df/gruptree.py | 4 +++- res2df/nnc.py | 2 +- res2df/pillars.py | 2 +- res2df/pvt.py | 2 +- res2df/res2csv.py | 2 +- res2df/resdatafiles.py | 6 +++--- res2df/rft.py | 6 +++--- res2df/satfunc.py | 4 ++-- res2df/summary.py | 6 +++--- res2df/trans.py | 2 +- res2df/vfp/_vfp.py | 4 +++- res2df/wcon.py | 2 +- res2df/wellcompletiondata.py | 2 +- res2df/wellconnstatus.py | 2 +- tests/test_summary.py | 2 +- 24 files changed, 45 insertions(+), 35 deletions(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index b0634afd7..2870da0d0 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -8,16 +8,16 @@ Glossary .DATA file Inputs provided to reservoir simulators such as Eclipse or OPM Flow. - Usually a .DATA file pointing to other include files.
One .DATA file + Usually a :term:`.DATA file` pointing to other include files. One :term:`.DATA file` typically points to multiple include files. include file Files that provide inputs to reservoir simulators by using the INCLUDE statement - in .DATA files. By convention, these files often have the extension .INC/.inc + in :term:`.DATA files <.DATA file>`. By convention, these files often have the extension .INC/.inc (generally) or .GRDECL/.grdecl (for files included into the grid section). deck - Refers to inputs passed to reservoir simulators. It may be a .DATA file and the + Refers to inputs passed to reservoir simulators. It may be a :term:`.DATA file` and the include files it points to, or it may be a single or several include files. If a deck contains all the information (i.e., keywords) the simulator needs to run the requested simulation, it is defined as complete. If it is missing diff --git a/docs/introduction.rst b/docs/introduction.rst index f9bd54f70..040ad4bbc 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -186,7 +186,7 @@ parameters.txt ^^^^^^^^^^^^^^ Metadata for each :term:`.DATA file` are sometimes added in a text file named -``parameters.txt``, alongside the Eclipse DATA file or one or two directory levels +``parameters.txt``, alongside the Eclipse .DATA file or one or two directory levels above it. Each line in the text file should contain a string, interpreted as the key, and diff --git a/docs/usage/compdat.rst b/docs/usage/compdat.rst index 12ef0ecbf..d9ff0f7a7 100644 --- a/docs/usage/compdat.rst +++ b/docs/usage/compdat.rst @@ -40,4 +40,4 @@ be added to the returned data through the option ``--initvectors``: .. code-block:: console res2csv compdat --verbose MYDATADECK.DATA --initvectors FIPNUM PERMX - # (put the DATA file first, if not it will be interpreted as a vector) + # (put the .DATA file first, if not it will be interpreted as a vector) diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst index 94fb7f77f..929a1bb9d 100644 --- a/docs/usage/grid.rst +++ b/docs/usage/grid.rst @@ -3,7 +3,7 @@ grid The grid module will extract static and dynamic cell properties from a grid -(from the :term:`binary output files from reservoir simulators `). +(from the :term:`output files of reservoir simulators <output file>`). Each row in a returned dataframe represents one cell. Typical usage @@ -115,7 +115,7 @@ the whereabouts of the file: resdatafiles = ResdataFiles("MYDATADECK.DATA") dframe = grid.df(resdatafiles) - # The filename with layers is relative to DATA-file location + # The filename with layers is relative to .DATA file location # or an absolute path.
subzonemap = res2df.common.parse_zonemapfile("subzones.lyr") dframe_with_subzones = common.merge_zones( diff --git a/res2df/compdat.py b/res2df/compdat.py index 6a2da3b2c..446fcc63d 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -950,7 +950,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: Arguments: parser: parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of .DATA file.") + parser.add_argument( + "DATAFILE", help="Name of the .DATA input file for the reservoir simulator" + ) parser.add_argument( "-o", "--output", diff --git a/res2df/equil.py b/res2df/equil.py index 8ec16c68b..472467536 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -203,7 +203,7 @@ def phases_from_deck(deck: Union[str, "opm.libopmcommon_python.Deck"]) -> str: or "oil-gas" Args: - deck: A parsed :term:`deck` or DATA-file as a string + deck: A parsed :term:`deck` or :term:`.DATA file` as a string Returns: String with phase configuration. Empty string if inconclusive. @@ -283,7 +283,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (argparse.ArgumentParser or argparse.subparser): parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of reservoir DATA file.") + parser.add_argument( + "DATAFILE", help="Name of the .DATA input file for the reservoir simulator" + ) parser.add_argument( "-o", "--output", diff --git a/res2df/faults.py b/res2df/faults.py index 6564df62f..f1b1880ac 100644 --- a/res2df/faults.py +++ b/res2df/faults.py @@ -69,7 +69,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: Arguments: parser: argparse.ArgumentParser or argparse.subparser """ - parser.add_argument("DATAFILE", help="Name of reservoir DATA file.") + parser.add_argument( + "DATAFILE", help="Name of the .DATA input file for the reservoir simulator" + ) parser.add_argument( "-o", "--output", diff --git a/res2df/fipreports.py b/res2df/fipreports.py index 2ce32dcfc..b278da9ba 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -194,7 +194,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """Fill parser with command line arguments""" - parser.add_argument("PRTFILE", type=str, help="PRT file (or DATA file)") + parser.add_argument("PRTFILE", type=str, help="PRT file (or .DATA file)") parser.add_argument( "--fipname", type=str, diff --git a/res2df/grid.py b/res2df/grid.py index 539bc32db..89c87857a 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -540,7 +540,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir DATA file. " + help="Name of reservoir .DATA file. 
" + "INIT and EGRID file must lie alongside.", ) parser.add_argument( diff --git a/res2df/gruptree.py b/res2df/gruptree.py index 8b467b36b..6dc4552aa 100644 --- a/res2df/gruptree.py +++ b/res2df/gruptree.py @@ -392,7 +392,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (argparse.ArgumentParser or argparse.subparser): parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of reservoir DATA file.") + parser.add_argument( + "DATAFILE", help="Name of the .DATA input file for the reservoir simulator" + ) parser.add_argument( "-o", "--output", diff --git a/res2df/nnc.py b/res2df/nnc.py index f7d6d1d74..051ede833 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -178,7 +178,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir DATA file. " + help="Name of reservoir .DATA file. " + "INIT and EGRID file must lie alongside.", ) parser.add_argument( diff --git a/res2df/pillars.py b/res2df/pillars.py index 08f6cfa80..bece04b18 100644 --- a/res2df/pillars.py +++ b/res2df/pillars.py @@ -333,7 +333,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", help=( - "Name of reservoir DATA file. " "INIT and EGRID file must lie alongside." + "Name of reservoir .DATA file. " "INIT and EGRID file must lie alongside." ), ) parser.add_argument( diff --git a/res2df/pvt.py b/res2df/pvt.py index 2e1825a9a..072c85820 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -246,7 +246,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (ArgumentParser or subparser): parser to fill with arguments """ parser.add_argument( - "DATAFILE", help="Name of reservoir DATA file or file with PVT keywords." + "DATAFILE", help="Name of reservoir .DATA file or file with PVT keywords." ) parser.add_argument( "-o", diff --git a/res2df/res2csv.py b/res2df/res2csv.py index abd7d40fc..342dfbb69 100644 --- a/res2df/res2csv.py +++ b/res2df/res2csv.py @@ -151,7 +151,7 @@ def get_parser() -> argparse.ArgumentParser: "particular well at a particular time. " "If multisegment wells are found, associated data " "to a connection is merged onto the same row as additional columns. " - "You need the keyword WRFTPLT present in your DATA-file to get " + "You need the keyword WRFTPLT present in your .DATA file to get " "the data outputted." ), ) diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index 2f9bf33a2..49bafbb69 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -60,7 +60,7 @@ def __init__(self, eclbase): if ".DATA" in eclbase and not Path(eclbase).is_file(): logger.warning("File %s does not exist", eclbase) # (this is not an error, because it is possible - # to obtain summary without the DATA file being present) + # to obtain summary without the .DATA file being present) # Strip .DATA or . 
at end of eclbase: eclbase = rreplace(".DATA", "", eclbase) @@ -80,11 +80,11 @@ def __init__(self, eclbase): self._deck = None def get_path(self) -> Path: - """Return the full path to the directory with the DATA file""" + """Return the full path to the directory with the .DATA file""" return Path(self._eclbase).absolute().parent def get_deck(self) -> "opm.libopmcommon_python.Deck": - """Return a opm.io :term:`deck` of the DATA file""" + """Return an opm.io :term:`deck` of the .DATA file""" if not self._deck: if Path(self._eclbase + ".DATA").is_file(): deckfile = self._eclbase + ".DATA" diff --git a/res2df/rft.py b/res2df/rft.py index 3979ef637..42c550344 100644 --- a/res2df/rft.py +++ b/res2df/rft.py @@ -654,9 +654,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", help=( - "Name of reservoir DATA file or RFT file. " - "If DATA file is provided, it will look for" - " the associated DATA file" + "Name of reservoir .DATA file or RFT file. " + "If a .DATA file is provided, it will look for" + " the associated RFT file" ), ) parser.add_argument( diff --git a/res2df/satfunc.py b/res2df/satfunc.py index 6a559742e..c5d993e6e 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -92,7 +92,7 @@ def df( pd.DataFrame, columns 'KEYWORD', 'SW', 'KRW', 'KROW', 'PC', .. """ if isinstance(deck, ResdataFiles): - # NB: If this is done on include files and not on DATA files + # NB: If this is done on include files and not on .DATA files # we can lose data for SATNUM > 1 deck = deck.get_deck() deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTSFUN", deck, ntsfun) @@ -162,7 +162,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir DATA file or file with saturation functions.", + help="Name of reservoir .DATA file or file with saturation functions.", ) parser.add_argument( "-o", diff --git a/res2df/summary.py b/res2df/summary.py index 5a2e271e2..10c8b4ca5 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -425,7 +425,7 @@ def df( dupes = dframe.columns.duplicated() if dupes.any(): logger.warning( - "Duplicated columns detected, check your DATA file " + "Duplicated columns detected, check your .DATA file " "for repeated vectors in the SUMMARY section" ) logger.warning("Duplicates: %s", list(dframe.columns[dupes])) @@ -804,7 +804,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir DATA file. " + "UNSMRY file must lie alongside.", + help="Name of reservoir .DATA file. " + "UNSMRY file must lie alongside.", ) parser.add_argument( "--time_index", @@ -856,7 +856,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: type=str, help=( "Filename of key-value parameter file to look for if -p is set, " - "relative to reservoir DATA file or an absolute filename. " + "relative to reservoir .DATA file or an absolute filename. " "If not supplied, parameters.{json,yml,txt} in " "{., .. and ../..} will be merged in." ), diff --git a/res2df/trans.py b/res2df/trans.py index 873271dd2..d793ada79 100644 --- a/res2df/trans.py +++ b/res2df/trans.py @@ -261,7 +261,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir DATA file. 
" + "INIT and EGRID file must lie alongside.", ) parser.add_argument("--vectors", nargs="+", help="Extra INIT vectors to be added") diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 2b4498695..733849f71 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -448,7 +448,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (argparse.ArgumentParser or argparse.subparser): parser to fill with arguments """ - parser.add_argument("DATAFILE", help="Name of reservoir DATA file.") + parser.add_argument( + "DATAFILE", help="Name of the .DATA input file for the reservoir simulator" + ) parser.add_argument( "-o", "--output", diff --git a/res2df/wcon.py b/res2df/wcon.py index 3957109de..cb537ecb6 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -80,7 +80,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: fill with arguments """ parser.add_argument( - "DATAFILE", help="Name of reservoir DATA file or resdata include file." + "DATAFILE", help="Name of reservoir .DATA file or resdata include file." ) parser.add_argument( "-o", "--output", type=str, help="Name of output csv file.", default="wcon.csv" diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index 983d27c78..4f2586622 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -250,7 +250,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", type=str, - help="Name of reservoir DATA file. " + "UNSMRY file must lie alongside.", + help="Name of reservoir .DATA file. " + "UNSMRY file must lie alongside.", ) parser.add_argument( "--zonemap", diff --git a/res2df/wellconnstatus.py b/res2df/wellconnstatus.py index 7860dd589..97a672d88 100644 --- a/res2df/wellconnstatus.py +++ b/res2df/wellconnstatus.py @@ -96,7 +96,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", type=str, - help="Name of reservoir DATA file. " + "UNSMRY file must lie alongside.", + help="Name of reservoir .DATA file. " + "UNSMRY file must lie alongside.", ) parser.add_argument( "-o", diff --git a/tests/test_summary.py b/tests/test_summary.py index cf97ff8a4..0c9fbb508 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -252,7 +252,7 @@ def test_paramsupport_explicitfile(tmp_path, mocker): """Test explicit naming of parameters file from command line. This is a little bit tricky because the parameter file is assumed to be - relative to the DATA file, not to working directory unless it is absolute.""" + relative to the .DATA file, not to working directory unless it is absolute.""" tmpcsvfile = tmp_path / "smrywithrandomparams.txt" randomparamfile = tmp_path / "fooparams.txt" From a02db6e03c48e43c60f39eb33e54290d25a617a1 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Mon, 20 Nov 2023 15:34:19 +0100 Subject: [PATCH 59/68] fix some docstrings --- docs/glossary.rst | 4 +++- res2df/equil.py | 8 +++++--- res2df/fipreports.py | 2 +- res2df/nnc.py | 2 +- res2df/pillars.py | 2 +- res2df/resdatafiles.py | 4 ++-- res2df/summary.py | 2 +- res2df/wellcompletiondata.py | 2 +- res2df/wellconnstatus.py | 2 +- 9 files changed, 16 insertions(+), 12 deletions(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index 2870da0d0..caba10c8d 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -4,7 +4,9 @@ Glossary .. glossary:: reservoir simulator - Reservoir simulators such as OPM Flow or Eclipse. 
+      Simulation of reservoir fields comes in many forms, but for the purposes of +      res2df we only consider simulators that take a :term:`deck` as input and produce +      :term:`output files <output file>` such as ``.UNSMRY``. This includes OPM Flow and Eclipse. .DATA file Inputs provided to reservoir simulators such as Eclipse or OPM Flow. diff --git a/res2df/equil.py b/res2df/equil.py index 472467536..4883e9713 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -344,7 +344,9 @@ def equil_main(args) -> None: def equil_reverse_main(args) -> None: - """Entry-point for module, for command line utility for CSV to resdata""" + """Entry-point for module, for command line utility + for CSV to reservoir simulator :term:`include files <include file>` + """ logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) @@ -361,8 +363,8 @@ def df2res( withphases: bool = False, filename: Optional[str] = None, ) -> str: - """Generate resdata include strings from dataframes with - solution (EQUIL, RSVD++) data. + """Generate string contents of :term:`include files <include file>` + from dataframes with solution (EQUIL, RSVD++) data. Args: equil_df: Dataframe with res2df format. diff --git a/res2df/fipreports.py b/res2df/fipreports.py index b278da9ba..8f7b2f60c 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -106,7 +106,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra DATE and region index added. Args: - prtfile: filename (PRT) or a resdataFiles object + prtfile: filename (PRT) or a ResdataFiles object fipname: The name of the regport regions, FIPNUM, FIPZON or whatever Max length of the string is 8, the first three characters must be FIP, and the next 3 characters must be unique for a given :term:`.DATA file`. diff --git a/res2df/nnc.py b/res2df/nnc.py index 051ede833..f4ccec7fa 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -1,6 +1,6 @@ """ Extract non-neighbour connection (NNC) information from reservoir -simulator output files. +simulator :term:`output files <output file>`. """ import argparse import datetime diff --git a/res2df/pillars.py b/res2df/pillars.py index 08f6cfa80..bece04b18 100644 --- a/res2df/pillars.py +++ b/res2df/pillars.py @@ -333,7 +333,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", help=( - "Name of reservoir DATA file. " "INIT and EGRID file must lie alongside." + "Name of reservoir .DATA file. " "INIT and EGRID file must lie alongside." ), ) parser.add_argument( diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index 49bafbb69..a79e67277 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -47,8 +47,8 @@ class ResdataFiles(object): ResdataFile/Summary objects is easy for users, and with caching if wanted. - Various functions that needs some of the simulator output - (or input file) should be able to ask this class, and + Various functions that need some of the simulator :term:`output <output file>` + (or :term:`input file`) should be able to ask this class, and it should be loaded or served from cache. """ diff --git a/res2df/summary.py b/res2df/summary.py index 10c8b4ca5..71b415d3a 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -804,7 +804,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir .DATA file. " + "UNSMRY file must lie alongside.", + help="Name of reservoir .DATA file. 
UNSMRY file must lie alongside.", ) parser.add_argument( "--time_index", diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index 4f2586622..95d6f2103 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -250,7 +250,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", type=str, - help="Name of reservoir .DATA file. " + "UNSMRY file must lie alongside.", + help="Name of reservoir .DATA file. UNSMRY file must lie alongside.", ) parser.add_argument( "--zonemap", diff --git a/res2df/wellconnstatus.py b/res2df/wellconnstatus.py index 97a672d88..e0c514fb1 100644 --- a/res2df/wellconnstatus.py +++ b/res2df/wellconnstatus.py @@ -96,7 +96,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", type=str, - help="Name of reservoir .DATA file. " + "UNSMRY file must lie alongside.", + help="Name of reservoir .DATA file. UNSMRY file must lie alongside.", ) parser.add_argument( "-o", From 69e2285ef8cd8694b54c83011c1d9dc71ec78b28 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 21 Nov 2023 08:44:20 +0100 Subject: [PATCH 60/68] include string -> include file content --- docs/glossary.rst | 2 +- res2df/common.py | 4 ++-- res2df/equil.py | 2 +- res2df/grid.py | 2 +- res2df/nnc.py | 2 +- res2df/pillars.py | 4 +--- res2df/pvt.py | 2 +- res2df/satfunc.py | 4 ++-- 8 files changed, 10 insertions(+), 12 deletions(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index caba10c8d..304a44b6c 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -10,7 +10,7 @@ Glossary .DATA file Inputs provided to reservoir simulators such as Eclipse or OPM Flow. - Usually a :term:`.DATA file` pointing to other include files. One :term:`.DATA file`` + Usually a :term:`.DATA file` pointing to other include files. One :term:`.DATA file` typically points to multiple include files. include file diff --git a/res2df/common.py b/res2df/common.py index 0d433a280..42cb70e39 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -515,7 +515,7 @@ def df2res( consecutive: Optional[str] = None, filename: Optional[str] = None, ) -> str: - """Generate resdata include strings from dataframes in res2df format. + """Generate resdata :term:`include file` content from dataframes in res2df format. This function hands over the actual text generation pr. keyword to functions named df2res_ in the calling module. @@ -538,7 +538,7 @@ def df2res( to file. Returns: - string that can be used as an :term:`include file`. + string that can be used as contents of :term:`include file`. 
""" from_module = inspect.stack()[1] calling_module = inspect.getmodule(from_module[0]) diff --git a/res2df/equil.py b/res2df/equil.py index 4883e9713..c4f22f403 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -344,7 +344,7 @@ def equil_main(args) -> None: def equil_reverse_main(args) -> None: - """Entry-point for module, for command line utility + """Entry-point for module, for command line utility for CSV to reservoir simulator :term:`include files ` """ logger = getLogger_res2csv( # pylint: disable=redefined-outer-name diff --git a/res2df/grid.py b/res2df/grid.py index 89c87857a..06e8f5ff2 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -625,7 +625,7 @@ def df2res( nocomments: bool = False, ) -> str: """ - Write an :term:`include file` with grid data keyword, like PERMX, PORO, + Write a :term:`include file` contents with grid data keyword, like PERMX, PORO, FIPNUM etc, for the GRID section of the :term:`.DATA file`. Output (returned as string and optionally written to file) will then diff --git a/res2df/nnc.py b/res2df/nnc.py index f4ccec7fa..e29f89423 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -234,7 +234,7 @@ def df2res_editnnc( in the produced string/file Returns: - string with the EDITNNC keyword. + :term:`include file` content string with the EDITNNC keyword. """ string = "" diff --git a/res2df/pillars.py b/res2df/pillars.py index 2c4edf678..d4d05c058 100644 --- a/res2df/pillars.py +++ b/res2df/pillars.py @@ -332,9 +332,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help=( - "Name of reservoir .DATA file. INIT and EGRID file must lie alongside." - ), + help=("Name of reservoir .DATA file. INIT and EGRID file must lie alongside."), ) parser.add_argument( "--region", diff --git a/res2df/pvt.py b/res2df/pvt.py index 072c85820..20b9faa78 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -330,7 +330,7 @@ def df2res( comments: Optional[Dict[str, str]] = None, filename: Optional[str] = None, ) -> str: - """Generate resdata include strings from PVT dataframes + """Generate resdata :term:`include file` content from PVT dataframes Args: pvt_df: Dataframe with PVT data in res2df format. diff --git a/res2df/satfunc.py b/res2df/satfunc.py index c5d993e6e..5b54713d3 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -239,7 +239,7 @@ def df2res( comments: Optional[Dict[str, str]] = None, filename: Optional[str] = None, ) -> str: - """Generate resdata include strings from dataframes with + """Generate resdata :term:`include file` content from dataframes with saturation functions (SWOF, SGOF, ...) Args: @@ -254,7 +254,7 @@ def df2res( to file. Returns: - Generated resdata include string + Generated resdata :term:`include file` content """ string = "" From d67b2f0d42c089e2d3e6d85aedd611480c1f4bde Mon Sep 17 00:00:00 2001 From: "Yngve S. 
Kristiansen" Date: Tue, 21 Nov 2023 08:55:04 +0100 Subject: [PATCH 61/68] Print X keyword with data -> Create string with include file contents for X keyword --- res2df/equil.py | 14 +++++++------- res2df/pvt.py | 32 +++++++++++++++++++------------- res2df/satfunc.py | 19 ++++++++++--------- tests/test_userapi.py | 2 +- 4 files changed, 37 insertions(+), 30 deletions(-) diff --git a/res2df/equil.py b/res2df/equil.py index c4f22f403..e27141309 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -397,7 +397,7 @@ def df2res( def df2res_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print EQUIL keyword with data + """Create string with :term:`include file` contents for EQUIL keyword Args: dframe: Containing EQUIL data @@ -432,7 +432,7 @@ def df2res_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print RSVD keyword with data + """Create string with :term:`include file` contents for RSVD keyword This data consists of one table (rs as a function of depth) for each EQLNUM @@ -445,7 +445,7 @@ def df2res_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print RVVD keyword with data + """Create string with :term:`include file` contents for RVVD keyword This data consists of one table (rv as a function of depth) for each EQLNUM @@ -458,7 +458,7 @@ def df2res_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PBVD keyword with data + """Create string with :term:`include file` contents for PBVD keyword Bubble-point versus depth @@ -473,7 +473,7 @@ def df2res_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None): - """Print PDVD keyword with data. + """Create string with :term:`include file` contents for PDVD keyword. Dew-point versus depth. @@ -505,8 +505,8 @@ def _df2res_equilfuncs( subset = dframe[dframe["KEYWORD"] == keyword] def _df2res_equilfuncs_eqlnum(dframe: pd.DataFrame) -> str: - """Print one equilibriation function table for a specific - EQLNUM + """Create string with :term:`include file` contents + for one equilibriation function table for a specific EQLNUM Args: dframe (pd.DataFrame): Cropped to only contain data for one EQLNUM diff --git a/res2df/pvt.py b/res2df/pvt.py index 20b9faa78..0f0629eb5 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -353,7 +353,7 @@ def df2res( def df2res_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print ROCK keyword with data + """Create string with :term:`include file` contents for ROCK keyword Args: dframe (pd.DataFrame): Containing ROCK data @@ -381,7 +381,7 @@ def df2res_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print DENSITY keyword with data + """Create string with :term:`include file` contents for DENSITY keyword Args: dframe: Containing DENSITY data @@ -410,7 +410,7 @@ def df2res_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PVTW keyword with data + """Create string with :term:`include file` contents for PVTW keyword PVTW is one line/record with data for a reference pressure for each PVTNUM. 
@@ -446,7 +446,7 @@ def df2res_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PVTG keyword with data + """Create string with :term:`include file` contents for PVTG keyword Args: dframe: Containing PVTG data @@ -473,7 +473,8 @@ def df2res_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: subset = subset.set_index("PVTNUM").sort_index() def _pvtg_pvtnum(dframe): - """Print PVTG-data for a specific PVTNUM""" + """Create string with :term:`include file` contents for + PVTG-data with a specific PVTNUM""" string = "" dframe = dframe.set_index("PRESSURE").sort_index() for p_gas in dframe.index.unique(): @@ -481,7 +482,8 @@ def _pvtg_pvtnum(dframe): return string + "/\n" def _pvtg_pvtnum_pg(dframe): - """Print PVTG-data for a particular gas phase pressure""" + """Create string with :term:`include file` contents for + PVTG-data with a particular gas phase pressure""" string = "" assert len(dframe.index.unique()) == 1 p_gas = dframe.index.values[0] @@ -505,7 +507,7 @@ def _pvtg_pvtnum_pg(dframe): def df2res_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PVDG keyword with data + """Create string with :term:`include file` contents for PVDG keyword This data consists of one table (volumefactor and visosity as a function of pressure) pr. PVTNUM. @@ -531,7 +533,8 @@ def df2res_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: subset["PVTNUM"] = 1 def _pvdg_pvtnum(dframe): - """Print PVDG-data for a specific PVTNUM + """Create string with :term:`include file` contents for + PVDG-data with a specific PVTNUM Args: dframe (pd.DataFrame): Cropped to only contain the relevant data. @@ -555,7 +558,7 @@ def _pvdg_pvtnum(dframe): def df2res_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PVDO keyword with data + """Create string with :term:`include file` contents for PVDO keyword Args: dframe: Containing PVDO data @@ -578,7 +581,8 @@ def df2res_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: subset["PVTNUM"] = 1 def _pvdo_pvtnum(dframe: pd.DataFrame) -> str: - """Print PVDO-data for a specific PVTNUM + """Create string with :term:`include file` contents + for PVDO-data for a specific PVTNUM Args: dframe (pd.DataFrame): Cropped to only contain the relevant data. 
@@ -602,7 +606,7 @@ def _pvdo_pvtnum(dframe: pd.DataFrame) -> str: def df2res_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print PVTO-data from a dataframe + """Create string with :term:`include file` contents for PVTO-data from a dataframe Args: dframe: Containing PVTO data @@ -627,7 +631,8 @@ def df2res_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: subset = subset.set_index("PVTNUM").sort_index() def _pvto_pvtnum(dframe: pd.DataFrame) -> str: - """Print PVTO-data for a specific PVTNUM""" + """Create string with :term:`include file` contents + for PVTO-data for a specific PVTNUM""" string = "" dframe = dframe.set_index("RS").sort_index() for rs in dframe.index.unique(): @@ -635,7 +640,8 @@ def _pvto_pvtnum(dframe: pd.DataFrame) -> str: return string + "/\n" def _pvto_pvtnum_rs(dframe: pd.DataFrame) -> str: - """Print PVTO-data for a particular RS""" + """Create string with :term:`include file` contents + for PVTO-data for a particular RS""" string = "" assert len(dframe.index.unique()) == 1 rs = dframe.index.values[0] diff --git a/res2df/satfunc.py b/res2df/satfunc.py index 5b54713d3..3d770962f 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -270,7 +270,7 @@ def df2res( def df2res_swof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SWOF data. Used by df2res(). + """Create string with :term:`include file` contents for SWOF. Used by df2res(). Args: dframe: Containing SWOF data @@ -280,7 +280,7 @@ def df2res_swof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_sgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SGOF data. Used by df2res(). + """Create string with :term:`include file` contents for SGOF. Used by df2res(). Args: dframe: Containing SGOF data @@ -290,7 +290,7 @@ def df2res_sgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_sgfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SGFN data. Used by df2res(). + """Create string with :term:`include file` contents for SGFN. Used by df2res(). Args: dframe: Containing SGFN data @@ -300,7 +300,7 @@ def df2res_sgfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_sgwfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SGWFN data. Used by df2res(). + """Create string with :term:`include file` contents for SGWFN. Used by df2res(). Args: dframe: Containing SGWFN data @@ -310,7 +310,7 @@ def df2res_sgwfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_swfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SWFN data. Used by df2res(). + """Create string with :term:`include file` contents for SWFN. Used by df2res(). Args: dframe: Containing SWFN data @@ -320,7 +320,7 @@ def df2res_swfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_slgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SLGOF data. Used by df2res(). + """Create string with :term:`include file` contents for SLGOF. Used by df2res(). Args: dframe: Containing SLGOF data @@ -330,7 +330,7 @@ def df2res_slgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_sof2(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SOF2 data. Used by df2res(). + """Create string with :term:`include file` contents for SOF2. Used by df2res(). 
Args: dframe: Containing SOF2 data @@ -340,7 +340,7 @@ def df2res_sof2(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def df2res_sof3(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Print SOF3 data. Used by df2res(). + """Create string with :term:`include file` contents for SOF3. Used by df2res(). Args: dframe: Containing SOF3 data @@ -368,7 +368,8 @@ def _df2res_satfuncs( # Make a function that is to be called for each SATNUM def _df2res_satfuncs_satnum(keyword, dframe): - """Print one saturation function for one specific SATNUM""" + """Create string with :term:`include file` contents + for one saturation function for one specific SATNUM""" col_headers = RENAMERS[keyword]["DATA"] string = ( "-- " diff --git a/tests/test_userapi.py b/tests/test_userapi.py index 7abd3e5d4..2bb211fbb 100644 --- a/tests/test_userapi.py +++ b/tests/test_userapi.py @@ -57,7 +57,7 @@ def test_userapi(): hcpv_table = grst_df.groupby("FIPNUM").sum()[["OILPV", "HCPV"]] assert not hcpv_table.empty - # Print the HCPV table by FIPNUM: + # Print the HCPV table, grouped by FIPNUM: print() print((hcpv_table / 1e6).round(2)) From dff35bfe6aaec5c74c60c9cac2b880dba4d5ada2 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 21 Nov 2023 09:32:51 +0100 Subject: [PATCH 62/68] Produce a string representing -> Creates include file contents --- res2df/pvt.py | 4 ++-- res2df/vfp/_vfp.py | 6 ++++-- res2df/vfp/_vfpcommon.py | 3 ++- res2df/vfp/_vfpinj.py | 12 ++++++++---- res2df/vfp/_vfpprod.py | 12 ++++++++---- 5 files changed, 24 insertions(+), 13 deletions(-) diff --git a/res2df/pvt.py b/res2df/pvt.py index 0f0629eb5..b677b0b50 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -632,7 +632,7 @@ def df2res_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: def _pvto_pvtnum(dframe: pd.DataFrame) -> str: """Create string with :term:`include file` contents - for PVTO-data for a specific PVTNUM""" + for PVTO-data for a specific PVTNUM""" string = "" dframe = dframe.set_index("RS").sort_index() for rs in dframe.index.unique(): @@ -641,7 +641,7 @@ def _pvto_pvtnum(dframe: pd.DataFrame) -> str: def _pvto_pvtnum_rs(dframe: pd.DataFrame) -> str: """Create string with :term:`include file` contents - for PVTO-data for a particular RS""" + for PVTO-data for a particular RS""" string = "" assert len(dframe.index.unique()) == 1 rs = dframe.index.values[0] diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 733849f71..0a050ddd6 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -331,7 +331,8 @@ def df2ress( keyword: str = "VFPPROD", comments: Optional[Dict[str, str]] = None, ) -> List[str]: - """Produce a list of strings defining VFPPROD/VFPINJ Eclipse input from a dataframe + """Produce a list of strings defining VFPPROD/VFPINJ Eclipse + :term:`input file` contents from a dataframe All data for the keyword VFPPROD or VFPINJ will be returned. @@ -378,7 +379,8 @@ def df2res( comments: Optional[Dict[str, str]] = None, filename: Optional[str] = None, ) -> str: - """Produce a string defining all VFPPROD/VFPINJ Eclipse input from a dataframe + """Create a string defining all VFPPROD/VFPINJ Eclipse + :term:`input file` contents from a dataframe All data for the keywords VFPPROD/VFPINJ will be returned.
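The satfunc df2res_* helpers reworded earlier in this patch follow the same dataframe-to-keyword pattern. A hedged sketch for df2res_swof, assuming a SWOF dataframe layout with KEYWORD, SATNUM, SW, KRW, KROW and PC columns (the column names are assumptions, not taken from this patch):

    import pandas as pd
    from res2df import satfunc

    # Hypothetical single-SATNUM water-oil table; column names are an
    # assumption based on satfunc's dataframe format, not on this patch.
    dframe = pd.DataFrame(
        {
            "KEYWORD": ["SWOF"] * 3,
            "SATNUM": [1, 1, 1],
            "SW": [0.1, 0.5, 1.0],
            "KRW": [0.0, 0.2, 1.0],
            "KROW": [1.0, 0.3, 0.0],
            "PC": [3.0, 0.5, 0.0],
        }
    )
    # Returns SWOF keyword text ready for an include file:
    print(satfunc.df2res_swof(dframe, comment="Example SWOF, one SATNUM"))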
diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py index 956b26530..83b23ea5d 100755 --- a/res2df/vfp/_vfpcommon.py +++ b/res2df/vfp/_vfpcommon.py @@ -181,7 +181,8 @@ def _write_vfp_range( format: str = "%10.6g", values_per_line: int = 5, ) -> str: - """Produce a string representing a resdata record for a given table range + """Creates an :term:`include file` content string of a resdata + record for a given table range Args: values: List/array with the range sorted diff --git a/res2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py index ac7643bb3..adba85e8b 100755 --- a/res2df/vfp/_vfpinj.py +++ b/res2df/vfp/_vfpinj.py @@ -550,7 +550,8 @@ def _write_basic_record( unit_type: str, tab_type: str, ) -> str: - """Produce a string representing the first record for Eclipse VFPINJ keyword + """Creates an :term:`include file` content string of the + first record for the Eclipse VFPINJ keyword Args: tableno: VFPINJ table number @@ -580,7 +581,8 @@ def _write_table( format: str = "%10.6g", values_per_line: int = 5, ) -> str: - """Produce a string representing a resdata record for a VFPINJ table (BHP part) + """Creates an :term:`include file` content string representing + a resdata record for a VFPINJ table (BHP part) Args: table: DataFrame with multiindex for table ranges and columns @@ -614,7 +616,8 @@ def _write_table_records( format: str = "%10.6g", values_per_line: int = 5, ) -> str: - """Produce a string representing a resdata record for a VFPINJ table (BHP part) + """Creates an :term:`include file` content string representing + a resdata record for a VFPINJ table (BHP part) Args: thp_indices: array of int representing index for THP value for record @@ -652,7 +655,8 @@ def _write_table_records( def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Produce a string defining single VFPINJ Eclipse input from a dataframe + """Creates an :term:`include file` content string + representing a single VFPINJ Eclipse input from a dataframe All data for the keyword VFPINJ will be returned.
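The format and values_per_line parameters in the records above describe a fixed-width layout with a limited number of values per line and a terminating slash. A standalone illustration of that layout (my sketch under those stated assumptions, not the library's implementation):

    from typing import List

    def format_record(values: List[float], fmt: str = "%10.6g",
                      values_per_line: int = 5) -> str:
        """Format numbers fixed-width, wrapping after a set count per
        line, and close the record with a slash."""
        lines = []
        for start in range(0, len(values), values_per_line):
            chunk = values[start : start + values_per_line]
            lines.append(" ".join(fmt % value for value in chunk))
        return "\n".join(lines) + " /\n"

    # Seven values wrap onto two lines, five per line:
    print(format_record([10, 25, 50, 100, 200, 400, 800]))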
diff --git a/res2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py index 04239698d..44bf1ed92 100755 --- a/res2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -816,7 +816,8 @@ def _write_basic_record( unit_type: str, tab_type: str, ) -> str: - """Produce a string representing the first record for Eclipse VFPPROD keyword + """Creates an :term:`include file` content string representing + the first record for the Eclipse VFPPROD keyword Args: tableno: VFPPROD table number @@ -855,7 +856,8 @@ def _write_table( format: str = "%10.3", values_per_line: int = 5, ) -> str: - """Produce a string representing a resdata record for a VFPPROD table (BHP part) + """Creates an :term:`include file` content string representing + a resdata record for a VFPPROD table (BHP part) Args: table: DataFrame with multiindex for table ranges and columns @@ -892,7 +894,8 @@ def _write_table_records( format: str = "%10.3", values_per_line: int = 5, ) -> str: - """Produce a string representing a resdata record for a VFPPROD table (BHP part) + """Creates an :term:`include file` content string representing a + resdata record for a VFPPROD table (BHP part) Args: thp_indices: array of int representing index for THP value for record @@ -936,7 +939,8 @@ def _write_table_records( def df2res(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: - """Produce a string defining single VFPPROD Eclipse input from a dataframe + """Creates an :term:`include file` content string + representing a single VFPPROD Eclipse input from a dataframe All data for the keyword VFPPROD will be returned. From cb011568318e0535055fd396366f70d37419889d Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 21 Nov 2023 09:41:39 +0100 Subject: [PATCH 63/68] input file->include file oops --- res2df/resdatafiles.py | 2 +- res2df/vfp/_vfp.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/res2df/resdatafiles.py b/res2df/resdatafiles.py index a79e67277..e37452714 100644 --- a/res2df/resdatafiles.py +++ b/res2df/resdatafiles.py @@ -48,7 +48,7 @@ class ResdataFiles(object): caching if wanted. Various functions that need some of the simulator :term:`output <output file>` - (or :term:`input file`) should be able to ask this class, and + (or :term:`include file`) should be able to ask this class, and it should be loaded or served from cache. diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 0a050ddd6..7bb254893 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -332,7 +332,7 @@ def df2ress( comments: Optional[Dict[str, str]] = None, ) -> List[str]: """Produce a list of strings defining VFPPROD/VFPINJ Eclipse - :term:`input file` contents from a dataframe + :term:`include file` contents from a dataframe All data for the keyword VFPPROD or VFPINJ will be returned. @@ -380,7 +380,7 @@ def df2res( filename: Optional[str] = None, ) -> str: """Create a string defining all VFPPROD/VFPINJ Eclipse - :term:`input file` contents from a dataframe + :term:`include file` contents from a dataframe All data for the keywords VFPPROD/VFPINJ will be returned. From 763bee9fba3a1f9dbfb6bc91c8c28ae16fb064ae Mon Sep 17 00:00:00 2001 From: "Yngve S.
Kristiansen" Date: Tue, 21 Nov 2023 10:35:01 +0100 Subject: [PATCH 64/68] Clarify some glossary terms --- docs/glossary.rst | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/docs/glossary.rst b/docs/glossary.rst index 304a44b6c..e6ed5b095 100644 --- a/docs/glossary.rst +++ b/docs/glossary.rst @@ -5,13 +5,12 @@ Glossary reservoir simulator Simulation of reservoir fields come in many forms, but for the purposes of - res2df we only consider simulators takes a :term:`deck` as input and produces - term`output file`s such `.UNSRMY`. This includes, OPM flow and Eclipse. + res2df we only consider simulators that take a :term:`deck` as input and produces + :term:`output files ` such `.UNSRMY`. This includes, OPM flow and Eclipse. .DATA file - Inputs provided to reservoir simulators such as Eclipse or OPM Flow. - Usually a :term:`.DATA file` pointing to other include files. One :term:`.DATA file` - typically points to multiple include files. + Input provided to reservoir simulators such as Eclipse or OPM Flow. + Often a :term:`.DATA file` includes other :term:`include files ` with the INCLUDE keyword. include file Files that provide inputs to reservoir simulators by using the INCLUDE statement @@ -22,8 +21,7 @@ Glossary Refers to inputs passed to reservoir simulators. It may be a :term:`.DATA file` and the include files it points to, or it may be a single or several include files. If a deck contains all the information (i.e., keywords) the simulator needs - to run the requested simulation, it is defined as complete. If it is missing - needed information, it is incomplete. + to run the requested simulation, it is defined as complete. Otherwise it is incomplete. output file When a reservoir simulator runs, several files will be generated. From b383910fe49951e91e5adf7f012d5866e5c12345 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 21 Nov 2023 10:43:13 +0100 Subject: [PATCH 65/68] Reword description of compdat --- docs/introduction.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/introduction.rst b/docs/introduction.rst index 040ad4bbc..1bcdcf84f 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -136,12 +136,12 @@ More documentation on :doc:`usage/equil`. ``compdat`` ^^^^^^^^^^^ -Extracts well connection data from the `COMPDAT` keyword in the :term:`.DATA file`. +Extracts well connection data from the `COMPDAT` keyword in the :term:`deck`. For multi-segment wells, `WELSEGS` and `COMPSEGS` is also parsed. The data is available as three different dataframes, which can be merged. -It is also possible to parse individual :term:`"include files" `, not only a -finished working :term:`deck`. +It is also possible to parse individual :term:`"include files" `. +These files do not necessarily have to be part of a complete :term:`deck` More documentation on :doc:`usage/compdat`. From 00c0ea81731128e0c19d436a3da604968831c66e Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 21 Nov 2023 10:47:35 +0100 Subject: [PATCH 66/68] Fix satfunc doc wording --- docs/usage/satfunc.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index 271894b8c..17143dcc7 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -33,7 +33,7 @@ Alternatively, the same data can be produced as a CSV file using the command lin It is possible to extract keywords one at a time using the ``--keywords`` command line option. 
-Instead of data decks, individual include files may also be parsed, but +Instead of complete :term:`decks <deck>`, individual include files may also be parsed, but only one at a time. Generating include files from dataframes From 71fc136ddd3a63dcead8accbdc21f845eb043e56 Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 21 Nov 2023 11:27:51 +0100 Subject: [PATCH 67/68] Fix wording UNSMRY files --- res2df/csv2res.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/res2df/csv2res.py b/res2df/csv2res.py index a09f53043..19867fc22 100644 --- a/res2df/csv2res.py +++ b/res2df/csv2res.py @@ -51,7 +51,7 @@ def get_parser() -> argparse.ArgumentParser: summary_parser = subparsers.add_parser( "summary", - help="Write summary UNSMRY files", + help="Write UNSMRY files", description=("Write UNSMRY files from CSV files."), ) summary.fill_reverse_parser(summary_parser) From 972b3efab5a1d15c252ba933eddb9ddb3a48eaaa Mon Sep 17 00:00:00 2001 From: "Yngve S. Kristiansen" Date: Tue, 21 Nov 2023 11:56:33 +0100 Subject: [PATCH 68/68] Reservoir .DATA file -> simulator input .DATA file --- res2df/grid.py | 4 ++-- res2df/nnc.py | 4 ++-- res2df/pillars.py | 3 ++- res2df/pvt.py | 4 +++- res2df/rft.py | 2 +- res2df/satfunc.py | 3 ++- res2df/summary.py | 5 +++-- res2df/trans.py | 4 ++-- res2df/wcon.py | 3 ++- res2df/wellcompletiondata.py | 3 ++- res2df/wellconnstatus.py | 3 ++- 11 files changed, 23 insertions(+), 15 deletions(-) diff --git a/res2df/grid.py b/res2df/grid.py index 06e8f5ff2..0a59fbf96 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -540,8 +540,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir .DATA file. " - + "INIT and EGRID file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist .INIT and .EGRID files with the same path and basename.", ) parser.add_argument( "--vectors", diff --git a/res2df/nnc.py b/res2df/nnc.py index e29f89423..9ced3ea6e 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -178,8 +178,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir .DATA file. " - + "INIT and EGRID file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist .INIT and .EGRID files with the same path and basename.", ) parser.add_argument( "-c", diff --git a/res2df/pillars.py b/res2df/pillars.py index d4d05c058..76e3d45f9 100644 --- a/res2df/pillars.py +++ b/res2df/pillars.py @@ -332,7 +332,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help=("Name of reservoir .DATA file. INIT and EGRID file must lie alongside."), + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist .INIT and .EGRID files with the same path and basename.", ) parser.add_argument( "--region", diff --git a/res2df/pvt.py b/res2df/pvt.py index b677b0b50..c38222b0f 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -246,7 +246,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser (ArgumentParser or subparser): parser to fill with arguments """ parser.add_argument( - "DATAFILE", help="Name of reservoir .DATA file or file with PVT keywords."
+ "DATAFILE", + help="Name of the .DATA input file for the reservoir simulator," + + " or file with PVT keywords.", ) parser.add_argument( "-o", diff --git a/res2df/rft.py b/res2df/rft.py index 42c550344..ec8f4e4d8 100644 --- a/res2df/rft.py +++ b/res2df/rft.py @@ -654,7 +654,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", help=( - "Name of reservoir .DATA file or RFT file. " + "Name of .DATA input file for the reservoir simulator, or RFT file. " "If .DATA file is provided, it will look for" " the associated .DATA file" ), diff --git a/res2df/satfunc.py b/res2df/satfunc.py index 3d770962f..c3aa62fb6 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -162,7 +162,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir .DATA file or file with saturation functions.", + help="Name of .DATA input file for the reservoir simulator," + + " or file with saturation functions.", ) parser.add_argument( "-o", diff --git a/res2df/summary.py b/res2df/summary.py index 71b415d3a..4b67d3e58 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -804,7 +804,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir .DATA file. UNSMRY file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist a UNSMRY file with the same path and basename.", ) parser.add_argument( "--time_index", @@ -856,7 +857,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: type=str, help=( "Filename of key-value parameter file to look for if -p is set, " - "relative to reservoir .DATA file or an absolute filename. " + "relative to simulator input (.DATA) file or an absolute filename. " "If not supplied, parameters.{json,yml,txt} in " "{., .. and ../..} will be merged in." ), diff --git a/res2df/trans.py b/res2df/trans.py index d793ada79..f041917ed 100644 --- a/res2df/trans.py +++ b/res2df/trans.py @@ -261,8 +261,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: """ parser.add_argument( "DATAFILE", - help="Name of reservoir .DATA file. " - + "INIT and EGRID file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist INIT and EGRID files with the same path and basename.", ) parser.add_argument("--vectors", nargs="+", help="Extra INIT vectors to be added") parser.add_argument( diff --git a/res2df/wcon.py b/res2df/wcon.py index cb537ecb6..35f261544 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -80,7 +80,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: fill with arguments """ parser.add_argument( - "DATAFILE", help="Name of reservoir .DATA file or resdata include file." + "DATAFILE", + help="Name of the .DATA input file or include file.", ) parser.add_argument( "-o", "--output", type=str, help="Name of output csv file.", default="wcon.csv" diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index 95d6f2103..6922ba3b9 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -250,7 +250,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", type=str, - help="Name of reservoir .DATA file. 
UNSMRY file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist a UNSMRY file with the same path and basename.", ) parser.add_argument( "--zonemap", diff --git a/res2df/wellconnstatus.py b/res2df/wellconnstatus.py index e0c514fb1..69cf06706 100644 --- a/res2df/wellconnstatus.py +++ b/res2df/wellconnstatus.py @@ -96,7 +96,8 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: parser.add_argument( "DATAFILE", type=str, - help="Name of reservoir .DATA file. UNSMRY file must lie alongside.", + help="Name of the .DATA input file for the reservoir simulator." + + " There must exist a UNSMRY file with the same path and basename.", ) parser.add_argument( "-o",
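All the fill_parser functions touched in this patch plug into a plain argparse parser, so the reworded DATAFILE help text surfaces directly on the command line. A hedged sketch of that wiring (the prog name and argument values are illustrative only):

    import argparse
    from res2df import summary

    parser = argparse.ArgumentParser(prog="res2csv summary")
    summary.fill_parser(parser)  # adds DATAFILE, --time_index etc. as above
    parser.print_help()  # shows the reworded DATAFILE description
    args = parser.parse_args(["MYMODEL.DATA", "--time_index", "monthly"])
    print(args.DATAFILE)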