diff --git a/docs/introduction.rst b/docs/introduction.rst index b26e87d85..e1305abce 100644 --- a/docs/introduction.rst +++ b/docs/introduction.rst @@ -33,9 +33,9 @@ a Pandas Dataframe. import res2df - eclfiles = res2df.EclFiles("MYECLDECK.DATA") - smry = res2df.summary.df(eclfiles, column_keys="F*", time_index="monthly") - hc_contacts = res2df.pillars.df(eclfiles, rstdates="all") + resfiles = res2df.ResFiles("MYECLDECK.DATA") + smry = res2df.summary.df(resfiles, column_keys="F*", time_index="monthly") + hc_contacts = res2df.pillars.df(resfiles, rstdates="all") See the API for more documentation and possibilities for each module. @@ -169,11 +169,11 @@ associated data in a dataframe format. More documentation on :doc:`usage/wcon`. -``eclfiles`` +``resfiles`` ^^^^^^^^^^^^ This is an internal helper module in order to represent finished or -unfinished Eclipse decks and runs. The class EclFiles can cache binary +unfinished Eclipse decks and runs. The class ResFiles can cache binary files that are recently read, and is able to locate the various output files based on the basename or the `.DATA` filename. diff --git a/docs/usage/compdat.rst b/docs/usage/compdat.rst index f7850fb98..07a06b638 100644 --- a/docs/usage/compdat.rst +++ b/docs/usage/compdat.rst @@ -7,13 +7,13 @@ Additionally, it will parse WELOPEN statements and emit new COMPDAT statements from the actions in WELOPEN. .. - compdat.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/compdat.csv', index=False) + compdat.df(ResFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/compdat.csv', index=False) .. code-block:: python - from res2df import compdat, EclFiles + from res2df import compdat, ResFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = compdat.df(eclfiles) + resfiles = ResFiles("MYDATADECK.DATA") + dframe = compdat.df(resfiles) .. 
csv-table:: Example COMPDAT table :file: compdat.csv diff --git a/docs/usage/equil.rst b/docs/usage/equil.rst index 6004587f4..dd87e1db5 100644 --- a/docs/usage/equil.rst +++ b/docs/usage/equil.rst @@ -9,9 +9,9 @@ Supported keywords are ``EQUIL``, ``RSVD``, ``RVVD``, ``PBVD`` and .. code-block:: python - from res2df import equil, EclFiles + from res2df import equil, ResFiles - dframe = equil.df(EclFiles('MYECLDECK.DATA')) + dframe = equil.df(ResFiles('MYECLDECK.DATA')) Which will provide a dataframe similar to the example below. Note that the column `Z` is used both for datum depth and the depth values in ``RSVD`` tables. The @@ -19,7 +19,7 @@ amount of columns obtained depends on the input dataset, and should be possible to link up with the Eclipse documentation. API doc: :func:`res2df.equil.df` .. - dframe = equil.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) + dframe = equil.df(ResFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) dframe[['EQLNUM', 'KEYWORD', 'Z', 'PRESSURE', 'OWC', 'GOC', 'RS']]\ .to_csv(index=False)) diff --git a/docs/usage/grid.rst b/docs/usage/grid.rst index 3df33b081..e610bf416 100644 --- a/docs/usage/grid.rst +++ b/docs/usage/grid.rst @@ -9,16 +9,16 @@ Typical usage .. code-block:: python - from res2df import grid, EclFiles + from res2df import grid, ResFiles - eclfiles = EclFiles('MYDATADECK.DATA') - dframe = grid.df(eclfiles, rstdates='last') + resfiles = ResFiles('MYDATADECK.DATA') + dframe = grid.df(resfiles, rstdates='last') where the API is documented at :func:`res2df.grid.df`. .. - eclfiles = EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA') - grid.df(eclfiles).sample(10).to_csv('docs/usage/grid.csv', float_format="%.2f", index=False) + resfiles = ResFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA') + grid.df(resfiles).sample(10).to_csv('docs/usage/grid.csv', float_format="%.2f", index=False) .. 
csv-table:: Example grid table :file: grid.csv @@ -110,10 +110,10 @@ the whereabouts of the file: .. code-block:: python - from res2df import grid, EclFiles, common + from res2df import grid, ResFiles, common - eclfiles = EclFiles("'MYDATADECK.DATA") - dframe = grid.df(eclfiles) + resfiles = ResFiles("MYDATADECK.DATA") + dframe = grid.df(resfiles) # The filename with layers is relative to DATA-file location # or an absolute path. subzonemap = res2df.common.parse_zonemapfile("subzones.lyr") @@ -155,21 +155,21 @@ it to FIPNUM 5. This can be accomplished using .. code-block:: python - from res2df import grid, EclFiles, common + from res2df import grid, ResFiles, common - eclfiles = EclFiles("'MYDATADECK.DATA") - dframe = grid.df(eclfiles) + resfiles = ResFiles("MYDATADECK.DATA") + dframe = grid.df(resfiles) # Change FIPNUM 6 to FIPNUM 5: rows_to_touch = dframe["FIPNUM"] == 6 dframe.loc[rows_to_touch, "FIPNUM"] = 5 # Write back to new include file, ensure datatype is integer. - grid.df2ecl(dframe, "FIPNUM", dtype=int, filename="fipnum.inc", eclfiles=eclfiles) + grid.df2ecl(dframe, "FIPNUM", dtype=int, filename="fipnum.inc", resfiles=resfiles) This will produce the file `fipnum.inc` with the contents: .. literalinclude:: fipnum.inc -It is recommended to supply the ``eclfiles`` object to ``df2ecl``, if not, correct grid +It is recommended to supply the ``resfiles`` object to ``df2ecl``, if not, correct grid size can not be ensured. diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index 604a5ce45..7627c860d 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -11,14 +11,14 @@ Note: Eclipse300 will not export TRANNNC data in parallel mode. Run in serial to get this output. .. - nnc.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/nnc.csv', index=False) + nnc.df(ResFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/nnc.csv', index=False) .. 
code-block:: python - from res2df import nnc, EclFiles + from res2df import nnc, ResFiles - eclfiles = EclFiles('MYDATADECK.DATA') - dframe = nnc.df(eclfiles) + resfiles = ResFiles('MYDATADECK.DATA') + dframe = nnc.df(resfiles) .. csv-table:: Example nnc table :file: nnc.csv @@ -49,10 +49,10 @@ to an Eclipse include file: .. code-block:: python - from ecl2f import nnc, EclFiles + from res2df import nnc, ResFiles - eclfiles = EclFiles("MYDATADECK.DATA") - nnc_df = nnc.df(eclfiles) + resfiles = ResFiles("MYDATADECK.DATA") + nnc_df = nnc.df(resfiles) nnc_df["TRANM"] = 0.1 # Reduce all NNC transmissibilities nnc.df2ecl_editnnc(nnc_df, filename="editnnc.inc") @@ -60,7 +60,7 @@ to an Eclipse include file: and the contents of the exported file can be: .. - print(nnc.df2ecl_editnnc(nnc.df(eclfiles).head(4).assign(TRANM=0.1))) + print(nnc.df2ecl_editnnc(nnc.df(resfiles).head(4).assign(TRANM=0.1))) .. code-block:: console diff --git a/docs/usage/pillars.rst b/docs/usage/pillars.rst index 391d0b47d..7fb525f7d 100644 --- a/docs/usage/pillars.rst +++ b/docs/usage/pillars.rst @@ -13,9 +13,9 @@ Typical usage is to obtain property statistics, and compute contacts pr. pillar (and optionally pr some region parameter). .. - from res2df import pillars, EclFiles - pillars.df(res2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) - pillars.df(res2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head().to_csv("pillars-example1.csv"float_format="%.1f", index=False)) + from res2df import pillars, ResFiles + pillars.df(res2df.ResFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')) + pillars.df(res2df.ResFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head().to_csv("pillars-example1.csv", float_format="%.1f", index=False) .. csv-table:: Example pillar table :file: pillars-example1.csv @@ -90,7 +90,7 @@ By default, dynamic data are added as a set of columns for every date, like in this example: .. 
- pillars.df(res2df.EclFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), rstdates='all').dropna().head().to_csv('pillars-dyn1-unstacked.csv', float_format="%.1f", index=False) + pillars.df(res2df.ResFiles('../tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), rstdates='all').dropna().head().to_csv('pillars-dyn1-unstacked.csv', float_format="%.1f", index=False) .. csv-table:: Example pillar table with dynamical data, unstacked :file: pillars-dyn1-unstacked.csv diff --git a/docs/usage/pvt.rst b/docs/usage/pvt.rst index fedd33886..a2b7e7226 100644 --- a/docs/usage/pvt.rst +++ b/docs/usage/pvt.rst @@ -9,10 +9,10 @@ Example usage: .. code-block:: python - from res2df import pvt, EclFiles + from res2df import pvt, ResFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = pvt.df(eclfiles) + resfiles = ResFiles("MYDATADECK.DATA") + dframe = pvt.df(resfiles) Alternatively, we may also read directly from an include file if we read the contents of the file and supply it as a string: @@ -22,7 +22,7 @@ if we read the contents of the file and supply it as a string: dframe = pvt.df(open("pvt.inc").read()) .. - pvt.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).tail(15).to_csv('docs/usage/pvt.csv', index=False) + pvt.df(ResFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).tail(15).to_csv('docs/usage/pvt.csv', index=False) .. csv-table:: Example PVT table (last 15 rows to show non-Nan data) diff --git a/docs/usage/satfunc.rst b/docs/usage/satfunc.rst index 44b605733..50539d077 100644 --- a/docs/usage/satfunc.rst +++ b/docs/usage/satfunc.rst @@ -11,14 +11,14 @@ column. .. import numpy as np - satfunc.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).iloc[np.r_[0:5, 37:42, -5:0]].to_csv('docs/usage/satfunc.csv', index=False) + satfunc.df(ResFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).iloc[np.r_[0:5, 37:42, -5:0]].to_csv('docs/usage/satfunc.csv', index=False) .. 
code-block:: python - from res2df import satfunc, EclFiles + from res2df import satfunc, ResFiles - eclfiles = EclFiles('MYDATADECK.DATA') - dframe = satfunc.df(eclfiles) + resfiles = ResFiles('MYDATADECK.DATA') + dframe = satfunc.df(resfiles) .. csv-table:: Example satfunc table (only a subset of the rows are shown) :file: satfunc.csv diff --git a/docs/usage/summary.rst b/docs/usage/summary.rst index 968d37670..dad52c977 100644 --- a/docs/usage/summary.rst +++ b/docs/usage/summary.rst @@ -5,14 +5,14 @@ This module extracts summary information from UNSMRY-files into Pandas Dataframes. .. - summary.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), column_keys="F*PT", time_index='yearly').to_csv("summary.csv") + summary.df(ResFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA'), column_keys="F*PT", time_index='yearly').to_csv("summary.csv") .. code-block:: python - from res2df import summary, EclFiles + from res2df import summary, ResFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = summary.df(eclfiles, column_keys="F*PT", time_index="yearly") + resfiles = ResFiles("MYDATADECK.DATA") + dframe = summary.df(resfiles, column_keys="F*PT", time_index="yearly") If you don't specify ``column_keys``, all included summary vectors will be retrieved. Default for ``time_index`` is the report datetimes written by diff --git a/docs/usage/trans.rst b/docs/usage/trans.rst index 9a26f7558..31c483ce5 100644 --- a/docs/usage/trans.rst +++ b/docs/usage/trans.rst @@ -11,13 +11,13 @@ connections .. code-block:: python - from res2df import trans, EclFiles + from res2df import trans, ResFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = res2df.trans.df(eclfiles) + resfiles = ResFiles("MYDATADECK.DATA") + dframe = res2df.trans.df(resfiles) .. - res2df.trans.df(res2df.EclFiles("2_R001_REEK-0.DATA")).sample(7)\ + res2df.trans.df(res2df.ResFiles("2_R001_REEK-0.DATA")).sample(7)\ .to_csv("trans1.csv", float_format="%.2f", index=False) .. 
csv-table:: Neighbour transmissibilities, sample rows from an example simulation. @@ -79,12 +79,12 @@ like this. Example: .. code-block:: python - dframe = res2df.trans.df(eclfiles, vectors="FIPNUM", boundaryfilter=True, addnnc=True) + dframe = res2df.trans.df(resfiles, vectors="FIPNUM", boundaryfilter=True, addnnc=True) which gives the dataframe .. - res2df.trans.df(res2df.EclFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", boundaryfilter=True).sample(10).to_csv("trans-boundaries.csv", index=False, float_format="%.2f") + res2df.trans.df(res2df.ResFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", boundaryfilter=True).sample(10).to_csv("trans-boundaries.csv", index=False, float_format="%.2f") .. csv-table:: Sample rows from connections where FIPNUM is changing :file: trans-boundaries.csv @@ -105,13 +105,13 @@ over a region interface. This is accomplished by adding the ``group=True`` optio .. code-block:: python - from res2df import trans, EclFiles + from res2df import trans, ResFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = res2df.trans.df(eclfiles, vectors="FIPNUM", addnnc=True, group=True) + resfiles = ResFiles("MYDATADECK.DATA") + dframe = res2df.trans.df(resfiles, vectors="FIPNUM", addnnc=True, group=True) .. - res2df.trans.df(res2df.EclFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", group=True).to_csv("trans-group.csv", index=False, float_format="%.2f") + res2df.trans.df(res2df.ResFiles("2_R001_REEK-0.DATA"), addnnc=True, vectors="FIPNUM", group=True).to_csv("trans-group.csv", index=False, float_format="%.2f") .. csv-table:: Transmissibilities summed over each FIPNUM interface :file: trans-group.csv diff --git a/docs/usage/wcon.rst b/docs/usage/wcon.rst index 393607935..12e26c3c0 100644 --- a/docs/usage/wcon.rst +++ b/docs/usage/wcon.rst @@ -5,13 +5,13 @@ This module extracts information from WCONHIST, WCONINJE, WCONINJH and WCONPROD from an Eclipse deck. .. 
- wcon.df(EclFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/wcon.csv', index=False) + wcon.df(ResFiles('tests/data/reek/eclipse/model/2_R001_REEK-0.DATA')).head(15).to_csv('docs/usage/wcon.csv', index=False) .. code-block:: python - from res2df import wcon, EclFiles + from res2df import wcon, ResFiles - eclfiles = EclFiles("MYDATADECK.DATA") - dframe = wcon.df(eclfiles) + resfiles = ResFiles("MYDATADECK.DATA") + dframe = wcon.df(resfiles) .. csv-table:: Example WCON table :file: wcon.csv diff --git a/res2df/__init__.py b/res2df/__init__.py index f4244fc5a..809176f16 100644 --- a/res2df/__init__.py +++ b/res2df/__init__.py @@ -11,7 +11,7 @@ __version__ = "v0.0.0" from .constants import MAGIC_STDOUT -from .eclfiles import EclFiles +from .resfiles import ResFiles SUBMODULES: List[str] = [ "compdat", diff --git a/res2df/compdat.py b/res2df/compdat.py index e6fcd8e79..c6df29698 100644 --- a/res2df/compdat.py +++ b/res2df/compdat.py @@ -35,7 +35,7 @@ parse_opmio_tstep_rec, write_dframe_stdout_file, ) -from .eclfiles import EclFiles +from .resfiles import ResFiles from .grid import merge_initvectors logger = logging.getLogger(__name__) @@ -973,13 +973,13 @@ def compdat_main(args): logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - compdat_df = df(eclfiles, initvectors=args.initvectors) + resfiles = ResFiles(args.DATAFILE) + compdat_df = df(resfiles, initvectors=args.initvectors) write_dframe_stdout_file(compdat_df, args.output, index=False, caller_logger=logger) def df( - eclfiles: EclFiles, + resfiles: ResFiles, initvectors: Optional[List[str]] = None, zonemap: Optional[Dict[int, str]] = None, ) -> pd.DataFrame: @@ -992,17 +992,17 @@ def df( Returns: pd.Dataframe with one row pr cell to well connection """ - compdat_df = deck2dfs(eclfiles.get_ecldeck())["COMPDAT"] + compdat_df = deck2dfs(resfiles.get_ecldeck())["COMPDAT"] compdat_df = unrolldf(compdat_df) 
if initvectors: compdat_df = merge_initvectors( - eclfiles, compdat_df, initvectors, ijknames=["I", "J", "K1"] + resfiles, compdat_df, initvectors, ijknames=["I", "J", "K1"] ) if zonemap is None: # If no zonemap is submitted, search for zonemap in default location - zonemap = eclfiles.get_zonemap() + zonemap = resfiles.get_zonemap() if zonemap: logger.info("Merging zonemap into compdat") diff --git a/res2df/eclfiles.py b/res2df/eclfiles.py index 2fca379eb..d06376872 100644 --- a/res2df/eclfiles.py +++ b/res2df/eclfiles.py @@ -39,7 +39,7 @@ ] -class EclFiles(object): +class ResFiles(object): """ Class for holding an Eclipse deck with result files @@ -109,7 +109,7 @@ def str2deck( @staticmethod def file2deck(filename: Union[str, Path]) -> "opm.libopmcommon_python.Deck": """Try to convert standalone files into opm.io Deck objects""" - return EclFiles.str2deck(Path(filename).read_text(encoding="utf-8")) + return ResFiles.str2deck(Path(filename).read_text(encoding="utf-8")) def get_egrid(self) -> Grid: """Find and return EGRID file as an Grid object""" diff --git a/res2df/equil.py b/res2df/equil.py index 8458343c9..ebdbeb347 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -11,7 +11,7 @@ from res2df import common, getLogger_res2csv, inferdims -from .eclfiles import EclFiles +from .resfiles import ResFiles try: # pylint: disable=unused-import @@ -72,7 +72,7 @@ def df( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResFiles, "opm.libopmcommon_python.Deck"], keywords: Optional[List[str]] = None, ntequl: Optional[int] = None, ) -> pd.DataFrame: @@ -89,7 +89,7 @@ def df( that we have to infer the correct number of EQUIL lines from what gives us successful parsing from OPM. In those cases, the deck must be supplied as a string, if not, extra EQUIL lines - are possibly already removed by the OPM parser in eclfiles.str2deck(). + are possibly already removed by the OPM parser in resfiles.str2deck(). 
Arguments: deck: Eclipse deck or string with deck. If @@ -101,7 +101,7 @@ def df( Return: pd.DataFrame, at least with columns KEYWORD and EQLNUM """ - if isinstance(deck, EclFiles): + if isinstance(deck, ResFiles): deck = deck.get_ecldeck() deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) @@ -314,9 +314,9 @@ def equil_main(args) -> None: logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + resfiles = ResFiles(args.DATAFILE) + if resfiles: + deck = resfiles.get_ecldeck() if "EQLDIMS" in deck: # Things are easier when a full deck with (correct) EQLDIMS # is supplied: diff --git a/res2df/faults.py b/res2df/faults.py index 98599540b..5863f8e16 100644 --- a/res2df/faults.py +++ b/res2df/faults.py @@ -10,7 +10,7 @@ import pandas as pd -from res2df import EclFiles, getLogger_res2csv +from res2df import ResFiles, getLogger_res2csv from res2df.common import parse_opmio_deckrecord, write_dframe_stdout_file try: @@ -30,7 +30,7 @@ ALLOWED_FACES = ["X", "Y", "Z", "I", "J", "K", "X-", "Y-", "Z-", "I-", "J-", "K-"] -def df(deck: Union[EclFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: +def df(deck: Union[ResFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: """Produce a dataframe of fault data from a deck All data for the keyword FAULTS will be returned. 
@@ -38,7 +38,7 @@ def df(deck: Union[EclFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: Args: deck: Eclipse deck """ - if isinstance(deck, EclFiles): + if isinstance(deck, ResFiles): deck = deck.get_ecldeck() # In[91]: list(deck['FAULTS'][0]) @@ -86,9 +86,9 @@ def faults_main(args) -> None: logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + resfiles = ResFiles(args.DATAFILE) + if resfiles: + deck = resfiles.get_ecldeck() faults_df = df(deck) write_dframe_stdout_file( faults_df, diff --git a/res2df/fipreports.py b/res2df/fipreports.py index ed3c741ae..3c8cd9061 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -10,7 +10,7 @@ import numpy as np import pandas as pd -from res2df import EclFiles, getLogger_res2csv +from res2df import ResFiles, getLogger_res2csv from res2df.common import parse_ecl_month, write_dframe_stdout_file logger = logging.getLogger(__name__) @@ -97,7 +97,7 @@ def float_or_nan(string: str) -> float: ) -def df(prtfile: Union[str, EclFiles], fipname: str = "FIPNUM") -> pd.DataFrame: +def df(prtfile: Union[str, ResFiles], fipname: str = "FIPNUM") -> pd.DataFrame: """ Parses a PRT file from Eclipse and finds FIPXXXX REGION REPORT blocks and organizes those numbers into a dataframe @@ -106,12 +106,12 @@ def df(prtfile: Union[str, EclFiles], fipname: str = "FIPNUM") -> pd.DataFrame: DATE and region index added. Args: - prtfile: filename (PRT) or an EclFiles object + prtfile: filename (PRT) or a ResFiles object fipname: The name of the regport regions, FIPNUM, FIPZON or whatever Max length of the string is 8, the first three characters must be FIP, and the next 3 characters must be unique for a given Eclipse deck. 
""" - if isinstance(prtfile, EclFiles): + if isinstance(prtfile, ResFiles): prtfile = prtfile.get_prtfilename() if not fipname.startswith("FIP"): raise ValueError("fipname must start with FIP") @@ -217,6 +217,6 @@ def fipreports_main(args) -> None: if args.PRTFILE.endswith(".PRT"): prtfile = args.PRTFILE else: - prtfile = EclFiles(args.PRTFILE).get_prtfilename() + prtfile = ResFiles(args.PRTFILE).get_prtfilename() dframe = df(prtfile, args.fipname) write_dframe_stdout_file(dframe, args.output, index=False, caller_logger=logger) diff --git a/res2df/grid.py b/res2df/grid.py index 4aab554cb..c07cb536d 100644 --- a/res2df/grid.py +++ b/res2df/grid.py @@ -26,27 +26,27 @@ from res2df import __version__, common, getLogger_res2csv -from .eclfiles import EclFiles +from .resfiles import ResFiles logger = logging.getLogger(__name__) -def get_available_rst_dates(eclfiles: EclFiles) -> List[datetime.date]: +def get_available_rst_dates(resfiles: ResFiles) -> List[datetime.date]: """Return a list of datetime objects for the available dates in the RST file""" - report_indices = ResdataFile.file_report_list(eclfiles.get_rstfilename()) + report_indices = ResdataFile.file_report_list(resfiles.get_rstfilename()) logger.info( "Restart report indices (count %s): %s", str(len(report_indices)), str(report_indices), ) return [ - eclfiles.get_rstfile().iget_restart_sim_time(index).date() + resfiles.get_rstfile().iget_restart_sim_time(index).date() for index in range(0, len(report_indices)) ] def dates2rstindices( - eclfiles: EclFiles, dates: Optional[Union[str, datetime.date, List[datetime.date]]] + resfiles: ResFiles, dates: Optional[Union[str, datetime.date, List[datetime.date]]] ) -> Tuple[List[int], List[datetime.date], List[str]]: """Return the restart index/indices for a given datetime or list of datetimes @@ -68,7 +68,7 @@ def dates2rstindices( if not dates: return ([], [], []) - availabledates = get_available_rst_dates(eclfiles) + availabledates = get_available_rst_dates(resfiles) 
supportedmnemonics = ["first", "last", "all"] @@ -141,7 +141,7 @@ def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table: def rst2df( - eclfiles: EclFiles, + resfiles: ResFiles, date: Union[str, datetime.date, List[datetime.date]], vectors: Optional[Union[str, List[str]]] = None, dateinheaders: bool = False, @@ -155,7 +155,7 @@ def rst2df( when merging with the grid geometry dataframe. Args: - eclfiles: EclFiles object + resfiles: ResFiles object date: datetime.date or list of datetime.date, must correspond to an existing date. If list, it forces dateinheaders to be True. @@ -183,16 +183,16 @@ def rst2df( # First task is to determine the restart index to extract # data for: - (rstindices, chosendates, isodates) = dates2rstindices(eclfiles, date) + (rstindices, chosendates, isodates) = dates2rstindices(resfiles, date) logger.info("Extracting restart information at dates %s", str(isodates)) # Determine the available restart vectors, we only include # those with correct length, meaning that they are defined # for all active cells: - activecells = eclfiles.get_egrid().getNumActive() + activecells = resfiles.get_egrid().getNumActive() rstvectors = [] - for vec in eclfiles.get_rstfile().headers: + for vec in resfiles.get_rstfile().headers: if vec[1] == activecells and any( fnmatch.fnmatch(vec[0], key) for key in vectors ): @@ -211,7 +211,7 @@ def rst2df( present_rstvectors = [] for vec in rstvectors: try: - if eclfiles.get_rstfile().iget_named_kw(vec, rstindex): + if resfiles.get_rstfile().iget_named_kw(vec, rstindex): present_rstvectors.append(vec) except IndexError: pass @@ -232,7 +232,7 @@ def rst2df( columns=present_rstvectors, data=np.hstack( [ - eclfiles.get_rstfile() + resfiles.get_rstfile() .iget_named_kw(vec, rstindex) .numpyView() .reshape(-1, 1) @@ -279,7 +279,7 @@ def rst2df( def gridgeometry2df( - eclfiles: EclFiles, zonemap: Optional[Dict[int, str]] = None + resfiles: ResFiles, zonemap: Optional[Dict[int, str]] = None ) -> pd.DataFrame: """Produce a Pandas 
Dataframe with Eclipse gridgeometry @@ -287,7 +287,7 @@ def gridgeometry2df( when merging with other dataframes with cell-data. Args: - eclfiles: object holding the Eclipse output files. + resfiles: object holding the Eclipse output files. zonemap: A zonemap dictionary mapping every K index to a string, which will be put in a column ZONE. If none is provided, a zonemap from a default file will be looked for. Provide an empty @@ -299,8 +299,8 @@ def gridgeometry2df( pr. cell. The index of the dataframe are the global indices. If a zonemap is provided, zone information will be in the column ZONE. """ - egrid_file = eclfiles.get_egridfile() - grid = eclfiles.get_egrid() + egrid_file = resfiles.get_egridfile() + grid = resfiles.get_egrid() if not egrid_file or not grid: raise ValueError("No EGRID file supplied") @@ -348,7 +348,7 @@ def gridgeometry2df( if zonemap is None: # Look for default zonemap file: - zonemap = eclfiles.get_zonemap() + zonemap = resfiles.get_zonemap() if zonemap: logger.info("Merging zonemap into grid") grid_df = common.merge_zones(grid_df, zonemap, kname="K") @@ -357,7 +357,7 @@ def gridgeometry2df( def merge_initvectors( - eclfiles: EclFiles, + resfiles: ResFiles, dframe: pd.DataFrame, initvectors: List[str], ijknames: Optional[List[str]] = None, @@ -368,7 +368,7 @@ def merge_initvectors( for API users to only use the df() function. Args: - eclfiles: Object representing the Eclipse output files + resfiles: Object representing the Eclipse output files dframe: Table data to merge with initvectors: Names of INIT vectors to merge in. 
ijknames: Three strings that determine the I, J and K columns to use @@ -385,7 +385,7 @@ def merge_initvectors( if len(ijknames) != 3: raise ValueError("ijknames must be a list of length 3") assert isinstance(dframe, pd.DataFrame) - assert isinstance(eclfiles, EclFiles) + assert isinstance(resfiles, ResFiles) if not set(ijknames).issubset(dframe.columns): raise ValueError( @@ -398,12 +398,12 @@ def merge_initvectors( assert isinstance(initvectors, list) logger.info("Merging INIT data %s into dataframe", str(initvectors)) - ijkinit = df(eclfiles, vectors=initvectors)[["I", "J", "K"] + initvectors] + ijkinit = df(resfiles, vectors=initvectors)[["I", "J", "K"] + initvectors] return pd.merge(dframe, ijkinit, left_on=ijknames, right_on=["I", "J", "K"]) def init2df( - eclfiles: EclFiles, vectors: Optional[Union[str, List[str]]] = None + resfiles: ResFiles, vectors: Optional[Union[str, List[str]]] = None ) -> pd.DataFrame: """Extract information from INIT file with cell data @@ -413,7 +413,7 @@ def init2df( Order is significant, as index is used for merging Args: - eclfiles: Object that can serve the EGRID and INIT files + resfiles: Object that can serve the EGRID and INIT files vectors: List of vectors to include, glob-style wildcards supported. """ @@ -422,8 +422,8 @@ def init2df( if not isinstance(vectors, list): vectors = [vectors] - init = eclfiles.get_initfile() - egrid = eclfiles.get_egrid() + init = resfiles.get_initfile() + egrid = resfiles.get_egrid() # Build list of vector names to include: usevectors = [] @@ -470,7 +470,7 @@ def init2df( def df( - eclfiles: EclFiles, + resfiles: ResFiles, vectors: Union[str, List[str]] = "*", dropconstants: bool = False, rstdates: Optional[Union[str, datetime.date, List[datetime.date]]] = None, @@ -486,7 +486,7 @@ def df( any time dependent data from Restart files. Args: - eclfiles: Handle to an Eclipse case + resfiles: Handle to an Eclipse case vectors: Vectors to include, wildcards supported. 
Used to match both INIT vectors and RESTART vectors. @@ -506,12 +506,12 @@ def df( dictionary to avoid looking for the default file, and no ZONE column will be added. """ - gridgeom = gridgeometry2df(eclfiles, zonemap) - initdf = init2df(eclfiles, vectors=vectors) + gridgeom = gridgeometry2df(resfiles, zonemap) + initdf = init2df(resfiles, vectors=vectors) rst_df = None if rstdates: rst_df = rst2df( - eclfiles, + resfiles, rstdates, vectors=vectors, dateinheaders=dateinheaders, @@ -617,7 +617,7 @@ def drop_constant_columns( def df2ecl( grid_df: pd.DataFrame, keywords: Union[str, List[str]], - eclfiles: Optional[EclFiles] = None, + resfiles: Optional[ResFiles] = None, dtype: Optional[Type] = None, filename: Optional[str] = None, nocomments: bool = False, @@ -643,7 +643,7 @@ def df2ecl( The grid can contain both active and inactive cells. keywords: The keyword(s) to export, with one value for every cell. - eclfiles: If provided, the total cell count for the grid + resfiles: If provided, the total cell count for the grid will be requested from this object. If not, it will be *guessed* from the maximum number of GLOBAL_INDEX, which can be under-estimated in the corner-case that the last cells are inactive. 
@@ -670,10 +670,10 @@ def df2ecl( # Figure out the total number of cells for which we need to export data for: global_size = None active_cells = None - if eclfiles is not None: - if eclfiles.get_egrid() is not None: - global_size = eclfiles.get_egrid().get_global_size() - active_cells = eclfiles.get_egrid().getNumActive() + if resfiles is not None: + if resfiles.get_egrid() is not None: + global_size = resfiles.get_egrid().get_global_size() + active_cells = resfiles.get_egrid().getNumActive() if "GLOBAL_INDEX" not in grid_df: logger.warning( @@ -763,9 +763,9 @@ def grid_main(args) -> None: logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) + resfiles = ResFiles(args.DATAFILE) grid_df = df( - eclfiles, + resfiles, vectors=args.vectors, rstdates=args.rstdates, dropconstants=args.dropconstants, diff --git a/res2df/gruptree.py b/res2df/gruptree.py index d7422c4cd..f5c12a92c 100644 --- a/res2df/gruptree.py +++ b/res2df/gruptree.py @@ -19,7 +19,7 @@ except ImportError: pass -from res2df import EclFiles, getLogger_res2csv +from res2df import ResFiles, getLogger_res2csv from res2df.common import ( parse_opmio_date_rec, parse_opmio_deckrecord, @@ -31,7 +31,7 @@ def df( - deck: Union[EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[ResFiles, "opm.libopmcommon_python.Deck"], startdate: Optional[datetime.date] = None, welspecs: bool = True, ) -> pd.DataFrame: @@ -55,7 +55,7 @@ def df( startdate is only relevant when START is not in the deck. Args: - deck: opm.io Deck object or EclFiles + deck: opm.io Deck object or ResFiles Returns: pd.DataFrame with one row pr edge. Empty dataframe if no @@ -68,7 +68,7 @@ def df( else: date = None - if isinstance(deck, EclFiles): + if isinstance(deck, ResFiles): deck = deck.get_ecldeck() edgerecords = [] # list of dict of rows containing an edge. 
@@ -453,8 +453,8 @@ def gruptree_main(args) -> None: if not args.output and not args.prettyprint: print("Nothing to do. Set --output or --prettyprint") sys.exit(0) - eclfiles = EclFiles(args.DATAFILE) - dframe = df(eclfiles.get_ecldeck(), startdate=args.startdate) + resfiles = ResFiles(args.DATAFILE) + dframe = df(resfiles.get_ecldeck(), startdate=args.startdate) if args.prettyprint: if "DATE" in dframe: print(prettyprint(dframe)) diff --git a/res2df/inferdims.py b/res2df/inferdims.py index aa0f9f5eb..97dad4a77 100644 --- a/res2df/inferdims.py +++ b/res2df/inferdims.py @@ -12,7 +12,7 @@ # Let parts of res2df work without OPM: pass -from res2df import EclFiles +from res2df import ResFiles logger = logging.getLogger(__name__) @@ -68,7 +68,7 @@ def guess_dim(deckstring: str, dimkeyword: str, dimitem: int = 0) -> int: deckstring, dimkeyword, dimitem, dimcountguess, nowarn=True ) try: - EclFiles.str2deck( + ResFiles.str2deck( deck_candidate, parsecontext=opm.io.ParseContext( opmioparser_recovery_fail_extra_records @@ -170,7 +170,7 @@ def inject_xxxdims_ntxxx( if xxxdims in deck and ntxxx_value is None: # Then we have nothing to do, but ensure we parse a potential string to a deck if isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResFiles.str2deck(deck) return deck if xxxdims in deck and ntxxx_value is not None: @@ -195,6 +195,6 @@ def inject_xxxdims_ntxxx( str(deck), xxxdims, DIMS_POS[ntxxx_name], ntxxx_estimate, nowarn=True ) # Overwrite the deck object - deck = EclFiles.str2deck(augmented_strdeck) + deck = ResFiles.str2deck(augmented_strdeck) return deck diff --git a/res2df/nnc.py b/res2df/nnc.py index f755309b0..6dedd694b 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -10,13 +10,13 @@ import pandas as pd -from res2df import EclFiles, __version__, common, getLogger_res2csv, grid +from res2df import ResFiles, __version__, common, getLogger_res2csv, grid from res2df.common import write_dframe_stdout_file logger: logging.Logger = 
logging.getLogger(__name__) -def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.DataFrame: +def df(resfiles: ResFiles, coords: bool = False, pillars: bool = False) -> pd.DataFrame: """Produce a Pandas Dataframe with NNC information A NNC is a pair of cells that are not next to each other @@ -28,7 +28,7 @@ def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.Da between the two cells) Args: - eclfiles: object that can serve EclFile and EclGrid + resfiles: object that can serve ResFile and EclGrid on demand coords: Set to True if you want the midpoint of the two connected cells to be computed and added to the columns @@ -39,9 +39,9 @@ def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.Da Returns: Empty if no NNC information found. """ - egrid_file = eclfiles.get_egridfile() - egrid_grid = eclfiles.get_egrid() - init_file = eclfiles.get_initfile() + egrid_file = resfiles.get_egridfile() + egrid_grid = resfiles.get_egrid() + init_file = resfiles.get_initfile() if not ("NNC1" in egrid_file and "NNC2" in egrid_file): logger.warning("No NNC data in EGRID") @@ -98,11 +98,11 @@ def df(eclfiles: EclFiles, coords: bool = False, pillars: bool = False) -> pd.Da if pillars: nncdf = filter_vertical(nncdf) if coords: - nncdf = add_nnc_coords(nncdf, eclfiles) + nncdf = add_nnc_coords(nncdf, resfiles) return nncdf -def add_nnc_coords(nncdf: pd.DataFrame, eclfiles: EclFiles) -> pd.DataFrame: +def add_nnc_coords(nncdf: pd.DataFrame, resfiles: ResFiles) -> pd.DataFrame: """Add columns X, Y and Z for the connection midpoint This extracts x, y and z for (I1, J1, K1) and (I2, J2, K2) @@ -110,12 +110,12 @@ def add_nnc_coords(nncdf: pd.DataFrame, eclfiles: EclFiles) -> pd.DataFrame: Arguments: nncdf: With grid index columns (I1, J1, K1, I2, J2, K2) - eclfiles: Object used to fetch grid data from EGRID. + resfiles: Object used to fetch grid data from EGRID. 
Returns: Incoming dataframe augmented with the columns X, Y and Z. """ - gridgeometry = grid.gridgeometry2df(eclfiles) + gridgeometry = grid.gridgeometry2df(resfiles) gnncdf = pd.merge( nncdf, gridgeometry, @@ -279,8 +279,8 @@ def nnc_main(args) -> None: logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - nncdf = df(eclfiles, coords=args.coords, pillars=args.pillars) + resfiles = ResFiles(args.DATAFILE) + nncdf = df(resfiles, coords=args.coords, pillars=args.pillars) write_dframe_stdout_file( nncdf, args.output, diff --git a/res2df/parameters.py b/res2df/parameters.py index 6b7ea1b57..d48f814cc 100644 --- a/res2df/parameters.py +++ b/res2df/parameters.py @@ -10,13 +10,13 @@ import pandas as pd import yaml -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles logger = logging.getLogger(__name__) def find_parameter_files( - ecldeck_or_eclpath: Union[EclFiles, str, Path], filebase: str = "parameters" + ecldeck_or_eclpath: Union[ResFiles, str, Path], filebase: str = "parameters" ) -> List[Path]: """Locate a default prioritized list of files to try to read as key-value @@ -24,7 +24,7 @@ def find_parameter_files( current dir, one directory up, and two directories up. 
Args: - ecldeck_or_eclpath: Either an EclFiles object of + ecldeck_or_eclpath: Either an ResFiles object of an Eclipse output set (only the corresponding path will be used), or path to a file or directory, that will be used as a starting point for locating parameter files @@ -35,7 +35,7 @@ def find_parameter_files( """ eclbasepath: Path fname: str - if isinstance(ecldeck_or_eclpath, EclFiles): + if isinstance(ecldeck_or_eclpath, ResFiles): eclbasepath = Path(ecldeck_or_eclpath.get_path()) elif isinstance(ecldeck_or_eclpath, (str, Path)): eclbasepath = Path(ecldeck_or_eclpath).parent.absolute() diff --git a/res2df/pillars.py b/res2df/pillars.py index e94421ec7..f092c1610 100644 --- a/res2df/pillars.py +++ b/res2df/pillars.py @@ -8,7 +8,7 @@ import dateutil.parser import pandas as pd -from res2df import EclFiles, common, getLogger_res2csv, grid +from res2df import ResFiles, common, getLogger_res2csv, grid logger: logging.Logger = logging.getLogger(__name__) @@ -33,7 +33,7 @@ def df( - eclfiles: EclFiles, + resfiles: ResFiles, region: Optional[str] = None, rstdates: Optional[Union[str, datetime.date, List[datetime.date]]] = None, soilcutoff: float = 0.2, @@ -83,9 +83,9 @@ def df( if region: vectors.append(region) vectors.extend(["POR*", "PERM*", "SWAT", "SGAS", "1OVERBO", "1OVERBG"]) - grid_df = grid.df(eclfiles, rstdates=rstdates, vectors=vectors, dateinheaders=True) + grid_df = grid.df(resfiles, rstdates=rstdates, vectors=vectors, dateinheaders=True) - rstdates_iso = grid.dates2rstindices(eclfiles, rstdates)[2] + rstdates_iso = grid.dates2rstindices(resfiles, rstdates)[2] grid_df["PILLAR"] = grid_df["I"].astype(str) + "-" + grid_df["J"].astype(str) logger.info("Computing pillar statistics") @@ -415,9 +415,9 @@ def pillars_main(args) -> None: __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) + resfiles = ResFiles(args.DATAFILE) dframe = df( - eclfiles, + resfiles, region=args.region, rstdates=args.rstdates, soilcutoff=args.soilcutoff, diff --git 
a/res2df/pvt.py b/res2df/pvt.py index 5d9b3e640..d3093d29e 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -11,7 +11,7 @@ import pandas as pd -from res2df import EclFiles, common, getLogger_res2csv, inferdims +from res2df import ResFiles, common, getLogger_res2csv, inferdims try: # Needed for mypy @@ -217,7 +217,7 @@ def df( Return: pd.DataFrame """ - if isinstance(deck, EclFiles): + if isinstance(deck, ResFiles): deck = deck.get_ecldeck() deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) @@ -283,10 +283,10 @@ def pvt_main(args) -> None: logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) + resfiles = ResFiles(args.DATAFILE) logger.info("Parsed %s", args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + if resfiles: + deck = resfiles.get_ecldeck() if "TABDIMS" in deck: # Things are easier when a full deck with correct TABDIMS # is supplied: diff --git a/res2df/rft.py b/res2df/rft.py index 42a422931..ccea3e4e6 100644 --- a/res2df/rft.py +++ b/res2df/rft.py @@ -26,7 +26,7 @@ from res2df import getLogger_res2csv from .common import merge_zones, write_dframe_stdout_file -from .eclfiles import EclFiles +from .resfiles import ResFiles from .gruptree import tree_from_dict logger: logging.Logger = logging.getLogger(__name__) @@ -515,18 +515,18 @@ def add_extras(dframe: pd.DataFrame, inplace: bool = True) -> pd.DataFrame: def df( - eclfiles: EclFiles, wellname: Optional[str] = None, date: Optional[str] = None + resfiles: ResFiles, wellname: Optional[str] = None, date: Optional[str] = None ) -> pd.DataFrame: """Loop over an RFT file and construct a dataframe representation of the data, ordered by well and date. Args: - eclfiles: Object used to locate the RFT file + resfiles: Object used to locate the RFT file wellname: If provided, only wells matching this string exactly will be included date: If provided, all other dates will be ignored. YYYY-MM-DD. 
""" - rftfile = eclfiles.get_rftfile() + rftfile = resfiles.get_rftfile() rftdata = [] for rftrecord in rftrecords(rftfile): @@ -632,7 +632,7 @@ def df( if rftdata_df.HOSTGRID.unique()[0].strip() == "": del rftdata_df["HOSTGRID"] - zonemap = eclfiles.get_zonemap() + zonemap = resfiles.get_zonemap() if zonemap: if "K" in rftdata_df: kname = "K" @@ -679,10 +679,10 @@ def rft_main(args) -> None: ) if args.DATAFILE.endswith(".RFT"): # Support the RFT file as an argument also: - eclfiles = EclFiles(args.DATAFILE.replace(".RFT", "") + ".DATA") + resfiles = ResFiles(args.DATAFILE.replace(".RFT", "") + ".DATA") else: - eclfiles = EclFiles(args.DATAFILE) - rft_df = df(eclfiles, wellname=args.wellname, date=args.date) + resfiles = ResFiles(args.DATAFILE) + rft_df = df(resfiles, wellname=args.wellname, date=args.date) if rft_df.empty: if args.wellname is not None or args.date is not None: logger.warning("No data. Check your well and/or date filter") diff --git a/res2df/satfunc.py b/res2df/satfunc.py index fc17096bb..4c6b040ac 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -28,7 +28,7 @@ from res2df import common, getLogger_res2csv, inferdims from .common import write_dframe_stdout_file -from .eclfiles import EclFiles +from .resfiles import ResFiles logger: logging.Logger = logging.getLogger(__name__) @@ -75,7 +75,7 @@ def df( and stating how many saturation functions there should be. If you have a string with TABDIMS missing, you must supply this as a string to this function, and not a parsed deck, as - the default parser in EclFiles is very permissive (and only + the default parser in ResFiles is very permissive (and only returning the first function by default). Arguments: @@ -91,7 +91,7 @@ def df( Return: pd.DataFrame, columns 'KEYWORD', 'SW', 'KRW', 'KROW', 'PC', .. 
""" - if isinstance(deck, EclFiles): + if isinstance(deck, ResFiles): # NB: If this is done on include files and not on DATA files # we can loose data for SATNUM > 1 deck = deck.get_ecldeck() @@ -192,13 +192,13 @@ def satfunc_main(args) -> None: logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + resfiles = ResFiles(args.DATAFILE) + if resfiles: + deck = resfiles.get_ecldeck() if "TABDIMS" in deck: # Things are easier when a full deck with (correct) TABDIMS # is supplied: - satfunc_df = df(eclfiles, keywords=args.keywords) + satfunc_df = df(resfiles, keywords=args.keywords) else: # This might be an include file for which we have to infer/guess # TABDIMS. Then we send it to df() as a string diff --git a/res2df/summary.py b/res2df/summary.py index ab9cf4569..e5dd053ae 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -20,7 +20,7 @@ from . import parameters from .common import write_dframe_stdout_file -from .eclfiles import EclFiles +from .resfiles import ResFiles logger: logging.Logger = logging.getLogger(__name__) @@ -297,7 +297,7 @@ def resample_smry_dates( def df( - eclfiles: EclFiles, + resfiles: ResFiles, time_index: Optional[str] = None, column_keys: Optional[Union[List[str], str]] = None, start_date: Optional[Union[str, dt.date]] = None, @@ -321,7 +321,7 @@ def df( is always named "DATE". Arguments: - eclfiles: EclFiles object representing the Eclipse deck. Alternatively + resfiles: ResFiles object representing the Eclipse deck. Alternatively an Summary object. 
time_index: string indicating a resampling frequency, 'yearly', 'monthly', 'daily', 'last' or 'raw', the latter will @@ -354,11 +354,11 @@ def df( if isinstance(column_keys, str): column_keys = [column_keys] - if isinstance(eclfiles, Summary): - eclsum = eclfiles + if isinstance(resfiles, Summary): + eclsum = resfiles else: try: - eclsum = eclfiles.get_eclsum(include_restart=include_restart) + eclsum = resfiles.get_eclsum(include_restart=include_restart) except OSError: logger.warning("Error reading summary instance, returning empty dataframe") return pd.DataFrame() @@ -412,7 +412,7 @@ def df( ) dframe.index.name = "DATE" if params or paramfile: - dframe = _merge_params(dframe, paramfile, eclfiles) + dframe = _merge_params(dframe, paramfile, resfiles) # Add metadata as an attribute the dataframe, using experimental Pandas features: meta = smry_meta(eclsum) @@ -539,7 +539,7 @@ def _df2pyarrow(dframe: pd.DataFrame) -> pyarrow.Table: def _merge_params( dframe: pd.DataFrame, paramfile: Optional[Union[str, Path]] = None, - eclfiles: Optional[Union[str, EclFiles]] = None, + resfiles: Optional[Union[str, ResFiles]] = None, ) -> pd.DataFrame: """Locate parameters in a file and add to the dataframe. @@ -547,16 +547,16 @@ def _merge_params( the parameters.txt file based on the location of an Eclise run. 
""" - if paramfile is None and eclfiles is not None: - param_files = parameters.find_parameter_files(eclfiles) + if paramfile is None and resfiles is not None: + param_files = parameters.find_parameter_files(resfiles) logger.info("Loading parameters from files: %s", str(param_files)) param_dict = parameters.load_all(param_files) elif ( paramfile is not None - and eclfiles is not None + and resfiles is not None and not Path(paramfile).is_absolute() ): - param_files = parameters.find_parameter_files(eclfiles, filebase=str(paramfile)) + param_files = parameters.find_parameter_files(resfiles, filebase=str(paramfile)) logger.info("Loading parameters from files: %s", str(param_files)) param_dict = parameters.load_all(param_files) elif paramfile is not None and Path(paramfile).is_absolute(): @@ -574,7 +574,7 @@ def _merge_params( return dframe -def smry_meta(eclfiles: EclFiles) -> Dict[str, Dict[str, Any]]: +def smry_meta(resfiles: ResFiles) -> Dict[str, Dict[str, Any]]: """Provide metadata for summary data vectors. 
A dictionary indexed by summary vector name is returned, and each @@ -589,10 +589,10 @@ def smry_meta(eclfiles: EclFiles) -> Dict[str, Dict[str, Any]]: * keyword (str) * wgname (str or None) """ - if isinstance(eclfiles, Summary): - eclsum = eclfiles + if isinstance(resfiles, Summary): + eclsum = resfiles else: - eclsum = eclfiles.get_eclsum() + eclsum = resfiles.get_eclsum() meta: Dict[str, Dict[str, Any]] = {} for col in eclsum.keys(): @@ -904,9 +904,9 @@ def summary_main(args) -> None: args.DATAFILE.replace(".DATA", "").replace(".UNSMRY", "").replace(".SMSPEC", "") ) - eclfiles = EclFiles(eclbase) + resfiles = ResFiles(eclbase) sum_df = df( - eclfiles, + resfiles, time_index=args.time_index, column_keys=args.column_keys, start_date=args.start_date, diff --git a/res2df/trans.py b/res2df/trans.py index af94e9979..0e50e2018 100644 --- a/res2df/trans.py +++ b/res2df/trans.py @@ -13,7 +13,7 @@ from res2df import getLogger_res2csv from res2df.common import write_dframe_stdout_file -from .eclfiles import EclFiles +from .resfiles import ResFiles try: import networkx @@ -26,7 +26,7 @@ def df( - eclfiles: EclFiles, + resfiles: ResFiles, vectors: Optional[Union[str, List[str]]] = None, boundaryfilter: bool = False, group: bool = False, @@ -57,7 +57,7 @@ def df( you will get a corresponding FIPNUM1 and FIPNUM2 added. Args: - eclfiles: An object representing your Eclipse run + resfiles: An object representing your Eclipse run vectors: Eclipse INIT vectors that you want to include boundaryfilter: Set to true if you want to filter where one INIT vector change. Only use for integer INIT vectors. 
@@ -101,7 +101,7 @@ def df( "Filtering to both k and to ij simultaneously results in empty dataframe" ) - grid_df = res2df.grid.df(eclfiles) + grid_df = res2df.grid.df(resfiles) existing_vectors = [vec for vec in vectors if vec in grid_df.columns] if len(existing_vectors) < len(vectors): logger.warning( @@ -149,7 +149,7 @@ def df( if addnnc: logger.info("Adding NNC data") - nnc_df = res2df.nnc.df(eclfiles, coords=False, pillars=False) + nnc_df = res2df.nnc.df(resfiles, coords=False, pillars=False) nnc_df["DIR"] = "NNC" trans_df = pd.concat([trans_df, nnc_df], sort=False) @@ -236,12 +236,12 @@ def df( return trans_df -def make_nx_graph(eclfiles: EclFiles, region: str = "FIPNUM") -> "networkx.Graph": +def make_nx_graph(resfiles: ResFiles, region: str = "FIPNUM") -> "networkx.Graph": """Construct a networkx graph for the transmissibilities.""" if not HAVE_NETWORKX: logger.error("Please install networkx for this function to work") return None - trans_df = df(eclfiles, vectors=[region], coords=True, group=True) + trans_df = df(resfiles, vectors=[region], coords=True, group=True) reg1 = region + "1" reg2 = region + "2" graph = networkx.Graph() @@ -306,9 +306,9 @@ def trans_main(args): logger = getLogger_res2csv( # pylint: disable=redefined-outer-name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) + resfiles = ResFiles(args.DATAFILE) trans_df = df( - eclfiles, + resfiles, vectors=args.vectors, boundaryfilter=args.boundaryfilter, onlykdir=args.onlyk, diff --git a/res2df/vfp/_vfp.py b/res2df/vfp/_vfp.py index 4448df815..b1306163a 100755 --- a/res2df/vfp/_vfp.py +++ b/res2df/vfp/_vfp.py @@ -25,7 +25,7 @@ except ImportError: pass -from res2df import EclFiles, common, getLogger_res2csv +from res2df import ResFiles, common, getLogger_res2csv from . import _vfpinj as vfpinj from . 
import _vfpprod as vfpprod @@ -35,7 +35,7 @@ def basic_data( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[Dict[str, Any]]: @@ -51,10 +51,10 @@ def basic_data( Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ - if isinstance(deck, EclFiles): + if isinstance(deck, ResFiles): deck = deck.get_ecldeck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResFiles.str2deck(deck) if keyword not in SUPPORTED_KEYWORDS: raise ValueError( @@ -241,7 +241,7 @@ def pyarrow2basic_data(pa_table: pa.Table) -> Union[Dict[str, Any], None]: def dfs( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[pd.DataFrame]: @@ -255,10 +255,10 @@ def dfs( vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. """ - if isinstance(deck, EclFiles): + if isinstance(deck, ResFiles): deck = deck.get_ecldeck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResFiles.str2deck(deck) if keyword not in SUPPORTED_KEYWORDS: raise ValueError( @@ -284,7 +284,7 @@ def dfs( def pyarrow_tables( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> List[pa.Table]: @@ -298,10 +298,10 @@ def pyarrow_tables( vfpnumbers_str: String with list of vfp table numbers to extract. Syntax "[0,1,8:11]" corresponds to [0,1,8,9,10,11]. 
""" - if isinstance(deck, EclFiles): + if isinstance(deck, ResFiles): deck = deck.get_ecldeck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResFiles.str2deck(deck) if keyword not in SUPPORTED_KEYWORDS: raise ValueError( @@ -409,7 +409,7 @@ def df2ecl( def df( - deck: Union[str, EclFiles, "opm.libopmcommon_python.Deck"], + deck: Union[str, ResFiles, "opm.libopmcommon_python.Deck"], keyword: str = "VFPPROD", vfpnumbers_str: Optional[str] = None, ) -> pd.DataFrame: @@ -427,10 +427,10 @@ def df( logger.warning("No keywords provided to vfp.df. Empty dataframe returned") return pd.DataFrame() - if isinstance(deck, EclFiles): + if isinstance(deck, ResFiles): deck = deck.get_ecldeck() elif isinstance(deck, str): - deck = EclFiles.str2deck(deck) + deck = ResFiles.str2deck(deck) # Extract all VFPROD/VFPINJ as separate dataframes dfs_vfp = dfs(deck, keyword, vfpnumbers_str) @@ -495,12 +495,12 @@ def vfp_main(args) -> None: if "vfpnumbers" in args: vfpnumbers = str(args.vfpnumbers) - eclfiles = EclFiles(args.DATAFILE) + resfiles = ResFiles(args.DATAFILE) if args.arrow: outputfile = args.output outputfile.replace(".arrow", "") vfp_arrow_tables = pyarrow_tables( - eclfiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers + resfiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers ) for vfp_table in vfp_arrow_tables: table_number = int( @@ -513,7 +513,7 @@ def vfp_main(args) -> None: logger.info(f"Parsed file {args.DATAFILE} for vfp.dfs_arrow") else: dframe = df( - eclfiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers + resfiles.get_ecldeck(), keyword=args.keyword, vfpnumbers_str=vfpnumbers ) if args.output: common.write_dframe_stdout_file( diff --git a/res2df/wcon.py b/res2df/wcon.py index 8246e0d68..c2d94d255 100644 --- a/res2df/wcon.py +++ b/res2df/wcon.py @@ -15,7 +15,7 @@ except ImportError: pass -from res2df import EclFiles, getLogger_res2csv +from res2df import ResFiles, getLogger_res2csv from 
res2df.common import ( parse_opmio_date_rec, parse_opmio_deckrecord, @@ -28,13 +28,13 @@ WCONKEYS = ["WCONHIST", "WCONINJE", "WCONINJH", "WCONPROD"] -def df(deck: Union[EclFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: +def df(deck: Union[ResFiles, "opm.libopmcommon_python.Deck"]) -> pd.DataFrame: """Loop through the deck and pick up information found The loop over the deck is a state machine, as it has to pick up dates """ - if isinstance(deck, EclFiles): + if isinstance(deck, ResFiles): deck = deck.get_ecldeck() wconrecords = [] # List of dicts of every line in input file @@ -94,9 +94,9 @@ def wcon_main(args) -> None: logger = getLogger_res2csv( # pylint: disable:redefined-outer_name __name__, vars(args) ) - eclfiles = EclFiles(args.DATAFILE) - if eclfiles: - deck = eclfiles.get_ecldeck() + resfiles = ResFiles(args.DATAFILE) + if resfiles: + deck = resfiles.get_ecldeck() wcon_df = df(deck) write_dframe_stdout_file( wcon_df, diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index 8b0af8dcc..761649c98 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -11,7 +11,7 @@ import pyarrow.feather from res2df import common, compdat, getLogger_res2csv, wellconnstatus -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles from .common import write_dframe_stdout_file @@ -33,7 +33,7 @@ class KHUnit(Enum): def df( - eclfiles: EclFiles, + resfiles: ResFiles, zonemap: Dict[int, str], use_wellconnstatus: bool = False, excl_well_startswith: Optional[str] = None, @@ -49,14 +49,14 @@ def df( only. Args: - eclfiles; EclFiles object + resfiles; ResFiles object zonemap: dictionary with layer->zone mapping use_wellconnstatus: boolean Returns: pd.DataFrame with one row per unique combination of well, zone and date. 
""" - compdat_df = compdat.df(eclfiles, zonemap=zonemap) + compdat_df = compdat.df(resfiles, zonemap=zonemap) if "ZONE" not in compdat_df.columns: logger.warning( "ZONE column not generated in compdat table. " @@ -75,13 +75,13 @@ def df( compdat_df = _excl_well_startswith(compdat_df, excl_well_startswith) if use_wellconnstatus: - wellconnstatus_df = wellconnstatus.df(eclfiles) + wellconnstatus_df = wellconnstatus.df(resfiles) compdat_df = _merge_compdat_and_connstatus(compdat_df, wellconnstatus_df) compdat_df = _aggregate_layer_to_zone(compdat_df) # Add metadata as an attribute the dataframe - meta = _get_metadata(eclfiles) + meta = _get_metadata(resfiles) # Slice meta to dataframe columns: compdat_df.attrs["meta"] = { column_key: meta[column_key] for column_key in compdat_df if column_key in meta @@ -90,7 +90,7 @@ def df( return compdat_df -def _get_ecl_unit_system(eclfiles: EclFiles) -> EclipseUnitSystem: +def _get_ecl_unit_system(resfiles: ResFiles) -> EclipseUnitSystem: """Returns the unit system of an eclipse deck. The options are \ METRIC, FIELD, LAB and PVT-M. @@ -98,16 +98,16 @@ def _get_ecl_unit_system(eclfiles: EclFiles) -> EclipseUnitSystem: default unit system in Eclipse. 
""" unit_systems = [unitsystem.value for unitsystem in EclipseUnitSystem] - for keyword in eclfiles.get_ecldeck(): + for keyword in resfiles.get_ecldeck(): if keyword.name in unit_systems: return EclipseUnitSystem(keyword.name) return EclipseUnitSystem.METRIC -def _get_metadata(eclfiles: EclFiles) -> Dict[str, Dict[str, Any]]: +def _get_metadata(resfiles: ResFiles) -> Dict[str, Dict[str, Any]]: """Provide metadata for the well completion data export""" meta: Dict[str, Dict[str, str]] = {} - unitsystem = _get_ecl_unit_system(eclfiles) + unitsystem = _get_ecl_unit_system(resfiles) kh_units = { EclipseUnitSystem.METRIC: KHUnit.METRIC, EclipseUnitSystem.FIELD: KHUnit.FIELD, @@ -288,14 +288,14 @@ def wellcompletiondata_main(args): """Entry-point for module, for command line utility""" logger = getLogger_res2csv(__name__, vars(args)) - eclfiles = EclFiles(args.DATAFILE) + resfiles = ResFiles(args.DATAFILE) if not Path(args.zonemap).is_file(): wellcompletiondata_df = pd.DataFrame() logger.info(f"Zonemap not found: {args.zonemap}") else: zonemap = common.convert_lyrlist_to_zonemap(common.parse_lyrfile(args.zonemap)) wellcompletiondata_df = df( - eclfiles, zonemap, args.use_wellconnstatus, args.excl_well_startswith + resfiles, zonemap, args.use_wellconnstatus, args.excl_well_startswith ) logger.info( f"Well completion data successfully generated with zonemap: {zonemap}" diff --git a/res2df/wellconnstatus.py b/res2df/wellconnstatus.py index 1dcebec8e..2287d9493 100644 --- a/res2df/wellconnstatus.py +++ b/res2df/wellconnstatus.py @@ -9,14 +9,14 @@ import pandas as pd from res2df import getLogger_res2csv, summary -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles from .common import write_dframe_stdout_file logger = logging.getLogger(__name__) -def df(eclfiles: EclFiles) -> pd.DataFrame: +def df(resfiles: ResFiles) -> pd.DataFrame: """Exctracts connection status history for each compdat connection that is included in the summary data on the form 
CPI:WELL,I,J,K. CPI stands for connection productivity index. @@ -28,7 +28,7 @@ def df(eclfiles: EclFiles) -> pd.DataFrame: The output data set is very sparse compared to the CPI summary data. """ - smry = summary.df(eclfiles, column_keys="CPI*") + smry = summary.df(resfiles, column_keys="CPI*") return _extract_status_changes(smry) @@ -115,9 +115,9 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: def wellconnstatus_main(args): """Entry-point for module, for command line utility""" logger = getLogger_res2csv(__name__, vars(args)) - eclfiles = EclFiles(args.DATAFILE) + resfiles = ResFiles(args.DATAFILE) - wellconnstatus_df = df(eclfiles) + wellconnstatus_df = df(resfiles) write_dframe_stdout_file( wellconnstatus_df, args.output, index=False, caller_logger=logger ) diff --git a/tests/test_common.py b/tests/test_common.py index ed5547312..bf5a8ca2d 100644 --- a/tests/test_common.py +++ b/tests/test_common.py @@ -9,7 +9,7 @@ import pandas as pd import pytest -from res2df import common, eclfiles, equil +from res2df import common, resfiles, equil try: # pylint: disable=unused-import @@ -147,7 +147,7 @@ def test_parse_opmio_deckrecord(): ) def test_handle_wanted_keywords(wanted, deckstr, supported, expected): """Test that we can handle list of wanted, supported and available keywords.""" - deck = eclfiles.EclFiles.str2deck(deckstr) + deck = resfiles.ResFiles.str2deck(deckstr) assert common.handle_wanted_keywords(wanted, deck, supported) == expected diff --git a/tests/test_compdat.py b/tests/test_compdat.py index 4fc483b75..21f619f0b 100644 --- a/tests/test_compdat.py +++ b/tests/test_compdat.py @@ -5,7 +5,7 @@ import pandas as pd import pytest -from res2df import EclFiles, compdat, res2csv +from res2df import ResFiles, compdat, res2csv try: # pylint: disable=unused-import @@ -32,8 +32,8 @@ def test_df(): """Test main dataframe API, only testing that something comes out""" - eclfiles = EclFiles(EIGHTCELLS) - compdat_df = compdat.df(eclfiles) + 
resfiles = ResFiles(EIGHTCELLS) + compdat_df = compdat.df(resfiles) assert not compdat_df.empty assert "ZONE" in compdat_df assert "K1" in compdat_df @@ -42,8 +42,8 @@ def test_df(): def test_comp2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(EIGHTCELLS) - compdfs = compdat.deck2dfs(eclfiles.get_ecldeck()) + resfiles = ResFiles(EIGHTCELLS) + compdfs = compdat.deck2dfs(resfiles.get_ecldeck()) assert not compdfs["COMPDAT"].empty assert not compdfs["WELSEGS"].empty @@ -53,7 +53,7 @@ def test_comp2df(): def test_schfile2df(): """Test that we can process individual files""" - deck = EclFiles.file2deck(SCHFILE) + deck = ResFiles.file2deck(SCHFILE) compdfs = compdat.deck2dfs(deck) assert not compdfs["COMPDAT"].columns.empty assert not compdfs["COMPDAT"].empty @@ -67,7 +67,7 @@ def test_str_compdat(): -- comments. / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) compdfs = compdat.deck2dfs(deck) compdat_df = compdfs["COMPDAT"] assert compdat_df.loc[0, "SATN"] == 0 @@ -79,7 +79,7 @@ def test_str_compdat(): 'FOO' 303 1010 031 39 / / """ - compdat_df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"] + compdat_df = compdat.deck2dfs(ResFiles.str2deck(schstr))["COMPDAT"] assert len(compdat_df) == 9 assert not compdat_df["DFACT"].values[0] assert not compdat_df["TRAN"].values[0] @@ -114,7 +114,7 @@ def test_str2df(): 'OP1' 166 1 7.4294683E-06 0 / icd on segment 17, cell 41 125 29 / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) compdfs = compdat.deck2dfs(deck) compdat_df = compdfs["COMPDAT"] welsegs = compdfs["WELSEGS"] @@ -182,7 +182,7 @@ def test_tstep(): 'OP1' 35 111 33 33 'SHUT' / / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) compdf = compdat.deck2dfs(deck)["COMPDAT"] dates = [str(x) for x in compdf["DATE"].unique()] assert len(dates) == 3 @@ -202,7 +202,7 @@ def test_tstep(): 'OP1' 34 111 32 32 'OPEN' / / """ - assert 
compdat.deck2dfs(EclFiles.str2deck(schstr_nodate)) == {} + assert compdat.deck2dfs(ResFiles.str2deck(schstr_nodate)) == {} # (critical error logged) @@ -215,14 +215,14 @@ def test_unrollcompdatk1k2(): 'OP1' 33 44 10 20 / / """ - df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"] + df = compdat.deck2dfs(ResFiles.str2deck(schstr))["COMPDAT"] assert df["I"].unique() == 33 assert df["J"].unique() == 44 assert (df["K1"].values == range(10, 20 + 1)).all() assert (df["K2"].values == range(10, 20 + 1)).all() # Check that we can read withoug unrolling: - df_noroll = compdat.deck2dfs(EclFiles.str2deck(schstr), unroll=False)["COMPDAT"] + df_noroll = compdat.deck2dfs(ResFiles.str2deck(schstr), unroll=False)["COMPDAT"] assert len(df_noroll) == 1 @@ -234,7 +234,7 @@ def test_samecellperf(): 'OP2' 1 1 1 1 / / """ - df = compdat.deck2dfs(EclFiles.str2deck(schstr))["COMPDAT"] + df = compdat.deck2dfs(ResFiles.str2deck(schstr))["COMPDAT"] assert len(df) == 2 @@ -248,10 +248,10 @@ def test_unrollwelsegs(): 2 3 1 1 1923.9 1689.000 0.1172 0.000015 / / """ - df = compdat.deck2dfs(EclFiles.str2deck(schstr))["WELSEGS"] + df = compdat.deck2dfs(ResFiles.str2deck(schstr))["WELSEGS"] assert len(df) == 2 - df = compdat.deck2dfs(EclFiles.str2deck(schstr), unroll=False)["WELSEGS"] + df = compdat.deck2dfs(ResFiles.str2deck(schstr), unroll=False)["WELSEGS"] assert len(df) == 1 @@ -267,24 +267,24 @@ def test_unrollbogus(): def test_initmerging(): """Test that we can ask for INIT vectors to be merged into the data""" - eclfiles = EclFiles(REEK) - noinit_df = compdat.df(eclfiles) - df = compdat.df(eclfiles, initvectors=[]) + resfiles = ResFiles(REEK) + noinit_df = compdat.df(resfiles) + df = compdat.df(resfiles, initvectors=[]) assert isinstance(df, pd.DataFrame) assert not df.empty - df = compdat.df(eclfiles, initvectors=["FIPNUM", "EQLNUM", "SATNUM"]) + df = compdat.df(resfiles, initvectors=["FIPNUM", "EQLNUM", "SATNUM"]) assert "FIPNUM" in df assert "EQLNUM" in df assert "SATNUM" in df 
assert len(df) == len(noinit_df) - df = compdat.df(eclfiles, initvectors="FIPNUM") + df = compdat.df(resfiles, initvectors="FIPNUM") assert "FIPNUM" in df assert len(df) == len(noinit_df) with pytest.raises(AssertionError): - compdat.df(eclfiles, initvectors=2) + compdat.df(resfiles, initvectors=2) def test_main_subparsers(tmp_path, mocker): @@ -369,29 +369,29 @@ def test_defaulted_compdat_i_j(): # pylint: disable=expression-not-assigned with pytest.raises(ValueError, match="WELSPECS must be provided when I"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_i))["COMPDAT"] + compdat.deck2dfs(ResFiles.str2deck(compdat_str_def_i))["COMPDAT"] # I value of 0 also means defaulted: with pytest.raises(ValueError, match="WELSPECS must be provided when I"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_i.replace("1*", "0")))[ + compdat.deck2dfs(ResFiles.str2deck(compdat_str_def_i.replace("1*", "0")))[ "COMPDAT" ] with pytest.raises(ValueError, match="WELSPECS must be provided when J"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_j))["COMPDAT"] + compdat.deck2dfs(ResFiles.str2deck(compdat_str_def_j))["COMPDAT"] # J value of 0 also means defaulted: with pytest.raises(ValueError, match="WELSPECS must be provided when J"): - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_j.replace("1*", "0")))[ + compdat.deck2dfs(ResFiles.str2deck(compdat_str_def_j.replace("1*", "0")))[ "COMPDAT" ] with pytest.raises(ValueError, match="WELSPECS must be provided"): # Wrong order: - compdat.deck2dfs(EclFiles.str2deck(compdat_str_def_i + welspecs_str))["COMPDAT"] + compdat.deck2dfs(ResFiles.str2deck(compdat_str_def_i + welspecs_str))["COMPDAT"] # Simplest example: - compdat_df = compdat.deck2dfs(EclFiles.str2deck(welspecs_str + compdat_str_def_i))[ + compdat_df = compdat.deck2dfs(ResFiles.str2deck(welspecs_str + compdat_str_def_i))[ "COMPDAT" ] assert compdat_df["I"].unique() == [20] @@ -399,7 +399,7 @@ def test_defaulted_compdat_i_j(): # Two wells: compdat_df = 
compdat.deck2dfs( - EclFiles.str2deck( + ResFiles.str2deck( welspecs_str.replace("OP1", "OP2").replace("30", "99") + welspecs_str + compdat_str_def_i @@ -408,14 +408,14 @@ def test_defaulted_compdat_i_j(): # Partial defaulting compdat_df = compdat.deck2dfs( - EclFiles.str2deck(welspecs_str + compdat_str_def_i + compdat_str_nodefaults) + ResFiles.str2deck(welspecs_str + compdat_str_def_i + compdat_str_nodefaults) )["COMPDAT"] assert set(compdat_df["I"].unique()) == {20, 55} assert set(compdat_df["J"].unique()) == {30, 66} compdat_df = compdat.deck2dfs( - EclFiles.str2deck( + ResFiles.str2deck( welspecs_str.replace("OP1", "OP2").replace("30", "99") + welspecs_str + compdat_str_def_i @@ -430,7 +430,7 @@ def test_defaulted_compdat_i_j(): # Same well redrilled to new location compdat_df = compdat.deck2dfs( - EclFiles.str2deck( + ResFiles.str2deck( "DATES\n 1 JAN 2030 /\n/\n" + welspecs_str + compdat_str_def_i @@ -450,17 +450,17 @@ def test_defaulted_compdat_i_j(): # Multisegement well testing def test_msw_schfile2df(): """Test that we can process individual files with AICD and ICD MSW""" - deck = EclFiles.file2deck(SCHFILE_AICD) + deck = ResFiles.file2deck(SCHFILE_AICD) compdfs = compdat.deck2dfs(deck) assert not compdfs["WSEGAICD"].empty assert not compdfs["WSEGAICD"].columns.empty - deck = EclFiles.file2deck(SCHFILE_ICD) + deck = ResFiles.file2deck(SCHFILE_ICD) compdfs = compdat.deck2dfs(deck) assert not compdfs["WSEGSICD"].empty assert not compdfs["WSEGSICD"].columns.empty - deck = EclFiles.file2deck(SCHFILE_VALV) + deck = ResFiles.file2deck(SCHFILE_VALV) compdfs = compdat.deck2dfs(deck) assert not compdfs["WSEGVALV"].empty assert not compdfs["WSEGVALV"].columns.empty @@ -507,7 +507,7 @@ def test_msw_str2df(): OP_6 31 0.0084252 0.00075 1* / / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) compdfs = compdat.deck2dfs(deck) wsegaicd = compdfs["WSEGAICD"] wsegsicd = compdfs["WSEGSICD"] @@ -539,7 +539,7 @@ def test_wsegaicd(): OPEN 1.0 1.0 1.0 
2.43 1.18 10.0 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) wsegaicd = compdat.deck2dfs(deck)["WSEGAICD"] pd.testing.assert_frame_equal( wsegaicd, @@ -585,7 +585,7 @@ def test_wsegsicd(): OPEN / / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) wsegsicd = compdat.deck2dfs(deck)["WSEGSICD"] pd.testing.assert_frame_equal( wsegsicd, @@ -620,7 +620,7 @@ def test_wsegvalv(): WELL_A 31 0.0084252 0.00075 0.5 0.216 0.0005 0.0366 SHUT 0.0008 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"] pd.testing.assert_frame_equal( wsegvalv, @@ -654,7 +654,7 @@ def test_wsegvalv_max_blank(): WELL_A 31 0.0084252 0.00075 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"] pd.testing.assert_frame_equal( wsegvalv, @@ -688,7 +688,7 @@ def test_wsegvalv_max_default(): WELL_A 31 0.0084252 0.00075 6* / / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) wsegvalv = compdat.deck2dfs(deck)["WSEGVALV"] pd.testing.assert_frame_equal( wsegvalv, diff --git a/tests/test_eclfiles.py b/tests/test_eclfiles.py index 1102a6ad0..e3b5dcebf 100644 --- a/tests/test_eclfiles.py +++ b/tests/test_eclfiles.py @@ -3,7 +3,7 @@ import pytest -from res2df import EclFiles +from res2df import ResFiles try: # pylint: disable=unused-import @@ -29,52 +29,52 @@ def test_filedescriptors(): pre_fd_count = len(list(fd_dir.glob("*"))) - eclfiles = EclFiles(EIGHTCELLS) + resfiles = ResFiles(EIGHTCELLS) # No opened files yet: assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.close() + resfiles.close() # No change, no files to close: assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.get_egrid() + resfiles.get_egrid() # This should not leave any file descriptor open assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.get_initfile() + resfiles.get_initfile() assert 
len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._initfile is not None - eclfiles.close() + assert resfiles._initfile is not None + resfiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._initfile is None + assert resfiles._initfile is None - eclfiles.get_rstfile() + resfiles.get_rstfile() # Automatically closed by libecl assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._rstfile is not None - eclfiles.close() + assert resfiles._rstfile is not None + resfiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._rstfile is None + assert resfiles._rstfile is None - eclfiles.get_eclsum() + resfiles.get_eclsum() assert len(list(fd_dir.glob("*"))) == pre_fd_count + 1 - eclfiles.close() + resfiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - eclfiles.get_egridfile() + resfiles.get_egridfile() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._egridfile is not None - eclfiles.close() + assert resfiles._egridfile is not None + resfiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._egridfile is None + assert resfiles._egridfile is None - eclfiles.get_rftfile() + resfiles.get_rftfile() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._rftfile is not None - eclfiles.close() + assert resfiles._rftfile is not None + resfiles.close() assert len(list(fd_dir.glob("*"))) == pre_fd_count - assert eclfiles._rftfile is None + assert resfiles._rftfile is None - eclfiles.get_ecldeck() + resfiles.get_ecldeck() # This should not leave any file descriptor open assert len(list(fd_dir.glob("*"))) == pre_fd_count diff --git a/tests/test_equil.py b/tests/test_equil.py index e886c6939..94464d3c6 100644 --- a/tests/test_equil.py +++ b/tests/test_equil.py @@ -9,7 +9,7 @@ import pytest from res2df import csv2res, equil, res2csv -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles try: # pylint: 
disable=unused-import @@ -28,8 +28,8 @@ def test_equil2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - equildf = equil.df(eclfiles) + resfiles = ResFiles(REEK) + equildf = equil.df(resfiles) expected = {} expected["EQUIL"] = pd.DataFrame( [ @@ -85,8 +85,8 @@ def test_equil2df(): def test_df2ecl(tmp_path): """Test that we can write include files to disk""" os.chdir(tmp_path) - eclfiles = EclFiles(EIGHTCELLS) - equildf = equil.df(eclfiles) + resfiles = ResFiles(EIGHTCELLS) + equildf = equil.df(resfiles) equil.df2ecl(equildf, filename="equil.inc") assert Path("equil.inc").is_file() @@ -255,7 +255,7 @@ def test_equil_fromdeck(): assert len(equil.equil_fromdeck(deckstr)) == 2 # correct assert len(equil.equil_fromdeck(deckstr, 2)) == 2 assert len(equil.equil_fromdeck(deckstr, 1)) == 1 - assert len(equil.equil_fromdeck(EclFiles.str2deck(deckstr))) == 1 # (watch out!) + assert len(equil.equil_fromdeck(ResFiles.str2deck(deckstr))) == 1 # (watch out!) wrongdeck = """ EQUIL @@ -611,7 +611,7 @@ def test_main_subparser(tmp_path, mocker): ) def test_phases_from_deck(deckstring, expected): """Test that we can extract phase configuration from a deck""" - deck = EclFiles.str2deck(deckstring) + deck = ResFiles.str2deck(deckstring) assert equil.phases_from_deck(deck) == expected diff --git a/tests/test_faults.py b/tests/test_faults.py index 99d652c67..85af1f75f 100644 --- a/tests/test_faults.py +++ b/tests/test_faults.py @@ -8,7 +8,7 @@ import pytest from res2df import faults, res2csv -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles try: # pylint: disable=unused-import @@ -26,8 +26,8 @@ def test_faults2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - faultsdf = faults.df(eclfiles.get_ecldeck()) + resfiles = ResFiles(REEK) + faultsdf = faults.df(resfiles.get_ecldeck()) assert "NAME" in faultsdf assert "I" in faultsdf @@ -46,7 +46,7 @@ def test_str2df(): 'B' 2 3 4 5 6 7 'J' / / """ - deck = 
EclFiles.str2deck(deckstr) + deck = ResFiles.str2deck(deckstr) faultsdf = faults.df(deck) assert len(faultsdf) == 16 @@ -54,8 +54,8 @@ def test_str2df(): def test_nofaults(): """Test on a dataset with no faults""" - eclfiles = EclFiles(EIGHTCELLS) - faultsdf = faults.df(eclfiles.get_ecldeck()) + resfiles = ResFiles(EIGHTCELLS) + faultsdf = faults.df(resfiles.get_ecldeck()) assert faultsdf.empty @@ -71,7 +71,7 @@ def test_multiplestr2df(): 'D' 2 2 4 4 10 10 'J' / / """ - deck = EclFiles.str2deck(deckstr) + deck = ResFiles.str2deck(deckstr) faultsdf = faults.df(deck).set_index("NAME") assert len(faultsdf) == 23 diff --git a/tests/test_fipreports.py b/tests/test_fipreports.py index 7a75ea301..922bfde62 100644 --- a/tests/test_fipreports.py +++ b/tests/test_fipreports.py @@ -9,7 +9,7 @@ import pytest from res2df import fipreports, res2csv -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles from res2df.fipreports import report_block_lineparser as parser TESTDIR = Path(__file__).absolute().parent @@ -19,7 +19,7 @@ def test_fipreports2df(): """Test parsing of Reek dataset""" - prtfile = EclFiles(DATAFILE).get_prtfilename() + prtfile = ResFiles(DATAFILE).get_prtfilename() fipreport_df = fipreports.df(prtfile) assert len(fipreport_df["REGION"].unique()) == 6 assert len(fipreport_df["DATE"].unique()) == 1 diff --git a/tests/test_grid.py b/tests/test_grid.py index e9df854b4..c0b33a1e7 100644 --- a/tests/test_grid.py +++ b/tests/test_grid.py @@ -9,7 +9,7 @@ import pytest from res2df import common, grid, res2csv -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -18,8 +18,8 @@ def test_gridgeometry2df(mocker): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - grid_geom = grid.gridgeometry2df(eclfiles) + resfiles = ResFiles(REEK) + grid_geom = grid.gridgeometry2df(resfiles) assert 
isinstance(grid_geom, pd.DataFrame) assert not grid_geom.empty @@ -50,38 +50,38 @@ def test_gridgeometry2df(mocker): grid.gridgeometry2df(None) with pytest.raises(ValueError, match="No EGRID file supplied"): - mocker.patch("res2df.eclfiles.EclFiles.get_egridfile", return_value=None) - grid.gridgeometry2df(eclfiles) + mocker.patch("res2df.resfiles.ResFiles.get_egridfile", return_value=None) + grid.gridgeometry2df(resfiles) def test_wrongfile(): - """Test the EclFiles object on nonexistent files""" + """Test the ResFiles object on nonexistent files""" # pylint: disable=invalid-name,redefined-builtin # We can initalize this object with bogus: - eclfiles = EclFiles("FOO.DATA") + resfiles = ResFiles("FOO.DATA") # but when we try to use it, things should fail: with pytest.raises(FileNotFoundError): - grid.init2df(eclfiles) + grid.init2df(resfiles) def test_gridzonemap(): """Check that zonemap can be merged automatically be default, and also that there is some API for supplying the zonemap directly as a dictionary""" - eclfiles = EclFiles(EIGHTCELLS) - grid_geom = grid.gridgeometry2df(eclfiles, zonemap=None) + resfiles = ResFiles(EIGHTCELLS) + grid_geom = grid.gridgeometry2df(resfiles, zonemap=None) default_zonemap = grid_geom["ZONE"] - grid_no_zone = grid.gridgeometry2df(eclfiles, zonemap={}) + grid_no_zone = grid.gridgeometry2df(resfiles, zonemap={}) assert "ZONE" not in grid_no_zone - assert (grid.df(eclfiles, zonemap=None)["ZONE"] == default_zonemap).all() + assert (grid.df(resfiles, zonemap=None)["ZONE"] == default_zonemap).all() - df_no_zone = grid.df(eclfiles, zonemap={}) + df_no_zone = grid.df(resfiles, zonemap={}) assert "ZONE" not in df_no_zone - df_custom_zone = grid.gridgeometry2df(eclfiles, zonemap={1: "FIRSTLAYER"}) + df_custom_zone = grid.gridgeometry2df(resfiles, zonemap={1: "FIRSTLAYER"}) assert "ZONE" in df_custom_zone assert set(df_custom_zone[df_custom_zone["K"] == 1]["ZONE"].unique()) == set( ["FIRSTLAYER"] @@ -89,14 +89,14 @@ def test_gridzonemap(): 
assert len(df_custom_zone) == len(grid_no_zone) df_bogus_zones = grid.gridgeometry2df( - eclfiles, zonemap={999999: "nonexistinglayer"} + resfiles, zonemap={999999: "nonexistinglayer"} ) assert pd.isnull(df_bogus_zones["ZONE"]).all() # Test a custom "subzone" map via direct usage of merge_zone on an dataframe # where ZONE already exists: - dframe = grid.df(eclfiles) + dframe = grid.df(resfiles) subzonemap = {1: "SUBZONE1", 2: "SUBZONE2"} dframe = common.merge_zones(dframe, subzonemap, zoneheader="SUBZONE", kname="K") assert (dframe["ZONE"] == default_zonemap).all() @@ -107,20 +107,20 @@ def test_gridzonemap(): def test_merge_initvectors(): """Test merging of INIT-vectors into the grid dataframe""" - eclfiles = EclFiles(REEK) - assert grid.merge_initvectors(eclfiles, pd.DataFrame(), []).empty + resfiles = ResFiles(REEK) + assert grid.merge_initvectors(resfiles, pd.DataFrame(), []).empty foo_df = pd.DataFrame([{"FOO": 1}]) - pd.testing.assert_frame_equal(grid.merge_initvectors(eclfiles, foo_df, []), foo_df) + pd.testing.assert_frame_equal(grid.merge_initvectors(resfiles, foo_df, []), foo_df) with pytest.raises(ValueError, match="All of the columns"): - grid.merge_initvectors(eclfiles, foo_df, ["NONEXISTING"]) + grid.merge_initvectors(resfiles, foo_df, ["NONEXISTING"]) minimal_df = pd.DataFrame([{"I": 10, "J": 11, "K": 12}]) with pytest.raises(KeyError): - grid.merge_initvectors(eclfiles, minimal_df, ["NONEXISTING"]) + grid.merge_initvectors(resfiles, minimal_df, ["NONEXISTING"]) - withporo = grid.merge_initvectors(eclfiles, minimal_df, ["PORO"]) + withporo = grid.merge_initvectors(resfiles, minimal_df, ["PORO"]) pd.testing.assert_frame_equal( withporo, minimal_df.assign(PORO=0.221848), check_dtype=False ) @@ -128,18 +128,18 @@ def test_merge_initvectors(): with pytest.raises(ValueError): # ijknames must be length 3 grid.merge_initvectors( - eclfiles, minimal_df, ["PORO"], ijknames=["I", "J", "K", "L"] + resfiles, minimal_df, ["PORO"], ijknames=["I", "J", "K", "L"] ) 
with pytest.raises(ValueError): - grid.merge_initvectors(eclfiles, minimal_df, ["PORO"], ijknames=["I", "J"]) + grid.merge_initvectors(resfiles, minimal_df, ["PORO"], ijknames=["I", "J"]) with pytest.raises(ValueError, match="All of the columns"): - grid.merge_initvectors(eclfiles, minimal_df, ["PORO"], ijknames=["A", "B", "C"]) + grid.merge_initvectors(resfiles, minimal_df, ["PORO"], ijknames=["A", "B", "C"]) def test_init2df(): """Test that dataframe with INIT vectors can be produced""" - eclfiles = EclFiles(REEK) - init_df = grid.init2df(eclfiles) + resfiles = ResFiles(REEK) + init_df = grid.init2df(resfiles) assert isinstance(init_df, pd.DataFrame) # pylint: disable=unsupported-membership-test # false positive on Dataframe @@ -158,8 +158,8 @@ def test_init2df(): def test_grid_df(): """Test that dataframe with INIT vectors and coordinates can be produced""" - eclfiles = EclFiles(EIGHTCELLS) - grid_df = grid.df(eclfiles) + resfiles = ResFiles(EIGHTCELLS) + grid_df = grid.df(resfiles) assert isinstance(grid_df, pd.DataFrame) assert not grid_df.empty @@ -184,8 +184,8 @@ def test_grid_df(): def test_df2ecl(tmp_path): """Test if we are able to output include files for grid data""" - eclfiles = EclFiles(REEK) - grid_df = grid.df(eclfiles) + resfiles = ResFiles(REEK) + grid_df = grid.df(resfiles) fipnum_str = grid.df2ecl(grid_df, "FIPNUM", dtype=int) assert grid.df2ecl(grid_df, "FIPNUM", dtype="int", nocomments=True) == grid.df2ecl( @@ -203,7 +203,7 @@ def test_df2ecl(tmp_path): fipnum_str_nocomment = grid.df2ecl(grid_df, "FIPNUM", dtype=int, nocomments=True) assert "--" not in fipnum_str_nocomment fipnum2_str = grid.df2ecl( - grid_df, "FIPNUM", dtype=int, eclfiles=eclfiles, nocomments=True + grid_df, "FIPNUM", dtype=int, resfiles=resfiles, nocomments=True ) # This would mean that we guessed the correct global size in the first run assert fipnum_str_nocomment == fipnum2_str @@ -237,20 +237,20 @@ def test_df2ecl(tmp_path): grid.df2ecl(grid_df, ["PERMRR"]) # Check when 
we have restart info included: - gr_rst = grid.df(eclfiles, rstdates="all") + gr_rst = grid.df(resfiles, rstdates="all") fipnum_str_rst = grid.df2ecl(gr_rst, "FIPNUM", dtype=int, nocomments=True) assert fipnum_str_rst == fipnum_str_nocomment # When dates are stacked, there are NaN's in the FIPNUM column, # which should be gracefully ignored. - gr_rst_stacked = grid.df(eclfiles, rstdates="all", stackdates=True) + gr_rst_stacked = grid.df(resfiles, rstdates="all", stackdates=True) fipnum_str_rst = grid.df2ecl(gr_rst_stacked, "FIPNUM", dtype=int, nocomments=True) assert fipnum_str_rst == fipnum_str_nocomment # dateinheaders here will be ignored due to stackdates: pd.testing.assert_frame_equal( gr_rst_stacked, - grid.df(eclfiles, rstdates="all", stackdates=True, dateinheaders=True), + grid.df(resfiles, rstdates="all", stackdates=True, dateinheaders=True), ) @@ -267,25 +267,25 @@ def test_df2ecl_mock(): def test_subvectors(): """Test that we can ask for a few vectors only""" - eclfiles = EclFiles(EIGHTCELLS) - init_df = grid.init2df(eclfiles, "PORO") + resfiles = ResFiles(EIGHTCELLS) + init_df = grid.init2df(resfiles, "PORO") assert "PORO" in init_df assert "PERMX" not in init_df assert "PORV" not in init_df - init_df = grid.init2df(eclfiles, "P*") + init_df = grid.init2df(resfiles, "P*") assert "PORO" in init_df assert "PERMX" in init_df assert "PVTNUM" in init_df assert "SATNUM" not in init_df - init_df = grid.init2df(eclfiles, ["P*"]) + init_df = grid.init2df(resfiles, ["P*"]) assert "PORO" in init_df assert "PERMX" in init_df assert "PVTNUM" in init_df assert "SATNUM" not in init_df - init_df = grid.init2df(eclfiles, ["P*", "*NUM"]) + init_df = grid.init2df(resfiles, ["P*", "*NUM"]) assert "PORO" in init_df assert "PERMX" in init_df assert "PVTNUM" in init_df @@ -313,55 +313,55 @@ def test_dropconstants(): def test_df(): """Test the df function""" - eclfiles = EclFiles(REEK) + resfiles = ResFiles(REEK) # assert error.. 
with pytest.raises(TypeError): # pylint: disable=no-value-for-parameter grid.df() - grid_df = grid.df(eclfiles) + grid_df = grid.df(resfiles) assert not grid_df.empty assert "I" in grid_df # From GRID assert "PORO" in grid_df # From INIT assert "SOIL" not in grid_df # We do not get RST unless we ask for it. - grid_df = grid.df(eclfiles, vectors="*") + grid_df = grid.df(resfiles, vectors="*") assert "I" in grid_df # From GRID assert "PORO" in grid_df # From INIT assert "SOIL" not in grid_df # We do not get RST unless we ask for it. - grid_df = grid.df(eclfiles, vectors=["*"]) + grid_df = grid.df(resfiles, vectors=["*"]) assert "I" in grid_df # From GRID assert "PORO" in grid_df # From INIT assert "SOIL" not in grid_df # We do not get RST unless we ask for it. - grid_df = grid.df(eclfiles, vectors="PRESSURE") + grid_df = grid.df(resfiles, vectors="PRESSURE") assert "I" in grid_df assert "PRESSURE" not in grid_df # that vector is only in RST assert len(grid_df) == 35817 assert "VOLUME" in grid_df - grid_df = grid.df(eclfiles, vectors=["PRESSURE"]) + grid_df = grid.df(resfiles, vectors=["PRESSURE"]) assert "I" in grid_df assert not grid_df.empty assert "PRESSURE" not in grid_df geometry_cols = len(grid_df.columns) - grid_df = grid.df(eclfiles, vectors=["PRESSURE"], rstdates="last", stackdates=True) + grid_df = grid.df(resfiles, vectors=["PRESSURE"], rstdates="last", stackdates=True) assert "PRESSURE" in grid_df assert len(grid_df.columns) == geometry_cols + 2 assert "DATE" in grid_df # Present because of stackdates - grid_df = grid.df(eclfiles, vectors="PRESSURE", rstdates="last") + grid_df = grid.df(resfiles, vectors="PRESSURE", rstdates="last") assert "PRESSURE" in grid_df assert len(grid_df.columns) == geometry_cols + 1 - grid_df = grid.df(eclfiles, vectors="PRESSURE", rstdates="last", dateinheaders=True) + grid_df = grid.df(resfiles, vectors="PRESSURE", rstdates="last", dateinheaders=True) assert "PRESSURE" not in grid_df assert "PRESSURE@2001-08-01" in grid_df 
grid_df = grid.df( - eclfiles, vectors=["PORO", "PRESSURE"], rstdates="all", stackdates=True + resfiles, vectors=["PORO", "PRESSURE"], rstdates="all", stackdates=True ) assert "PRESSURE" in grid_df assert len(grid_df.columns) == geometry_cols + 3 @@ -393,20 +393,20 @@ def test_df(): pd.testing.assert_frame_equal(df1, df3) pd.testing.assert_frame_equal(df1, df4) - grid_df = grid.df(eclfiles, vectors="PORO") + grid_df = grid.df(resfiles, vectors="PORO") assert "I" in grid_df assert "PORO" in grid_df assert len(grid_df) == 35817 assert "DATE" not in grid_df - grid_df = grid.df(eclfiles, vectors="PORO", rstdates="all") + grid_df = grid.df(resfiles, vectors="PORO", rstdates="all") assert "I" in grid_df assert "PORO" in grid_df assert "DATE" not in grid_df # (no RST columns, so no DATE info in the dataframe) # (warnings should be printed) - grid_df = grid.df(eclfiles, vectors="PORO", rstdates="all", stackdates=True) + grid_df = grid.df(resfiles, vectors="PORO", rstdates="all", stackdates=True) assert "I" in grid_df assert "PORO" in grid_df assert "DATE" not in grid_df @@ -499,13 +499,13 @@ def test_main_arrow(tmp_path, mocker): def test_get_available_rst_dates(): """Test the support of dates in restart files""" - eclfiles = EclFiles(REEK) - # rstfile = eclfiles.get_rstfile() + resfiles = ResFiles(REEK) + # rstfile = resfiles.get_rstfile() - alldates = grid.get_available_rst_dates(eclfiles) + alldates = grid.get_available_rst_dates(resfiles) assert len(alldates) == 4 - didx = grid.dates2rstindices(eclfiles, "all") + didx = grid.dates2rstindices(resfiles, "all") assert len(didx[0]) == len(alldates) assert len(didx[1]) == len(alldates) assert isinstance(didx[0][0], int) @@ -513,38 +513,38 @@ def test_get_available_rst_dates(): assert didx[1][0] == alldates[0] assert didx[1][-1] == alldates[-1] - somedate = grid.dates2rstindices(eclfiles, "2000-07-01") + somedate = grid.dates2rstindices(resfiles, "2000-07-01") assert somedate[1] == [alldates[1]] with 
pytest.raises(ValueError, match="date 1999-09-09 not found in UNRST file"): - grid.dates2rstindices(eclfiles, "1999-09-09") + grid.dates2rstindices(resfiles, "1999-09-09") with pytest.raises(ValueError, match="date 1999-0909 not understood"): - grid.dates2rstindices(eclfiles, "1999-0909") + grid.dates2rstindices(resfiles, "1999-0909") - expl_date = grid.dates2rstindices(eclfiles, datetime.date(2000, 7, 1)) + expl_date = grid.dates2rstindices(resfiles, datetime.date(2000, 7, 1)) assert expl_date[1] == [alldates[1]] expl_datetime = grid.dates2rstindices( - eclfiles, datetime.datetime(2000, 7, 1, 0, 0, 0) + resfiles, datetime.datetime(2000, 7, 1, 0, 0, 0) ) assert expl_datetime[1] == [alldates[1]] - expl_list_datetime = grid.dates2rstindices(eclfiles, [datetime.date(2000, 7, 1)]) + expl_list_datetime = grid.dates2rstindices(resfiles, [datetime.date(2000, 7, 1)]) assert expl_list_datetime[1] == [alldates[1]] # For list input, only datetime.date objects are allowed: expl_list2_date = grid.dates2rstindices( - eclfiles, [datetime.date(2000, 7, 1), datetime.date(2001, 2, 1)] + resfiles, [datetime.date(2000, 7, 1), datetime.date(2001, 2, 1)] ) assert expl_list2_date[1] == [alldates[1], alldates[2]] with pytest.raises(ValueError, match="None of the requested dates were found"): - grid.dates2rstindices(eclfiles, ["2000-07-01", "2001-02-01"]) + grid.dates2rstindices(resfiles, ["2000-07-01", "2001-02-01"]) with pytest.raises(ValueError, match="None of the requested dates were found"): grid.dates2rstindices( - eclfiles, + resfiles, [ datetime.datetime(2000, 7, 1, 0, 0, 0), datetime.datetime(2001, 2, 1, 0, 0, 0), @@ -552,40 +552,40 @@ def test_get_available_rst_dates(): ) with pytest.raises(ValueError, match="not understood"): - grid.dates2rstindices(eclfiles, {"2000-07-01": "2001-02-01"}) + grid.dates2rstindices(resfiles, {"2000-07-01": "2001-02-01"}) - first = grid.dates2rstindices(eclfiles, "first") + first = grid.dates2rstindices(resfiles, "first") assert first[1][0] == 
alldates[0] - last = grid.dates2rstindices(eclfiles, "last") + last = grid.dates2rstindices(resfiles, "last") assert last[1][0] == alldates[-1] - dates = grid.get_available_rst_dates(eclfiles) + dates = grid.get_available_rst_dates(resfiles) assert isinstance(dates, list) # Test with missing RST file: - eclfiles = EclFiles("BOGUS.DATA") + resfiles = ResFiles("BOGUS.DATA") with pytest.raises(IOError): - eclfiles.get_rstfile() + resfiles.get_rstfile() def test_rst2df(): """Test producing dataframes from restart files""" - eclfiles = EclFiles(REEK) - assert grid.rst2df(eclfiles, "first").shape == (35817, 24) - assert grid.rst2df(eclfiles, "last").shape == (35817, 24) - assert grid.rst2df(eclfiles, "all").shape == (35817, 23 * 4 + 1) + resfiles = ResFiles(REEK) + assert grid.rst2df(resfiles, "first").shape == (35817, 24) + assert grid.rst2df(resfiles, "last").shape == (35817, 24) + assert grid.rst2df(resfiles, "all").shape == (35817, 23 * 4 + 1) - assert "SOIL" in grid.rst2df(eclfiles, date="first", dateinheaders=False) + assert "SOIL" in grid.rst2df(resfiles, date="first", dateinheaders=False) assert ( - "SOIL@2000-01-01" in grid.rst2df(eclfiles, "first", dateinheaders=True).columns + "SOIL@2000-01-01" in grid.rst2df(resfiles, "first", dateinheaders=True).columns ) - rst_df = grid.rst2df(eclfiles, "first", stackdates=True) + rst_df = grid.rst2df(resfiles, "first", stackdates=True) assert "DATE" in rst_df assert rst_df["DATE"].unique()[0] == "2000-01-01" - rst_df = grid.rst2df(eclfiles, "all", stackdates=True) - assert len(rst_df["DATE"].unique()) == len(grid.get_available_rst_dates(eclfiles)) + rst_df = grid.rst2df(resfiles, "all", stackdates=True) + assert len(rst_df["DATE"].unique()) == len(grid.get_available_rst_dates(resfiles)) # "DATE" and "active" are now the extra columns: assert rst_df.shape == (4 * 35817, 23 + 2) @@ -599,21 +599,21 @@ def test_rst2df(): assert sum(nancols) == 1 # All other columns are "False" # Check vector slicing: - rst_df = 
grid.rst2df(eclfiles, "first", vectors="S???") + rst_df = grid.rst2df(resfiles, "first", vectors="S???") assert rst_df.shape == (35817, 4) assert "SGAS" in rst_df assert "SWAT" in rst_df assert "SOIL" in rst_df # This is actually computed assert "FIPWAT" not in rst_df - rst_df = grid.rst2df(eclfiles, "first", vectors=["PRESSURE", "SWAT"]) + rst_df = grid.rst2df(resfiles, "first", vectors=["PRESSURE", "SWAT"]) assert "PRESSURE" in rst_df assert "SWAT" in rst_df assert "SGAS" not in rst_df assert "SOIL" not in rst_df # Check that we can avoid getting SOIL if we are explicit: - rst_df = grid.rst2df(eclfiles, "first", vectors=["SGAS", "SWAT"]) + rst_df = grid.rst2df(resfiles, "first", vectors=["SGAS", "SWAT"]) assert "SOIL" not in rst_df assert "SGAS" in rst_df assert "SWAT" in rst_df diff --git a/tests/test_gruptree.py b/tests/test_gruptree.py index 5adfb6c8d..d880c349b 100644 --- a/tests/test_gruptree.py +++ b/tests/test_gruptree.py @@ -8,7 +8,7 @@ import pytest from res2df import gruptree, res2csv -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles try: # pylint: disable=unused-import @@ -27,8 +27,8 @@ def test_eightcells_dataset(): """Test Eightcells dataset""" - eclfiles = EclFiles(EIGHTCELLS) - gruptree_df = gruptree.df(eclfiles.get_ecldeck()) + resfiles = ResFiles(EIGHTCELLS) + gruptree_df = gruptree.df(resfiles.get_ecldeck()) expected_dframe = pd.DataFrame( [ @@ -44,8 +44,8 @@ def test_eightcells_dataset(): def test_gruptree2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - grupdf = gruptree.df(eclfiles.get_ecldeck()) + resfiles = ResFiles(REEK) + grupdf = gruptree.df(resfiles.get_ecldeck()) assert not grupdf.empty assert len(grupdf["DATE"].unique()) == 5 @@ -53,7 +53,7 @@ def test_gruptree2df(): assert len(grupdf["PARENT"].dropna().unique()) == 3 assert set(grupdf["KEYWORD"].unique()) == set(["GRUPTREE", "WELSPECS"]) - grupdfnowells = gruptree.df(eclfiles.get_ecldeck(), welspecs=False) + grupdfnowells = 
gruptree.df(resfiles.get_ecldeck(), welspecs=False) assert len(grupdfnowells["KEYWORD"].unique()) == 1 assert grupdf["PARENT"].dropna().unique()[0] == "FIELD" @@ -75,7 +75,7 @@ def test_str2df(): / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) grupdf = gruptree.df(deck) assert grupdf.dropna().empty # the DATE is empty @@ -118,7 +118,7 @@ def test_grupnet_rst_docs(tmp_path): / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) grupdf = gruptree.df(deck) grupdf[["DATE", "CHILD", "PARENT", "KEYWORD"]].to_csv("gruptree.csv", index=False) grupdf.to_csv("gruptreenet.csv", index=False) @@ -161,7 +161,7 @@ def test_grupnetdf(): / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) grupdf = gruptree.df(deck, startdate="2000-01-01") print(grupdf) assert "TERMINAL_PRESSURE" in grupdf @@ -308,7 +308,7 @@ def test_dict2treelib_deprecated(): def test_grupnetroot(schstr, expected_dframe, expected_tree): """Test that terminal pressure of the tree root can be included in the dataframe (with an empty parent)""" - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) grupdf = gruptree.df(deck, startdate="2000-01-01") non_default_columns = ["CHILD", "PARENT", "TERMINAL_PRESSURE"] pd.testing.assert_frame_equal( @@ -414,7 +414,7 @@ def test_edge_dataframe2dict(dframe, expected): def test_emptytree_strdeck(): """Test empty schedule sections. 
Don't want to crash""" schstr = "" - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) grupdf = gruptree.df(deck) assert grupdf.empty gruptreedict = gruptree.edge_dataframe2dict(grupdf) @@ -461,7 +461,7 @@ def test_tstep(): / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) grupdf = gruptree.df(deck) assert len(grupdf["DATE"].unique()) == 2 print(grupdf) @@ -724,7 +724,7 @@ def test_branprop_nodeprop(schstr, expected_dframe, check_columns): """Testing that the gruptree dataframe works correctly when the schedule string contains BRANPROP and NODEPROP """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) dframe = gruptree.df(deck).reset_index() expected_dframe.DATE = pd.to_datetime(expected_dframe.DATE) pd.testing.assert_frame_equal( @@ -789,5 +789,5 @@ def test_prettyprint(): """ - dframe = gruptree.df(EclFiles.str2deck(schstr)) + dframe = gruptree.df(ResFiles.str2deck(schstr)) assert gruptree.prettyprint(dframe).strip() == expected_prettyprint.strip() diff --git a/tests/test_init.py b/tests/test_init.py index 5fcdd7af9..d43f79fde 100644 --- a/tests/test_init.py +++ b/tests/test_init.py @@ -15,7 +15,7 @@ def test_init(): for submodule in res2df.SUBMODULES: assert "res2df." 
+ submodule in sys.modules - # The Eclfiles object inside eclfiles should be lifted up to top-level: - assert hasattr(res2df, "EclFiles") + # The ResFiles object inside resfiles should be lifted up to top-level: + assert hasattr(res2df, "ResFiles") assert isinstance(res2df.__version__, str) diff --git a/tests/test_nnc.py b/tests/test_nnc.py index 9aaa3b817..444f5f49d 100644 --- a/tests/test_nnc.py +++ b/tests/test_nnc.py @@ -9,7 +9,7 @@ import pytest from res2df import faults, nnc, res2csv, trans -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles try: # pylint: disable=unused-import @@ -27,8 +27,8 @@ def test_nnc2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - nncdf = nnc.df(eclfiles) + resfiles = ResFiles(REEK) + nncdf = nnc.df(resfiles) assert not nncdf.empty assert "I1" in nncdf @@ -48,14 +48,14 @@ def test_nnc2df(): def test_no_nnc(): """Test nnc on an Eclipse case with no NNCs""" - eclfiles = EclFiles(EIGHTCELLS) - assert nnc.df(eclfiles).empty + resfiles = ResFiles(EIGHTCELLS) + assert nnc.df(resfiles).empty def test_nnc2df_coords(): """Test that we are able to add coordinates""" - eclfiles = EclFiles(REEK) - gnncdf = nnc.df(eclfiles, coords=True) + resfiles = ResFiles(REEK) + gnncdf = nnc.df(resfiles, coords=True) assert not gnncdf.empty assert "X" in gnncdf assert "Y" in gnncdf @@ -65,9 +65,9 @@ def test_nnc2df_coords(): @pytest.mark.skipif(not HAVE_OPM, reason="Requires OPM") def test_nnc2df_faultnames(): """Add faultnames from FAULTS keyword to connections""" - eclfiles = EclFiles(REEK) - nncdf = nnc.df(eclfiles) - faultsdf = faults.df(eclfiles.get_ecldeck()) + resfiles = ResFiles(REEK) + nncdf = nnc.df(resfiles) + faultsdf = faults.df(resfiles.get_ecldeck()) merged = pd.merge( nncdf, @@ -89,8 +89,8 @@ def test_nnc2df_faultnames(): def test_df2ecl_editnnc(tmp_path): """Test generation of EDITNNC keyword""" - eclfiles = EclFiles(REEK) - nncdf = nnc.df(eclfiles) + resfiles = ResFiles(REEK) + nncdf =
nnc.df(resfiles) os.chdir(tmp_path) nncdf["TRANM"] = 2 @@ -109,11 +109,11 @@ def test_df2ecl_editnnc(tmp_path): assert "avg multiplier" not in editnnc # Test compatibility with trans module: - trans_df = trans.df(eclfiles, addnnc=True) + trans_df = trans.df(resfiles, addnnc=True) editnnc = nnc.df2ecl_editnnc(trans_df.assign(TRANM=0.3)) assert "avg multiplier 0.3" in editnnc or "avg multiplier 0.29999" in editnnc - print(nnc.df2ecl_editnnc(nnc.df(eclfiles).head(4).assign(TRANM=0.1))) + print(nnc.df2ecl_editnnc(nnc.df(resfiles).head(4).assign(TRANM=0.1))) @pytest.mark.skipif(not HAVE_OPM, reason="Requires OPM") diff --git a/tests/test_parameters.py b/tests/test_parameters.py index a4920045d..f8c12cb42 100644 --- a/tests/test_parameters.py +++ b/tests/test_parameters.py @@ -7,7 +7,7 @@ import pytest import yaml -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles from res2df.parameters import find_parameter_files, load, load_all TESTDIR = Path(__file__).absolute().parent @@ -16,12 +16,12 @@ def test_parameters(): """Test import of parameters.txt++""" - eclfiles = EclFiles(DATAFILE) + resfiles = ResFiles(DATAFILE) # NB: This test easily fails due to remnants of other test code.. - assert not find_parameter_files(eclfiles) + assert not find_parameter_files(resfiles) - parameterstxt = Path(eclfiles.get_path()) / "parameters.txt" + parameterstxt = Path(resfiles.get_path()) / "parameters.txt" # If this exists, it is a remnant from test code that has # crashed. It should NOT be in git. 
if parameterstxt.is_file(): @@ -32,10 +32,10 @@ def test_parameters(): assert "FOO" in param_dict assert "BAR" in param_dict - assert len(find_parameter_files(eclfiles)) == 1 + assert len(find_parameter_files(resfiles)) == 1 parameterstxt.unlink() - parameterstxt = Path(eclfiles.get_path()).parent / "parameters.txt" + parameterstxt = Path(resfiles.get_path()).parent / "parameters.txt" if parameterstxt.is_file(): parameterstxt.unlink() parameterstxt.write_text("FOO 1\nBAR 3\nCONTACT:BARF 2700", encoding="utf-8") @@ -45,33 +45,33 @@ def test_parameters(): assert "BAR" in param_dict assert param_dict["BAR"] == 3 assert param_dict["CONTACT:BARF"] == 2700 - assert len(find_parameter_files(eclfiles)) == 1 + assert len(find_parameter_files(resfiles)) == 1 parameterstxt.unlink() # Typical parameters.json structure: The group "CONTACT" is assumed having # duplicate information, and is to be ignored dump_me = {"FOO": 1, "BAR": "com", "CONTACT:BARF": 2700, "CONTACT": {"BARF": 2700}} - parametersyml = Path(eclfiles.get_path()) / "parameters.yml" + parametersyml = Path(resfiles.get_path()) / "parameters.yml" if parametersyml.is_file(): parametersyml.unlink() parametersyml.write_text(yaml.dump(dump_me), encoding="utf-8") assert Path(parametersyml).is_file() - assert len(find_parameter_files(eclfiles)) == 1 + assert len(find_parameter_files(resfiles)) == 1 param_dict = load(parametersyml) assert "FOO" in param_dict assert "BAR" in param_dict assert param_dict["BAR"] == "com" parametersyml.unlink() - parametersjson = Path(eclfiles.get_path()) / "parameters.json" + parametersjson = Path(resfiles.get_path()) / "parameters.json" if parametersjson.is_file(): parametersjson.unlink() parametersjson.write_text(json.dumps(dump_me), encoding="utf-8") assert Path(parametersjson).is_file() - assert len(find_parameter_files(eclfiles)) == 1 - param_dict = load(find_parameter_files(eclfiles)[0]) - param_dict_m = load_all(find_parameter_files(eclfiles)) + assert 
len(find_parameter_files(resfiles)) == 1 + param_dict = load(find_parameter_files(resfiles)[0]) + param_dict_m = load_all(find_parameter_files(resfiles)) assert "FOO" in param_dict assert "BAR" in param_dict assert param_dict["BAR"] == "com" @@ -81,12 +81,12 @@ def test_parameters(): def test_multiple_parameters(): """Test what happens when we have duplicate parameter files""" - eclfiles = EclFiles(DATAFILE) - parametersjson = Path(eclfiles.get_path()) / "parameters.json" - parameterstxt = Path(eclfiles.get_path()).parent / "parameters.txt" + resfiles = ResFiles(DATAFILE) + parametersjson = Path(resfiles.get_path()) / "parameters.json" + parameterstxt = Path(resfiles.get_path()).parent / "parameters.txt" parameterstxt.write_text("FOO 1\nBAR 4", encoding="utf-8") parametersjson.write_text(json.dumps({"BAR": 5, "COM": 6}), encoding="utf-8") - param_dict = load_all(find_parameter_files(eclfiles)) + param_dict = load_all(find_parameter_files(resfiles)) assert len(param_dict) == 3 assert param_dict["BAR"] == 5 # json has precedence over txt parametersjson.unlink() diff --git a/tests/test_pillars.py b/tests/test_pillars.py index c604a9009..4007fe224 100644 --- a/tests/test_pillars.py +++ b/tests/test_pillars.py @@ -6,7 +6,7 @@ import pytest from res2df import grid, pillars, res2csv -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -14,8 +14,8 @@ def test_pillars(): """Test that we can build a dataframe of pillar statistics""" - eclfiles = EclFiles(REEK) - pillars_df = pillars.df(eclfiles) + resfiles = ResFiles(REEK) + pillars_df = pillars.df(resfiles) assert "PILLAR" in pillars_df assert "VOLUME" in pillars_df assert "PORV" in pillars_df @@ -30,25 +30,25 @@ def test_pillars(): assert "GOC" not in pillars_df assert len(pillars_df) == 2560 - pillars_df = pillars.df(eclfiles, region="FIPNUM") + pillars_df = pillars.df(resfiles, 
region="FIPNUM") assert "FIPNUM" in pillars_df assert len(pillars_df["FIPNUM"].unique()) == 6 assert "OILVOL" not in pillars_df - pillars_df = pillars.df(eclfiles, rstdates="first") - firstdate = str(grid.dates2rstindices(eclfiles, "first")[1][0]) + pillars_df = pillars.df(resfiles, rstdates="first") + firstdate = str(grid.dates2rstindices(resfiles, "first")[1][0]) assert "OILVOL@" + firstdate in pillars_df assert "GASVOL@" + firstdate in pillars_df assert "WATVOL@" + firstdate in pillars_df - pillars_df = pillars.df(eclfiles, rstdates="last", soilcutoff=0.2, sgascutoff=0.2) - lastdate = str(grid.dates2rstindices(eclfiles, "last")[1][0]) + pillars_df = pillars.df(resfiles, rstdates="last", soilcutoff=0.2, sgascutoff=0.2) + lastdate = str(grid.dates2rstindices(resfiles, "last")[1][0]) assert "OWC@" + lastdate in pillars_df assert "GOC@" + lastdate not in pillars_df # Because the dataset has no GAS... # Grouping by unknowns only trigger a warning pd.testing.assert_frame_equal( - pillars.df(eclfiles), pillars.df(eclfiles, region="FOOBAR") + pillars.df(resfiles), pillars.df(resfiles, region="FOOBAR") ) diff --git a/tests/test_pvt.py b/tests/test_pvt.py index 8f707a182..5ef868f80 100644 --- a/tests/test_pvt.py +++ b/tests/test_pvt.py @@ -9,7 +9,7 @@ import pytest from res2df import csv2res, pvt, res2csv -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles try: # pylint: disable=unused-import @@ -33,7 +33,7 @@ def test_pvto_strings(): 18 25 1.14 0.59 / / -- One table (pvtnum=1), two records (two gor's) """ - dframe = pvt.pvto_fromdeck(EclFiles.str2deck(pvto_deck)) + dframe = pvt.pvto_fromdeck(ResFiles.str2deck(pvto_deck)) assert "PVTNUM" in dframe assert "RS" in dframe assert "PRESSURE" in dframe @@ -64,7 +64,7 @@ def test_pvto_strings(): 19 30 1.14 0.59 / / """ - dframe = pvt.pvto_fromdeck(EclFiles.str2deck(pvto_deck)) + dframe = pvt.pvto_fromdeck(ResFiles.str2deck(pvto_deck)) assert len(dframe) == 6 assert "PVTNUM" in dframe assert 
set(dframe["PVTNUM"].astype(int).unique()) == {1, 2} @@ -160,8 +160,8 @@ def test_pvdo_string(): def test_pvt_reek(): """Test that the Reek PVT input can be parsed individually""" - eclfiles = EclFiles(REEK) - pvto_df = pvt.pvto_fromdeck(eclfiles.get_ecldeck()) + resfiles = ResFiles(REEK) + pvto_df = pvt.pvto_fromdeck(resfiles.get_ecldeck()) assert "PVTNUM" in pvto_df assert "PRESSURE" in pvto_df assert "VOLUMEFACTOR" in pvto_df @@ -180,7 +180,7 @@ def test_pvt_reek(): dframe_via_string = pvt.pvto_fromdeck(pvt.df2ecl_pvto(pvto_df)) pd.testing.assert_frame_equal(dframe_via_string, pvto_df) - density_df = pvt.density_fromdeck(eclfiles.get_ecldeck()) + density_df = pvt.density_fromdeck(resfiles.get_ecldeck()) pd.testing.assert_frame_equal( density_df, pd.DataFrame( @@ -192,14 +192,14 @@ def test_pvt_reek(): dframe_via_string = pvt.density_fromdeck(pvt.df2ecl_density(density_df)) pd.testing.assert_frame_equal(dframe_via_string, density_df) - rock_df = pvt.rock_fromdeck(eclfiles.get_ecldeck()) + rock_df = pvt.rock_fromdeck(resfiles.get_ecldeck()) assert "PVTNUM" in rock_df assert len(rock_df) == 1 assert "PRESSURE" in rock_df assert "COMPRESSIBILITY" in rock_df assert rock_df["PRESSURE"].values[0] == 327.3 - pvtw_df = pvt.pvtw_fromdeck(eclfiles.get_ecldeck()) + pvtw_df = pvt.pvtw_fromdeck(resfiles.get_ecldeck()) assert "PVTNUM" in pvtw_df assert pvtw_df["PVTNUM"].values[0] == 1 assert len(pvtw_df) == 1 @@ -210,7 +210,7 @@ def test_pvt_reek(): assert "VISCOSIBILITY" in pvtw_df assert pvtw_df["VISCOSITY"].values[0] == 0.25 - pvdg_df = pvt.pvdg_fromdeck(eclfiles.get_ecldeck()) + pvdg_df = pvt.pvdg_fromdeck(resfiles.get_ecldeck()) assert "PVTNUM" in pvdg_df assert "PRESSURE" in pvdg_df assert "VOLUMEFACTOR" in pvdg_df @@ -292,8 +292,8 @@ def test_pvtg_string(): def test_density(): """Test that DENSITY can be parsed from files and from strings""" - eclfiles = EclFiles(REEK) - density_df = pvt.density_fromdeck(eclfiles.get_ecldeck()) + resfiles = ResFiles(REEK) + density_df = 
pvt.density_fromdeck(resfiles.get_ecldeck()) assert len(density_df) == 1 assert "PVTNUM" in density_df assert "OILDENSITY" in density_df @@ -308,7 +308,7 @@ def test_density(): 800 950 1.05 / """ - density_df = pvt.density_fromdeck(EclFiles.str2deck(two_pvtnum_deck)) + density_df = pvt.density_fromdeck(ResFiles.str2deck(two_pvtnum_deck)) # (a warning will be printed that we cannot guess) assert len(density_df) == 1 density_df = pvt.density_fromdeck(two_pvtnum_deck) @@ -329,7 +329,7 @@ def test_pvtw(): """Test that PVTW can be parsed from a string""" deck = """PVTW 327.3 1.03 4.51E-005 0.25 0 /""" - pvtw_df = pvt.pvtw_fromdeck(EclFiles.str2deck(deck)) + pvtw_df = pvt.pvtw_fromdeck(ResFiles.str2deck(deck)) pd.testing.assert_frame_equal( pvtw_df, pd.DataFrame( @@ -362,7 +362,7 @@ def test_rock(): """Test parsing of the ROCK keyword from a string""" deck = """ROCK 100 1.1 /""" - rock_df = pvt.rock_fromdeck(EclFiles.str2deck(deck)) + rock_df = pvt.rock_fromdeck(ResFiles.str2deck(deck)) assert len(rock_df) == 1 assert "PRESSURE" in rock_df assert "COMPRESSIBILITY" in rock_df @@ -377,8 +377,8 @@ def test_rock(): def test_df(): """Test that aggregate dataframes are produced""" - eclfiles = EclFiles(REEK) - pvtdf = pvt.df(eclfiles) + resfiles = ResFiles(REEK) + pvtdf = pvt.df(resfiles) assert not pvtdf.empty assert set(pvtdf["KEYWORD"]) == {"PVTO", "PVDG", "DENSITY", "ROCK", "PVTW"} diff --git a/tests/test_rft.py b/tests/test_rft.py index a35e8ad2f..e4c122707 100644 --- a/tests/test_rft.py +++ b/tests/test_rft.py @@ -8,7 +8,7 @@ import pytest from res2df import res2csv, rft -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -20,7 +20,7 @@ def test_rftrecords2df(): """Test that we can construct a dataframe for navigating in RFT records""" - rftrecs = rft._rftrecords2df(EclFiles(EIGHTCELLS).get_rftfile()) + rftrecs = 
rft._rftrecords2df(ResFiles(EIGHTCELLS).get_rftfile()) assert len(rftrecs[rftrecs["recordname"] == "TIME"]) == len( rftrecs["timeindex"].unique() ) @@ -33,9 +33,9 @@ def test_rftrecords_generator(): - """Test the generator that will iterate over an EclFile/RFTFile + """Test the generator that will iterate over a ResFile/RFTFile and provide one yield pr. well pr. date""" - for rftrecord in rft.rftrecords(EclFiles(EIGHTCELLS).get_rftfile()): + for rftrecord in rft.rftrecords(ResFiles(EIGHTCELLS).get_rftfile()): assert isinstance(rftrecord, dict) assert "date" in rftrecord assert isinstance(rftrecord["date"], datetime.date) @@ -50,7 +50,7 @@ def test_rftrecords_generator(): def test_get_con_seg_data(): """Get CON data. Later add more code here to defend the name""" - rftfile = EclFiles(EIGHTCELLS).get_rftfile() + rftfile = ResFiles(EIGHTCELLS).get_rftfile() # Test the first record, it is a CON type (not multisegment) rftrecord = next(rft.rftrecords(rftfile)) @@ -464,8 +464,8 @@ def test_add_extras(dframe, inplace, expected): def test_rft2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(REEK) - rftdf = rft.df(eclfiles) + resfiles = ResFiles(REEK) + rftdf = rft.df(resfiles) assert "ZONE" in rftdf assert "LEAF" not in rftdf # Topology metadata should not be exported assert set(rftdf["WELLMODEL"]) == {"STANDARD"} diff --git a/tests/test_satfunc.py b/tests/test_satfunc.py index cf1940432..c841eca33 100644 --- a/tests/test_satfunc.py +++ b/tests/test_satfunc.py @@ -9,7 +9,7 @@ import pytest from res2df import csv2res, inferdims, res2csv, satfunc -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles try: # pylint: disable=unused-import @@ -29,8 +29,8 @@ def test_ecldeck_to_satfunc_dframe(): """Test that dataframes can be produced from a full Eclipse deck (the example Reek case)""" - eclfiles = EclFiles(REEK) - satdf =
satfunc.df(resfiles.get_ecldeck()) assert set(satdf["KEYWORD"]) == {"SWOF", "SGOF"} assert set(satdf["SATNUM"]) == {1} @@ -56,8 +56,8 @@ def test_ecldeck_to_satfunc_dframe(): def test_satfunc_roundtrip(): """Test that we can produce a SATNUM dataframe from the Reek case, convert it back to an include file, and then reinterpret it to the same""" - eclfiles = EclFiles(EIGHTCELLS) - satdf = satfunc.df(eclfiles.get_ecldeck()) + resfiles = ResFiles(EIGHTCELLS) + satdf = satfunc.df(resfiles.get_ecldeck()) inc = satfunc.df2ecl(satdf) df_from_inc = satfunc.df(inc) pd.testing.assert_frame_equal( @@ -69,8 +69,8 @@ def test_satfunc_roundtrip(): def test_df2ecl_order(): """Test that we can control the keyword order in generated strings by the list supplied in keywords argument""" - eclfiles = EclFiles(REEK) - satdf = satfunc.df(eclfiles.get_ecldeck()) + resfiles = ResFiles(REEK) + satdf = satfunc.df(resfiles.get_ecldeck()) swof_sgof = satfunc.df2ecl(satdf, keywords=["SWOF", "SGOF"]) assert swof_sgof.find("SWOF") < swof_sgof.find("SGOF") diff --git a/tests/test_summary.py b/tests/test_summary.py index e6f857896..57a6c784c 100644 --- a/tests/test_summary.py +++ b/tests/test_summary.py @@ -11,7 +11,7 @@ from resdata.summary import Summary from res2df import csv2res, res2csv, summary -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles from res2df.summary import ( _df2pyarrow, _fallback_date_roll, @@ -44,8 +44,8 @@ def test_df(): """Test that dataframes are produced""" - eclfiles = EclFiles(EIGHTCELLS) - sumdf = summary.df(eclfiles) + resfiles = ResFiles(EIGHTCELLS) + sumdf = summary.df(resfiles) assert sumdf.index.name == "DATE" assert sumdf.index.dtype in ["datetime64[ns]", "datetime64"] @@ -55,7 +55,7 @@ def test_df(): assert not sumdf.columns.empty assert "FOPT" in sumdf.columns - sumdf = summary.df(eclfiles, datetime=True) + sumdf = summary.df(resfiles, datetime=True) # (datetime=True is implicit when raw time reports are requested) assert 
sumdf.index.name == "DATE" assert sumdf.index.dtype in ["datetime64[ns]", "datetime64"] @@ -68,7 +68,7 @@ def test_df(): def test_df_column_keys(): """Test that we can slice the dataframe on columns""" - sumdf = summary.df(EclFiles(REEK), column_keys="FOPT") + sumdf = summary.df(ResFiles(REEK), column_keys="FOPT") assert set(sumdf.columns) == {"FOPT"} assert set(sumdf.attrs["meta"].keys()) == {"FOPT"} @@ -83,29 +83,29 @@ def test_df_column_keys(): "FOPTF", "FOPP", } - sumdf = summary.df(EclFiles(REEK), column_keys="FOP*") + sumdf = summary.df(ResFiles(REEK), column_keys="FOP*") assert set(sumdf.columns) == fop_cols assert set(sumdf.attrs["meta"].keys()) == fop_cols - sumdf = summary.df(EclFiles(REEK), column_keys=["FOP*"]) + sumdf = summary.df(ResFiles(REEK), column_keys=["FOP*"]) assert set(sumdf.columns) == fop_cols assert set(sumdf.attrs["meta"].keys()) == fop_cols - sumdf = summary.df(EclFiles(REEK), column_keys=["FOPR", "FOPT"]) + sumdf = summary.df(ResFiles(REEK), column_keys=["FOPR", "FOPT"]) assert set(sumdf.columns) == {"FOPT", "FOPR"} assert set(sumdf.attrs["meta"].keys()) == {"FOPT", "FOPR"} - sumdf_no_columns = summary.df(EclFiles(REEK), column_keys=["BOGUS"]) + sumdf_no_columns = summary.df(ResFiles(REEK), column_keys=["BOGUS"]) assert sumdf_no_columns.columns.empty assert all(sumdf_no_columns.index == sumdf.index) def test_summary2df_dates(): """Test that we have some API possibilities with ISO dates""" - eclfiles = EclFiles(REEK) + resfiles = ResFiles(REEK) sumdf = summary.df( - eclfiles, + resfiles, start_date=datetime.date(2002, 1, 2), end_date="2002-03-01", time_index="daily", @@ -119,12 +119,12 @@ def test_summary2df_dates(): assert sumdf.index.values[0] == np.datetime64("2002-01-02") assert sumdf.index.values[-1] == np.datetime64("2002-03-01") - sumdf = summary.df(eclfiles, time_index="last", datetime=True) + sumdf = summary.df(resfiles, time_index="last", datetime=True) assert len(sumdf) == 1 assert sumdf.index.values[0] == 
np.datetime64("2003-01-02") # Leave this test for the datetime=False behaviour: - sumdf = summary.df(eclfiles, time_index="first") + sumdf = summary.df(resfiles, time_index="first") assert len(sumdf) == 1 assert str(sumdf.index.values[0]) == "2000-01-01" @@ -191,9 +191,9 @@ def test_paramsupport(tmp_path, mocker): """ tmpcsvfile = tmp_path / "sum.csv" - eclfiles = EclFiles(EIGHTCELLS) + resfiles = ResFiles(EIGHTCELLS) - parameterstxt = Path(eclfiles.get_path()) / "parameters.txt" + parameterstxt = Path(resfiles.get_path()) / "parameters.txt" if parameterstxt.is_file(): parameterstxt.unlink() parameterstxt.write_text("FOO 1\nBAR 3", encoding="utf-8") @@ -208,7 +208,7 @@ def test_paramsupport(tmp_path, mocker): assert disk_df["BAR"].unique()[0] == 3 parameterstxt.unlink() - parametersyml = Path(eclfiles.get_path()) / "parameters.yml" + parametersyml = Path(resfiles.get_path()) / "parameters.yml" if parametersyml.is_file(): parametersyml.unlink() parametersyml.write_text(yaml.dump({"FOO": 1, "BAR": 3}), encoding="utf-8") @@ -226,22 +226,22 @@ def test_paramsupport(tmp_path, mocker): assert disk_df["BAR"].unique()[0] == 3 # Test the merging from summary.df() explicitly: - assert "FOO" in summary.df(eclfiles, params=True, paramfile=None) - assert "FOO" not in summary.df(eclfiles, params=False, paramfile=None) - assert "FOO" not in summary.df(eclfiles, params=None, paramfile=None) + assert "FOO" in summary.df(resfiles, params=True, paramfile=None) + assert "FOO" not in summary.df(resfiles, params=False, paramfile=None) + assert "FOO" not in summary.df(resfiles, params=None, paramfile=None) - assert "FOO" in summary.df(eclfiles, params=False, paramfile=parametersyml) - assert "FOO" in summary.df(eclfiles, params=None, paramfile=parametersyml) - assert "FOO" in summary.df(eclfiles, params=None, paramfile="parameters.yml") + assert "FOO" in summary.df(resfiles, params=False, paramfile=parametersyml) + assert "FOO" in summary.df(resfiles, params=None, 
paramfile=parametersyml) + assert "FOO" in summary.df(resfiles, params=None, paramfile="parameters.yml") # Non-existing relative path is a soft error: assert "FOO" not in summary.df( - eclfiles, params=None, paramfile="notexisting/parameters.yml" + resfiles, params=None, paramfile="notexisting/parameters.yml" ) # Non-existing absolute path is a hard error: with pytest.raises(FileNotFoundError): - summary.df(eclfiles, params=None, paramfile="/tmp/notexisting/parameters.yml") + summary.df(resfiles, params=None, paramfile="/tmp/notexisting/parameters.yml") parametersyml.unlink() @@ -334,15 +334,15 @@ def test_datenormalization(): """Test normalization of dates, where dates can be ensured to be on dategrid boundaries""" # realization-0 here has its last summary date at 2003-01-02 - eclfiles = EclFiles(REEK) - daily = summary.df(eclfiles, column_keys="FOPT", time_index="daily", datetime=True) + resfiles = ResFiles(REEK) + daily = summary.df(resfiles, column_keys="FOPT", time_index="daily", datetime=True) assert str(daily.index[-1])[0:10] == "2003-01-02" monthly = summary.df( - eclfiles, column_keys="FOPT", time_index="monthly", datetime=True + resfiles, column_keys="FOPT", time_index="monthly", datetime=True ) assert str(monthly.index[-1])[0:10] == "2003-02-01" yearly = summary.df( - eclfiles, column_keys="FOPT", time_index="yearly", datetime=True + resfiles, column_keys="FOPT", time_index="yearly", datetime=True ) assert str(yearly.index[-1])[0:10] == "2004-01-01" @@ -350,9 +350,9 @@ def test_datenormalization(): def test_extrapolation(): """Summary data should be possible to extrapolate into the future, rates should be zero, cumulatives should be constant""" - eclfiles = EclFiles(EIGHTCELLS) + resfiles = ResFiles(EIGHTCELLS) lastfopt = summary.df( - eclfiles, column_keys="FOPT", time_index="last", datetime=True + resfiles, column_keys="FOPT", time_index="last", datetime=True )["FOPT"].values[0] answer = pd.DataFrame( # This is the maximal date for datetime64[ns] @@ 
-363,7 +363,7 @@ def test_extrapolation(): pd.testing.assert_frame_equal( summary.df( - eclfiles, + resfiles, column_keys=["FOPT", "FOPR"], time_index="2262-04-11", datetime=True, @@ -372,7 +372,7 @@ def test_extrapolation(): ) pd.testing.assert_frame_equal( summary.df( - eclfiles, + resfiles, column_keys=["FOPT", "FOPR"], time_index=[datetime.date(2262, 4, 11)], # NB: df() does not support datetime64 for time_index @@ -384,7 +384,7 @@ def test_extrapolation(): # Pandas does not support DatetimeIndex beyound 2262: with pytest.raises(pd.errors.OutOfBoundsDatetime): summary.df( - eclfiles, + resfiles, column_keys=["FOPT"], time_index=[datetime.date(2300, 1, 1)], datetime=True, @@ -392,7 +392,7 @@ def test_extrapolation(): # But without datetime, we can get it extrapolated by libecl: assert summary.df( - eclfiles, column_keys=["FOPT"], time_index=[datetime.date(2300, 1, 1)] + resfiles, column_keys=["FOPT"], time_index=[datetime.date(2300, 1, 1)] )["FOPT"].values == [lastfopt] @@ -629,9 +629,9 @@ def test_date_range(start, end, freq, expected): def test_resample_smry_dates(): """Test resampling of summary dates""" - eclfiles = EclFiles(REEK) + resfiles = ResFiles(REEK) - ecldates = eclfiles.get_eclsum().dates + ecldates = resfiles.get_eclsum().dates assert isinstance(resample_smry_dates(ecldates), list) assert isinstance(resample_smry_dates(ecldates, freq="last"), list) @@ -792,7 +792,7 @@ def test_resample_smry_dates(): ], ) def test_unique_datetime_for_short_timesteps(filepath): - assert summary.df(EclFiles(filepath)).index.is_unique + assert summary.df(ResFiles(filepath)).index.is_unique @pytest.mark.parametrize( @@ -804,12 +804,12 @@ def test_unique_datetime_for_short_timesteps(filepath): ) def test_unique_datetime_retain_index_name(filepath): """Test _ensure_unique_datetime_index method retain index name""" - assert summary.df(EclFiles(filepath)).index.name == "DATE" + assert summary.df(ResFiles(filepath)).index.name == "DATE" def test_smry_meta(): """Test 
obtaining metadata dictionary for summary vectors from an EclSum object""" - meta = smry_meta(EclFiles(REEK)) + meta = smry_meta(ResFiles(REEK)) assert isinstance(meta, dict) assert "FOPT" in meta @@ -1068,7 +1068,7 @@ def test_duplicated_summary_vectors(caplog): / "EIGHTCELLS_DUPES.DATA" ) assert "SUMMARY\nFOPR\nFOPR" in dupe_datafile.read_text() - deduplicated_dframe = df(EclFiles(dupe_datafile)) + deduplicated_dframe = df(ResFiles(dupe_datafile)) assert (deduplicated_dframe.columns == ["YEARS", "FOPR"]).all() assert "Duplicated columns detected" in caplog.text @@ -1167,15 +1167,15 @@ def test_res2df_errors(tmp_path): # This is how libecl reacts to bogus binary data Summary("FOO.UNSMRY") - # But EclFiles should be more tolerant, as it should be possible + # But ResFiles should be more tolerant, as it should be possible # to extract other data if SMRY is corrupted Path("FOO.DATA").write_text("RUNSPEC", encoding="utf8") - assert str(EclFiles("FOO").get_ecldeck()).strip() == "RUNSPEC" + assert str(ResFiles("FOO").get_ecldeck()).strip() == "RUNSPEC" with pytest.raises(OSError): - EclFiles("FOO").get_eclsum() + ResFiles("FOO").get_eclsum() # Getting a dataframe from bogus data should give empty data: - assert df(EclFiles("FOO")).empty + assert df(ResFiles("FOO")).empty def test_df2eclsum_errors(): diff --git a/tests/test_trans.py b/tests/test_trans.py index 5bd819235..c3ba5f16f 100644 --- a/tests/test_trans.py +++ b/tests/test_trans.py @@ -14,7 +14,7 @@ import pandas as pd from res2df import res2csv, trans -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles TESTDIR = Path(__file__).absolute().parent REEK = str(TESTDIR / "data/reek/eclipse/model/2_R001_REEK-0.DATA") @@ -23,8 +23,8 @@ def test_trans(): """Test that we can build a dataframe of transmissibilities""" - eclfiles = EclFiles(REEK) - trans_df = trans.df(eclfiles) + resfiles = ResFiles(REEK) + trans_df = trans.df(resfiles) assert "TRAN" in trans_df assert "DIR" in trans_df assert 
set(trans_df["DIR"].unique()) == set(["I", "J", "K"]) @@ -33,45 +33,45 @@ def test_trans(): trans_full_length = len(trans_df) # Try including some vectors: - trans_df = trans.df(eclfiles, vectors="FIPNUM") + trans_df = trans.df(resfiles, vectors="FIPNUM") assert "FIPNUM" not in trans_df assert "FIPNUM1" in trans_df assert "EQLNUM2" not in trans_df - trans_df = trans.df(eclfiles, vectors=["FIPNUM", "EQLNUM"]) + trans_df = trans.df(resfiles, vectors=["FIPNUM", "EQLNUM"]) assert "FIPNUM1" in trans_df assert "EQLNUM2" in trans_df - trans_df = trans.df(eclfiles, vectors="BOGUS") + trans_df = trans.df(resfiles, vectors="BOGUS") assert "BOGUS1" not in trans_df assert "TRAN" in trans_df # (we should have gotten a warning only) - assert "K" not in trans.df(eclfiles, onlyijdir=True)["DIR"] - assert "I" not in trans.df(eclfiles, onlykdir=True)["DIR"] + assert "K" not in trans.df(resfiles, onlyijdir=True)["DIR"] + assert "I" not in trans.df(resfiles, onlykdir=True)["DIR"] # A warning is logged, seems strange to filter on both, but # the answer (empty) makes sense given the instruction. Alternative # would be a ValueError. 
- assert trans.df(eclfiles, onlykdir=True, onlyijdir=True).empty + assert trans.df(resfiles, onlykdir=True, onlyijdir=True).empty - transnnc_df = trans.df(eclfiles, addnnc=True) + transnnc_df = trans.df(resfiles, addnnc=True) assert len(transnnc_df) > trans_full_length - trans_df = trans.df(eclfiles, vectors=["FIPNUM", "EQLNUM"], boundaryfilter=True) + trans_df = trans.df(resfiles, vectors=["FIPNUM", "EQLNUM"], boundaryfilter=True) assert trans_df.empty - trans_df = trans.df(eclfiles, vectors="FIPNUM", boundaryfilter=True) + trans_df = trans.df(resfiles, vectors="FIPNUM", boundaryfilter=True) assert len(trans_df) < trans_full_length - trans_df = trans.df(eclfiles, coords=True) + trans_df = trans.df(resfiles, coords=True) assert "X" in trans_df assert "Y" in trans_df def test_grouptrans(): """Test grouping of transmissibilities""" - eclfiles = EclFiles(REEK) - trans_df = trans.df(eclfiles, vectors="FIPNUM", group=True, coords=True) + resfiles = ResFiles(REEK) + trans_df = trans.df(resfiles, vectors="FIPNUM", group=True, coords=True) assert "FIPNUMPAIR" in trans_df assert "FIPNUM1" in trans_df assert "FIPNUM2" in trans_df @@ -80,14 +80,14 @@ def test_grouptrans(): assert "X" in trans_df # (average X coord for that FIPNUM interface) # This gives a logged error: - assert trans.df(eclfiles, vectors=["FIPNUM", "EQLNUM"], group=True).empty + assert trans.df(resfiles, vectors=["FIPNUM", "EQLNUM"], group=True).empty @pytest.mark.skipif(not HAVE_NETWORKX, reason="Requires networkx being installed") def test_nx(tmp_path): """Test graph generation""" - eclfiles = EclFiles(REEK) - network = trans.make_nx_graph(eclfiles, region="FIPNUM") + resfiles = ResFiles(REEK) + network = trans.make_nx_graph(resfiles, region="FIPNUM") assert network.number_of_nodes() == 6 networkx.write_gexf(network, tmp_path / "reek-fipnum-trans.gxf", prettyprint=True) assert (tmp_path / "reek-fipnum-trans.gxf").is_file() diff --git a/tests/test_userapi.py b/tests/test_userapi.py index deb6fbe77..213eac612 
100644 --- a/tests/test_userapi.py +++ b/tests/test_userapi.py @@ -28,22 +28,22 @@ def test_userapi(): To the user reading the source: Skip all 'assert' lines, read the rest. """ - eclfiles = res2df.EclFiles(REEK) - - compdatdf = res2df.compdat.df(eclfiles) - equil = res2df.equil.df(eclfiles) - faults = res2df.faults.df(eclfiles) - fipreports = res2df.fipreports.df(eclfiles) - grid_df = res2df.grid.df(eclfiles) - grst_df = res2df.grid.df(eclfiles, rstdates="last") - gruptree = res2df.gruptree.df(eclfiles) - nnc = res2df.nnc.df(eclfiles) - pillars = res2df.pillars.df(eclfiles) - rft = res2df.rft.df(eclfiles) - satfunc = res2df.satfunc.df(eclfiles) - smry = res2df.summary.df(eclfiles, datetime=True) - trans = res2df.trans.df(eclfiles) - wcon = res2df.wcon.df(eclfiles) + resfiles = res2df.ResFiles(REEK) + + compdatdf = res2df.compdat.df(resfiles) + equil = res2df.equil.df(resfiles) + faults = res2df.faults.df(resfiles) + fipreports = res2df.fipreports.df(resfiles) + grid_df = res2df.grid.df(resfiles) + grst_df = res2df.grid.df(resfiles, rstdates="last") + gruptree = res2df.gruptree.df(resfiles) + nnc = res2df.nnc.df(resfiles) + pillars = res2df.pillars.df(resfiles) + rft = res2df.rft.df(resfiles) + satfunc = res2df.satfunc.df(resfiles) + smry = res2df.summary.df(resfiles, datetime=True) + trans = res2df.trans.df(resfiles) + wcon = res2df.wcon.df(resfiles) assert "PORV" in grid_df assert "SOIL" not in grid_df diff --git a/tests/test_vfp.py b/tests/test_vfp.py index b77cf3a0a..5eee6fffd 100644 --- a/tests/test_vfp.py +++ b/tests/test_vfp.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from res2df import EclFiles, vfp +from res2df import ResFiles, vfp try: import opm # noqa @@ -993,7 +993,7 @@ @pytest.mark.parametrize("test_input, expected", VFPPROD_CASES) def test_res2df_vfpprod(test_input, expected): """Test res2df for VFPPROD""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfpdf = vfp.df(deck, "VFPPROD") 
pd.testing.assert_frame_equal(vfpdf, expected) @@ -1002,7 +1002,7 @@ def test_res2df_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", VFPPROD_CASES) def test_ecl2pyarrow_vfpprod(test_input, expected): """Test ecl2pyarrow for VFPPROD""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) # Read first into pyarrow tables vfppa = vfp.pyarrow_tables(deck, "VFPPROD") # Convert pyarrow table to basic data types for VFPPROD @@ -1025,7 +1025,7 @@ def test_df2ecl_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPPROD_CASES[0]]) def test_pyarrow2ecl_vfpprod(test_input, expected): """Test pyarrow2ecl for VFPPROD (case without default values)""" - deck = EclFiles.str2deck(vfp.df2ecl(expected, "VFPPROD")) + deck = ResFiles.str2deck(vfp.df2ecl(expected, "VFPPROD")) vfpprod_df = vfp.df(deck, "VFPPROD") vfpprod_data = vfp.df2basic_data(vfpprod_df) vfpprod_pa = vfp.basic_data2pyarrow(vfpprod_data) @@ -1039,7 +1039,7 @@ def test_pyarrow2ecl_vfpprod(test_input, expected): @pytest.mark.parametrize("test_input, expected", VFPINJ_CASES) def test_res2df_vfpinj(test_input, expected): """Test res2df for VFPINJ""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfpdf = vfp.df(deck, "VFPINJ") pd.testing.assert_frame_equal(vfpdf, expected) @@ -1056,7 +1056,7 @@ def test_df2ecl_vfpinj(test_input, expected): @pytest.mark.parametrize("test_input, expected", [VFPINJ_CASES[0]]) def test_pyarrow2ecl_vfpinj(test_input, expected): """Test pyarrow2ecl for VFPPROD (case without default values)""" - deck = EclFiles.str2deck(vfp.df2ecl(expected, "VFPINJ")) + deck = ResFiles.str2deck(vfp.df2ecl(expected, "VFPINJ")) vfpinj_df = vfp.df(deck, "VFPINJ") vfpinj_data = vfp.df2basic_data(vfpinj_df) vfpinj_pa = vfp.basic_data2pyarrow(vfpinj_data) @@ -1070,7 +1070,7 @@ def test_pyarrow2ecl_vfpinj(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def 
test_res2df_vfpprods(test_input, expected): """Test res2df for files with multiple VFPPROD""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD") # Two VFPPROD curves in file corresponding to curves 0 and 1 @@ -1081,7 +1081,7 @@ def test_res2df_vfpprods(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_ecl2pyarrow_vfpprods(test_input, expected): """Test res2df with pyarrow for files with multiple VFPPROD""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPPROD") # Two VFPPROD curves in file corresponding to curves 0 and 1 @@ -1094,7 +1094,7 @@ def test_ecl2pyarrow_vfpprods(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_res2df_vfpinjs(test_input, expected): """Test res2df for files with multiple VFPINJ""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ") # Two VFPINJ curves in file corresponding to curves 2 and 3 @@ -1105,7 +1105,7 @@ def test_res2df_vfpinjs(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_eclpyarrow_vfpinjs(test_input, expected): """Test res2df for pyarrow for files with multiple VFPINJ""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ") # Two VFPINJ curves in file corresponding to curves 2 and 3 @@ -1118,7 +1118,7 @@ def test_eclpyarrow_vfpinjs(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_res2df_vfpprod_no(test_input, expected): """Test res2df for files with multiple VFPPROD with vfp number argument""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD", "2") # VFPPROD curve with VFP number 2 is curve 1 in file @@ -1130,7 
+1130,7 @@ def test_ecl2pyarrow_vfpprod_no(test_input, expected): """Test res2df for pyarrow for files with multiple VFPPROD with vfp number argument """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPPROD", "2") vfpprod_data = vfp.pyarrow2basic_data(vfppas[0]) vfpdf = vfp.basic_data2df(vfpprod_data) @@ -1142,7 +1142,7 @@ def test_ecl2pyarrow_vfpprod_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_res2df_vfpinj_no(test_input, expected): """Test res2df for files with multiple VFPINJ with vfp number argument""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ", "4") # VFPINJ curve with VFP number 4 is curve 3 in file @@ -1152,7 +1152,7 @@ def test_res2df_vfpinj_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_ecl2pyarrow_vfpinj_no(test_input, expected): """Test res2df for pyarrow files with multiple VFPINJ with vfp number argument""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ", "4") vfpinj_data = vfp.pyarrow2basic_data(vfppas[0]) @@ -1165,7 +1165,7 @@ def test_ecl2pyarrow_vfpinj_no(test_input, expected): @pytest.mark.parametrize("test_input, expected", MULTIPLE_VFP_CASES) def test_res2df_vfpprods_no(test_input, expected): """Test res2df for files with multiple VFPPROD with vfp number argument as range""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1178,7 +1178,7 @@ def test_ecl2pyarrow_vfpprods_no(test_input, expected): """Test res2df for pyarrow for files with multiple VFPPROD with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfppas = 
vfp.pyarrow_tables(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1193,7 +1193,7 @@ def test_res2df_vfpinjs_no(test_input, expected): """Test res2df for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfpdfs = vfp.dfs(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1206,7 +1206,7 @@ def test_ecl2pyarrow_vfpinjs_no(test_input, expected): """Test res2df for pyararow for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) vfppas = vfp.pyarrow_tables(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1221,7 +1221,7 @@ def test_basic_data_vfpprods_no(test_input, expected): """Test res2df basic_data reading for files with multiple VFPPROD with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) basic_data_vfps = vfp.basic_data(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1235,7 +1235,7 @@ def test_basic_data_vfpinjs_no(test_input, expected): """Test res2df basic_data reading for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) basic_data_vfps = vfp.basic_data(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1249,7 +1249,7 @@ def test_pyarrow2basic_data_vfpprods_no(test_input, expected): """Test res2df pyarrow2basic_data for files with multiple VFPPROD with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) pyarrow_vfps = vfp.pyarrow_tables(deck, "VFPPROD", "[1:2]") # VFPPROD curves with VFP numbers 1 and 2 are curves 0 and 1 @@ -1264,7 +1264,7 @@ def 
test_pyarrow2basic_data_vfpinjs_no(test_input, expected): """Test res2df pyarrow2basic_data for files with multiple VFPINJ with vfp number argument as range """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) pyarrow_vfps = vfp.pyarrow_tables(deck, "VFPINJ", "[3:4]") # VFPINJ curves with VFP numbers 3 and 4 are curves 2 and 3 @@ -1285,7 +1285,7 @@ def test_basic_data_key_exceptions_vfpprods(self, vfpprod_key, test_input, dummy """Test exceptions for basic data format (not containing all required keywords) for VFPPROD" """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) basic_data_vfpprods = vfp.basic_data(deck, "VFPPROD") # Check if exception is raises if one key is missing @@ -1312,7 +1312,7 @@ def test_basic_data_array_dim_exceptions_vfpprods( """Test exceptions for basic data format (inconsistency in array dimensions) for VFPPROD" """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) basic_data_vfpprods = vfp.basic_data(deck, "VFPPROD") # Check if exception is raises if array dimension is wrong @@ -1329,7 +1329,7 @@ def test_basic_data_dims_vfpprods(test_input, expected): """Test exceptions for dimensions consistency for basic data format (not containing all required keywords) for VFPPROD" """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) basic_data_vfpprods = vfp.basic_data(deck, "VFPPROD") # Check if exception is raised if dimensions are wrong @@ -1352,7 +1352,7 @@ def test_basic_data_key_exceptions_vfpinjs(self, vfpinj_key, test_input, dummy): """Test exceptions for basic data format (not containing all required keywords) for VFPINJ" """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) basic_data_vfpinjs = vfp.basic_data(deck, "VFPINJ") # Check if exception is raises if one key is missing @@ -1379,7 +1379,7 @@ def test_basic_data_array_dim_exceptions_vfpinjs( """Test exceptions for basic data format 
(inconsistency in array dimensions) for VFPINJ" """ - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) basic_data_vfpinjs = vfp.basic_data(deck, "VFPINJ") # Check if exception is raises if array dimension if wrong diff --git a/tests/test_wcon.py b/tests/test_wcon.py index 0978a1dcd..de1ba763d 100644 --- a/tests/test_wcon.py +++ b/tests/test_wcon.py @@ -8,7 +8,7 @@ import pytest from res2df import res2csv, wcon -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles try: # pylint: disable=unused-import @@ -24,8 +24,8 @@ def test_wcon2df(): """Test that dataframes are produced""" - eclfiles = EclFiles(EIGHTCELLS) - wcondf = wcon.df(eclfiles.get_ecldeck()) + resfiles = ResFiles(EIGHTCELLS) + wcondf = wcon.df(resfiles.get_ecldeck()) assert not wcondf.empty assert "DATE" in wcondf # for all data @@ -41,7 +41,7 @@ def test_wconhist(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResFiles.str2deck(wconstr) wconhist_df = wcon.df(deck) pd.testing.assert_frame_equal( wconhist_df, @@ -74,7 +74,7 @@ def test_wconinjh(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResFiles.str2deck(wconstr) wconinjh_df = wcon.df(deck) pd.testing.assert_frame_equal( wconinjh_df, @@ -108,7 +108,7 @@ def test_wconinje(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResFiles.str2deck(wconstr) wconinje_df = wcon.df(deck) pd.testing.assert_frame_equal( wconinje_df, @@ -145,7 +145,7 @@ def test_wconprod(): 'FOO' 0 1 / / """ - deck = EclFiles.str2deck(wconstr) + deck = ResFiles.str2deck(wconstr) wconprod_df = wcon.df(deck) pd.testing.assert_frame_equal( wconprod_df, @@ -207,7 +207,7 @@ def test_tstep(): 'OP1' 3000 / / """ - deck = EclFiles.str2deck(schstr) + deck = ResFiles.str2deck(schstr) wcondf = wcon.df(deck) dates = [str(x) for x in wcondf["DATE"].unique()] assert len(dates) == 3 diff --git a/tests/test_wellcompletiondata.py b/tests/test_wellcompletiondata.py index aeaff2129..b381e7cb0 100644 --- 
a/tests/test_wellcompletiondata.py +++ b/tests/test_wellcompletiondata.py @@ -6,7 +6,7 @@ import pytest from res2df import common, compdat, wellcompletiondata -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles from res2df.wellcompletiondata import ( _aggregate_layer_to_zone, _df2pyarrow, @@ -34,7 +34,7 @@ def test_eightcells_with_wellconnstatus(): """Test the Eightcells dataset with the well connection status option activated (connection status extracted from summary data) """ - eclfiles = EclFiles(EIGHTCELLS) + resfiles = ResFiles(EIGHTCELLS) expected_dframe = pd.DataFrame( [ { @@ -48,7 +48,7 @@ def test_eightcells_with_wellconnstatus(): ) pd.testing.assert_frame_equal( wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=True + resfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=True ), expected_dframe, check_dtype=False, @@ -58,7 +58,7 @@ def test_eightcells_with_wellconnstatus(): def test_eightcells_without_wellconnstatus(): """Test the Eightcells dataset with only the compdat export data (connection status extracted from parsing the schedule file)""" - eclfiles = EclFiles(EIGHTCELLS) + resfiles = ResFiles(EIGHTCELLS) expected_dframe = pd.DataFrame( [ { @@ -72,7 +72,7 @@ def test_eightcells_without_wellconnstatus(): ) pd.testing.assert_frame_equal( wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False + resfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False ), expected_dframe, check_dtype=False, @@ -81,9 +81,9 @@ def test_eightcells_without_wellconnstatus(): def test_df2pyarrow(): """Test that dataframe is conserved using _df2pyarrow""" - eclfiles = EclFiles(EIGHTCELLS) + resfiles = ResFiles(EIGHTCELLS) df = wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False + resfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False ) df["KH"] = df["KH"].astype(np.int32) pd.testing.assert_frame_equal(df, _df2pyarrow(df).to_pandas(), 
check_like=True) @@ -91,9 +91,9 @@ def test_df2pyarrow(): def test_metadata(): """Test that the KH column has metadata and that unit is mDm""" - eclfiles = EclFiles(EIGHTCELLS) + resfiles = ResFiles(EIGHTCELLS) df = wellcompletiondata.df( - eclfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False + resfiles, zonemap=EIGHTCELLS_ZONEMAP, use_wellconnstatus=False ) assert df.attrs["meta"] == {"KH": {"unit": "mDm"}} @@ -107,21 +107,21 @@ def test_empty_zonemap(): """Test empty zonemap and zonemap with layers that doesn't exist in the compdat table. Both returns an empty dataframe """ - eclfiles = EclFiles(EIGHTCELLS) - df = wellcompletiondata.df(eclfiles, zonemap={}, use_wellconnstatus=False) + resfiles = ResFiles(EIGHTCELLS) + df = wellcompletiondata.df(resfiles, zonemap={}, use_wellconnstatus=False) assert df.empty zonemap = {1000: "ZONE1", -1: "ZONE1"} - df = wellcompletiondata.df(eclfiles, zonemap=zonemap, use_wellconnstatus=False) + df = wellcompletiondata.df(resfiles, zonemap=zonemap, use_wellconnstatus=False) assert df.empty def test_zonemap_with_some_undefined_layers(): """Layers in the zonemap that don't exist in the compdat output will be ignored.""" - eclfiles = EclFiles(REEK) + resfiles = ResFiles(REEK) zonemap = {1: "ZONE1", 2: "ZONE1"} - df = wellcompletiondata.df(eclfiles, zonemap=zonemap, use_wellconnstatus=False) - compdat_df = compdat.df(eclfiles) + df = wellcompletiondata.df(resfiles, zonemap=zonemap, use_wellconnstatus=False) + compdat_df = compdat.df(resfiles) # Filter compdat on layer 1 and 2 compdat_df = compdat_df[compdat_df["K1"] <= 2] diff --git a/tests/test_wellconnstatus.py b/tests/test_wellconnstatus.py index 037d76640..323ce2e2a 100644 --- a/tests/test_wellconnstatus.py +++ b/tests/test_wellconnstatus.py @@ -4,7 +4,7 @@ import pytest from res2df import wellconnstatus -from res2df.eclfiles import EclFiles +from res2df.resfiles import ResFiles try: # pylint: disable=unused-import @@ -23,15 +23,15 @@ def test_reek_dataset(): """Test 
Reek dataset. It contains no CPI data and should return an empty dataframe. """ - eclfiles = EclFiles(REEK) - wellconnstatus_df = wellconnstatus.df(eclfiles) + resfiles = ResFiles(REEK) + wellconnstatus_df = wellconnstatus.df(resfiles) assert wellconnstatus_df.empty def test_eightcells_dataset(): """Test the Eightcells dataset which has CPI data""" - eclfiles = EclFiles(EIGHTCELLS) - wellconnstatus_df = wellconnstatus.df(eclfiles) + resfiles = ResFiles(EIGHTCELLS) + wellconnstatus_df = wellconnstatus.df(resfiles) expected_dframe = pd.DataFrame( [ { diff --git a/tests/test_welopen.py b/tests/test_welopen.py index f7a7d0540..48a2672f2 100644 --- a/tests/test_welopen.py +++ b/tests/test_welopen.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from res2df import EclFiles, compdat +from res2df import ResFiles, compdat try: # pylint: disable=unused-import @@ -930,7 +930,7 @@ @pytest.mark.parametrize("test_input, expected", WELOPEN_CASES) def test_welopen(test_input, expected): """Test with WELOPEN present""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) compdf = compdat.deck2dfs(deck)["COMPDAT"] columns_to_check = ["WELL", "I", "J", "K1", "K2", "OP/SH", "DATE"] @@ -1131,7 +1131,7 @@ def test_welopen(test_input, expected): ) def test_welopen_wlist(test_input, expected): """Test that WELOPEN can be used on well lists determined by WLIST""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) dfs = compdat.deck2dfs(deck) pd.testing.assert_frame_equal(dfs["COMPDAT"][expected.columns], expected) @@ -1139,7 +1139,7 @@ def test_welopen_wlist(test_input, expected): def test_welopen_df(): """Test that we can obtain WELOPEN information when it applies on well state, not on connections.""" - deck = EclFiles.str2deck( + deck = ResFiles.str2deck( """ DATES 1 JAN 2000 / @@ -1482,7 +1482,7 @@ def test_welopen_df(): ) def test_welopen_complump(test_input, expected): """Test the welopen_complump functionality through Eclipse 
decks""" - deck = EclFiles.str2deck(test_input) + deck = ResFiles.str2deck(test_input) dfs = compdat.deck2dfs(deck) pd.testing.assert_frame_equal(dfs["COMPDAT"][expected.columns], expected) diff --git a/tests/test_wlist.py b/tests/test_wlist.py index 87c4e02c3..466d05cd5 100644 --- a/tests/test_wlist.py +++ b/tests/test_wlist.py @@ -3,7 +3,7 @@ import pandas as pd import pytest -from res2df import EclFiles, compdat +from res2df import ResFiles, compdat try: # pylint: disable=unused-import @@ -145,7 +145,7 @@ ) def test_parse_wlist(deckstr, expected_df): """Test basic parsing of WLIST keywords into a dataframe representation""" - deck = EclFiles.str2deck(deckstr) + deck = ResFiles.str2deck(deckstr) wlistdf = compdat.deck2dfs(deck)["WLIST"] pd.testing.assert_frame_equal(wlistdf, expected_df, check_like=True) diff --git a/tests/test_zonemap.py b/tests/test_zonemap.py index 650fa28cb..d93fc5500 100644 --- a/tests/test_zonemap.py +++ b/tests/test_zonemap.py @@ -14,13 +14,13 @@ def test_stdzoneslyr(): """Test that we can read zones if the zonemap is in a standard location. - The eclfiles object defines what is the standard location for the file, while + The resfiles object defines what is the standard location for the file, while the actual parsing is done in res2df.common.parse_lyrfile() and converted to zonemap in common.convert_lyrlist_to_zonemap() """ - eclfiles = res2df.EclFiles(REEK) + resfiles = res2df.ResFiles(REEK) - zonemap = eclfiles.get_zonemap() + zonemap = resfiles.get_zonemap() assert isinstance(zonemap, dict) assert zonemap[3] == "UpperReek" assert zonemap[10] == "MidReek" @@ -37,8 +37,8 @@ def test_stdzoneslyr(): def test_nonexistingzones(): """Test an Eclipse case with non-existing zonemap (i.e. 
no zonemap file in the standard location)""" - eclfiles = res2df.EclFiles(REEK) - zonemap = eclfiles.get_zonemap("foobar") + resfiles = res2df.ResFiles(REEK) + zonemap = resfiles.get_zonemap("foobar") # (we got a warning and an empty dict) assert not zonemap @@ -74,7 +74,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResFiles(REEK).get_zonemap(str(lyrfile)) is None assert "From_layer higher than to_layer" in caplog.text lyrfile = tmp_path / "formations.lyr" @@ -85,7 +85,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResFiles(REEK).get_zonemap(str(lyrfile)) is None assert "Failed on content: foo 3- 4 #FFGGHH" in caplog.text lyrfile = tmp_path / "formations.lyr" @@ -96,7 +96,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResFiles(REEK).get_zonemap(str(lyrfile)) is None assert "Failed on content: foo 3- 4 bluez" in caplog.text lyrfile.write_text( @@ -105,7 +105,7 @@ def test_errors(tmp_path, caplog): """, encoding="utf-8", ) - assert res2df.EclFiles(REEK).get_zonemap(str(lyrfile)) is None + assert res2df.ResFiles(REEK).get_zonemap(str(lyrfile)) is None def test_lyrlist_format(tmp_path):