diff --git a/docs/usage/nnc.rst b/docs/usage/nnc.rst index 21ed2aa60..71b49f6f5 100644 --- a/docs/usage/nnc.rst +++ b/docs/usage/nnc.rst @@ -55,12 +55,12 @@ to an Eclipse include file: nnc_df = nnc.df(resdatafiles) nnc_df["TRANM"] = 0.1 # Reduce all NNC transmissibilities - nnc.df2ecl_editnnc(nnc_df, filename="editnnc.inc") + nnc.df2res_editnnc(nnc_df, filename="editnnc.inc") and the contents of the exported file can be: .. - print(nnc.df2ecl_editnnc(nnc.df(resdatafiles).head(4).assign(TRANM=0.1))) + print(nnc.df2res_editnnc(nnc.df(resdatafiles).head(4).assign(TRANM=0.1))) .. code-block:: console diff --git a/res2df/common.py b/res2df/common.py index 122ddd273..5fa3b8818 100644 --- a/res2df/common.py +++ b/res2df/common.py @@ -187,7 +187,7 @@ def datetime_to_eclipsedate( return string.replace("00:00:00", "").strip() -def ecl_keyworddata_to_df( +def res_keyworddata_to_df( deck, keyword: str, renamer: Optional[Dict[str, Union[str, List[str]]]] = None, @@ -353,7 +353,7 @@ def parse_opmio_date_rec(record: "opm.io.DeckRecord") -> datetime.date: day = record[0].get_int(0) month = record[1].get_str(0) year = record[2].get_int(0) - return datetime.date(year=year, month=parse_ecl_month(month), day=day) + return datetime.date(year=year, month=parse_res_month(month), day=day) def parse_opmio_tstep_rec(record: "opm.io.DeckRecord") -> List[Union[float, int]]: @@ -519,7 +519,7 @@ def df2ecl( """Generate resdata include strings from dataframes in res2df format. This function hands over the actual text generation pr. keyword - to functions named df2ecl_ in the calling module. + to functions named df2res_ in the calling module. These functions may again use generic_ecltable() from this module for the actual string construction. @@ -611,7 +611,7 @@ def df2ecl( string += comment_formatter(comments["master"]) for keyword in keywords: # Construct the associated function names - function_name = "df2ecl_" + keyword.lower() + function_name = "df2res_" + keyword.lower() function = getattr(calling_module, function_name) if keyword in comments: string += function(dataframe, comments[keyword]) diff --git a/res2df/equil.py b/res2df/equil.py index fc3df876e..9928630af 100644 --- a/res2df/equil.py +++ b/res2df/equil.py @@ -141,7 +141,7 @@ def rsvd_fromdeck( """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.res_keyworddata_to_df( deck, "RSVD", renamer=RENAMERS["RSVD"], recordcountername="EQLNUM" ) @@ -158,7 +158,7 @@ def rvvd_fromdeck( """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.res_keyworddata_to_df( deck, "RVVD", renamer=RENAMERS["RVVD"], recordcountername="EQLNUM" ) @@ -175,7 +175,7 @@ def pbvd_fromdeck( """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.res_keyworddata_to_df( deck, "PBVD", renamer=RENAMERS["PBVD"], recordcountername="EQLNUM" ) @@ -192,7 +192,7 @@ def pdvd_fromdeck( """ if "EQLDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("EQLDIMS", "NTEQUL", deck, ntequl) - return common.ecl_keyworddata_to_df( + return common.res_keyworddata_to_df( deck, "PDVD", renamer=RENAMERS["PDVD"], recordcountername="EQLNUM" ) @@ -264,7 +264,7 @@ def equil_fromdeck( raise ValueError(f"Could not determine phase configuration, got '{phases}'") columnrenamer = RENAMERS[phases_from_deck(deck)] - 
dataframe = common.ecl_keyworddata_to_df( + dataframe = common.res_keyworddata_to_df( deck, "EQUIL", renamer=columnrenamer, recordcountername="EQLNUM" ) @@ -392,7 +392,7 @@ def df2ecl( return string -def df2ecl_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print EQUIL keyword with data Args: @@ -427,7 +427,7 @@ def df2ecl_equil(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: ) -def df2ecl_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print RSVD keyword with data This data consists of one table (rs as a function @@ -437,10 +437,10 @@ def df2ecl_rsvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: dframe: Containing RSVD data comment Text that will be included as a comment """ - return _df2ecl_equilfuncs("RSVD", dframe, comment) + return _df2res_equilfuncs("RSVD", dframe, comment) -def df2ecl_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print RVVD keyword with data This data consists of one table (rv as a function @@ -450,10 +450,10 @@ def df2ecl_rvvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: dframe: Containing RVVD data comment: Text that will be included as a comment """ - return _df2ecl_equilfuncs("RVVD", dframe, comment) + return _df2res_equilfuncs("RVVD", dframe, comment) -def df2ecl_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PBVD keyword with data Bubble-point versus depth @@ -465,10 +465,10 @@ def df2ecl_pbvd(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: dframe: Containing PBVD data comment: Text that will be included as a comment """ - return _df2ecl_equilfuncs("PBVD", dframe, comment) + return _df2res_equilfuncs("PBVD", dframe, comment) -def df2ecl_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None): +def df2res_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None): """Print PDVD keyword with data. Dew-point versus depth. @@ -480,13 +480,13 @@ def df2ecl_pdvd(dframe: pd.DataFrame, comment: Optional[str] = None): dframe: Containing PDVD data comment: Text that will be included as a comment """ - return _df2ecl_equilfuncs("PDVD", dframe, comment) + return _df2res_equilfuncs("PDVD", dframe, comment) -def _df2ecl_equilfuncs( +def _df2res_equilfuncs( keyword: str, dframe: pd.DataFrame, comment: Optional[str] = None ) -> str: - """Internal function to be used by df2ecl_() functions""" + """Internal function to be used by df2res_() functions""" if dframe.empty: return "-- No data!" 
string = f"{keyword}\n" @@ -500,7 +500,7 @@ def _df2ecl_equilfuncs( else: subset = dframe[dframe["KEYWORD"] == keyword] - def _df2ecl_equilfuncs_eqlnum(dframe: pd.DataFrame) -> str: + def _df2res_equilfuncs_eqlnum(dframe: pd.DataFrame) -> str: """Print one equilibriation function table for a specific EQLNUM @@ -519,5 +519,5 @@ def _df2ecl_equilfuncs_eqlnum(dframe: pd.DataFrame) -> str: subset = subset.set_index("EQLNUM").sort_index() for eqlnum in subset.index.unique(): string += f"-- EQLNUM: {eqlnum}\n" - string += _df2ecl_equilfuncs_eqlnum(subset[subset.index == eqlnum]) + string += _df2res_equilfuncs_eqlnum(subset[subset.index == eqlnum]) return string + "\n" diff --git a/res2df/fipreports.py b/res2df/fipreports.py index 493a3073d..8a4df5787 100644 --- a/res2df/fipreports.py +++ b/res2df/fipreports.py @@ -11,7 +11,7 @@ import pandas as pd from res2df import ResdataFiles, getLogger_res2csv -from res2df.common import parse_ecl_month, write_dframe_stdout_file +from res2df.common import parse_res_month, write_dframe_stdout_file logger = logging.getLogger(__name__) @@ -126,7 +126,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra region_index = None date = None - ecl_datematcher = re.compile(r"\s\sREPORT\s+\d+\s+(\d+)\s+(\w+)\s+(\d+)") + res_datematcher = re.compile(r"\s\sREPORT\s+\d+\s+(\d+)\s+(\w+)\s+(\d+)") opm_datematcher = re.compile(r"Starting time step.*? date = (\d+)-(\w+)-(\d+)\s*") # When case insensitive, this one works with both Eclipse100 and OPM: @@ -144,7 +144,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra fipname, ) for line in prt_fh: - matcheddate = re.match(ecl_datematcher, line) + matcheddate = re.match(res_datematcher, line) if matcheddate is None: matcheddate = re.match(opm_datematcher, line) if matcheddate is not None: @@ -152,7 +152,7 @@ def df(prtfile: Union[str, ResdataFiles], fipname: str = "FIPNUM") -> pd.DataFra if matcheddate is not None: newdate = datetime.date( year=int(matcheddate.group(3)), - month=parse_ecl_month(matcheddate.group(2).upper()), + month=parse_res_month(matcheddate.group(2).upper()), day=int(matcheddate.group(1)), ) if newdate != date: diff --git a/res2df/nnc.py b/res2df/nnc.py index a798bc78e..015c28cf2 100644 --- a/res2df/nnc.py +++ b/res2df/nnc.py @@ -200,7 +200,7 @@ def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: return parser -def df2ecl_editnnc( +def df2res_editnnc( nnc_df: pd.DataFrame, filename: Optional[str] = None, nocomments: bool = False ) -> str: """Write an EDITNNC keyword diff --git a/res2df/pvt.py b/res2df/pvt.py index 864c1c536..60bf10f43 100644 --- a/res2df/pvt.py +++ b/res2df/pvt.py @@ -81,7 +81,7 @@ def pvtw_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - return common.ecl_keyworddata_to_df( + return common.res_keyworddata_to_df( deck, "PVTW", renamer=RENAMERS["PVTW"], recordcountername="PVTNUM" ) @@ -98,7 +98,7 @@ def density_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - return common.ecl_keyworddata_to_df( + return common.res_keyworddata_to_df( deck, "DENSITY", renamer=RENAMERS["DENSITY"], recordcountername="PVTNUM" ) @@ -115,7 +115,7 @@ def rock_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - return common.ecl_keyworddata_to_df( + return common.res_keyworddata_to_df( deck, "ROCK", renamer=RENAMERS["ROCK"], 
recordcountername="PVTNUM" ) @@ -132,7 +132,7 @@ def pvto_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvto_df = common.ecl_keyworddata_to_df( + pvto_df = common.res_keyworddata_to_df( deck, "PVTO", renamer=RENAMERS["PVTO"], emptyrecordcountername="PVTNUM" ) return pvto_df @@ -150,7 +150,7 @@ def pvdo_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvdg_df = common.ecl_keyworddata_to_df( + pvdg_df = common.res_keyworddata_to_df( deck, "PVDO", renamer=RENAMERS["PVDO"], recordcountername="PVTNUM" ) return pvdg_df @@ -168,7 +168,7 @@ def pvdg_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvdg_df = common.ecl_keyworddata_to_df( + pvdg_df = common.res_keyworddata_to_df( deck, "PVDG", renamer=RENAMERS["PVDG"], recordcountername="PVTNUM" ) return pvdg_df @@ -186,7 +186,7 @@ def pvtg_fromdeck( """ if "TABDIMS" not in deck: deck = inferdims.inject_xxxdims_ntxxx("TABDIMS", "NTPVT", deck, ntpvt) - pvtg_df = common.ecl_keyworddata_to_df( + pvtg_df = common.res_keyworddata_to_df( deck, "PVTG", renamer=RENAMERS["PVTG"], emptyrecordcountername="PVTNUM" ) return pvtg_df @@ -351,7 +351,7 @@ def df2ecl( ) -def df2ecl_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print ROCK keyword with data Args: @@ -379,7 +379,7 @@ def df2ecl_rock(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: return string + "\n" -def df2ecl_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print DENSITY keyword with data Args: @@ -408,7 +408,7 @@ def df2ecl_density(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: return string + "\n" -def df2ecl_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PVTW keyword with data PVTW is one line/record with data for a reference pressure @@ -444,7 +444,7 @@ def df2ecl_pvtw(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: return string + "\n" -def df2ecl_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pvtg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PVTG keyword with data Args: @@ -503,7 +503,7 @@ def _pvtg_pvtnum_pg(dframe): return string + "\n" -def df2ecl_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pvdg(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PVDG keyword with data This data consists of one table (volumefactor and visosity @@ -553,7 +553,7 @@ def _pvdg_pvtnum(dframe): return string + "\n" -def df2ecl_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pvdo(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PVDO keyword with data Args: @@ -600,7 +600,7 @@ def _pvdo_pvtnum(dframe: pd.DataFrame) -> str: return string + "\n" -def df2ecl_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_pvto(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print PVTO-data from a dataframe Args: diff --git a/res2df/satfunc.py b/res2df/satfunc.py index a7ba81b56..d3f9b517e 100644 --- a/res2df/satfunc.py +++ b/res2df/satfunc.py @@ -104,7 +104,7 @@ def df( for keyword in 
wanted_keywords: frames.append( interpolate_defaults( - common.ecl_keyworddata_to_df( + common.res_keyworddata_to_df( deck, keyword, renamer=RENAMERS[keyword], recordcountername="SATNUM" ).assign(KEYWORD=keyword) ) @@ -268,87 +268,87 @@ def df2ecl( return string -def df2ecl_swof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_swof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print SWOF data. Used by df2ecl(). Args: dframe: Containing SWOF data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SWOF", dframe, comment) + return _df2res_satfuncs("SWOF", dframe, comment) -def df2ecl_sgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_sgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print SGOF data. Used by df2ecl(). Args: dframe: Containing SGOF data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SGOF", dframe, comment) + return _df2res_satfuncs("SGOF", dframe, comment) -def df2ecl_sgfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_sgfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print SGFN data. Used by df2ecl(). Args: dframe: Containing SGFN data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SGFN", dframe, comment) + return _df2res_satfuncs("SGFN", dframe, comment) -def df2ecl_sgwfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_sgwfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print SGWFN data. Used by df2ecl(). Args: dframe: Containing SGWFN data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SGWFN", dframe, comment) + return _df2res_satfuncs("SGWFN", dframe, comment) -def df2ecl_swfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_swfn(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print SWFN data. Used by df2ecl(). Args: dframe: Containing SWFN data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SWFN", dframe, comment) + return _df2res_satfuncs("SWFN", dframe, comment) -def df2ecl_slgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_slgof(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print SLGOF data. Used by df2ecl(). Args: dframe: Containing SLGOF data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SLGOF", dframe, comment) + return _df2res_satfuncs("SLGOF", dframe, comment) -def df2ecl_sof2(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_sof2(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print SOF2 data. Used by df2ecl(). Args: dframe: Containing SOF2 data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SOF2", dframe, comment) + return _df2res_satfuncs("SOF2", dframe, comment) -def df2ecl_sof3(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: +def df2res_sof3(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: """Print SOF3 data. Used by df2ecl(). 
Args: dframe: Containing SOF3 data comment: Text that will be included as a comment """ - return _df2ecl_satfuncs("SOF3", dframe, comment) + return _df2res_satfuncs("SOF3", dframe, comment) -def _df2ecl_satfuncs( +def _df2res_satfuncs( keyword: str, dframe: pd.DataFrame, comment: Optional[str] = None ) -> str: if dframe.empty: @@ -366,7 +366,7 @@ def _df2ecl_satfuncs( subset = subset.set_index("SATNUM").sort_index() # Make a function that is to be called for each SATNUM - def _df2ecl_satfuncs_satnum(keyword, dframe): + def _df2res_satfuncs_satnum(keyword, dframe): """Print one saturation function for one specific SATNUM""" col_headers = RENAMERS[keyword]["DATA"] string = ( @@ -380,5 +380,5 @@ def _df2ecl_satfuncs_satnum(keyword, dframe): # Loop over every SATNUM for satnum in subset.index.unique(): string += f"-- SATNUM: {satnum}\n" - string += _df2ecl_satfuncs_satnum(keyword, subset[subset.index == satnum]) + string += _df2res_satfuncs_satnum(keyword, subset[subset.index == satnum]) return string + "\n" diff --git a/res2df/summary.py b/res2df/summary.py index 008a09808..690e768ae 100644 --- a/res2df/summary.py +++ b/res2df/summary.py @@ -403,7 +403,7 @@ def df( ) # dframe = eclsum.pandas_frame(time_index_arg, column_keys) - dframe = _libecl_eclsum_pandas_frame(eclsum, time_index_arg, column_keys) + dframe = _libres_eclsum_pandas_frame(eclsum, time_index_arg, column_keys) logger.info( "Dataframe with smry data ready, %d columns and %d rows", @@ -701,11 +701,11 @@ def df2eclsum( raise ValueError(f"Do not use dots in casename {casename}") dframe = _fix_dframe_for_libecl(dframe) - return _libecl_eclsum_from_pandas(casename, dframe) + return _libres_eclsum_from_pandas(casename, dframe) # return Summary.from_pandas(casename, dframe) -def _libecl_eclsum_pandas_frame( +def _libres_eclsum_pandas_frame( eclsum: Summary, time_index: Optional[Union[List[dt.date], List[dt.datetime]]] = None, column_keys: Optional[List[str]] = None, @@ -754,7 +754,7 @@ def _libecl_eclsum_pandas_frame( return frame -def _libecl_eclsum_from_pandas( +def _libres_eclsum_from_pandas( case: str, frame: pd.DataFrame, dims: Optional[List[int]] = None, @@ -781,18 +781,18 @@ def _libecl_eclsum_from_pandas( header_list = Summary._compile_headers_list(headers, dims) if dims is None: dims = [1, 1, 1] - ecl_sum = Summary.writer(case, start_time, dims[0], dims[1], dims[2]) + res_sum = Summary.writer(case, start_time, dims[0], dims[1], dims[2]) for keyword, wgname, num, unit in header_list: var_list.append( - ecl_sum.add_variable(keyword, wgname=wgname, num=num, unit=unit).getKey1() + res_sum.add_variable(keyword, wgname=wgname, num=num, unit=unit).getKey1() ) for idx, time in enumerate(frame.index): days = (time - start_time).days - t_step = ecl_sum.add_t_step(idx + 1, days) + t_step = res_sum.add_t_step(idx + 1, days) for var in var_list: t_step[var] = frame.iloc[idx][var] - return ecl_sum + return res_sum def fill_parser(parser: argparse.ArgumentParser) -> argparse.ArgumentParser: diff --git a/res2df/vfp/_vfpcommon.py b/res2df/vfp/_vfpcommon.py index f63335ba5..062a2f068 100755 --- a/res2df/vfp/_vfpcommon.py +++ b/res2df/vfp/_vfpcommon.py @@ -195,12 +195,12 @@ def _write_vfp_range( if var_type != "UNDEFINED": var_type_str = var_type - ecl_str = f"-- {var_type_str} units - {unit_type} ( {len(values)} values )\n" + res_str = f"-- {var_type_str} units - {unit_type} ( {len(values)} values )\n" for i, value in enumerate(values): - ecl_str += format % value + res_str += format % value if (i + 1) % values_per_line == 0 and i < len(values) 
- 1: - ecl_str += "\n" - ecl_str += " /\n" - ecl_str += "\n" + res_str += "\n" + res_str += " /\n" + res_str += "\n" - return ecl_str + return res_str diff --git a/res2df/vfp/_vfpinj.py b/res2df/vfp/_vfpinj.py index 76e75af88..83add8613 100755 --- a/res2df/vfp/_vfpinj.py +++ b/res2df/vfp/_vfpinj.py @@ -564,15 +564,15 @@ def _write_basic_record( if unit_type != "DEFAULT": unit_type_str = unit_type - ecl_str = "-- Table Datum Depth Rate Type THP Type UNITS TAB Type\n" - ecl_str += "-- ----- ----------- --------- -------- -------- --------\n" - ecl_str += f" {tableno:5d}" - ecl_str += f" {datum:11.1f}" - ecl_str += f" {flo_type:>9s}" - ecl_str += f" {pressure_type:>8s}" - ecl_str += f" {unit_type_str:>8s}" - ecl_str += f" {tab_type:>8s} /\n\n" - return ecl_str + res_str = "-- Table Datum Depth Rate Type THP Type UNITS TAB Type\n" + res_str += "-- ----- ----------- --------- -------- -------- --------\n" + res_str += f" {tableno:5d}" + res_str += f" {datum:11.1f}" + res_str += f" {flo_type:>9s}" + res_str += f" {pressure_type:>8s}" + res_str += f" {unit_type_str:>8s}" + res_str += f" {tab_type:>8s} /\n\n" + return res_str def _write_table( @@ -589,23 +589,23 @@ def _write_table( values_per_line: Number of values per line in output """ - ecl_str = "" + res_str = "" for idx, row in table.iterrows(): - ecl_str += f"{idx:2d}" + res_str += f"{idx:2d}" no_flo = len(table.loc[idx].to_list()) for n, value in enumerate(table.loc[idx].to_list()): - ecl_str += format % value + res_str += format % value if (n + 1) % values_per_line == 0: if n < no_flo - 1: - ecl_str += "\n" - ecl_str += " " * 2 + res_str += "\n" + res_str += " " * 2 else: - ecl_str += "\n" + res_str += "\n" elif n == no_flo - 1: - ecl_str += "\n" - ecl_str += "/\n" + res_str += "\n" + res_str += "/\n" - return ecl_str + return res_str def _write_table_records( @@ -624,7 +624,7 @@ def _write_table_records( values_per_line: Number of values per line in output """ - ecl_str = "" + res_str = "" no_records = len(thp_indices) no_flow_values = table.size // no_records if table.size % no_records > 0: @@ -634,21 +634,21 @@ def _write_table_records( for row in range(0, no_records): thp = thp_indices[row] - ecl_str += f"{thp:2d}" + res_str += f"{thp:2d}" for n, value in enumerate(table[row, :]): - ecl_str += format % value + res_str += format % value if (n + 1) % values_per_line == 0: if n < no_flow_values - 1: - ecl_str += "\n" - ecl_str += " " * 2 + res_str += "\n" + res_str += " " * 2 else: - ecl_str += "\n" + res_str += "\n" elif n == no_flow_values - 1: - ecl_str += "\n" + res_str += "\n" - ecl_str += "/\n" + res_str += "/\n" - return ecl_str + return res_str def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: @@ -670,16 +670,16 @@ def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: unit_type = vfpinj_data["UNIT_TYPE"] # Write dataframe to string with Eclipse format for VFPINJ - ecl_str = "VFPINJ\n" + res_str = "VFPINJ\n" if comment: - ecl_str += common.comment_formatter(comment) + res_str += common.comment_formatter(comment) else: - ecl_str += "\n" + res_str += "\n" unit_value = vfpinj_data["UNIT_TYPE"].value if vfpinj_data["UNIT_TYPE"] == UNITTYPE.DEFAULT: unit_value = "1*" - ecl_str += _write_basic_record( + res_str += _write_basic_record( vfpinj_data["TABLE_NUMBER"], vfpinj_data["DATUM"], vfpinj_data["RATE_TYPE"].value, @@ -687,22 +687,22 @@ def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: unit_value, vfpinj_data["TAB_TYPE"].value, ) - ecl_str += _write_vfp_range( + res_str += 
_write_vfp_range( vfpinj_data["FLOW_VALUES"], rate_type.value, VFPINJ_UNITS[unit_type.value]["FLO"][rate_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + res_str += _write_vfp_range( vfpinj_data["THP_VALUES"], thp_type.value, VFPINJ_UNITS[unit_type.value]["THP"][thp_type.value], "%10.6g", ) - ecl_str += _write_table_records( + res_str += _write_table_records( vfpinj_data["THP_INDICES"], vfpinj_data["BHP_TABLE"], "%10.6g", ) - return ecl_str + return res_str diff --git a/res2df/vfp/_vfpprod.py b/res2df/vfp/_vfpprod.py index 4e2573127..d3383fcde 100755 --- a/res2df/vfp/_vfpprod.py +++ b/res2df/vfp/_vfpprod.py @@ -834,20 +834,20 @@ def _write_basic_record( if alq_type != "UNDEFINED": alq_type_str = alq_type - ecl_str = "-- Table Datum Depth Rate Type WFR Type " - ecl_str += "GFR Type THP Type ALQ Type UNITS TAB Type\n" - ecl_str += "-- ----- ----------- --------- -------- " - ecl_str += "-------- -------- -------- ------ --------\n" - ecl_str += f" {tableno:5d}" - ecl_str += f" {datum:11.1f}" - ecl_str += f" {flo_type:>8s}" - ecl_str += f" {wfr_type:>8s}" - ecl_str += f" {gfr_type:>8s}" - ecl_str += f" {pressure_type:>8s}" - ecl_str += f" {alq_type_str:>8s}" - ecl_str += f" {unit_type:>6s}" - ecl_str += f" {tab_type:>8s} /\n\n" - return ecl_str + res_str = "-- Table Datum Depth Rate Type WFR Type " + res_str += "GFR Type THP Type ALQ Type UNITS TAB Type\n" + res_str += "-- ----- ----------- --------- -------- " + res_str += "-------- -------- -------- ------ --------\n" + res_str += f" {tableno:5d}" + res_str += f" {datum:11.1f}" + res_str += f" {flo_type:>8s}" + res_str += f" {wfr_type:>8s}" + res_str += f" {gfr_type:>8s}" + res_str += f" {pressure_type:>8s}" + res_str += f" {alq_type_str:>8s}" + res_str += f" {unit_type:>6s}" + res_str += f" {tab_type:>8s} /\n\n" + return res_str def _write_table( @@ -864,23 +864,23 @@ def _write_table( values_per_line: Number of values per line in output """ - ecl_str = "" + res_str = "" for idx, row in table.iterrows(): - ecl_str += f"{idx[0]:2d} {idx[1]:2d} {idx[2]:2d} {idx[3]:2d}" + res_str += f"{idx[0]:2d} {idx[1]:2d} {idx[2]:2d} {idx[3]:2d}" no_flo = len(table.loc[idx].to_list()) for n, value in enumerate(table.loc[idx].to_list()): - ecl_str += format % value + res_str += format % value if (n + 1) % values_per_line == 0: if n < no_flo - 1: - ecl_str += "\n" - ecl_str += " " * 11 + res_str += "\n" + res_str += " " * 11 else: - ecl_str += "\n" + res_str += "\n" elif n == no_flo - 1: - ecl_str += "\n" - ecl_str += "/\n" + res_str += "\n" + res_str += "/\n" - return ecl_str + return res_str def _write_table_records( @@ -905,7 +905,7 @@ def _write_table_records( values_per_line: Number of values per line in output """ - ecl_str = "" + res_str = "" no_records = len(thp_indices) no_flow_values = table.size // no_records if table.size % no_records > 0: @@ -918,21 +918,21 @@ def _write_table_records( wfr = wfr_indices[row] gfr = gfr_indices[row] alq = alq_indices[row] - ecl_str += f"{thp:2d} {wfr:2d} {gfr:2d} {alq:2d}" + res_str += f"{thp:2d} {wfr:2d} {gfr:2d} {alq:2d}" for n, value in enumerate(table[row, :]): - ecl_str += format % value + res_str += format % value if (n + 1) % values_per_line == 0: if n < no_flow_values - 1: - ecl_str += "\n" - ecl_str += " " * 11 + res_str += "\n" + res_str += " " * 11 else: - ecl_str += "\n" + res_str += "\n" elif n == no_flow_values - 1: - ecl_str += "\n" + res_str += "\n" - ecl_str += "/\n" + res_str += "/\n" - return ecl_str + return res_str def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> 
str: @@ -957,16 +957,16 @@ def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: unit_type = vfpprod_data["UNIT_TYPE"] # Write dataframe to string with Eclipse format for VFPPROD - ecl_str = "VFPPROD\n" + res_str = "VFPPROD\n" if comment: - ecl_str += common.comment_formatter(comment) + res_str += common.comment_formatter(comment) else: - ecl_str += "\n" + res_str += "\n" unit_value = vfpprod_data["UNIT_TYPE"].value if vfpprod_data["UNIT_TYPE"] == UNITTYPE.DEFAULT: unit_value = "1*" - ecl_str += _write_basic_record( + res_str += _write_basic_record( vfpprod_data["TABLE_NUMBER"], vfpprod_data["DATUM"], vfpprod_data["RATE_TYPE"].value, @@ -977,37 +977,37 @@ def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: unit_value, vfpprod_data["TAB_TYPE"].value, ) - ecl_str += _write_vfp_range( + res_str += _write_vfp_range( vfpprod_data["FLOW_VALUES"], rate_type.value, VFPPROD_UNITS[unit_type.value]["FLO"][rate_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + res_str += _write_vfp_range( vfpprod_data["THP_VALUES"], thp_type.value, VFPPROD_UNITS[unit_type.value]["THP"][thp_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + res_str += _write_vfp_range( vfpprod_data["WFR_VALUES"], wfr_type.value, VFPPROD_UNITS[unit_type.value]["WFR"][wfr_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + res_str += _write_vfp_range( vfpprod_data["GFR_VALUES"], gfr_type.value, VFPPROD_UNITS[unit_type.value]["GFR"][gfr_type.value], "%10.6g", ) - ecl_str += _write_vfp_range( + res_str += _write_vfp_range( vfpprod_data["ALQ_VALUES"], alq_type.value, VFPPROD_UNITS[unit_type.value]["ALQ"][alq_type.value], "%10.6g", ) - ecl_str += _write_table_records( + res_str += _write_table_records( vfpprod_data["THP_INDICES"], vfpprod_data["WFR_INDICES"], vfpprod_data["GFR_INDICES"], @@ -1016,4 +1016,4 @@ def df2ecl(dframe: pd.DataFrame, comment: Optional[str] = None) -> str: "%10.6g", ) - return ecl_str + return res_str diff --git a/res2df/wellcompletiondata.py b/res2df/wellcompletiondata.py index d6678e024..921f0c310 100644 --- a/res2df/wellcompletiondata.py +++ b/res2df/wellcompletiondata.py @@ -90,7 +90,7 @@ def df( return compdat_df -def _get_ecl_unit_system(resdatafiles: ResdataFiles) -> EclipseUnitSystem: +def _get_res_unit_system(resdatafiles: ResdataFiles) -> EclipseUnitSystem: """Returns the unit system of an Eclipse input deck. The options are \ METRIC, FIELD, LAB and PVT-M. @@ -107,7 +107,7 @@ def _get_ecl_unit_system(resdatafiles: ResdataFiles) -> EclipseUnitSystem: def _get_metadata(resdatafiles: ResdataFiles) -> Dict[str, Dict[str, Any]]: """Provide metadata for the well completion data export""" meta: Dict[str, Dict[str, str]] = {} - unitsystem = _get_ecl_unit_system(resdatafiles) + unitsystem = _get_res_unit_system(resdatafiles) kh_units = { EclipseUnitSystem.METRIC: KHUnit.METRIC, EclipseUnitSystem.FIELD: KHUnit.FIELD,
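For downstream scripts, the user-visible effect of this patch is that every per-keyword exporter previously named ``df2ecl_<keyword>`` is now named ``df2res_<keyword>``, while the module-level ``df()`` and ``df2ecl()`` entry points keep their names. The snippet below is a minimal usage sketch of the renamed calls, assuming a standard res2df installation; the case path ``MYCASE.DATA``, the ``0.1`` multiplier and the comment text are examples only and are not taken from the patch.

.. code-block:: python

    from res2df import ResdataFiles, equil, nnc

    # Example case path; replace with a real Eclipse DATA file
    resdatafiles = ResdataFiles("MYCASE.DATA")

    # Reduce all NNC transmissibilities and export an EDITNNC include file,
    # now via the renamed df2res_editnnc (previously df2ecl_editnnc)
    nnc_df = nnc.df(resdatafiles)
    nnc_df["TRANM"] = 0.1
    nnc.df2res_editnnc(nnc_df, filename="editnnc.inc")

    # Per-keyword exporters follow the same df2res_<keyword> pattern,
    # e.g. EQUIL data exported through equil.df2res_equil
    equil_df = equil.df(resdatafiles)
    print(equil.df2res_equil(equil_df, comment="Exported by res2df"))

Note that ``common.df2ecl()`` resolves these exporters by name, building ``"df2res_" + keyword.lower()`` and looking the function up in the calling module, so any per-keyword exporter added by a module must follow the new ``df2res_`` prefix for the dispatch to find it.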