From acb8a5817aa00bcfb1c8ea3d4f78b4a1498d971f Mon Sep 17 00:00:00 2001 From: Brian Cherinka Date: Wed, 29 Nov 2023 15:03:50 -0500 Subject: [PATCH 01/18] initial ruff linting --- docs/nb/astro.py | 2 - python/lvmdrp/core/fluxcal.py | 1 - python/lvmdrp/core/rss.py | 54 ++++----- python/lvmdrp/core/sky.py | 12 +- python/lvmdrp/core/spectrum1d.py | 139 +++++++++++------------ python/lvmdrp/functions/cubeMethod.py | 4 +- python/lvmdrp/functions/fluxCalMethod.py | 1 - python/lvmdrp/functions/gmosMethod.py | 1 - python/lvmdrp/functions/headerMethod.py | 3 +- python/lvmdrp/functions/imageMethod.py | 2 +- python/lvmdrp/functions/plotMethod.py | 3 +- python/lvmdrp/functions/rssMethod.py | 35 +++--- python/lvmdrp/functions/run_drp.py | 2 +- python/lvmdrp/functions/run_quickdrp.py | 1 - python/lvmdrp/functions/skyMethod.py | 4 +- python/lvmdrp/functions/specialMethod.py | 2 +- python/lvmdrp/main.py | 1 - 17 files changed, 128 insertions(+), 139 deletions(-) diff --git a/docs/nb/astro.py b/docs/nb/astro.py index aa7be605..3d748f18 100644 --- a/docs/nb/astro.py +++ b/docs/nb/astro.py @@ -24,8 +24,6 @@ import math -import os -import sys # from pyraf import iraf diff --git a/python/lvmdrp/core/fluxcal.py b/python/lvmdrp/core/fluxcal.py index c973afa6..fe72bbdd 100644 --- a/python/lvmdrp/core/fluxcal.py +++ b/python/lvmdrp/core/fluxcal.py @@ -14,7 +14,6 @@ import numpy as np from dust_extinction.parameter_averages import F99 from pydl.pydlspec2d.spec2d import filter_thru -from pydl.pydlutils.bspline import bspline from pydl.pydlutils.sdss import sdss_flagval from scipy.interpolate import BSpline, interp1d, splrep from scipy.ndimage import median_filter diff --git a/python/lvmdrp/core/rss.py b/python/lvmdrp/core/rss.py index e7b471b9..cb608bb0 100644 --- a/python/lvmdrp/core/rss.py +++ b/python/lvmdrp/core/rss.py @@ -204,7 +204,7 @@ def __init__( self._sky = sky if sky_error is not None: self._sky_error = sky_error - + self.setSlitmap(slitmap) self.set_fluxcal(fluxcal) @@ -562,7 +562,7 @@ def loadFitsData( self._sky_error = hdu[extension_skyerror].data.astype("float32") if extension_fluxcal is not None: self.set_fluxcal(Table(hdu[extension_fluxcal].data)) - + self._fibers = self._data.shape[0] self._pixels = numpy.arange(self._data.shape[1]) @@ -687,13 +687,13 @@ def writeFitsData( hdu = pyfits.PrimaryHDU(self._sky) elif extension_sky > 0 and extension_sky is not None: hdus[extension_sky] = pyfits.ImageHDU(self._sky, name="SKY") - + # sky error hdu if extension_skyerror == 0: hdu = pyfits.PrimaryHDU(self._sky_error) elif extension_skyerror > 0 and extension_skyerror is not None: hdus[extension_skyerror] = pyfits.ImageHDU(self._sky_error, name="SKY_ERROR") - + # fluxcal hdu if extension_fluxcal == 0: hdu = pyfits.PrimaryHDU(self._fluxcal) @@ -750,19 +750,19 @@ def getSpec(self, fiber): mask = self._mask[fiber, :] else: mask = None - + if self._sky is not None: sky = self._sky[fiber, :] else: sky = None - + if self._sky_error is not None: sky_error = self._sky_error[fiber, :] else: sky_error = None spec = Spectrum1D(wave, data, error=error, mask=mask, inst_fwhm=inst_fwhm, sky=sky, sky_error=sky_error) - + return spec def combineRSS(self, rss_in, method="mean", replace_error=1e10): @@ -772,17 +772,17 @@ def combineRSS(self, rss_in, method="mean", replace_error=1e10): mask = numpy.zeros((len(rss_in), dim[0], dim[1]), dtype="bool") else: mask = None - + if rss_in[0]._error is not None: error = numpy.zeros((len(rss_in), dim[0], dim[1]), dtype=numpy.float32) else: error = None - + if rss_in[0]._sky is not None: 
sky = numpy.zeros((len(rss_in), dim[0], dim[1]), dtype=numpy.float32) else: sky = None - + for i in range(len(rss_in)): data[i, :, :] = rss_in[i]._data if mask is not None: @@ -795,7 +795,7 @@ def combineRSS(self, rss_in, method="mean", replace_error=1e10): combined_data = numpy.zeros(dim, dtype=numpy.float32) combined_error = numpy.zeros(dim, dtype=numpy.float32) combined_sky = numpy.zeros(dim, dtype=numpy.float32) - + if method == "sum": if mask is not None: data[mask] = 0 @@ -869,7 +869,7 @@ def combineRSS(self, rss_in, method="mean", replace_error=1e10): if mask is not None: good_pix = bn.nansum(numpy.logical_not(mask), 0) select_mean = good_pix > 0 - + var = error**2 weights = numpy.divide(1, var, out=numpy.zeros_like(var), where=var != 0) weights /= bn.nansum(weights, 0) @@ -918,7 +918,7 @@ def combineRSS(self, rss_in, method="mean", replace_error=1e10): combined_sky = bn.nanmedian(sky, 0) else: combined_sky = None - + else: if method == "weighted_mean": raise ValueError(f"Method {method} is not supported when error is None") @@ -979,9 +979,9 @@ def create1DSpec(self, method="mean"): # idx = numpy.argsort(wave) _, idx = numpy.unique(wave, return_index=True) wave = wave[idx] - + data = self._data[select].flatten()[idx] - + if self._error is not None: error = self._error[select].flatten()[idx] else: @@ -1001,9 +1001,9 @@ def create1DSpec(self, method="mean"): select = numpy.logical_not(self._mask) else: select = numpy.ones(self._data.shape, dtype="bool") - + data = numpy.zeros(self._data.shape[1], dtype=numpy.float32) - + if self._error is not None: error = numpy.zeros(self._data.shape[1], dtype=numpy.float32) else: @@ -1012,7 +1012,7 @@ def create1DSpec(self, method="mean"): sky = numpy.zeros(self._data.shape[1], dtype=numpy.float32) else: sky = None - + for i in range(self._data.shape[1]): if numpy.sum(select[:, i]) > 0: if method == "mean": @@ -1032,21 +1032,21 @@ def create1DSpec(self, method="mean"): ) if sky is not None: sky[i] = numpy.sum(self._sky[select[:, i], i]) - + if self._mask is not None: bad = numpy.sum(self._mask, 0) mask = bad == self._fibers else: mask = None - + wave = self._wave if self._inst_fwhm is not None and len(self._inst_fwhm.shape) == 2: inst_fwhm = numpy.mean(self._inst_fwhm, 0) else: inst_fwhm = self._inst_fwhm - + header = self._header - + spec = Spectrum1D( wave=wave, data=data, @@ -1062,12 +1062,12 @@ def selectSpec(self, min=0, max=0, method="median"): collapsed = numpy.zeros(self._fibers, dtype=numpy.float32) for i in range(self._fibers): spec = self[i] - + if spec._mask is not None: goodpix = numpy.logical_not(spec._mask) else: goodpix = numpy.ones(spec._data.dim[0], dtype=numpy.float32) - + if numpy.sum(goodpix) > 0: if method == "median": collapsed[i] = numpy.median(spec._data[goodpix]) @@ -2020,7 +2020,7 @@ def subRSS(self, select): inst_fwhm = self._inst_fwhm else: inst_fwhm = None - + if self._sky is not None: sky = self._sky[select, :] else: @@ -2167,7 +2167,7 @@ def getPositionTable(self): def getSlitmap(self): return self._slitmap - + def setSlitmap(self, slitmap): self._slitmap = slitmap @@ -2195,7 +2195,7 @@ def apply_pixelmask(self, mask=None): def set_fluxcal(self, fluxcal): self._fluxcal = fluxcal - + def get_fluxcal(self): return self._fluxcal diff --git a/python/lvmdrp/core/sky.py b/python/lvmdrp/core/sky.py index 3d04b6f6..f99ece8e 100644 --- a/python/lvmdrp/core/sky.py +++ b/python/lvmdrp/core/sky.py @@ -151,20 +151,20 @@ def skymodel_pars_from_header(header): observatory = header["OBSERVAT"] except KeyError: log.warning( - 
f"'OBSERVAT' is not in reference sky header. Assuming OBSERVAT='LCO'" + "'OBSERVAT' is not in reference sky header. Assuming OBSERVAT='LCO'" ) observatory = "LCO" try: obstime = Time(header["OBSTIME"], scale="tai") except KeyError: log.warning( - f"'OBSTIME' is not in reference sky header. Falling back to 'MJD'" + "'OBSTIME' is not in reference sky header. Falling back to 'MJD'" ) try: obstime = Time(header["MJD"], format="mjd") except KeyError: - log.error(f"'MJD' is not in reference sky header.") - raise ValueError(f"no datetime information found for reference sky.") + log.error("'MJD' is not in reference sky header.") + raise ValueError("no datetime information found for reference sky.") ra, dec = header["RA"], header["DEC"] # build quantities from information in sky_head @@ -402,7 +402,7 @@ def run_skymodel(skymodel_path=SKYMODEL_INST_PATH, **kwargs): log.info("calculating effective atmospheric transmission") os.chdir(os.path.join(skymodel_path, "sm-01_mod2")) - out = subprocess.run(f"bin/preplinetrans".split(), capture_output=True) + out = subprocess.run("bin/preplinetrans".split(), capture_output=True) if out.returncode == 0: log.info( "successfully finished effective atmospheric transmission calculations" @@ -413,7 +413,7 @@ def run_skymodel(skymodel_path=SKYMODEL_INST_PATH, **kwargs): ) log.error(out.stderr.decode("utf-8")) - out = subprocess.run(f"bin/calcskymodel".split(), capture_output=True) + out = subprocess.run("bin/calcskymodel".split(), capture_output=True) if out.returncode == 0: log.info("successfully finished 'calcskymodel'") else: diff --git a/python/lvmdrp/core/spectrum1d.py b/python/lvmdrp/core/spectrum1d.py index e14830f6..f5796c2f 100644 --- a/python/lvmdrp/core/spectrum1d.py +++ b/python/lvmdrp/core/spectrum1d.py @@ -1,12 +1,11 @@ from copy import deepcopy -import matplotlib.pyplot as plt import numpy import bottleneck as bn from astropy.io import fits as pyfits from numpy import polynomial from scipy.linalg import norm -from scipy import signal, interpolate, ndimage, sparse, linalg +from scipy import signal, interpolate, ndimage, sparse from scipy.ndimage import zoom from typing import List, Tuple @@ -288,7 +287,7 @@ def __sub__(self, other): data = numpy.zeros_like(self._data) select_zero = self._data == 0 data = self._data - other._data - + if self._mask is not None and other._mask is not None: mask = numpy.logical_or(self._mask, other._mask) select_zero = numpy.logical_and(select_zero, mask) @@ -301,7 +300,7 @@ def __sub__(self, other): data[select_zero] = 0 else: mask = None - + if self._error is not None and other._error is not None: error = numpy.sqrt(self._error**2 + other._error**2) elif self._error is not None: @@ -310,7 +309,7 @@ def __sub__(self, other): error = other._error else: error = None - + if self._sky is not None and other._sky is not None: sky = self._sky - other._sky elif self._sky is not None: @@ -319,7 +318,7 @@ def __sub__(self, other): sky = other._sky else: sky = None - + if self._sky_error is not None and other._sky_error is not None: sky_error = numpy.sqrt(self._sky_error**2 + other._sky_error**2) elif self._sky_error is not None: @@ -342,7 +341,7 @@ def __sub__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec elif isinstance(other, numpy.ndarray): @@ -351,7 +350,7 @@ def __sub__(self, other): mask = self._mask sky = self._sky sky_error = self._sky_error - + if data.dtype == numpy.float64 or data.dtype == 
numpy.dtype(">f8"): data = data.astype(numpy.float32) if error is not None: @@ -363,9 +362,9 @@ def __sub__(self, other): if sky_error is not None: if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): sky_error = sky_error.astype(numpy.float32) - + spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec else: # try to do addtion for other types, e.g. float, int, etc. @@ -389,7 +388,7 @@ def __sub__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec except Exception: # raise exception if the type are not matching in general @@ -404,7 +403,7 @@ def __add__(self, other): data = numpy.zeros_like(self._data) select_zero = self._data == 0 data = self._data + other._data - + if self._mask is not None and other._mask is not None: mask = numpy.logical_or(self._mask, other._mask) select_zero = numpy.logical_and(select_zero, mask) @@ -417,7 +416,7 @@ def __add__(self, other): data[select_zero] = 0 else: mask = None - + if self._error is not None and other._error is not None: error = numpy.sqrt(self._error**2 + other._error**2) elif self._error is not None: @@ -426,7 +425,7 @@ def __add__(self, other): error = other._error else: error = None - + if self._sky is not None and other._sky is not None: sky = self._sky + other._sky elif self._sky is not None: @@ -435,7 +434,7 @@ def __add__(self, other): sky = other._sky else: sky = None - + if self._sky_error is not None and other._sky_error is not None: sky_error = numpy.sqrt(self._sky_error**2 + other._sky_error**2) elif self._sky_error is not None: @@ -444,7 +443,7 @@ def __add__(self, other): sky_error = other._sky_error else: sky_error = None - + if data.dtype == numpy.float64 or data.dtype == numpy.dtype(">f8"): data = data.astype(numpy.float32) if error is not None: @@ -458,7 +457,7 @@ def __add__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec elif isinstance(other, numpy.ndarray): @@ -468,8 +467,8 @@ def __add__(self, other): error = self._error + other else: error = None - - if self._mask is not None: + + if self._mask is not None: mask = self._mask else: mask = None @@ -478,7 +477,7 @@ def __add__(self, other): sky = self._sky + other else: sky = None - + if self._sky_error is not None: sky_error = self._sky_error + other else: @@ -497,7 +496,7 @@ def __add__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec else: # try to do addtion for other types, e.g. float, int, etc. 
@@ -518,7 +517,7 @@ def __add__(self, other): sky = self._sky + other else: sky = None - + if self._sky_error is not None: sky_error = self._sky_error + other else: @@ -537,7 +536,7 @@ def __add__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec except Exception: # raise exception if the type are not matching in general @@ -563,7 +562,7 @@ def __truediv__(self, other): mask[~select] = True else: mask = None - + if self._error is not None and other._error is not None: error = numpy.zeros_like(self._error) error_a = numpy.divide(self._error, other._data, out=error, where=select) ** 2 @@ -584,7 +583,7 @@ def __truediv__(self, other): sky = other._sky else: sky = None - + if self._sky_error is not None and other._sky_error is not None: sky_error = numpy.zeros_like(self._sky_error) sky_error_a = numpy.divide(self._sky_error, other._data, out=sky_error, where=select) ** 2 @@ -596,7 +595,7 @@ def __truediv__(self, other): sky_error = numpy.divide(self._data * other._sky_error, other._data ** 2, out=numpy.zeros_like(self._sky_error), where=select) else: sky_error = None - + if data.dtype == numpy.float64 or data.dtype == numpy.dtype(">f8"): data = data.astype(numpy.float32) if error is not None: @@ -610,18 +609,18 @@ def __truediv__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec elif isinstance(other, numpy.ndarray): select = other != 0.0 data = numpy.divide(self._data, other, out=numpy.zeros_like(self._data), where=select) - + if self._error is not None: error = numpy.divide(self._error, other, out=numpy.zeros_like(self._error), where=select) else: error = None - + if self._mask is not None: mask = self._mask mask[~select] = True @@ -632,12 +631,12 @@ def __truediv__(self, other): sky = numpy.divide(self._sky, other, out=numpy.zeros_like(self._sky), where=select) else: sky = None - + if self._sky_error is not None: sky_error = numpy.divide(self._sky_error, other, out=numpy.zeros_like(self._sky_error), where=select) else: sky_error = None - + if data.dtype == numpy.float64 or data.dtype == numpy.dtype(">f8"): data = data.astype(numpy.float32) if error is not None: @@ -651,7 +650,7 @@ def __truediv__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec else: # try to do addtion for other types, e.g. float, int, etc. 
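[Editor's note] The operator overloads touched in the hunks above propagate uncertainties under the usual independent-Gaussian assumption: addition and subtraction add errors in quadrature, while division combines the relative errors of numerator and denominator and guards against zero denominators. A minimal standalone sketch of the division case on bare numpy arrays (the helper name is illustrative, not part of the patch):

import numpy

def propagate_div_error(a, ea, b, eb):
    # sigma(a/b) = sqrt((ea/b)**2 + (a*eb/b**2)**2), with b == 0 pixels forced to 0
    ok = b != 0.0
    term_a = numpy.divide(ea, b, out=numpy.zeros_like(ea), where=ok) ** 2
    term_b = numpy.divide(a * eb, b ** 2, out=numpy.zeros_like(eb), where=ok) ** 2
    return numpy.sqrt(term_a + term_b)
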
@@ -667,7 +666,7 @@ def __truediv__(self, other): if self._mask is not None: mask = self._mask mask[~select] = True - + if self._sky is not None: sky = numpy.divide(self._sky, other, out=numpy.zeros_like(self._sky), where=select) else: @@ -691,7 +690,7 @@ def __truediv__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec except Exception: # raise exception if the type are not matching in general @@ -729,7 +728,7 @@ def __rtruediv__(self, other): error = numpy.divide(other._data * self._error, self._data ** 2, out=numpy.zeros_like(self._error), where=select) else: error = None - + if other._sky is not None: sky = numpy.divide(other._sky, self._sky, out=numpy.zeros_like(self._sky), where=self._sky != 0.0) elif self._sky is not None: @@ -764,13 +763,13 @@ def __rtruediv__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec elif isinstance(other, numpy.ndarray): select = self._data != 0.0 data = numpy.divide(other, self._data, out=numpy.zeros_like(self._data), where=select) - + if self._error is not None: error = numpy.divide(other * self._error, self._data ** 2, out=numpy.zeros_like(self._error), where=select) else: @@ -781,12 +780,12 @@ def __rtruediv__(self, other): mask[~select] = True else: mask = None - + if self._sky is not None: sky = numpy.divide(other, self._sky, out=numpy.zeros_like(self._sky), where=self._sky != 0.0) else: sky = None - + if self._sky_error is not None: sky_error = numpy.divide(other * self._sky_error, self._data ** 2, out=numpy.zeros_like(self._sky_error), where=select) else: @@ -815,18 +814,18 @@ def __rtruediv__(self, other): error = numpy.divide(other * self._error, self._data ** 2, out=numpy.zeros_like(self._error), where=select) else: error = None - + if self._mask is not None: mask = self._mask mask[~select] = True else: mask = None - + if self._sky is not None: sky = numpy.divide(other, self._sky, out=numpy.zeros_like(self._sky), where=self._sky != 0.0) else: sky = None - + if self._sky_error is not None: sky_error = numpy.divide(other * self._sky_error, self._data ** 2, out=numpy.zeros_like(self._sky_error), where=select) else: @@ -843,9 +842,9 @@ def __rtruediv__(self, other): if sky_error is not None: if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): sky_error = sky_error.astype(numpy.float32) - + spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec def __mul__(self, other): @@ -871,7 +870,7 @@ def __mul__(self, other): error = other._error else: error = None - + if self._sky is not None: sky = self._sky * other._data elif self._sky is not None: @@ -905,7 +904,7 @@ def __mul__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec elif isinstance(other, numpy.ndarray): @@ -915,17 +914,17 @@ def __mul__(self, other): mask = self._mask else: mask = None - + if self._error is not None: error = self._error * other else: error = None - + if self._sky is not None: sky = self._sky * other else: sky = None - + if self._sky_error is not None: sky_error = self._sky_error * other else: @@ -944,7 +943,7 @@ def __mul__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, 
error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec else: # try to do addtion for other types, e.g. float, int, etc. @@ -960,7 +959,7 @@ def __mul__(self, other): error = self._error * other else: error = None - + if self._sky is not None: sky = self._sky * other else: @@ -984,17 +983,17 @@ def __mul__(self, other): sky_error = sky_error.astype(numpy.float32) spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec def __pow__(self, other): data = self._data ** other - + if self._error is not None: error = 1.0 / float(other) * self._data ** (other - 1) * self._error else: error = None - + if self._mask is not None: mask = self._mask else: @@ -1021,9 +1020,9 @@ def __pow__(self, other): if sky_error is not None: if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): sky_error = sky_error.astype(numpy.float32) - + spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) - + return spec def __rpow__(self, other): @@ -1033,7 +1032,7 @@ def __rpow__(self, other): error = numpy.log(other) * data * self._error else: error = None - + if self._mask is not None: mask = self._mask else: @@ -1043,7 +1042,7 @@ def __rpow__(self, other): sky = other ** self._sky else: sky = None - + if self._sky_error is not None: sky_error = numpy.log(other) * data * self._sky_error else: @@ -1060,7 +1059,7 @@ def __rpow__(self, other): if sky_error is not None: if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): sky_error = sky_error.astype(numpy.float32) - + spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) return spec @@ -1634,7 +1633,7 @@ def resampleSpec( # replace error values in masked pixels if new_error is not None: new_error[new_mask] = replace_error - + # interpolate sky --------------------------------------------------------------------------------------------------------------------------------- if self._sky is not None: intp = interpolate.interp1d( @@ -1757,7 +1756,7 @@ def resampleSpec_flux_conserving(self, ref_wave, method="spline", new_spec._data *= old_dlambda / new_dlambda if self._error is not None: new_spec._error *= old_dlambda / new_dlambda - + # print(old_dlambda, new_dlambda, old_dlambda / new_dlambda) # print(new_spec._data) # plt.plot(ref_wave, new_spec._data, lw=1, color="r") @@ -1848,24 +1847,24 @@ def binSpec(self, new_wave): mask_out = numpy.zeros(len(new_wave), dtype="bool") sky_out = numpy.zeros(len(new_wave), dtype=numpy.float32) sky_error_out = numpy.zeros(len(new_wave), dtype=numpy.float32) - + if self._mask is not None: mask_in = numpy.logical_and(self._mask) else: mask_in = numpy.ones(len(self._wave), dtype="bool") # masked_data = self._wave[mask_in] masked_wave = self._wave[mask_in] - + if self._error is not None: error_out = numpy.zeros(len(new_wave), dtype=numpy.float32) masked_error = self._error[mask_in] else: error_out = None - + if self._sky_error is not None: sky_error_out = numpy.zeros(len(new_wave), dtype=numpy.float32) masked_sky_error = self._sky_error[mask_in] - + bound_min = new_wave - new_disp / 2.0 bound_max = new_wave + new_disp / 2.0 @@ -1895,10 +1894,10 @@ def binSpec(self, new_wave): mask_out[i] = True data_out = numpy.interp(new_wave, masked_wave, self._data[mask_in]) if self._sky is not None: - sky_out = numpy.interp(new_wave, masked_wave, self._sky[mask_in]) - + sky_out = numpy.interp(new_wave, masked_wave, self._sky[mask_in]) + spec = 
Spectrum1D(data=data_out, wave=new_wave, error=error_out, mask=mask_out, sky=sky_out, sky_error=sky_error_out) - + return spec def smoothSpec(self, size, method="gauss", mode="nearest"): @@ -1980,7 +1979,7 @@ def smoothGaussVariable(self, diff_fwhm): inst_fwhm = numpy.sqrt(self._inst_fwhm**2 + diff_fwhm**2) else: inst_fwhm = diff_fwhm - + if self._sky is not None: sky = numpy.zeros_like(self._sky) sky[:] = self._sky @@ -2744,7 +2743,7 @@ def collapseSpec(self, method="mean", start=None, end=None, transmission_func=No select = numpy.logical_and(select_start, select_end) if self._mask is not None: select = numpy.logical_and(select, numpy.logical_not(self._mask)) - + if method != "mean" and method != "median" and method != "sum": raise ValueError("method must be either 'mean', 'median' or 'sum'") elif method == "mean": diff --git a/python/lvmdrp/functions/cubeMethod.py b/python/lvmdrp/functions/cubeMethod.py index b92c2661..d4b398e7 100644 --- a/python/lvmdrp/functions/cubeMethod.py +++ b/python/lvmdrp/functions/cubeMethod.py @@ -1,5 +1,4 @@ import sys -import time import numpy @@ -9,7 +8,8 @@ from matplotlib import pyplot as plt except: pass -from scipy import ndimage, stats +from copy import deepcopy +from scipy import stats from lvmdrp.core.cube import Cube from lvmdrp.core.passband import PassBand diff --git a/python/lvmdrp/functions/fluxCalMethod.py b/python/lvmdrp/functions/fluxCalMethod.py index 4dc08bbc..305d9eba 100644 --- a/python/lvmdrp/functions/fluxCalMethod.py +++ b/python/lvmdrp/functions/fluxCalMethod.py @@ -49,7 +49,6 @@ from astropy.coordinates import SkyCoord, EarthLocation, AltAz from astropy.stats import biweight_location, biweight_scale from astropy import units as u -from astropy.io import fits from astropy.table import Table from lvmdrp.core.rss import RSS, loadRSS diff --git a/python/lvmdrp/functions/gmosMethod.py b/python/lvmdrp/functions/gmosMethod.py index 85463860..96e66fbf 100644 --- a/python/lvmdrp/functions/gmosMethod.py +++ b/python/lvmdrp/functions/gmosMethod.py @@ -1,5 +1,4 @@ import os -from multiprocessing import Pool, cpu_count from astropy.io import fits as pyfits diff --git a/python/lvmdrp/functions/headerMethod.py b/python/lvmdrp/functions/headerMethod.py index d3be4d28..9eca511e 100644 --- a/python/lvmdrp/functions/headerMethod.py +++ b/python/lvmdrp/functions/headerMethod.py @@ -1,4 +1,3 @@ -import sys import numpy @@ -119,7 +118,7 @@ def expandHdrKeys_drp( hdr.loadFitsHeader(file, extension=int(extension), removeEmpty=int(removeEmpty)) keys = hdr.getHdrKeys() for k in keys: - if not k in exclude_list: + if k not in exclude_list: if keywords == "" or k in key_list: hdr.extendHierarch(k, prefix, verbose=int(verbose)) hdr.writeFitsHeader() diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index 587cb049..ff74b52f 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -25,7 +25,7 @@ from typing import List, Tuple from lvmdrp import log -from lvmdrp.utils.decorators import skip_on_missing_input_path, drop_missing_input_paths, skip_if_drpqual_flags +from lvmdrp.utils.decorators import skip_on_missing_input_path, drop_missing_input_paths from lvmdrp.utils.bitmask import QualityFlag from lvmdrp.core.fit_profile import Gaussians from lvmdrp.core.fiberrows import FiberRows, _read_fiber_ypix diff --git a/python/lvmdrp/functions/plotMethod.py b/python/lvmdrp/functions/plotMethod.py index c27e74da..c6fff6b8 100644 --- a/python/lvmdrp/functions/plotMethod.py +++ 
b/python/lvmdrp/functions/plotMethod.py @@ -1,11 +1,10 @@ from lvmdrp.core.header import Header - try: - import matplotlib from matplotlib import pyplot as plt except: pass +import numpy description = "Provides Methods to make some plots" diff --git a/python/lvmdrp/functions/rssMethod.py b/python/lvmdrp/functions/rssMethod.py index 27b42244..edab16e3 100644 --- a/python/lvmdrp/functions/rssMethod.py +++ b/python/lvmdrp/functions/rssMethod.py @@ -9,7 +9,6 @@ import matplotlib import matplotlib.gridspec as gridspec import numpy -from numpy.lib import recfunctions as rfn import yaml import bottleneck as bn from astropy import units as u @@ -21,7 +20,7 @@ from numpy import polynomial from scipy import interpolate, ndimage -from lvmdrp.utils.decorators import skip_on_missing_input_path, drop_missing_input_paths, skip_if_drpqual_flags +from lvmdrp.utils.decorators import skip_on_missing_input_path, skip_if_drpqual_flags from lvmdrp.core.constants import CONFIG_PATH, ARC_LAMPS from lvmdrp.core.header import Header, combineHdr from lvmdrp.core.cube import Cube @@ -263,7 +262,7 @@ def determine_wavelength_solution(in_arcs: List[str], out_wave: str, out_lsf: st ilamps.extend(lamps) # append arc iarcs.append(arc) - + # combine RSS objects arc = RSS() arc.combineRSS(iarcs, method="sum") @@ -329,7 +328,7 @@ def determine_wavelength_solution(in_arcs: List[str], out_wave: str, out_lsf: st stretch_factors=numpy.linspace(0.9,1.1,10000), shift_range=[-cc_max_shift, cc_max_shift], ) - + log.info(f"max CC = {cc:.2f} for strech = {mhat:.2f} and shift = {bhat:.2f}") else: mhat, bhat = 1.0, 0.0 @@ -476,7 +475,7 @@ def determine_wavelength_solution(in_arcs: List[str], out_wave: str, out_lsf: st wave_cls = polynomial.Legendre elif kind_disp == "chebyshev": wave_cls = polynomial.Chebyshev - + wave_poly = wave_cls.fit(cent_wave[i, use_line], ref_lines[use_line], deg=poly_disp) wave_coeffs[i, :] = wave_poly.convert().coef @@ -509,7 +508,7 @@ def determine_wavelength_solution(in_arcs: List[str], out_wave: str, out_lsf: st fwhm_cls = polynomial.Legendre elif kind_fwhm == "chebyshev": fwhm_cls = polynomial.Chebyshev - + fwhm_poly = fwhm_cls.fit(cent_wave[i, use_line], fwhm_wave[use_line], deg=poly_fwhm) lsf_coeffs[i, :] = fwhm_poly.convert().coef @@ -1143,7 +1142,7 @@ def resample_wavelength(in_rss: str, out_rss: str, method: str = "spline", sky_error = numpy.zeros((rss._fibers, len(ref_wave)), dtype=numpy.float32) else: sky_error = None - + if compute_densities: width_pix = numpy.zeros_like(rss._data) width_pix[:, :-1] = numpy.fabs(rss._wave[:, 1:] - rss._wave[:, :-1]) @@ -1152,7 +1151,7 @@ def resample_wavelength(in_rss: str, out_rss: str, method: str = "spline", rss._header["BUNIT"] = rss._header["BUNIT"] + "/angstrom" if rss._error is not None: rss._error = rss._error / width_pix - + if rss._wave is not None and len(rss._wave.shape) == 2: if parallel == "auto": cpus = cpu_count() @@ -1308,7 +1307,7 @@ def create_fiberflat(in_rsss: List[str], out_rsss: List[str], median_box: int = illumination_corr: bool = False, display_plots: bool = False) -> RSS: """computes a fiberflat from a wavelength calibrated continuum exposure - + This function computes a fiberflat from a extracted and wavelength calibrated continuum exposure. The fiberflat is computed by dividing the continuum exposure by the median spectrum of the continuum exposure. 
The fiberflat @@ -1375,7 +1374,7 @@ def create_fiberflat(in_rsss: List[str], out_rsss: List[str], median_box: int = return None else: wdelt = numpy.diff(rss._wave, axis=1).mean() - + # copy original data into output fiberflat object fiberflat = copy(rss) fiberflat._error = None @@ -1385,7 +1384,7 @@ def create_fiberflat(in_rsss: List[str], out_rsss: List[str], median_box: int = median_box_pix = int(median_box / wdelt) log.info(f"applying median smoothing with box size {[1, median_box]} angstroms ({[1, median_box_pix]} pixels)") fiberflat._data = ndimage.filters.median_filter(fiberflat._data, (1, median_box_pix)) - + # calculate median spectrum log.info(f"caculating normalization in full wavelength range ({fiberflat._wave.min():.2f} - {fiberflat._wave.max():.2f} angstroms)") norm = bn.nanmedian(fiberflat._data, axis=0) @@ -1396,7 +1395,7 @@ def create_fiberflat(in_rsss: List[str], out_rsss: List[str], median_box: int = log.info(f"limiting wavelength range to {wave_range[0]:.2f} - {wave_range[1]:.2f} angstroms") wave_select = (wave_range[0] <= norm_wave) & (norm_wave <= wave_range[1]) norm[~wave_select] = numpy.nan - + # normalize fibers where norm has valid values log.info(f"computing fiberflat across {fiberflat._fibers} fibers and {(~numpy.isnan(norm)).sum()} wavelength bins") normalized = fiberflat._data / norm[None, :] @@ -1424,7 +1423,7 @@ def create_fiberflat(in_rsss: List[str], out_rsss: List[str], median_box: int = spec = fiberflat.getSpec(ifiber) spec.smoothPoly(deg=poly_deg, poly_kind=poly_kind) fiberflat._data[ifiber, :] = spec._data - + # interpolate masked pixels in fiberflat for ifiber in range(fiberflat._fibers): wave, data, mask = fiberflat._wave[ifiber], fiberflat._data[ifiber], fiberflat._mask[ifiber] @@ -1646,7 +1645,7 @@ def apply_fiberflat(in_rss: str, out_rss: str, in_flat: str, clip_below: float = log.info(f"reading target data from {os.path.basename(in_rss)}") rss = RSS() rss.loadFitsData(in_rss) - + # load fiberflat log.info(f"reading fiberflat from {os.path.basename(in_flat)}") flat = RSS() @@ -1656,7 +1655,7 @@ def apply_fiberflat(in_rss: str, out_rss: str, in_flat: str, clip_below: float = if rss._fibers != flat._fibers: log.error(f"number of fibers in target data ({rss._fibers}) and fiberflat ({flat._fibers}) do not match") return None - + # check if fiberflat has the same wavelength grid as the target data if not numpy.isclose(rss._wave, flat._wave).all(): log.warning("target data and fiberflat have different wavelength grids") @@ -1671,7 +1670,7 @@ def apply_fiberflat(in_rss: str, out_rss: str, in_flat: str, clip_below: float = # interpolate fiberflat to target wavelength grid to fill in missing values if not numpy.isclose(spec_flat._wave, spec_data._wave).all(): spec_flat = spec_flat.resampleSpec(spec_data._wave, err_sim=0) - + # apply clipping select_clip_below = (spec_flat < clip_below) | numpy.isnan(spec_flat._data) spec_flat._data[select_clip_below] = 1 @@ -1680,7 +1679,7 @@ def apply_fiberflat(in_rss: str, out_rss: str, in_flat: str, clip_below: float = # correct spec_new = spec_data / spec_flat rss.setSpec(i, spec_new) - + # write out corrected RSS log.info(f"writing fiberflat corrected RSS to {os.path.basename(out_rss)}") rss.writeFitsData(out_rss) @@ -1807,7 +1806,7 @@ def stack_rss(in_rsss: List[str], out_rss: str, axis: int = 0) -> RSS: hdr_out = combineHdr(hdrs) else: hdr_out = None - + # update slitmap slitmap_out = rss._slitmap diff --git a/python/lvmdrp/functions/run_drp.py b/python/lvmdrp/functions/run_drp.py index cbfe628c..a56ff1d1 100644 --- 
a/python/lvmdrp/functions/run_drp.py +++ b/python/lvmdrp/functions/run_drp.py @@ -20,7 +20,7 @@ from lvmdrp.core.rss import RSS from lvmdrp.functions.imageMethod import (preproc_raw_frame, create_master_frame, create_pixelmask, detrend_frame, - find_peaks_auto, trace_peaks, + trace_peaks, extract_spectra) from lvmdrp.functions.rssMethod import (determine_wavelength_solution, create_pixel_table, resample_wavelength, join_spec_channels, stack_rss) diff --git a/python/lvmdrp/functions/run_quickdrp.py b/python/lvmdrp/functions/run_quickdrp.py index b9728ad6..538184ae 100644 --- a/python/lvmdrp/functions/run_quickdrp.py +++ b/python/lvmdrp/functions/run_quickdrp.py @@ -8,7 +8,6 @@ # @Copyright: SDSS-V LVM import os -import click import numpy as np from typing import Tuple diff --git a/python/lvmdrp/functions/skyMethod.py b/python/lvmdrp/functions/skyMethod.py index c12d3223..8b0e61ac 100644 --- a/python/lvmdrp/functions/skyMethod.py +++ b/python/lvmdrp/functions/skyMethod.py @@ -604,7 +604,7 @@ def sepContinuumLine_drp( sci_spec.loadFitsData(sky_sci, extension_hdr=0) else: raise ValueError( - f"You need to provide a science spectrum to perform the continuum/line separation using skycorr." + "You need to provide a science spectrum to perform the continuum/line separation using skycorr." ) if np.any(sky_spec._wave != sci_spec._wave): sky_spec = sky_spec.resampleSpec(ref_wave=sci_spec._wave, method="linear") @@ -1384,7 +1384,7 @@ def interpolate_sky(in_rss: str, out_sky: str, out_rss: str = None, which: str = """ if subtract and out_rss is None: - raise ValueError(f"need to provide an output file to write sky-subtracted data") + raise ValueError("need to provide an output file to write sky-subtracted data") # load input RSS log.info(f"loading input RSS file '{os.path.basename(in_rss)}'") diff --git a/python/lvmdrp/functions/specialMethod.py b/python/lvmdrp/functions/specialMethod.py index 72c48c18..e6fa3df9 100644 --- a/python/lvmdrp/functions/specialMethod.py +++ b/python/lvmdrp/functions/specialMethod.py @@ -1,5 +1,5 @@ import numpy -from scipy import stats +import pylab from lvmdrp.core.fiberrows import FiberRows from lvmdrp.core.fit_profile import Exponential_constant diff --git a/python/lvmdrp/main.py b/python/lvmdrp/main.py index 8998f43d..6edfa95e 100644 --- a/python/lvmdrp/main.py +++ b/python/lvmdrp/main.py @@ -12,7 +12,6 @@ from argparse import Namespace import numpy as np -import yaml from lvmdrp.core import image from lvmdrp.core.constants import CALIBRATION_TYPES, FRAMES_PRIORITY From bac3b722a60d964707ac28a191a8e11094cf25bb Mon Sep 17 00:00:00 2001 From: Brian Cherinka Date: Wed, 29 Nov 2023 15:29:05 -0500 Subject: [PATCH 02/18] linting and formatting --- python/lvmdrp/external/ancillary_func.py | 211 +++++++--- python/lvmdrp/external/astrolib.py | 466 ++++++++++++----------- python/lvmdrp/external/skycorr.py | 20 +- 3 files changed, 405 insertions(+), 292 deletions(-) diff --git a/python/lvmdrp/external/ancillary_func.py b/python/lvmdrp/external/ancillary_func.py index b3b8d931..dd7a5cac 100644 --- a/python/lvmdrp/external/ancillary_func.py +++ b/python/lvmdrp/external/ancillary_func.py @@ -1,53 +1,112 @@ -import numpy - import os.path as path +import pathlib + +import numpy import requests -from gaiaxpy import calibrate from astropy.table import Table -import pathlib +from gaiaxpy import calibrate +from scipy import interpolate from scipy.integrate import simps -from lvmdrp.core.spectrum1d import * +from lvmdrp.core.spectrum1d import Spectrum1D + + +sdss_g_w = numpy.array( + [ + 
3630, + 3640, + 3680, + 3780, + 3880, + 3980, + 4080, + 4180, + 4280, + 4380, + 4480, + 4580, + 4680, + 4780, + 4880, + 4980, + 5080, + 5180, + 5280, + 5380, + 5480, + 5580, + 5680, + 5780, + 5880, + 5980, + ] +) +sdss_g_f = numpy.array( + [ + 0.0000, + 0.0000, + 0.0013, + 0.0055, + 0.0500, + 0.1629, + 0.2609, + 0.3105, + 0.3385, + 0.3596, + 0.3736, + 0.3863, + 0.3973, + 0.4019, + 0.4073, + 0.4147, + 0.4201, + 0.4147, + 0.3233, + 0.1043, + 0.0128, + 0.0024, + 0.0010, + 0.0003, + 0.0000, + 0.0000, + ] +) -sdss_g_w = numpy.array([3630,3640,3680,3780,3880,3980,4080,4180,4280,4380,4480,4580,4680,4780,\ - 4880,4980,5080,5180,5280,5380,5480,5580,5680,5780,5880, 5980]) -sdss_g_f = numpy.array([0.0000,0.0000,0.0013,0.0055,0.0500,0.1629,0.2609,0.3105,0.3385,0.3596,\ - 0.3736,0.3863,0.3973,0.4019,0.4073,0.4147,0.4201,0.4147,0.3233,0.1043,0.0128,\ - 0.0024,0.0010,0.0003,0.0000,0.0000]) def spec_to_mAB(lam, spec, lamf, filt): - ''' + """ Calculate AB magnitude in filter (lamf, filt) given a spectrum (lam, spec) in ergs/s/cm^2/A - ''' - c_AAs = 2.99792458e18 # Speed of light in Angstrom/s - filt_int = numpy.interp(lam,lamf,filt) # Interpolate to common wavelength axis - I1 = simps(spec*filt_int*lam,lam) - I2 = simps(filt_int/lam,lam) - fnu = I1/I2 / c_AAs # Average flux density - mab = -2.5*numpy.log10(fnu) - 48.6 # AB magnitude + """ + c_AAs = 2.99792458e18 # Speed of light in Angstrom/s + filt_int = numpy.interp(lam, lamf, filt) # Interpolate to common wavelength axis + I1 = simps(spec * filt_int * lam, lam) + I2 = simps(filt_int / lam, lam) + fnu = I1 / I2 / c_AAs # Average flux density + mab = -2.5 * numpy.log10(fnu) - 48.6 # AB magnitude if numpy.isnan(mab): mab = -9999.9 return mab + def spec_to_LVM_mAB(camera, w, f): - ''' + """ LVM photometric system: Gaussian filter with sigma 250A centered in channels at 4500, 6500, and 8500A - ''' - if camera[0] == 'b': - return spec_to_mAB(w,f,w,numpy.exp(-0.5*((w-4500)/250)**2)) - elif camera[0] == 'r': - return spec_to_mAB(w,f,w,numpy.exp(-0.5*((w-6500)/250)**2)) + """ + if camera[0] == "b": + return spec_to_mAB(w, f, w, numpy.exp(-0.5 * ((w - 4500) / 250) ** 2)) + elif camera[0] == "r": + return spec_to_mAB(w, f, w, numpy.exp(-0.5 * ((w - 6500) / 250) ** 2)) else: - return spec_to_mAB(w,f,w,numpy.exp(-0.5*((w-8500)/250)**2)) + return spec_to_mAB(w, f, w, numpy.exp(-0.5 * ((w - 8500) / 250) ** 2)) -def interpolate_mask(x, y, mask, kind='linear', fill_value=0): +def interpolate_mask(x, y, mask, kind="linear", fill_value=0): """ :param x, y: numpy arrays, samples and values :param mask: boolean mask, True for masked values - :param method: interpolation method, one of linear, nearest, + :param method: interpolation method, one of linear, nearest, nearest-up, zero, slinear, quadratic, cubic, previous, or next. :param fill_value: which value to use for filling up data outside the convex hull of known pixel values. 
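[Editor's note] As a quick sanity check of spec_to_mAB / spec_to_LVM_mAB defined in the hunk above: a source with constant f_nu = 10**(-48.6/2.5) erg/s/cm^2/Hz has AB magnitude 0 by construction, so every LVM channel should recover ~0.0. A usage sketch (the wavelength grid and the camera labels 'b1'/'r1'/'z1' are only illustrative):

import numpy

c_AAs = 2.99792458e18                      # speed of light in Angstrom/s
w = numpy.arange(3600.0, 9801.0, 0.5)      # Angstrom
f = 10 ** (-48.6 / 2.5) * c_AAs / w ** 2   # flat-f_nu AB=0 source converted to erg/s/cm^2/A
for cam in ("b1", "r1", "z1"):
    print(cam, spec_to_LVM_mAB(cam, w, f))  # each should print ~0.0
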
@@ -66,55 +125,107 @@ def interpolate_mask(x, y, mask, kind='linear', fill_value=0): return yy + class GaiaStarNotFound(Exception): - ''' - Signal that the star has no BP-RP spectrum - ''' + """ + Signal that the star has no BP-RP spectrum + """ + pass def retrive_gaia_star(gaiaID, GAIA_CACHE_DIR): - ''' + """ Load or download and load from cache the spectrum of a gaia star, converted to erg/s/cm^2/A - ''' + """ # create cache dir if it does not exist pathlib.Path(GAIA_CACHE_DIR).mkdir(parents=True, exist_ok=True) - if path.exists(GAIA_CACHE_DIR+'/gaia_spec_'+str(gaiaID)+'.csv') == True: + if path.exists(GAIA_CACHE_DIR + "/gaia_spec_" + str(gaiaID) + ".csv") is True: # read the tables from our cache - gaiaflux = Table.read(GAIA_CACHE_DIR+"/gaia_spec_"+str(gaiaID)+".csv", format="csv") - gaiawave = Table.read(GAIA_CACHE_DIR+"/gaia_spec_"+str(gaiaID)+"_sampling.csv", format="csv") + gaiaflux = Table.read( + GAIA_CACHE_DIR + "/gaia_spec_" + str(gaiaID) + ".csv", format="csv" + ) + gaiawave = Table.read( + GAIA_CACHE_DIR + "/gaia_spec_" + str(gaiaID) + "_sampling.csv", format="csv" + ) else: # need to download from Gaia archive - CSV_URL = 'https://gea.esac.esa.int/data-server/data?RETRIEVAL_TYPE=XP_CONTINUOUS&ID=Gaia+DR3+'+\ - str(gaiaID)+'&format=CSV&DATA_STRUCTURE=RAW' - FILE = GAIA_CACHE_DIR+'/XP_'+str(gaiaID)+'_RAW.csv' + CSV_URL = ( + "https://gea.esac.esa.int/data-server/data?RETRIEVAL_TYPE=XP_CONTINUOUS&ID=Gaia+DR3+" + + str(gaiaID) + + "&format=CSV&DATA_STRUCTURE=RAW" + ) + FILE = GAIA_CACHE_DIR + "/XP_" + str(gaiaID) + "_RAW.csv" with requests.get(CSV_URL, stream=True) as r: r.raise_for_status() if len(r.content) < 2: raise GaiaStarNotFound(f"Gaia DR3 {gaiaID} has no BP-RP spectrum!") - with open(FILE, 'w') as f: - f.write(r.content.decode('utf-8')) + with open(FILE, "w") as f: + f.write(r.content.decode("utf-8")) # convert coefficients to sampled spectrum - _, _ = calibrate(FILE, output_path=GAIA_CACHE_DIR, output_file='gaia_spec_'+str(gaiaID), output_format='csv') + _, _ = calibrate( + FILE, + output_path=GAIA_CACHE_DIR, + output_file="gaia_spec_" + str(gaiaID), + output_format="csv", + ) # read the flux and wavelength tables - gaiaflux = Table.read(GAIA_CACHE_DIR+"/gaia_spec_"+str(gaiaID)+".csv", format="csv") - gaiawave = Table.read(GAIA_CACHE_DIR+"/gaia_spec_"+str(gaiaID)+"_sampling.csv", format="csv") + gaiaflux = Table.read( + GAIA_CACHE_DIR + "/gaia_spec_" + str(gaiaID) + ".csv", format="csv" + ) + gaiawave = Table.read( + GAIA_CACHE_DIR + "/gaia_spec_" + str(gaiaID) + "_sampling.csv", format="csv" + ) # make numpy arrays from whatever weird objects the Gaia stuff creates - wave = numpy.fromstring(gaiawave['pos'][0][1:-1], sep=',')*10 # in Angstrom - flux = 1e7*1e-1*1e-4*numpy.fromstring(gaiaflux['flux'][0][1:-1], sep=',') # W/s/micron -> in erg/s/cm^2/A + wave = numpy.fromstring(gaiawave["pos"][0][1:-1], sep=",") * 10 # in Angstrom + flux = ( + 1e7 * 1e-1 * 1e-4 * numpy.fromstring(gaiaflux["flux"][0][1:-1], sep=",") + ) # W/s/micron -> in erg/s/cm^2/A return wave, flux def extinctLaSilla(wave): - # digitized version of LaSilla extinctin curve from - w = [3520.83333,3562.50000,3979.16667,4489.58333,4802.08333,5312.50000,5614.58333,5760.41667,\ - 6041.66667,6572.91667,7145.83333,7541.66667,8052.08333,8770.83333,9781.25000,10197.91667] - f = [0.53533,0.52174,0.34511,0.22283,0.18071,0.14402,0.13315,0.14130,0.11685,0.07880,0.05299,\ - 0.04348,0.03533,0.02717,0.01902,0.02038] + # digitized version of LaSilla extinctin curve from + w = [ + 3520.83333, + 3562.50000, + 3979.16667, + 
4489.58333, + 4802.08333, + 5312.50000, + 5614.58333, + 5760.41667, + 6041.66667, + 6572.91667, + 7145.83333, + 7541.66667, + 8052.08333, + 8770.83333, + 9781.25000, + 10197.91667, + ] + f = [ + 0.53533, + 0.52174, + 0.34511, + 0.22283, + 0.18071, + 0.14402, + 0.13315, + 0.14130, + 0.11685, + 0.07880, + 0.05299, + 0.04348, + 0.03533, + 0.02717, + 0.01902, + 0.02038, + ] spec_raw = Spectrum1D(wave=w, data=f) return spec_raw.resampleSpec(wave) diff --git a/python/lvmdrp/external/astrolib.py b/python/lvmdrp/external/astrolib.py index e77e8aab..d4e5a054 100644 --- a/python/lvmdrp/external/astrolib.py +++ b/python/lvmdrp/external/astrolib.py @@ -1,12 +1,12 @@ #### -#### This code is a collection of routines taken from the astrolib project +# This code is a collection of routines taken from the astrolib project #### -from numpy import * +import numpy as np -_radeg = 180.0 / pi +_radeg = 180.0 / np.pi def premat(equinox1, equinox2, fk4=False): @@ -50,7 +50,7 @@ def premat(equinox1, equinox2, fk4=False): Converted to IDL V5.0 W. Landsman September 1997 """ - deg_to_rad = pi / 180.0e0 + deg_to_rad = np.pi / 180.0e0 sec_to_rad = deg_to_rad / 3600.0e0 t = 0.001e0 * (equinox2 - equinox1) @@ -106,29 +106,29 @@ def premat(equinox1, equinox2, fk4=False): ) ) - sina = sin(a) - sinb = sin(b) - sinc = sin(c) - cosa = cos(a) - cosb = cos(b) - cosc = cos(c) + sina = np.np.sin(a) + sinb = np.np.sin(b) + sinc = np.np.sin(c) + cosa = np.cos(a) + cosb = np.cos(b) + cosc = np.cos(c) - r = zeros((3, 3)) - r[0, :] = array( + r = np.zeroes((3, 3)) + r[0, :] = np.array( [ cosa * cosb * cosc - sina * sinb, sina * cosb + cosa * sinb * cosc, cosa * sinc, ] ) - r[1, :] = array( + r[1, :] = np.array( [ -cosa * sinb - sina * cosb * cosc, cosa * cosb - sina * sinb * cosc, -sina * sinc, ] ) - r[2, :] = array([-cosb * sinc, -sinb * sinc, cosc]) + r[2, :] = np.array([-cosb * sinc, -sinb * sinc, cosc]) return r @@ -211,19 +211,15 @@ def precess(ra0, dec0, equinox1, equinox2, doprint=None, fk4=None, radian=False) Correct negative output RA values when /RADIAN used March 1999 Work for arrays, not just vectors W. Landsman September 2003 """ - if isinstance(ra0, ndarray): + if isinstance(ra0, np.ndarray): ra = ra0.copy() dec = dec0.copy() npts = min(ra.size, dec.size) - xarray = ra.ndim >= 2 - if xarray: - dimen = ra.ndim else: - ra = array([ra0]) - dec = array([dec0]) + ra = np.array([ra0]) + dec = np.array([dec0]) npts = 1 - xarray = False - deg_to_rad = pi / 180.0e0 + deg_to_rad = np.pi / 180.0e0 if npts == 0: print("ERROR - Input RA and DEC must be vectors or scalars") @@ -233,41 +229,39 @@ def precess(ra0, dec0, equinox1, equinox2, doprint=None, fk4=None, radian=False) ra_rad = ra * deg_to_rad # Convert to double precision if not already dec_rad = dec * deg_to_rad else: - ra_rad = array(ra).astype(float) - dec_rad = array(dec).astype(float) + ra_rad = np.array(ra).astype(float) + dec_rad = np.array(dec).astype(float) - a = cos(dec_rad) + a = np.cos(dec_rad) _expr = npts # Is RA a vector or scalar? 
if _expr == 1: - x = concatenate( - [a * cos(ra_rad), a * sin(ra_rad), sin(dec_rad)] + x = np.concatenate( + [a * np.cos(ra_rad), a * np.np.sin(ra_rad), np.np.sin(dec_rad)] ) # input direction else: - x = numpy.zeros((npts, 3)) - x[:, 0] = a * cos(ra_rad) - x[:, 1] = a * sin(ra_rad) - x[:, 2] = sin(dec_rad) - x = transpose(x) - - sec_to_rad = deg_to_rad / 3600.0e0 + x = np.zeroes((npts, 3)) + x[:, 0] = a * np.cos(ra_rad) + x[:, 1] = a * np.np.sin(ra_rad) + x[:, 2] = np.np.sin(dec_rad) + x = np.transpose(x) # Use PREMAT function to get precession matrix from Equinox1 to Equinox2 r = premat(equinox1, equinox2, fk4=fk4) - x2 = transpose( - dot(transpose(r), transpose(x)) + x2 = np.transpose( + np.dot(np.transpose(r), np.transpose(x)) ) # rotate to get output direction cosines if npts == 1: # Scalar - ra_rad = arctan2(x2[1], x2[0]) - dec_rad = arcsin(x2[2]) + ra_rad = np.arctan2(x2[1], x2[0]) + dec_rad = np.arcsin(x2[2]) else: # Vector - ra_rad = zeros(npts) + arctan2(x2[:, 1], x2[:, 0]) - dec_rad = zeros(npts) + arcsin(x2[:, 2]) + ra_rad = np.zeroes(npts) + np.arctan2(x2[:, 1], x2[:, 0]) + dec_rad = np.zeroes(npts) + np.arcsin(x2[:, 2]) if not radian: ra = ra_rad / deg_to_rad @@ -276,14 +270,14 @@ def precess(ra0, dec0, equinox1, equinox2, doprint=None, fk4=None, radian=False) else: ra = ra_rad dec = dec_rad - ra = ra + (ra < 0.0) * 2.0e0 * pi + ra = ra + (ra < 0.0) * 2.0e0 * np.pi # if array: # ra = reform(ra, dimen, over=True) # dec = reform(dec, dimen, over=True) if doprint is not None: - print("Equinox (" + strtrim(equinox2, 2) + "): ", adstring(ra, dec, 1)) + print("Equinox (" + np.strtrim(equinox2, 2) + "): ", np.adstring(ra, dec, 1)) return ra, dec @@ -339,8 +333,8 @@ def _ret(): # Adjustment needed because Julian day starts at noon, calendar day at midnight - jd = array(xjd).astype(int) # Truncate to integral day - frac = array(xjd).astype(float) - jd + 0.5 # Fractional part of calendar day + jd = np.array(xjd).astype(int) # Truncate to integral day + frac = np.array(xjd).astype(float) - jd + 0.5 # Fractional part of calendar day after_noon = frac >= 1.0 next = 1 if next > 0: # Is it really the next calendar day? 
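[Editor's note] Stepping back to the premat/precess hunks above: the algorithm converts (ra, dec) to a direction-cosine vector, rotates it with the premat precession matrix, and converts back to angles. A minimal sketch of that idea, assuming a working premat(equinox1, equinox2) as defined above (its np.zeroes / np.np.sin spellings would still need to read np.zeros / np.sin for it to run); angles in degrees:

import numpy as np

def precess_sketch(ra_deg, dec_deg, equinox1, equinox2):
    ra, dec = np.deg2rad(ra_deg), np.deg2rad(dec_deg)
    # direction cosines of the input position
    x = np.array([np.cos(dec) * np.cos(ra), np.cos(dec) * np.sin(ra), np.sin(dec)])
    r = premat(equinox1, equinox2)          # precession rotation matrix
    x2 = r.T @ x                            # rotate to the new equinox
    ra2 = np.arctan2(x2[1], x2[0]) % (2.0 * np.pi)
    dec2 = np.arcsin(x2[2])
    return np.rad2deg(ra2), np.rad2deg(dec2)
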
@@ -354,16 +348,16 @@ def _ret(): else: jd = jd + 1 hr = frac * 24.0 - l = jd + 68569 - n = 4 * l / 146097 - l = l - (146097 * n + 3) / 4 - yr = 4000 * (l + 1) / 1461001 - l = l - 1461 * yr / 4 + 31 # 1461 = 365.25 * 4 - mn = 80 * l / 2447 - day = l - 2447 * mn / 80 - l = mn / 11 - mn = mn + 2 - 12 * l - yr = 100 * (n - 49) + yr + l + tmp = jd + 68569 + n = 4 * tmp / 146097 + tmp = tmp - (146097 * n + 3) / 4 + yr = 4000 * (tmp + 1) / 1461001 + tmp = tmp - 1461 * yr / 4 + 31 # 1461 = 365.25 * 4 + mn = 80 * tmp / 2447 + day = tmp - 2447 * mn / 80 + tmp = mn / 11 + mn = mn + 2 - 12 * tmp + yr = 100 * (n - 49) + yr + tmp return (yr, mn, day, hr) @@ -451,53 +445,53 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): """ scal = True - if isinstance(ra0, ndarray): + if isinstance(ra0, np.ndarray): ra = ra0 dec = dec0 n = ra.size scal = False else: n = 1 - ra = array([ra0]) - dec = array([dec0]) + ra = np.array([ra0]) + dec = np.array([dec0]) if n == 0: - message("ERROR - First parameter (RA vector) is undefined") + np.message("ERROR - First parameter (RA vector) is undefined") if rad_vel is None: - rad_vel = zeros(n) + rad_vel = np.zeroes(n) else: rad_vel = rad_vel * 1.0 - if array(rad_vel).size != n: + if np.array(rad_vel).size != n: print( "ERROR - RAD_VEL keyword vector must contain " - + strtrim(n, 2) + + np.strtrim(n, 2) + " values" ) if mu_radec is not None: - if array(mu_radec).size != 2 * n: - message( + if np.array(mu_radec).size != 2 * n: + np.message( "ERROR - MU_RADEC keyword (proper motion) be dimensioned (2," - + strtrim(n, 2) + + np.strtrim(n, 2) + ")" ) mu_radec = mu_radec * 1.0 if parallax is None: - parallax = zeros(n) + parallax = np.zeroes(n) else: parallax = parallax * 1.0 if epoch is None: epoch = 2000.0e0 - radeg = 180.0e0 / pi + radeg = 180.0e0 / np.pi sec_to_radian = 1.0e0 / radeg / 3600.0e0 - m = array( + m = np.array( [ - array( + np.array( [ +0.9999256795e0, -0.0111814828e0, @@ -507,7 +501,7 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): +0.435730e0, ] ), - array( + np.array( [ +0.0111814828e0, +0.9999374849e0, @@ -517,7 +511,7 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): -0.008541e0, ] ), - array( + np.array( [ +0.0048590039e0, -0.0000271771e0, @@ -527,7 +521,7 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): +0.002117e0, ] ), - array( + np.array( [ -0.00000242389840e0, +0.00000002710544e0, @@ -537,7 +531,7 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): -0.00485852e0, ] ), - array( + np.array( [ -0.00000002710544e0, -0.00000242392702e0, @@ -547,7 +541,7 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): -0.00002716e0, ] ), - array( + np.array( [ -0.00000001177742e0, +0.00000000006585e0, @@ -560,29 +554,29 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): ] ) - a_dot = 1e-3 * array([1.244e0, -1.579e0, -0.660e0]) # in arc seconds per century + a_dot = 1e-3 * np.array([1.244e0, -1.579e0, -0.660e0]) # in arc seconds per century ra_rad = ra / radeg dec_rad = dec / radeg - cosra = cos(ra_rad) - sinra = sin(ra_rad) - cosdec = cos(dec_rad) - sindec = sin(dec_rad) + cosra = np.cos(ra_rad) + sinra = np.np.sin(ra_rad) + cosdec = np.cos(dec_rad) + sindec = np.np.sin(dec_rad) dec_1950 = dec * 0.0 ra_1950 = ra * 0.0 - for i in arange(0, (n - 1) + (1)): + for i in np.arange(0, (n - 1) + (1)): # Following statement moved inside loop in Feb 
2000. - a = 1e-6 * array([-1.62557e0, -0.31919e0, -0.13843e0]) # in radians + a = 1e-6 * np.array([-1.62557e0, -0.31919e0, -0.13843e0]) # in radians - r0 = array([cosra[i] * cosdec[i], sinra[i] * cosdec[i], sindec[i]]) + r0 = np.array([cosra[i] * cosdec[i], sinra[i] * cosdec[i], sindec[i]]) if mu_radec is not None: mu_a = mu_radec[i, 0] mu_d = mu_radec[i, 1] r0_dot = ( - array( + np.array( [ -mu_a * sinra[i] * cosdec[i] - mu_d * cosra[i] * sindec[i], mu_a * cosra[i] * cosdec[i] - mu_d * sinra[i] * sindec[i], @@ -593,10 +587,10 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): ) else: - r0_dot = array([0.0e0, 0.0e0, 0.0e0]) + r0_dot = np.array([0.0e0, 0.0e0, 0.0e0]) - r_0 = concatenate((r0, r0_dot)) - r_1 = transpose(dot(transpose(m), transpose(r_0))) + r_0 = np.concatenate((r0, r0_dot)) + r_1 = np.transpose(np.dot(np.transpose(m), np.transpose(r_0))) # Include the effects of the E-terms of aberration to form r and r_dot. @@ -610,20 +604,20 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): x1 = r_1[0] y1 = r_1[1] z1 = r_1[2] - rmag = sqrt(x1**2 + y1**2 + z1**2) + rmag = np.sqrt(x1**2 + y1**2 + z1**2) s1 = r1 / rmag s1_dot = r1_dot / rmag s = s1 - for j in arange(0, 3): + for j in np.arange(0, 3): r = s1 + a - ((s * a).sum()) * s s = r / rmag x = r[0] y = r[1] z = r[2] r2 = x**2 + y**2 + z**2 - rmag = sqrt(r2) + rmag = np.sqrt(r2) if mu_radec is not None: r_dot = s1_dot + a_dot - ((s * a_dot).sum()) * s @@ -631,12 +625,12 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): y_dot = r_dot[1] z_dot = r_dot[2] mu_radec[i, 0] = (x * y_dot - y * x_dot) / (x**2 + y**2) - mu_radec[i, 1] = ( - z_dot * (x**2 + y**2) - z * (x * x_dot + y * y_dot) - ) / (r2 * sqrt(x**2 + y**2)) + mu_radec[i, 1] = (z_dot * (x**2 + y**2) - z * (x * x_dot + y * y_dot)) / ( + r2 * np.sqrt(x**2 + y**2) + ) - dec_1950[i] = arcsin(z / rmag) - ra_1950[i] = arctan2(y, x) + dec_1950[i] = np.arcsin(z / rmag) + ra_1950[i] = np.arctan2(y, x) if parallax[i] > 0.0: rad_vel[i] = (x * x_dot + y * y_dot + z * z_dot) / ( @@ -646,7 +640,7 @@ def bprecess(ra0, dec0, mu_radec=None, parallax=None, rad_vel=None, epoch=None): neg = ra_1950 < 0 if neg.any() > 0: - ra_1950[neg] = ra_1950[neg] + 2.0e0 * pi + ra_1950[neg] = ra_1950[neg] + 2.0e0 * np.pi ra_1950 = ra_1950 * radeg dec_1950 = dec_1950 * radeg @@ -710,17 +704,17 @@ def _ret(): # take input coords and convert to ra and dec (in radians) - ra = atan(y, x) - _del = sqrt(x * x + y * y + z * z) # magnitude of distance to Sun - dec = asin(z / _del) + ra = np.atan(y, x) + _del = np.sqrt(x * x + y * y + z * z) # magnitude of distance to Sun + dec = np.asin(z / _del) # precess the ra and dec precess(ra, dec, equinox1, equinox2, radian=True) # convert back to x, y, z - xunit = cos(ra) * cos(dec) - yunit = sin(ra) * cos(dec) - zunit = sin(dec) + xunit = np.cos(ra) * np.cos(dec) + yunit = np.np.sin(ra) * np.cos(dec) + zunit = np.np.sin(dec) x = xunit * _del y = yunit * _del @@ -795,7 +789,6 @@ def xyz(date, equinox=None): """ n_params = 7 - _opt = (equinox,) # def _ret(): # _optrv = zip(_opt, [equinox]) # _rv = [date, x, y, z, xvel, yvel, zvel] @@ -807,9 +800,9 @@ def xyz(date, equinox=None): if n_params == 0: print("Syntax - XYZ, date, x, y, z, [ xvel, yvel, zvel, EQUINOX= ]") print(" (date is REDUCED Julian date (JD - 2400000.0) )") - return _ret() + return - picon = pi / 180.0e0 + picon = np.pi / 180.0e0 t = (date - 15020.0e0) / 36525.0e0 # Relative Julian century from 1900 # NOTE: longitude 
arguments below are given in *equinox* of date. @@ -849,51 +842,51 @@ def xyz(date, equinox=None): # Calculate X,Y,Z using trigonometric series x = ( - 0.999860e0 * cos(el) - - 0.025127e0 * cos(g - el) - + 0.008374e0 * cos(g + el) - + 0.000105e0 * cos(g + g + el) - + 0.000063e0 * t * cos(g - el) - + 0.000035e0 * cos(g + g - el) - - 0.000026e0 * sin(g - el - j) - - 0.000021e0 * t * cos(g + el) - + 0.000018e0 * sin(2.0e0 * g + el - 2.0e0 * v) - + 0.000017e0 * cos(c) - - 0.000014e0 * cos(c - 2.0e0 * el) - + 0.000012e0 * cos(4.0e0 * g + el - 8.0e0 * m + 3.0e0 * j) - - 0.000012e0 * cos(4.0e0 * g - el - 8.0e0 * m + 3.0e0 * j) - - 0.000012e0 * cos(g + el - v) - + 0.000011e0 * cos(2.0e0 * g + el - 2.0e0 * v) - + 0.000011e0 * cos(2.0e0 * g - el - 2.0e0 * j) + 0.999860e0 * np.cos(el) + - 0.025127e0 * np.cos(g - el) + + 0.008374e0 * np.cos(g + el) + + 0.000105e0 * np.cos(g + g + el) + + 0.000063e0 * t * np.cos(g - el) + + 0.000035e0 * np.cos(g + g - el) + - 0.000026e0 * np.np.sin(g - el - j) + - 0.000021e0 * t * np.cos(g + el) + + 0.000018e0 * np.np.sin(2.0e0 * g + el - 2.0e0 * v) + + 0.000017e0 * np.cos(c) + - 0.000014e0 * np.cos(c - 2.0e0 * el) + + 0.000012e0 * np.cos(4.0e0 * g + el - 8.0e0 * m + 3.0e0 * j) + - 0.000012e0 * np.cos(4.0e0 * g - el - 8.0e0 * m + 3.0e0 * j) + - 0.000012e0 * np.cos(g + el - v) + + 0.000011e0 * np.cos(2.0e0 * g + el - 2.0e0 * v) + + 0.000011e0 * np.cos(2.0e0 * g - el - 2.0e0 * j) ) y = ( - 0.917308e0 * sin(el) - + 0.023053e0 * sin(g - el) - + 0.007683e0 * sin(g + el) - + 0.000097e0 * sin(g + g + el) - - 0.000057e0 * t * sin(g - el) - - 0.000032e0 * sin(g + g - el) - - 0.000024e0 * cos(g - el - j) - - 0.000019e0 * t * sin(g + el) - - 0.000017e0 * cos(2.0e0 * g + el - 2.0e0 * v) - + 0.000016e0 * sin(c) - + 0.000013e0 * sin(c - 2.0e0 * el) - + 0.000011e0 * sin(4.0e0 * g + el - 8.0e0 * m + 3.0e0 * j) - + 0.000011e0 * sin(4.0e0 * g - el - 8.0e0 * m + 3.0e0 * j) - - 0.000011e0 * sin(g + el - v) - + 0.000010e0 * sin(2.0e0 * g + el - 2.0e0 * v) - - 0.000010e0 * sin(2.0e0 * g - el - 2.0e0 * j) + 0.917308e0 * np.np.sin(el) + + 0.023053e0 * np.np.sin(g - el) + + 0.007683e0 * np.np.sin(g + el) + + 0.000097e0 * np.sin(g + g + el) + - 0.000057e0 * t * np.sin(g - el) + - 0.000032e0 * np.sin(g + g - el) + - 0.000024e0 * np.cos(g - el - j) + - 0.000019e0 * t * np.sin(g + el) + - 0.000017e0 * np.cos(2.0e0 * g + el - 2.0e0 * v) + + 0.000016e0 * np.sin(c) + + 0.000013e0 * np.sin(c - 2.0e0 * el) + + 0.000011e0 * np.sin(4.0e0 * g + el - 8.0e0 * m + 3.0e0 * j) + + 0.000011e0 * np.sin(4.0e0 * g - el - 8.0e0 * m + 3.0e0 * j) + - 0.000011e0 * np.sin(g + el - v) + + 0.000010e0 * np.sin(2.0e0 * g + el - 2.0e0 * v) + - 0.000010e0 * np.sin(2.0e0 * g - el - 2.0e0 * j) ) z = ( - 0.397825e0 * sin(el) - + 0.009998e0 * sin(g - el) - + 0.003332e0 * sin(g + el) - + 0.000042e0 * sin(g + g + el) - - 0.000025e0 * t * sin(g - el) - - 0.000014e0 * sin(g + g - el) - - 0.000010e0 * cos(g - el - j) + 0.397825e0 * np.sin(el) + + 0.009998e0 * np.sin(g - el) + + 0.003332e0 * np.sin(g + el) + + 0.000042e0 * np.sin(g + g + el) + - 0.000025e0 * t * np.sin(g - el) + - 0.000014e0 * np.sin(g + g - el) + - 0.000010e0 * np.cos(g - el - j) ) # Precess_to new equator? 
@@ -901,33 +894,33 @@ def xyz(date, equinox=None): precess_xyz(x, y, z, 1950, equinox) if n_params <= 3: - return _ret() + return xvel = ( - -0.017200e0 * sin(el) - - 0.000288e0 * sin(g + el) - - 0.000005e0 * sin(2.0e0 * g + el) - - 0.000004e0 * sin(c) - + 0.000003e0 * sin(c - 2.0e0 * el) - + 0.000001e0 * t * sin(g + el) - - 0.000001e0 * sin(2.0e0 * g - el) + -0.017200e0 * np.sin(el) + - 0.000288e0 * np.sin(g + el) + - 0.000005e0 * np.sin(2.0e0 * g + el) + - 0.000004e0 * np.sin(c) + + 0.000003e0 * np.sin(c - 2.0e0 * el) + + 0.000001e0 * t * np.sin(g + el) + - 0.000001e0 * np.sin(2.0e0 * g - el) ) yvel = ( - 0.015780 * cos(el) - + 0.000264 * cos(g + el) - + 0.000005 * cos(2.0e0 * g + el) - + 0.000004 * cos(c) - + 0.000003 * cos(c - 2.0e0 * el) - - 0.000001 * t * cos(g + el) + 0.015780 * np.cos(el) + + 0.000264 * np.cos(g + el) + + 0.000005 * np.cos(2.0e0 * g + el) + + 0.000004 * np.cos(c) + + 0.000003 * np.cos(c - 2.0e0 * el) + - 0.000001 * t * np.cos(g + el) ) zvel = ( - 0.006843 * cos(el) - + 0.000115 * cos(g + el) - + 0.000002 * cos(2.0e0 * g + el) - + 0.000002 * cos(c) - + 0.000001 * cos(c - 2.0e0 * el) + 0.006843 * np.cos(el) + + 0.000115 * np.cos(g + el) + + 0.000002 * np.cos(2.0e0 * g + el) + + 0.000002 * np.cos(c) + + 0.000001 * np.cos(c - 2.0e0 * el) ) # Precess to new equator? @@ -1009,11 +1002,11 @@ def helio_jd(date, ra, dec, b1950=False, time_diff=False): ra1 = ra dec1 = dec - radeg = 180.0 / pi + radeg = 180.0 / np.pi # zparcheck('HELIO_JD', date, 1, concatenate([3, 4, 5]), concatenate([0, 1]), 'Reduced Julian Date') - delta_t = (array(date).astype(float) - 33282.42345905e0) / 36525.0e0 - epsilon_sec = poly1d([44.836e0, -46.8495, -0.00429, 0.00181][::-1])(delta_t) + delta_t = (np.array(date).astype(float) - 33282.42345905e0) / 36525.0e0 + epsilon_sec = np.poly1d([44.836e0, -46.8495, -0.00429, 0.00181][::-1])(delta_t) epsilon = (23.433333e0 + epsilon_sec / 3600.0e0) / radeg ra1 = ra1 / radeg dec1 = dec1 / radeg @@ -1024,12 +1017,13 @@ def helio_jd(date, ra, dec, b1950=False, time_diff=False): # and divide by the speed of light, and multiply by 86400 second/year time = -499.00522e0 * ( - cos(dec1) * cos(ra1) * x + (tan(epsilon) * sin(dec1) + cos(dec1) * sin(ra1)) * y + np.cos(dec1) * np.cos(ra1) * x + + (np.tan(epsilon) * np.sin(dec1) + np.cos(dec1) * np.sin(ra1)) * y ) if time_diff: return time else: - return array(date).astype(float) + time / 86400.0e0 + return np.array(date).astype(float) + time / 86400.0e0 def baryvel(dje, deq=0): @@ -1092,8 +1086,8 @@ def baryvel(dje, deq=0): IDL> ra = ten(19,50,46.77)*15/!RADEG ;RA in radians IDL> dec = ten(08,52,3.5)/!RADEG ;Dec in radians - IDL> v = vb[0]*cos(dec)*cos(ra) + $ ;Project velocity toward star - vb[1]*cos(dec)*sin(ra) + vb[2]*sin(dec) + IDL> v = vb[0]*np.cos(dec)*np.cos(ra) + $ ;Project velocity toward star + vb[1]*np.cos(dec)*np.sin(ra) + vb[2]*np.sin(dec) REVISION HISTORY: Jeff Valenti, U.C. Berkeley Translated BARVEL.FOR to IDL. @@ -1105,8 +1099,8 @@ def baryvel(dje, deq=0): """ # Define constants - dc2pi = 2 * pi - cc2pi = 2 * pi + dc2pi = 2 * np.pi + cc2pi = 2 * np.pi dc1 = 1.0e0 dcto = 2415020.0e0 dcjul = 36525.0e0 # days in Julian year @@ -1116,7 +1110,7 @@ def baryvel(dje, deq=0): au = 1.4959787e8 # Constants dcfel(i,k) of fast changing elements. - dcfel = array( + dcfel = np.array( [ 1.7400353e00, 6.2833195099091e02, @@ -1144,11 +1138,11 @@ def baryvel(dje, deq=0): 5.6093e-6, ] ) - dcfel = reshape(dcfel, (8, 3)) + dcfel = np.reshape(dcfel, (8, 3)) # constants dceps and ccsel(i,k) of slowly changing elements. 
- dceps = array([4.093198e-1, -2.271110e-4, -2.860401e-8]) - ccsel = array( + dceps = np.array([4.093198e-1, -2.271110e-4, -2.860401e-8]) + ccsel = np.array( [ 1.675104e-2, -4.179579e-5, @@ -1203,10 +1197,10 @@ def baryvel(dje, deq=0): -1.590188e-7, ] ) - ccsel = reshape(ccsel, (17, 3)) + ccsel = np.reshape(ccsel, (17, 3)) # Constants of the arguments of the short-period perturbations. - dcargs = array( + dcargs = np.array( [ 5.0974222e0, -7.8604195454652e2, @@ -1240,10 +1234,10 @@ def baryvel(dje, deq=0): -5.4868336758022e2, ] ) - dcargs = reshape(dcargs, (15, 2)) + dcargs = np.reshape(dcargs, (15, 2)) # Amplitudes ccamps(n,k) of the short-period perturbations. - ccamps = array( + ccamps = np.array( [ -2.279594e-5, 1.407414e-5, @@ -1322,11 +1316,11 @@ def baryvel(dje, deq=0): 0.0e0, ] ) - ccamps = reshape(ccamps, (15, 5)) + ccamps = np.reshape(ccamps, (15, 5)) # Constants csec3 and ccsec(n,k) of the secular perturbations in longitude. ccsec3 = -7.757020e-8 - ccsec = array( + ccsec = np.array( [ 1.289600e-6, 5.550147e-1, @@ -1342,7 +1336,7 @@ def baryvel(dje, deq=0): 1.559103e01, ] ) - ccsec = reshape(ccsec, (4, 3)) + ccsec = np.reshape(ccsec, (4, 3)) # Sidereal rates. dcsld = 1.990987e-7 # sidereal rate in longitude @@ -1355,7 +1349,7 @@ def baryvel(dje, deq=0): # Constants dcargm(i,k) of the arguments of the perturbations of the motion # of the moon. - dcargm = array( + dcargm = np.array( [ 5.1679830e0, 8.3286911095275e3, @@ -1365,10 +1359,10 @@ def baryvel(dje, deq=0): 1.5542754389685e4, ] ) - dcargm = reshape(dcargm, (3, 2)) + dcargm = np.reshape(dcargm, (3, 2)) # Amplitudes ccampm(n,k) of the perturbations of the moon. - ccampm = array( + ccampm = np.array( [ 1.097594e-1, 2.896773e-7, @@ -1384,29 +1378,31 @@ def baryvel(dje, deq=0): 4.063015e-8, ] ) - ccampm = reshape(ccampm, (3, 4)) + ccampm = np.reshape(ccampm, (3, 4)) # ccpamv(k)=a*m*dl,dt (planets), dc1mme=1-mass(earth+moon) - ccpamv = array([8.326827e-11, 1.843484e-11, 1.988712e-12, 1.881276e-12]) + ccpamv = np.array([8.326827e-11, 1.843484e-11, 1.988712e-12, 1.881276e-12]) dc1mme = 0.99999696e0 # Time arguments. dt = (dje - dcto) / dcjul - tvec = array([1e0, dt, dt * dt]) + tvec = np.array([1e0, dt, dt * dt]) # Values of all elements for the instant(aneous?) dje. - temp = (transpose(dot(transpose(tvec), transpose(dcfel)))) % dc2pi + temp = (np.transpose(np.dot(np.transpose(tvec), np.transpose(dcfel)))) % dc2pi dml = temp[0] forbel = temp[1:8] g = forbel[0] # old fortran equivalence deps = (tvec * dceps).sum() % dc2pi - sorbel = (transpose(dot(transpose(tvec), transpose(ccsel)))) % dc2pi + sorbel = (np.transpose(np.dot(np.transpose(tvec), np.transpose(ccsel)))) % dc2pi e = sorbel[0] # old fortran equivalence # Secular perturbations in longitude. - dummy = cos(2.0) - sn = sin((transpose(dot(transpose(tvec[0:2]), transpose(ccsec[:, 1:3])))) % cc2pi) + sn = np.sin( + (np.transpose(np.dot(np.transpose(tvec[0:2]), np.transpose(ccsec[:, 1:3])))) + % cc2pi + ) # Periodic perturbations of the emb (earth-moon barycenter). pertl = (ccsec[:, 0] * sn).sum() + dt * ccsec3 * sn[2] @@ -1415,8 +1411,8 @@ def baryvel(dje, deq=0): pertrd = 0.0 for k in range(0, 15): a = (dcargs[k, 0] + dt * dcargs[k, 1]) % dc2pi - cosa = cos(a) - sina = sin(a) + cosa = np.cos(a) + sina = np.sin(a) pertl = pertl + ccamps[k, 0] * cosa + ccamps[k, 1] * sina pertr = pertr + ccamps[k, 2] * cosa + ccamps[k, 3] * sina if k < 11: @@ -1425,22 +1421,22 @@ def baryvel(dje, deq=0): # Elliptic part of the motion of the emb. 
phi = (e * e / 4e0) * ( - ((8e0 / e) - e) * sin(g) + 5 * sin(2 * g) + (13 / 3e0) * e * sin(3 * g) + ((8e0 / e) - e) * np.sin(g) + 5 * np.sin(2 * g) + (13 / 3e0) * e * np.sin(3 * g) ) f = g + phi - sinf = sin(f) - cosf = cos(f) + sinf = np.sin(f) + cosf = np.cos(f) dpsi = (dc1 - e * e) / (dc1 + e * cosf) phid = 2 * e * ccsgd * ((1 + 1.5 * e * e) * cosf + e * (1.25 - 0.5 * sinf * sinf)) - psid = ccsgd * e * sinf / sqrt(dc1 - e * e) + psid = ccsgd * e * sinf / np.sqrt(dc1 - e * e) # Perturbed heliocentric motion of the emb. d1pdro = dc1 + pertr drd = d1pdro * (psid + dpsi * pertrd) drld = d1pdro * dpsi * (dcsld + phid + pertld) dtl = (dml + phi + pertl) % dc2pi - dsinls = sin(dtl) - dcosls = cos(dtl) + dsinls = np.sin(dtl) + dcosls = np.cos(dtl) dxhd = drd * dcosls - drld * dsinls dyhd = drd * dsinls + drld * dcosls @@ -1452,8 +1448,8 @@ def baryvel(dje, deq=0): pertpd = 0.0 for k in range(0, 3): a = (dcargm[k, 0] + dt * dcargm[k, 1]) % dc2pi - sina = sin(a) - cosa = cos(a) + sina = np.sin(a) + cosa = np.cos(a) pertl = pertl + ccampm[k, 0] * sina pertld = pertld + ccampm[k, 1] * cosa pertp = pertp + ccampm[k, 2] * cosa @@ -1461,14 +1457,14 @@ def baryvel(dje, deq=0): # Heliocentric motion of the earth. tl = forbel[1] + pertl - sinlm = sin(tl) - coslm = cos(tl) + sinlm = np.sin(tl) + coslm = np.cos(tl) sigma = cckm / (1.0 + pertp) a = sigma * (ccmld + pertld) b = sigma * pertpd dxhd = dxhd + a * sinlm + b * coslm dyhd = dyhd - a * coslm + b * sinlm - dzhd = -sigma * ccfdi * cos(forbel[2]) + dzhd = -sigma * ccfdi * np.cos(forbel[2]) # Barycentric motion of the earth. dxbd = dxhd * dc1mme @@ -1478,14 +1474,14 @@ def baryvel(dje, deq=0): plon = forbel[k + 3] pomg = sorbel[k + 1] pecc = sorbel[k + 9] - tl = (plon + 2.0 * pecc * sin(plon - pomg)) % cc2pi - dxbd = dxbd + ccpamv[k] * (sin(tl) + pecc * sin(pomg)) - dybd = dybd - ccpamv[k] * (cos(tl) + pecc * cos(pomg)) - dzbd = dzbd - ccpamv[k] * sorbel[k + 13] * cos(plon - sorbel[k + 5]) + tl = (plon + 2.0 * pecc * np.sin(plon - pomg)) % cc2pi + dxbd = dxbd + ccpamv[k] * (np.sin(tl) + pecc * np.sin(pomg)) + dybd = dybd - ccpamv[k] * (np.cos(tl) + pecc * np.cos(pomg)) + dzbd = dzbd - ccpamv[k] * sorbel[k + 13] * np.cos(plon - sorbel[k + 5]) # Transition to mean equator of date. - dcosep = cos(deps) - dsinep = sin(deps) + dcosep = np.cos(deps) + dsinep = np.sin(deps) dyahd = dcosep * dyhd - dsinep * dzhd dzahd = dsinep * dyhd + dcosep * dzhd dyabd = dcosep * dybd - dsinep * dzbd @@ -1494,8 +1490,8 @@ def baryvel(dje, deq=0): # Epoch of mean equinox (deq) of zero implies that we should use # Julian ephemeris date (dje) as epoch of mean equinox. if deq == 0: - dvelh = au * (array([dxhd, dyahd, dzahd])) - dvelb = au * (array([dxbd, dyabd, dzabd])) + dvelh = au * (np.array([dxhd, dyahd, dzahd])) + dvelb = au * (np.array([dxbd, dyabd, dzabd])) return (dvelh, dvelb) # General precession from epoch dje to deq. 
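# Illustrative usage sketch (hypothetical inputs, not part of the patch): project
# the barycentric velocity returned by baryvel() onto a line of sight, mirroring
# the IDL example in the docstring above; ra and dec are assumed to be in radians.
import numpy as np

dvelh, dvelb = baryvel(2451545.0, deq=0)  # heliocentric and barycentric velocity (km/s)
ra, dec = np.radians(297.6949), np.radians(8.8676)  # same coordinates as the docstring example
v_los = (
    dvelb[0] * np.cos(dec) * np.cos(ra)
    + dvelb[1] * np.cos(dec) * np.sin(ra)
    + dvelb[2] * np.sin(dec)
)  # component of the observer's velocity toward the star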
@@ -1503,10 +1499,14 @@ def baryvel(dje, deq=0):
    prema = premat(deqdat, deq, fk4=True)

    dvelh = au * (
-        transpose(dot(transpose(prema), transpose(array([dxhd, dyahd, dzahd]))))
+        np.transpose(
+            np.dot(np.transpose(prema), np.transpose(np.array([dxhd, dyahd, dzahd])))
+        )
    )
    dvelb = au * (
-        transpose(dot(transpose(prema), transpose(array([dxbd, dyabd, dzabd]))))
+        np.transpose(
+            np.dot(np.transpose(prema), np.transpose(np.array([dxbd, dyabd, dzabd])))
+        )
    )

    return (dvelh, dvelb)
@@ -1542,7 +1542,7 @@ def helcorr(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False):
    #  2005-June-20 Kochukhov Included precession of RA2000 and DEC2000 to current epoch

    # covert JD to Gregorian calendar date
-    xjd = array(2400000.0).astype(float) + jd
+    xjd = np.array(2400000.0).astype(float) + jd
    year, month, day, ut = daycnv(xjd)

    # current epoch
@@ -1551,15 +1551,15 @@ def helcorr(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False):
    # precess ra2000 and dec2000 to current epoch
    ra, dec = precess(ra2000 * 15.0, dec2000, 2000.0, epoch)
    # calculate heliocentric julian date
-    hjd = array(helio_jd(jd, ra, dec)).astype(float)
+    hjd = np.array(helio_jd(jd, ra, dec)).astype(float)

    # DIURNAL VELOCITY (see IRAF task noao.astutil.rvcorrect)
    # convert geodetic latitude into geocentric latitude to correct
    # for rotation of earth
    dlat = (
-        -(11.0 * 60.0 + 32.743) * sin(2 * obs_lat / _radeg)
-        + 1.1633 * sin(4 * obs_lat / _radeg)
-        - 0.0026 * sin(6 * obs_lat / _radeg)
+        -(11.0 * 60.0 + 32.743) * np.sin(2 * obs_lat / _radeg)
+        + 1.1633 * np.sin(4 * obs_lat / _radeg)
+        - 0.0026 * np.sin(6 * obs_lat / _radeg)
    )
    lat = obs_lat + dlat / 3600

@@ -1568,30 +1568,32 @@ def helcorr(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False):
        6378160.0
        * (
            0.998327073
-            + 0.001676438 * cos(2 * lat / _radeg)
-            - 0.00000351 * cos(4 * lat / _radeg)
-            + 0.000000008 * cos(6 * lat / _radeg)
+            + 0.001676438 * np.cos(2 * lat / _radeg)
+            - 0.00000351 * np.cos(4 * lat / _radeg)
+            + 0.000000008 * np.cos(6 * lat / _radeg)
        )
        + obs_alt
    )

    # calculate rotational velocity (perpendicular to the radius vector) in km/s
    # 23.934469591229 is the siderial day in hours for 1986
-    v = 2.0 * pi * (r / 1000.0) / (23.934469591229 * 3600.0)
+    v = 2.0 * np.pi * (r / 1000.0) / (23.934469591229 * 3600.0)

    # calculating local mean siderial time (see astronomical almanach)
    tu = (jd - 51545.0) / 36525
    gmst = (
        6.697374558
        + ut
-        + (236.555367908 * (jd - 51545.0) + 0.093104 * tu**2 - 6.2e-6 * tu**3)
-        / 3600
+        + (236.555367908 * (jd - 51545.0) + 0.093104 * tu**2 - 6.2e-6 * tu**3) / 3600
    )
    lmst = (gmst - obs_long / 15) % 24

    # projection of rotational velocity along the line of sight
    vdiurnal = (
-        v * cos(lat / _radeg) * cos(dec / _radeg) * sin((ra - lmst * 15) / _radeg)
+        v
+        * np.cos(lat / _radeg)
+        * np.cos(dec / _radeg)
+        * np.sin((ra - lmst * 15) / _radeg)
    )

    # BARICENTRIC and HELIOCENTRIC VELOCITIES
@@ -1599,14 +1601,14 @@ def helcorr(obs_long, obs_lat, obs_alt, ra2000, dec2000, jd, debug=False):

    # project to line of sight
    vbar = (
-        vb[0] * cos(dec / _radeg) * cos(ra / _radeg)
-        + vb[1] * cos(dec / _radeg) * sin(ra / _radeg)
-        + vb[2] * sin(dec / _radeg)
+        vb[0] * np.cos(dec / _radeg) * np.cos(ra / _radeg)
+        + vb[1] * np.cos(dec / _radeg) * np.sin(ra / _radeg)
+        + vb[2] * np.sin(dec / _radeg)
    )
    vhel = (
-        vh[0] * cos(dec / _radeg) * cos(ra / _radeg)
-        + vh[1] * cos(dec / _radeg) * sin(ra / _radeg)
-        + vh[2] * sin(dec / _radeg)
+        vh[0] * np.cos(dec / _radeg) * np.cos(ra / _radeg)
+        + vh[1] * np.cos(dec / _radeg) * np.sin(ra / _radeg)
+        + vh[2] * np.sin(dec / _radeg)
    )
    bcorr = vdiurnal + vbar  # using baricentric velocity for correction

diff --git a/python/lvmdrp/external/skycorr.py b/python/lvmdrp/external/skycorr.py
index ab58ac5e..9478c07e 100644
--- a/python/lvmdrp/external/skycorr.py
+++ b/python/lvmdrp/external/skycorr.py
@@ -87,7 +87,7 @@ def createParFile(
    """

    # create output parfilename
-    if parfile == None:
+    if parfile is None:
        parfile = objfile[0 : objfile.rfind(".fits")] + ".skycorr.par"

    # open file handler
@@ -120,24 +120,24 @@ def createParFile(
    fp.write("\nVAC_AIR=" + vacOrAir)

    # expert mode
-    if dateVal != None:
-        if dateVal == None:
+    if dateVal is not None:
+        if dateVal is None:
            raise ValueError("Must give dateVal if dateKey=DATE_VAL")
        fp.write("\nDATE_KEY=DATE_VAL")
        fp.write("\nDATE_VAL=" + str(dateVal))
    else:
        fp.write("\nDATE_KEY=" + dateKey)

-    if timeVal != None:
-        if timeVal == None:
+    if timeVal is not None:
+        if timeVal is None:
            raise ValueError("Must give timeVal if timeKey=TIME_VAL")
        fp.write("\nTIME_KEY=TIME_VAL")
        fp.write("\nTIME_VAL=" + str(timeVal))
    else:
        fp.write("\nTIME_KEY=" + timeKey)

-    if telAltVal != None:
-        if telAltVal == None:
+    if telAltVal is not None:
+        if telAltVal is None:
            raise ValueError("Must give telAltVal if telAltKey=TELALT_VAL")
        fp.write("\nTELALT_KEY=TELALT_VAL")
        fp.write("\nTELALT_VAL=" + str(telAltVal))
@@ -362,7 +362,7 @@ def asciiSkyCorrWrapper(
    try:
        results = ap.Table()
        results.read(newfile, type="fits", verbose=False)
-    except:
+    except Exception:
        # print "Failed to open skycorr results...."
        # pdb.set_trace()
        results = None
@@ -371,7 +371,7 @@ def asciiSkyCorrWrapper(
    # get new wave axis if asked
    if calcNewWave & (type(results) != type(None)):
        coefs = readWaveInfo(dirname, resFile)
-        if coefs != None:
+        if coefs is not None:
            x = 2.0 * (wave - wave.min()) / (wave.max() - wave.min()) - 1.0
            newx = cheby(x, coefs)
            newwave = 0.5 * (newx + 1.0) * (wave.max() - wave.min()) + wave.min()
@@ -509,7 +509,7 @@ def test2(

    ofh.close()

-    results = asciiSkyCorrWrapper(
+    asciiSkyCorrWrapper(
        otab["lambda"],
        otab["flux"],
        stab["flux"],

From a6af34f6feb1828a7ca822ab6f0c77d27ef3b0bd Mon Sep 17 00:00:00 2001
From: Brian Cherinka
Date: Wed, 29 Nov 2023 15:46:23 -0500
Subject: [PATCH 03/18] linting and formatting

---
 python/lvmdrp/core/apertures.py          |   2 +-
 python/lvmdrp/core/astrometry.py         |   2 +-
 python/lvmdrp/core/cube.py               |  14 +-
 python/lvmdrp/core/positionTable.py      |  38 +-
 python/lvmdrp/core/spectrum1d.py         | 612 ++++++++++++++++++-----
 python/lvmdrp/functions/headerMethod.py  |   6 +-
 python/lvmdrp/functions/imageMethod.py   |  52 +-
 python/lvmdrp/functions/plotMethod.py    |   2 +-
 python/lvmdrp/functions/run_quickdrp.py  |   4 +-
 python/lvmdrp/functions/specialMethod.py |  14 +-
 10 files changed, 557 insertions(+), 189 deletions(-)

diff --git a/python/lvmdrp/core/apertures.py b/python/lvmdrp/core/apertures.py
index 959cff0b..fe79a694 100644
--- a/python/lvmdrp/core/apertures.py
+++ b/python/lvmdrp/core/apertures.py
@@ -8,7 +8,7 @@ def __init__(self, xcenter, ycenter, radius, kmax=1000, grid_fixed=False):
        self._xCenter = xcenter
        self._yCenter = ycenter
        self._radius = radius
-        if grid_fixed == True:
+        if grid_fixed is True:
            subres = int(numpy.sqrt(kmax))
            self._sampeling = numpy.indices((subres, subres)).reshape(
                2, subres * subres
diff --git a/python/lvmdrp/core/astrometry.py b/python/lvmdrp/core/astrometry.py
index 5fb1d413..f04f58be 100644
--- a/python/lvmdrp/core/astrometry.py
+++ b/python/lvmdrp/core/astrometry.py
@@ -76,7 +76,7 @@ def register_image(
            np.arange(-search_box / 2.0,
search_box / 2.0 + step_search, step_search) + guess_y ) - angles_off = np.arange(-10, 10, 2) + # angles_off = np.arange(-10, 10, 2) offsets_xIFU = np.zeros((len(offsets_x), len(offsets_y))) offsets_yIFU = np.zeros((len(offsets_x), len(offsets_y))) chisq = np.zeros((len(offsets_x), len(offsets_y))) diff --git a/python/lvmdrp/core/cube.py b/python/lvmdrp/core/cube.py index 0f48ad02..ea4c8b7b 100644 --- a/python/lvmdrp/core/cube.py +++ b/python/lvmdrp/core/cube.py @@ -180,7 +180,7 @@ def loadFitsData( if self._wave is None: try: crpix = self.getHdrValue("CRPIX3") - 1 - except: + except KeyError: crpix = 0 try: self._wave = ( @@ -220,7 +220,7 @@ def loadFitsData( self._dim_x = self._cover.shape[2] try: crpix = self.getHdrValue("CRPIX3") - 1 - except: + except KeyError: crpix = 0 try: self._wave = ( @@ -393,8 +393,8 @@ def collapseCube( if self._mask is not None: data = self._data * numpy.logical_not(self._mask) - if self._error is not None: - error = self._error * numpy.logical_not(self._mask) + # if self._error is not None: + # error = self._error * numpy.logical_not(self._mask) else: data = self._data @@ -532,8 +532,8 @@ def glueCubeSets( wave = numpy.arange(wave1[0], wave2[-1] + disp2, disp2) select1 = wave1 < wave2[0] select2 = wave2 > wave1[-1] - select_overlap1 = wave1 >= wave2[0] - select_overlap2 = wave2 <= wave1[-1] + # select_overlap1 = wave1 >= wave2[0] + # select_overlap2 = wave2 <= wave1[-1] else: raise ValueError("The wavelength ranges do not match with each other") else: @@ -676,7 +676,7 @@ def glueCubeSets( ) ) - if mergeHdr == True: + if mergeHdr is True: hdrs = [cube1, cube2] combined_header = combineHdr(hdrs) self.setHeader(combined_header.getHeader()) diff --git a/python/lvmdrp/core/positionTable.py b/python/lvmdrp/core/positionTable.py index 0bd0861e..d82ad439 100644 --- a/python/lvmdrp/core/positionTable.py +++ b/python/lvmdrp/core/positionTable.py @@ -39,7 +39,7 @@ def __init__( try: self._fiber_type = numpy.array(fiber_type) - except: + except Exception: self._fiber_type = None def loadTxtPosTab(self, file): @@ -69,27 +69,19 @@ def loadTxtPosTab(self, file): self._fiber_type = fiber_type def writeTxtPosTab(self, file, fiber_type=False): - dat = open(file, "w") - print >> dat, "%s %.2f %.2f %i" % (self._shape, self._size[0], self._size[1], 0) - for i in range(len(self._arc_position_x)): - if fiber_type: - print >> dat, "%i %.2f %.2f %i %s" % ( - i + 1, - self._arc_position_x[i], - self._arc_position_y[i], - self._good_fibers[i], - self._fiber_type[i], - ) - else: - print >> dat, "%i %.2f %.2f %i" % ( - i + 1, - self._arc_position_x[i], - self._arc_position_y[i], - self._good_fibers[i], - ) - # print >> dat, "%i %.2f %.2f %i"%(i+1, self._arc_position_x[i], self._arc_position_y[i], self._good_fibers[i]) - # print >> dat, "%i %.2f %.2f"%(i+1, self._arc_position_x[i], self._arc_position_y[i]) - dat.close() + with open(file, "w") as dat: + dat.write(f"{self._shape} {self._size[0]:.2f} {self._size[1]:.2f} {0}\n") + for i in range(len(self._arc_position_x)): + if fiber_type: + dat.write( + f"{i + 1} {self._arc_position_x[i]:.2f} {self._arc_position_y[i]:.2f} " + f"{self._good_fibers[i]} {self._fiber_type[i]}\n" + ) + else: + dat.write( + f"{i + 1} {self._arc_position_x[i]:.2f} {self._arc_position_y[i]:.2f} " + f"{self._good_fibers[i]}\n" + ) def writeFitsPosTable(self): columns = [] @@ -231,7 +223,7 @@ def loadPosTable(infile): if hdu[i].header["EXTNAME"].split()[0] == "POSTABLE": posTab.loadFitsPosTable(hdu[i]) found = True - if found == False: + if found is False: raise 
RuntimeError( "No position table information found in file %s." % (infile) ) diff --git a/python/lvmdrp/core/spectrum1d.py b/python/lvmdrp/core/spectrum1d.py index f5796c2f..186c172d 100644 --- a/python/lvmdrp/core/spectrum1d.py +++ b/python/lvmdrp/core/spectrum1d.py @@ -14,7 +14,13 @@ from lvmdrp.core.header import Header -def _spec_from_lines(lines: numpy.ndarray, sigma: float, wavelength: numpy.ndarray, heights: numpy.ndarray = None, names: numpy.ndarray = None): +def _spec_from_lines( + lines: numpy.ndarray, + sigma: float, + wavelength: numpy.ndarray, + heights: numpy.ndarray = None, + names: numpy.ndarray = None, +): rss = numpy.zeros((len(lines), wavelength.size)) for i, line in enumerate(lines): rss[i] = gaussian(wavelength, mean=line, stddev=sigma) @@ -268,7 +274,15 @@ def wave_little_interpol(wavelist): class Spectrum1D(Header): def __init__( - self, wave=None, data=None, error=None, mask=None, inst_fwhm=None, sky=None, sky_error=None, header=None + self, + wave=None, + data=None, + error=None, + mask=None, + inst_fwhm=None, + sky=None, + sky_error=None, + header=None, ): self._wave = wave self._data = data @@ -337,10 +351,19 @@ def __sub__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec @@ -360,10 +383,19 @@ def __sub__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec else: @@ -378,16 +410,28 @@ def __sub__(self, other): if data.dtype == numpy.float64 or data.dtype == numpy.dtype(">f8"): data = data.astype(numpy.float32) if error is not None: - if error.dtype == numpy.float64 or error.dtype == numpy.dtype(">f8"): + if error.dtype == numpy.float64 or error.dtype == numpy.dtype( + ">f8" + ): error = error.astype(numpy.float32) if sky is not None: if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if ( + sky_error.dtype == numpy.float64 + or sky_error.dtype == numpy.dtype(">f8") + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec except Exception: @@ -453,10 +497,19 @@ def __add__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or 
sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec @@ -492,10 +545,19 @@ def __add__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec else: @@ -526,16 +588,28 @@ def __add__(self, other): if data.dtype == numpy.float64 or data.dtype == numpy.dtype(">f8"): data = data.astype(numpy.float32) if error is not None: - if error.dtype == numpy.float64 or error.dtype == numpy.dtype(">f8"): + if error.dtype == numpy.float64 or error.dtype == numpy.dtype( + ">f8" + ): error = error.astype(numpy.float32) if sky is not None: if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if ( + sky_error.dtype == numpy.float64 + or sky_error.dtype == numpy.dtype(">f8") + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec except Exception: @@ -549,7 +623,9 @@ def __truediv__(self, other): if isinstance(other, Spectrum1D): other._data = other._data.astype(numpy.float32) select = other._data != 0.0 - data = numpy.divide(self._data, other._data, out=numpy.zeros_like(self._data), where=select) + data = numpy.divide( + self._data, other._data, out=numpy.zeros_like(self._data), where=select + ) if self._mask is not None and other._mask is not None: mask = numpy.logical_or(self._mask, other._mask) @@ -565,18 +641,43 @@ def __truediv__(self, other): if self._error is not None and other._error is not None: error = numpy.zeros_like(self._error) - error_a = numpy.divide(self._error, other._data, out=error, where=select) ** 2 - error_b = numpy.divide(self._data * other._error, other._data ** 2, out=error, where=select) ** 2 + error_a = ( + numpy.divide(self._error, other._data, out=error, where=select) ** 2 + ) + error_b = ( + numpy.divide( + self._data * other._error, + other._data**2, + out=error, + where=select, + ) + ** 2 + ) error = numpy.sqrt(error_a + error_b) elif self._error is not None: - error = numpy.divide(self._error, other._data, out=numpy.zeros_like(self._error), where=select) + error = numpy.divide( + self._error, + other._data, + out=numpy.zeros_like(self._error), + where=select, + ) elif other._error is not None: - error = numpy.divide(self._data * other._error, other._data ** 2, out=numpy.zeros_like(self._error), where=select) + error = numpy.divide( + self._data * other._error, + other._data**2, + out=numpy.zeros_like(self._error), + where=select, + ) else: error = 
None if self._sky is not None and other._sky is not None: - sky = numpy.divide(self._sky, other._sky, out=numpy.zeros_like(self._sky), where=other._sky != 0.0) + sky = numpy.divide( + self._sky, + other._sky, + out=numpy.zeros_like(self._sky), + where=other._sky != 0.0, + ) elif self._sky is not None: sky = self._sky elif other._sky is not None: @@ -586,13 +687,36 @@ def __truediv__(self, other): if self._sky_error is not None and other._sky_error is not None: sky_error = numpy.zeros_like(self._sky_error) - sky_error_a = numpy.divide(self._sky_error, other._data, out=sky_error, where=select) ** 2 - sky_error_b = numpy.divide(self._data * other._sky_error, other._data ** 2, out=sky_error, where=select) ** 2 + sky_error_a = ( + numpy.divide( + self._sky_error, other._data, out=sky_error, where=select + ) + ** 2 + ) + sky_error_b = ( + numpy.divide( + self._data * other._sky_error, + other._data**2, + out=sky_error, + where=select, + ) + ** 2 + ) sky_error = numpy.sqrt(sky_error_a + sky_error_b) elif self._sky_error is not None: - sky_error = numpy.divide(self._sky_error, other._data, out=numpy.zeros_like(self._sky_error), where=select) + sky_error = numpy.divide( + self._sky_error, + other._data, + out=numpy.zeros_like(self._sky_error), + where=select, + ) elif other._sky_error is not None: - sky_error = numpy.divide(self._data * other._sky_error, other._data ** 2, out=numpy.zeros_like(self._sky_error), where=select) + sky_error = numpy.divide( + self._data * other._sky_error, + other._data**2, + out=numpy.zeros_like(self._sky_error), + where=select, + ) else: sky_error = None @@ -605,19 +729,32 @@ def __truediv__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec elif isinstance(other, numpy.ndarray): select = other != 0.0 - data = numpy.divide(self._data, other, out=numpy.zeros_like(self._data), where=select) + data = numpy.divide( + self._data, other, out=numpy.zeros_like(self._data), where=select + ) if self._error is not None: - error = numpy.divide(self._error, other, out=numpy.zeros_like(self._error), where=select) + error = numpy.divide( + self._error, other, out=numpy.zeros_like(self._error), where=select + ) else: error = None @@ -628,12 +765,19 @@ def __truediv__(self, other): mask = None if self._sky is not None: - sky = numpy.divide(self._sky, other, out=numpy.zeros_like(self._sky), where=select) + sky = numpy.divide( + self._sky, other, out=numpy.zeros_like(self._sky), where=select + ) else: sky = None if self._sky_error is not None: - sky_error = numpy.divide(self._sky_error, other, out=numpy.zeros_like(self._sky_error), where=select) + sky_error = numpy.divide( + self._sky_error, + other, + out=numpy.zeros_like(self._sky_error), + where=select, + ) else: sky_error = None @@ -646,20 +790,36 @@ def __truediv__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == 
numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec else: # try to do addtion for other types, e.g. float, int, etc. try: select = other != 0.0 - data = numpy.divide(self._data, other, out=numpy.zeros_like(self._data), where=select) + data = numpy.divide( + self._data, other, out=numpy.zeros_like(self._data), where=select + ) if self._error is not None: - error = numpy.divide(self._error, other, out=numpy.zeros_like(self._error), where=select) + error = numpy.divide( + self._error, + other, + out=numpy.zeros_like(self._error), + where=select, + ) else: error = None @@ -668,28 +828,47 @@ def __truediv__(self, other): mask[~select] = True if self._sky is not None: - sky = numpy.divide(self._sky, other, out=numpy.zeros_like(self._sky), where=select) + sky = numpy.divide( + self._sky, other, out=numpy.zeros_like(self._sky), where=select + ) else: sky = None if self._sky_error is not None: - sky_error = numpy.divide(self._sky_error, other, out=numpy.zeros_like(self._sky_error), where=select) + sky_error = numpy.divide( + self._sky_error, + other, + out=numpy.zeros_like(self._sky_error), + where=select, + ) else: sky_error = None if data.dtype == numpy.float64 or data.dtype == numpy.dtype(">f8"): data = data.astype(numpy.float32) if error is not None: - if error.dtype == numpy.float64 or error.dtype == numpy.dtype(">f8"): + if error.dtype == numpy.float64 or error.dtype == numpy.dtype( + ">f8" + ): error = error.astype(numpy.float32) if sky is not None: if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if ( + sky_error.dtype == numpy.float64 + or sky_error.dtype == numpy.dtype(">f8") + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec except Exception: @@ -703,7 +882,9 @@ def __rtruediv__(self, other): if isinstance(other, Spectrum1D): other._data = other._data.astype(numpy.float32) select = self._data != 0.0 - data = numpy.divide(other._data, self._data, out=numpy.zeros_like(self._data), where=select) + data = numpy.divide( + other._data, self._data, out=numpy.zeros_like(self._data), where=select + ) if self._mask is not None and other._mask is not None: mask = numpy.logical_or(self._mask, other._mask) @@ -719,18 +900,43 @@ def __rtruediv__(self, other): if self._error is not None and other._error is not None: error = numpy.zeros_like(self._error) - error_a = numpy.divide(other._error, self._data, out=error, where=select) ** 2 - error_b = numpy.divide(other._data * self._error, self._data ** 2, out=error, where=select) ** 2 + error_a = ( + numpy.divide(other._error, self._data, out=error, where=select) ** 2 + ) + error_b = ( + numpy.divide( + other._data * self._error, + self._data**2, + out=error, + where=select, + ) + ** 2 + ) error = numpy.sqrt(error_a + error_b) elif self._error is not None: - error = numpy.divide(other._error, self._data, out=numpy.zeros_like(self._error), where=select) + error = numpy.divide( + other._error, + 
self._data, + out=numpy.zeros_like(self._error), + where=select, + ) elif other._error is not None: - error = numpy.divide(other._data * self._error, self._data ** 2, out=numpy.zeros_like(self._error), where=select) + error = numpy.divide( + other._data * self._error, + self._data**2, + out=numpy.zeros_like(self._error), + where=select, + ) else: error = None if other._sky is not None: - sky = numpy.divide(other._sky, self._sky, out=numpy.zeros_like(self._sky), where=self._sky != 0.0) + sky = numpy.divide( + other._sky, + self._sky, + out=numpy.zeros_like(self._sky), + where=self._sky != 0.0, + ) elif self._sky is not None: sky = self._sky elif other._sky is not None: @@ -740,13 +946,36 @@ def __rtruediv__(self, other): if self._sky_error is not None and other._sky_error is not None: sky_error = numpy.zeros_like(self._sky_error) - sky_error_a = numpy.divide(other._sky_error, self._data, out=sky_error, where=select) ** 2 - sky_error_b = numpy.divide(other._data * self._sky_error, self._data ** 2, out=sky_error, where=select) ** 2 + sky_error_a = ( + numpy.divide( + other._sky_error, self._data, out=sky_error, where=select + ) + ** 2 + ) + sky_error_b = ( + numpy.divide( + other._data * self._sky_error, + self._data**2, + out=sky_error, + where=select, + ) + ** 2 + ) sky_error = numpy.sqrt(sky_error_a + sky_error_b) elif self._sky_error is not None: - sky_error = numpy.divide(other._sky_error, self._data, out=numpy.zeros_like(self._sky_error), where=select) + sky_error = numpy.divide( + other._sky_error, + self._data, + out=numpy.zeros_like(self._sky_error), + where=select, + ) elif other._sky_error is not None: - sky_error = numpy.divide(other._data * self._sky_error, self._data ** 2, out=numpy.zeros_like(self._sky_error), where=select) + sky_error = numpy.divide( + other._data * self._sky_error, + self._data**2, + out=numpy.zeros_like(self._sky_error), + where=select, + ) else: sky_error = None @@ -759,19 +988,35 @@ def __rtruediv__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec elif isinstance(other, numpy.ndarray): select = self._data != 0.0 - data = numpy.divide(other, self._data, out=numpy.zeros_like(self._data), where=select) + data = numpy.divide( + other, self._data, out=numpy.zeros_like(self._data), where=select + ) if self._error is not None: - error = numpy.divide(other * self._error, self._data ** 2, out=numpy.zeros_like(self._error), where=select) + error = numpy.divide( + other * self._error, + self._data**2, + out=numpy.zeros_like(self._error), + where=select, + ) else: error = None @@ -782,12 +1027,22 @@ def __rtruediv__(self, other): mask = None if self._sky is not None: - sky = numpy.divide(other, self._sky, out=numpy.zeros_like(self._sky), where=self._sky != 0.0) + sky = numpy.divide( + other, + self._sky, + out=numpy.zeros_like(self._sky), + where=self._sky != 0.0, + ) else: sky = None if self._sky_error is not None: - sky_error = numpy.divide(other * self._sky_error, self._data ** 2, out=numpy.zeros_like(self._sky_error), where=select) + 
sky_error = numpy.divide( + other * self._sky_error, + self._data**2, + out=numpy.zeros_like(self._sky_error), + where=select, + ) else: sky_error = None @@ -800,18 +1055,34 @@ def __rtruediv__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec else: select = self._data != 0.0 - data = numpy.divide(other, self._data, out=numpy.zeros_like(self._data), where=select) + data = numpy.divide( + other, self._data, out=numpy.zeros_like(self._data), where=select + ) if self._error is not None: - error = numpy.divide(other * self._error, self._data ** 2, out=numpy.zeros_like(self._error), where=select) + error = numpy.divide( + other * self._error, + self._data**2, + out=numpy.zeros_like(self._error), + where=select, + ) else: error = None @@ -822,12 +1093,22 @@ def __rtruediv__(self, other): mask = None if self._sky is not None: - sky = numpy.divide(other, self._sky, out=numpy.zeros_like(self._sky), where=self._sky != 0.0) + sky = numpy.divide( + other, + self._sky, + out=numpy.zeros_like(self._sky), + where=self._sky != 0.0, + ) else: sky = None if self._sky_error is not None: - sky_error = numpy.divide(other * self._sky_error, self._data ** 2, out=numpy.zeros_like(self._sky_error), where=select) + sky_error = numpy.divide( + other * self._sky_error, + self._data**2, + out=numpy.zeros_like(self._sky_error), + where=select, + ) else: sky_error = None @@ -840,10 +1121,19 @@ def __rtruediv__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec @@ -863,7 +1153,7 @@ def __mul__(self, other): if self._error is not None and other._error is not None: error_a = self._error * other._data error_b = self._data * other._error - error = numpy.sqrt(error_a ** 2 + error_b ** 2) + error = numpy.sqrt(error_a**2 + error_b**2) elif self._error is not None: error = self._error elif other._error is not None: @@ -883,7 +1173,7 @@ def __mul__(self, other): if self._sky_error is not None and other._sky_error is not None: sky_error_a = self._sky_error * other._data sky_error_b = self._data * other._sky_error - sky_error = numpy.sqrt(sky_error_a ** 2 + sky_error_b ** 2) + sky_error = numpy.sqrt(sky_error_a**2 + sky_error_b**2) elif self._sky_error is not None: sky_error = self._sky_error elif other._sky_error is not None: @@ -900,10 +1190,19 @@ def __mul__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if 
sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec @@ -939,10 +1238,19 @@ def __mul__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec else: @@ -979,15 +1287,24 @@ def __mul__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec def __pow__(self, other): - data = self._data ** other + data = self._data**other if self._error is not None: error = 1.0 / float(other) * self._data ** (other - 1) * self._error @@ -1000,7 +1317,7 @@ def __pow__(self, other): mask = None if self._sky is not None: - sky = self._sky ** other + sky = self._sky**other else: sky = None @@ -1018,15 +1335,24 @@ def __pow__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec def __rpow__(self, other): - data = other ** self._data + data = other**self._data if self._error is not None: error = numpy.log(other) * data * self._error @@ -1039,7 +1365,7 @@ def __rpow__(self, other): mask = None if self._sky is not None: - sky = other ** self._sky + sky = other**self._sky else: sky = None @@ -1057,10 +1383,19 @@ def __rpow__(self, other): if sky.dtype == numpy.float64 or sky.dtype == numpy.dtype(">f8"): sky = sky.astype(numpy.float32) if sky_error is not None: - if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype(">f8"): + if sky_error.dtype == numpy.float64 or sky_error.dtype == numpy.dtype( + ">f8" + ): sky_error = sky_error.astype(numpy.float32) - spec = Spectrum1D(wave=self._wave, data=data, error=error, mask=mask, sky=sky, sky_error=sky_error) + spec = Spectrum1D( + wave=self._wave, + data=data, + error=error, + mask=mask, + sky=sky, + sky_error=sky_error, + ) return spec @@ -1145,7 +1480,9 @@ def loadFitsData( elif 
hdu[i].header["EXTNAME"].split()[0] == "SKY_ERROR": self._sky_error = hdu[i].data if self._wave is None: - self._wave = (self._pixels * self._header["CDELT1"] + self._header["CRVAL1"]) + self._wave = ( + self._pixels * self._header["CDELT1"] + self._header["CRVAL1"] + ) else: if extension_data is not None: self._data = hdu[extension_data].data @@ -1211,7 +1548,15 @@ def writeFitsData( if self._sky_error is not None: self._sky_error = self._sky_error.astype("float32") - hdus = [None, None, None, None, None, None, None] # create empty list for hdu storage + hdus = [ + None, + None, + None, + None, + None, + None, + None, + ] # create empty list for hdu storage # create primary hdus and image hdus # data hdu @@ -1280,7 +1625,9 @@ def writeFitsData( if extension_skyerror == 0: hdu = pyfits.PrimaryHDU(self._sky_error) elif extension_skyerror > 0 and extension_skyerror is not None: - hdus[extension_skyerror] = pyfits.ImageHDU(self._sky_error, name="SKY_ERROR") + hdus[extension_skyerror] = pyfits.ImageHDU( + self._sky_error, name="SKY_ERROR" + ) # header hdu if extension_hdr == 0: @@ -1577,7 +1924,8 @@ def resampleSpec( for i in range(err_sim): error[select_goodpix] = numpy.random.normal( # NOTE: patching negative errors - clean_data[select_goodpix], numpy.abs(self._error[select_goodpix]) + clean_data[select_goodpix], + numpy.abs(self._error[select_goodpix]), ).astype(numpy.float32) if method == "spline": @@ -1686,7 +2034,8 @@ def resampleSpec( for i in range(err_sim): sky_error[select_goodpix] = numpy.random.normal( # NOTE: patching negative sky_errors - clean_data[select_goodpix], numpy.abs(self._sky_error[select_goodpix]) + clean_data[select_goodpix], + numpy.abs(self._sky_error[select_goodpix]), ).astype(numpy.float32) if method == "spline": @@ -1726,7 +2075,9 @@ def resampleSpec( if new_sky is not None: new_sky = numpy.where(select_out, extrapolate._sky, new_sky) if new_sky_error is not None: - new_sky_error = numpy.where(select_out, extrapolate._sky_error, new_error) + new_sky_error = numpy.where( + select_out, extrapolate._sky_error, new_error + ) spec_out = Spectrum1D( wave=ref_wave, @@ -1739,18 +2090,27 @@ def resampleSpec( ) return spec_out - def resampleSpec_flux_conserving(self, ref_wave, method="spline", + def resampleSpec_flux_conserving( + self, + ref_wave, + method="spline", err_sim=500, replace_error=1e10, - extrapolate=None): - + extrapolate=None, + ): old_dlambda = numpy.interp(ref_wave, self._wave[:-1], numpy.diff(self._wave)) # plt.plot(self._wave, self._data, lw=1, color="k") # plt.plot(self._wave, ) new_dlambda = numpy.diff(ref_wave, append=ref_wave[-1]) - new_spec = self.resampleSpec(ref_wave, method=method, err_sim=err_sim, replace_error=replace_error, extrapolate=extrapolate) + new_spec = self.resampleSpec( + ref_wave, + method=method, + err_sim=err_sim, + replace_error=replace_error, + extrapolate=extrapolate, + ) # print(self._data) # print(new_spec._data) new_spec._data *= old_dlambda / new_dlambda @@ -1883,12 +2243,13 @@ def binSpec(self, new_wave): ) if self._sky is not None: sky_out[i] = numpy.sum( - numpy.abs(masked_wave[select] - new_wave[i]) - * self._sky[mask_in][select] - ) / numpy.sum(numpy.abs(masked_wave[select] - new_wave[i])) + numpy.abs(masked_wave[select] - new_wave[i]) + * self._sky[mask_in][select] + ) / numpy.sum(numpy.abs(masked_wave[select] - new_wave[i])) if self._sky_error is not None: sky_error_out[i] = numpy.sqrt( - numpy.sum(masked_sky_error[select] ** 2) / numpy.sum(select) ** 2 + numpy.sum(masked_sky_error[select] ** 2) + / numpy.sum(select) 
** 2 ) else: mask_out[i] = True @@ -1896,7 +2257,14 @@ def binSpec(self, new_wave): if self._sky is not None: sky_out = numpy.interp(new_wave, masked_wave, self._sky[mask_in]) - spec = Spectrum1D(data=data_out, wave=new_wave, error=error_out, mask=mask_out, sky=sky_out, sky_error=sky_error_out) + spec = Spectrum1D( + data=data_out, + wave=new_wave, + error=error_out, + mask=mask_out, + sky=sky_out, + sky_error=sky_error_out, + ) return spec @@ -2000,7 +2368,7 @@ def smoothGaussVariable(self, diff_fwhm): mask=self._mask, inst_fwhm=inst_fwhm, sky=sky, - sky_error=sky_error + sky_error=sky_error, ) return spec @@ -2198,14 +2566,14 @@ def measurePeaks( ) mask = numpy.zeros(len(init_pos), dtype="bool") # minimum counts of three pixels around each peak - min = numpy.amin( - [ - numpy.take(self._data, init_pos[select] + 1), - numpy.take(self._data, init_pos[select]), - numpy.take(self._data, init_pos[select] - 1), - ], - axis=0, - ) + # min = numpy.amin( + # [ + # numpy.take(self._data, init_pos[select] + 1), + # numpy.take(self._data, init_pos[select]), + # numpy.take(self._data, init_pos[select] - 1), + # ], + # axis=0, + # ) # minimum counts of three pixels around each peak max = numpy.amax( [ @@ -2593,8 +2961,14 @@ def fitSepGauss( out = numpy.zeros(3 * ncomp, dtype=numpy.float32) back = [deepcopy(init_back) for _ in centres] - error = self._error if self._error is not None else numpy.ones(self._dim, dtype=numpy.float32) - mask = self._mask if self._mask is not None else numpy.zeros(self._dim, dtype=bool) + error = ( + self._error + if self._error is not None + else numpy.ones(self._dim, dtype=numpy.float32) + ) + mask = ( + self._mask if self._mask is not None else numpy.zeros(self._dim, dtype=bool) + ) for i, centre in enumerate(centres): select = self._get_select(centre, aperture, mask) @@ -2611,10 +2985,12 @@ def fitSepGauss( out[2 * ncomp + i] = out_fit[2] if axs is not None: - axs[i] = gauss.plot(self._wave[select], self._data[select], ax=axs[i]) + axs[i] = gauss.plot( + self._wave[select], self._data[select], ax=axs[i] + ) axs[i].axvline(centres[i], ls="--", lw=1, color="tab:red") else: - out[i:ncomp + i + 1] = 0.0 + out[i : ncomp + i + 1] = 0.0 return out @@ -2714,9 +3090,7 @@ def obtainGaussFluxPeaks(self, pos, sigma, indices, replace_error=1e10, plot=Fal shape=(self._dim, fibers), ) # print(B) - out = sparse.linalg.lsmr( - B, self._data / self._error, atol=1e-4, btol=1e-4 - ) + out = sparse.linalg.lsmr(B, self._data / self._error, atol=1e-4, btol=1e-4) # out = linalg.lstsq(A, self._data / self._error, lapack_driver='gelsy', check_finite=False) # print(out) @@ -2821,12 +3195,12 @@ def coaddSpec(self, other, wave=None): # There are no masked quantities yet, so make sure they are filled here. 
weights = 1.0 / errors**2 norm = bn.nansum(weights, axis=0) - weights = weights / norm[None,:] + weights = weights / norm[None, :] fluxes = bn.nansum(fluxes * weights, axis=0) fwhms = bn.nansum(fwhms * weights, axis=0) errors = numpy.sqrt(1.0 / bn.nansum(weights * norm, axis=0)) skies = bn.nansum(skies * weights, axis=0) - sky_errors = numpy.sqrt(bn.nansum(sky_errors ** 2 * weights ** 2), axis=0) + sky_errors = numpy.sqrt(bn.nansum(sky_errors**2 * weights**2), axis=0) masks = numpy.logical_and(masks[0], masks[1]) masks = numpy.logical_or(masks, numpy.isnan(fluxes)) @@ -2835,4 +3209,12 @@ def coaddSpec(self, other, wave=None): masks = numpy.logical_or(masks, numpy.isnan(skies)) masks = numpy.logical_or(masks, numpy.isnan(sky_errors)) - return Spectrum1D(wave=wave, data=fluxes, error=errors, inst_fwhm=fwhms, mask=masks, sky=skies, sky_error=sky_errors) + return Spectrum1D( + wave=wave, + data=fluxes, + error=errors, + inst_fwhm=fwhms, + mask=masks, + sky=skies, + sky_error=sky_errors, + ) diff --git a/python/lvmdrp/functions/headerMethod.py b/python/lvmdrp/functions/headerMethod.py index 9eca511e..2ae18a80 100644 --- a/python/lvmdrp/functions/headerMethod.py +++ b/python/lvmdrp/functions/headerMethod.py @@ -218,9 +218,9 @@ def addHdrKey_drp(file, key, value, comment="", extension="0"): if v % 1 == 0: try: v = int(value) - except: + except Exception: pass - except: + except Exception: v = value hdr.setHdrValue(key, v, comment) hdr.writeFitsHeader(extension=int(extension)) @@ -286,7 +286,7 @@ def mergeHdr_drp(files_in, file_out, exclude="", extension="0", removeEmpty="0") for k in keys: try: combined_header.setHdrValue(k, hdr.getValue(k)) - except: + except Exception: pass combined_header.writeFitsHeader(file_out, extension=int(extension)) diff --git a/python/lvmdrp/functions/imageMethod.py b/python/lvmdrp/functions/imageMethod.py index ff74b52f..5e22168f 100644 --- a/python/lvmdrp/functions/imageMethod.py +++ b/python/lvmdrp/functions/imageMethod.py @@ -1468,7 +1468,7 @@ def trace_peaks( # read slitmap extension slitmap = img.getSlitmap() slitmap = slitmap[slitmap["spectrographid"] == int(img._header["CCD"][1])] - + channel = img._header["CCD"][0] positions = slitmap[f"ypix_{channel}"] fibers = positions.size @@ -1576,7 +1576,7 @@ def trace_peaks( numpy.savetxt(coords_file, 1+numpy.asarray(table), fmt="%.5f") numpy.savetxt(poly_file, 1+numpy.asarray(table_poly), fmt="%.5f") numpy.savetxt(poly_all_file, 1+numpy.asarray(table_poly_all), fmt="%.5f") - + # linearly interpolate coefficients at masked fibers log.info(f"interpolating coefficients at {bad_fibers.sum()} masked fibers") x_pixels = numpy.arange(trace._data.shape[1]) @@ -1982,9 +1982,9 @@ def traceFWHM_drp( median_box = max(median_box, 1) median_cross = max(median_cross, 1) img = img.medianImg((median_cross, median_box)) - + img._mask[...] 
= False - + # plt.figure(figsize=(20,10)) # plt.plot(img.getSlice(1300, axis="y")._error.tolist(), lw=0.6, color="0.7") # plt.plot(img.getSlice(1200, axis="y")._error.tolist(), lw=1) @@ -2073,7 +2073,7 @@ def traceFWHM_drp( traceFWHM = TraceMask(data=fwhm, mask=mask | orig_trace._mask) - + # smooth the FWHM trace with a polynomial fit along dispersion axis (uncertain pixels are not used) # traceFWHM.fit_polynomial(deg=poly_disp, poly_kind=poly_kind, clip=clip) @@ -2463,7 +2463,7 @@ def extract_spectra( trace_fwhm.setData(data=numpy.ones(trace_mask._data.shape) * float(fwhm)) else: trace_fwhm.loadFitsData(in_fwhm, extension_data=0) - + # set up parallel run if parallel == "auto": fragments = multiprocessing.cpu_count() @@ -2508,7 +2508,7 @@ def extract_spectra( ) elif method == "aperture": (data, error, mask) = img.extractSpecAperture(trace_mask, aperture) - + # apply aperture correction given in in_acorr if in_acorr is not None and os.path.isfile(in_acorr): log.info(f"applying aperture correction in {os.path.basename(in_acorr)}") @@ -3288,7 +3288,7 @@ def detrend_frame( log.info( "target frame parameters: " f"MJD = {org_img._header['MJD']}, " - f"exptime = {org_img._header['EXPTIME']}, " + f"exptime = {exptime}, " f"camera = {org_img._header['CCD']}" ) @@ -3524,7 +3524,7 @@ def create_master_frame(in_images: List[str], out_image: str, batch_size: int = log.info(f"selecting {batch_size} random images") in_images = numpy.random.choice(in_images, batch_size, replace=False) nexp = len(in_images) - + # load images org_imgs, imagetyps = [], [] for in_image in in_images: @@ -3560,7 +3560,7 @@ def create_master_frame(in_images: List[str], out_image: str, batch_size: int = org_imgs, method="median", normalize=True, normalize_percentile=75 ) - # write output master + # write output master log.info(f"writing master frame to '{os.path.basename(out_image)}'") if master_mjd is not None: master_img._header["MJD"] = master_mjd @@ -3767,7 +3767,7 @@ def create_pixelmask(in_short_dark, in_long_dark, out_pixmask, in_flat_a=None, i # define ratio of darks ratio_dark = short_dark / long_dark - + # define quadrant sections sections = short_dark.getHdrValue("AMP? TRIMSEC") @@ -3782,7 +3782,7 @@ def create_pixelmask(in_short_dark, in_long_dark, out_pixmask, in_flat_a=None, i fig_rat, axs_rat = create_subplots(to_display=display_plots, nrows=2, ncols=2, figsize=(15, 10), sharex=True, sharey=True) plt.subplots_adjust(hspace=0.1, wspace=0.1) fig_rat.suptitle(os.path.basename(out_pixmask)) - + log.info(f"creating pixel mask using dark current threshold = {dc_low} {unit}") for iquad, section in enumerate(sections.values()): log.info(f"processing quadrant = {section}") @@ -3880,7 +3880,7 @@ def create_pixelmask(in_short_dark, in_long_dark, out_pixmask, in_flat_a=None, i # write output mask log.info(f"writing pixel mask to '{os.path.basename(out_pixmask)}'") pixmask.writeFitsData(out_pixmask) - + return pixmask, ratio_dark, ratio_flat @@ -3918,11 +3918,11 @@ def trace_fibers( header. The reference fiber positions are corrected using a cross-correlation between the reference fiber profile and the observed fiber profile. - + The first measurement of the fiber centroids is performed using individual Gaussian fittings per column in a selection of ncolumns across the X-axis. This first guess of the centroids is fitted with a polynomial. - + The centroids measured and fitted in the previous step are used to fit for the fiber profiles in the image along each ncolumns columns in the image. 
A number of nblocks of Gaussians are fitted simultaneously along each column. @@ -4091,7 +4091,7 @@ def trace_fibers( for i, icolumn in iterator: # extract column profile img_slice = img.getSlice(icolumn, axis="y") - + # get fiber positions along previous column if icolumn == LVM_REFERENCE_COLUMN: # trace reference column first or skip if already traced @@ -4101,7 +4101,7 @@ def trace_fibers( continue else: cent_guess, _, mask_guess = centroids.getSlice(columns[i-1], axis="y") - + # update masked fibers mask_guess |= numpy.isnan(cent_guess) @@ -4124,7 +4124,7 @@ def trace_fibers( # linearly interpolate coefficients at masked fibers log.info(f"interpolating coefficients at {bad_fibers.sum()} masked fibers") centroids.interpolate_coeffs() - + # select columns to fit for amplitudes, centroids and FWHMs per fiber block step = img._dim[1] // ncolumns_full columns = numpy.concatenate((numpy.arange(LVM_REFERENCE_COLUMN, 0, -step), numpy.arange(LVM_REFERENCE_COLUMN+step, img._dim[1], step))) @@ -4153,7 +4153,7 @@ def trace_fibers( # apply flux threshold cen_idx = cen_block.round().astype("int16") msk_block |= (img_slice._data[cen_idx] < counts_threshold) - + # mask bad fibers cen_block = cen_block[~msk_block] # initialize parameters with the full block size @@ -4178,7 +4178,7 @@ def trace_fibers( # store joint model mod_columns.append(mod_joint) - + # get parameters of joint model amp_slice = par_joint[0] cent_slice = par_joint[1] @@ -4216,7 +4216,7 @@ def trace_fibers( dummy_amp_mask[iblock] = amp_mask_split[j] dummy_cent_mask[iblock] = cent_mask_split[j] dummy_fwhm_mask[iblock] = fwhm_mask_split[j] - + # update traces trace_amp.setSlice(icolumn, axis="y", data=numpy.concatenate(dummy_amp), mask=numpy.concatenate(dummy_amp_mask)) trace_cent.setSlice(icolumn, axis="y", data=numpy.concatenate(dummy_cent), mask=numpy.concatenate(dummy_cent_mask)) @@ -4229,12 +4229,12 @@ def trace_fibers( trace_amp.setSlice(icolumn, axis="y", data=amp_slice, mask=amp_mask) trace_cent.setSlice(icolumn, axis="y", data=cent_slice, mask=cent_mask) trace_fwhm.setSlice(icolumn, axis="y", data=fwhm_slice, mask=fwhm_mask) - + # compute residuals integral_mod = numpy.trapz(mod_joint(img_slice._pixels), img_slice._pixels) or numpy.nan integral_dat = numpy.trapz(img_slice._data, img_slice._pixels) residuals.append((integral_dat - integral_mod) / integral_dat * 100) - + # compute fitted model stats chisq_red = bn.nansum((mod_joint(img_slice._pixels) - img_slice._data)[~img_slice._mask]**2 / img_slice._error[~img_slice._mask]**2) / (img._dim[0] - 1 - 3) log.info(f"joint model {chisq_red = :.2f}") @@ -4246,7 +4246,7 @@ def trace_fibers( log.info(f"joint model amplitudes: {min_amp = :.2f}, {max_amp = :.2f}, {median_amp = :.2f}") log.info(f"joint model centroids: {min_cent = :.2f}, {max_cent = :.2f}, {median_cent = :.2f}") log.info(f"joint model FWHMs: {min_fwhm = :.2f}, {max_fwhm = :.2f}, {median_fwhm = :.2f}") - + # smooth all trace by a polynomial if fit_poly: log.info(f"fitting peak trace with {deg_amp}-deg polynomial") @@ -4282,7 +4282,7 @@ def trace_fibers( trace_amp.interpolate_data(axis="Y") trace_cent.interpolate_data(axis="Y") trace_fwhm.interpolate_data(axis="Y") - + # write output traces log.info(f"writing amplitude trace to '{os.path.basename(out_trace_amp)}'") trace_amp.writeFitsData(out_trace_amp) @@ -4312,7 +4312,7 @@ def trace_fibers( figure_path="qa", label="residuals_int_columns" ) - + # profile models vs data fig, ax = create_subplots(to_display=display_plots, figsize=(15,7)) fig.suptitle(f"Profile fitting 
residuals for {camera = }") diff --git a/python/lvmdrp/functions/plotMethod.py b/python/lvmdrp/functions/plotMethod.py index c6fff6b8..ecf683ad 100644 --- a/python/lvmdrp/functions/plotMethod.py +++ b/python/lvmdrp/functions/plotMethod.py @@ -2,7 +2,7 @@ try: from matplotlib import pyplot as plt -except: +except ImportError: pass import numpy diff --git a/python/lvmdrp/functions/run_quickdrp.py b/python/lvmdrp/functions/run_quickdrp.py index 538184ae..80021d4f 100644 --- a/python/lvmdrp/functions/run_quickdrp.py +++ b/python/lvmdrp/functions/run_quickdrp.py @@ -198,7 +198,7 @@ def quick_science_reduction(expnum: int, use_fiducial_master: bool = False, mpixflat_path = os.path.join(masters_path, f"lvm-mpixflat-{sci_camera}.fits") mtrace_path = os.path.join(masters_path, f"lvm-mtrace-{sci_camera}.fits") mwidth_path = os.path.join(masters_path, f"lvm-mwidth-{sci_camera}.fits") - macorr_path = os.path.join(masters_path, f"lvm-apercorr-{sci_camera}.fits") + # macorr_path = os.path.join(masters_path, f"lvm-apercorr-{sci_camera}.fits") mwave_path = os.path.join(masters_path, f"lvm-mwave_{lamps}-{sci_camera}.fits") mlsf_path = os.path.join(masters_path, f"lvm-mlsf_{lamps}-{sci_camera}.fits") mflat_path = os.path.join(masters_path, f"lvm-mfiberflat-{sci_camera}.fits") @@ -213,7 +213,7 @@ def quick_science_reduction(expnum: int, use_fiducial_master: bool = False, mpixflat_path = None mtrace_path = path.full("lvm_master", drpver=drpver, kind="mtrace", **masters["trace"].to_dict()) mwidth_path = None - macorr_path = None + # macorr_path = None mwave_path = path.full("lvm_master", drpver=drpver, kind=f"mwave_{lamps}", **masters["wave"].to_dict()) mlsf_path = path.full("lvm_master", drpver=drpver, kind=f"mlsf_{lamps}", **masters["lsf"].to_dict()) mflat_path = path.full("lvm_master", drpver=drpver, kind="mfiberflat", **masters["fiberflat"].to_dict()) diff --git a/python/lvmdrp/functions/specialMethod.py b/python/lvmdrp/functions/specialMethod.py index e6fa3df9..e19bfd1d 100644 --- a/python/lvmdrp/functions/specialMethod.py +++ b/python/lvmdrp/functions/specialMethod.py @@ -166,14 +166,8 @@ def matchMasterTrace_drp( poly_cross = int(poly_cross) if poly_disp != "": poly_disp = int(poly_disp) - if start_pix == "": - start_wave = None - else: - start_wave = int(start_pix) - if end_pix == "": - end_wave = None - else: - end_wave = int(end_pix) + #start_wave = None if start_pix == "" else int(start_pix) + #end_wave = None if end_pix == "" else int(end_pix) calib_trc = loadRSS(CALIB_trace) master_trc = loadRSS(Master_trace) if split != "": @@ -307,7 +301,7 @@ def matchARCLamp_drp( spec = Spectrum1D(wave=fibers[good_pix], data=pix_shift[good_pix, i]) spec.smoothPoly(order=poly_cross, ref_base=fibers) pix_shift[:, i] = spec._data - pix_shift_mean = numpy.mean(pix_shift, 1) + #pix_shift_mean = numpy.mean(pix_shift, 1) rss_disp = loadRSS(disp_ref) for i in range(rss_disp._fibers): @@ -504,7 +498,7 @@ def matchSkySpecTime_drp( for j in range(err_sim): try: rnormal = numpy.random.normal(sky_data[:, i], sky_error[:, i]) - except: + except Exception: rnormal = numpy.zeros(sky_data[:, i].shape) err_fit = numpy.polyfit(sky_time, rnormal, poly_order) out[j] = numpy.mean(numpy.polyval(err_fit, object_time)) From 17c6d7d8311fe08a643ba6f8d91cb416d31e29d0 Mon Sep 17 00:00:00 2001 From: Brian Cherinka Date: Wed, 29 Nov 2023 16:17:45 -0500 Subject: [PATCH 04/18] linting and formatting fluxcal --- python/lvmdrp/functions/fluxCalMethod.py | 228 ++++++++++++++--------- 1 file changed, 138 insertions(+), 90 deletions(-) diff --git 
a/python/lvmdrp/functions/fluxCalMethod.py b/python/lvmdrp/functions/fluxCalMethod.py index 305d9eba..8e8588dc 100644 --- a/python/lvmdrp/functions/fluxCalMethod.py +++ b/python/lvmdrp/functions/fluxCalMethod.py @@ -69,6 +69,7 @@ "correctTelluric_drp", ] + def apply_fluxcal(in_rss: str, out_rss: str, display_plots: bool = False): """applies flux calibration to spectrograph-combined data @@ -104,7 +105,9 @@ def apply_fluxcal(in_rss: str, out_rss: str, display_plots: bool = False): # define exposure time factors exptimes = np.zeros(len(slitmap)) - exptimes[(slitmap["targettype"] == "science") | (slitmap["targettype"] == "SKY")] = rss._header["EXPTIME"] + exptimes[ + (slitmap["targettype"] == "science") | (slitmap["targettype"] == "SKY") + ] = rss._header["EXPTIME"] for std_hd in rss._fluxcal.colnames: exptime = rss._header[f"{std_hd[:-3]}EXP"] fiberid = rss._header[f"{std_hd[:-3]}FIB"] @@ -119,13 +122,15 @@ def apply_fluxcal(in_rss: str, out_rss: str, display_plots: bool = False): # weights = std_exp / std_exp.sum() # TODO: reject sensitivity curves based on the overall shape by normalizing using a median curve # calculate the biweight mean sensitivity - sens_arr = rss._fluxcal.to_pandas().values# * (std_exp / std_exp.sum())[None] + sens_arr = rss._fluxcal.to_pandas().values # * (std_exp / std_exp.sum())[None] sens_ave = biweight_location(sens_arr, axis=1, ignore_nan=True) sens_rms = biweight_scale(sens_arr, axis=1, ignore_nan=True) # fix all zeros if (sens_ave == 0).all() or np.isnan(sens_ave).all(): - log.warning("all sensitivity values are zero or NaN, impossible to flux-calibrate") + log.warning( + "all sensitivity values are zero or NaN, impossible to flux-calibrate" + ) sens_ave = np.ones_like(sens_ave) sens_rms = np.zeros_like(sens_rms) rss.setHdrValue("FLUXCAL", False, "flux-calibrated?") @@ -142,45 +147,54 @@ def apply_fluxcal(in_rss: str, out_rss: str, display_plots: bool = False): std_hd = rss._fluxcal.colnames[j][:-3] std_id = rss._header.get(f"{std_hd}FIB", "unknown") - ax.plot(rss._wave, sens_arr[:,j], "-", lw=1, label=std_id) + ax.plot(rss._wave, sens_arr[:, j], "-", lw=1, label=std_id) ax.plot(rss._wave, sens_ave, "-r", lw=2, label="mean") ax.set_yscale("log") ax.set_xlabel("wavelength (Angstrom)") ax.set_ylabel("sensitivity [(ergs/s/cm^2/A) / (e-/s/A)]") ax.legend(loc="upper right") fig.tight_layout() - save_fig(fig, product_path=out_rss, to_display=display_plots, figure_path="qa", label="fluxcal") + save_fig( + fig, + product_path=out_rss, + to_display=display_plots, + figure_path="qa", + label="fluxcal", + ) # flux-calibrate and extinction correct data # Note that we assume a constant extinction curve here! 
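[Note: the robust combination of the per-standard sensitivity curves above reduces to a biweight mean with a unity fallback. The standalone sketch below is illustrative only — the helper name and return convention are not part of the DRP — and assumes a (n_wave, n_std) float array with NaN marking rejected pixels:

    import numpy as np
    from astropy.stats import biweight_location, biweight_scale

    def combine_sensitivities(sens_arr):
        # robust mean and scatter across the standard stars, ignoring NaNs
        sens_ave = biweight_location(sens_arr, axis=1, ignore_nan=True)
        sens_rms = biweight_scale(sens_arr, axis=1, ignore_nan=True)
        if (sens_ave == 0).all() or np.isnan(sens_ave).all():
            # nothing usable: return a unit sensitivity so the data pass through unscaled
            return np.ones_like(sens_ave), np.zeros_like(sens_rms), False
        return sens_ave, sens_rms, True
]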
- log.info(f"Extinction correcting science and sky spectra, curve {os.getenv('LVMCORE_DIR')+'/etc/lco_extinction.txt'}") - txt = np.genfromtxt(os.getenv('LVMCORE_DIR')+'/etc/lco_extinction.txt') - lext, ext = txt[:,0], txt[:,1] + log.info( + f"Extinction correcting science and sky spectra, curve {os.getenv('LVMCORE_DIR')+'/etc/lco_extinction.txt'}" + ) + txt = np.genfromtxt(os.getenv("LVMCORE_DIR") + "/etc/lco_extinction.txt") + lext, ext = txt[:, 0], txt[:, 1] ext = np.interp(rss._wave, lext, ext) - sci_secz = rss._header['TESCIAM'] + sci_secz = rss._header["TESCIAM"] log.info("flux-calibrating data science and sky spectra") - rss._data *= sens_ave * 10**(0.4*ext*(sci_secz)) / exptimes[:, None] - rss._error *= sens_ave * 10**(0.4*ext*(sci_secz)) / exptimes[:, None] - rss._sky *= sens_ave * 10**(0.4*ext*(sci_secz)) / exptimes[:, None] - rss._sky_error *= sens_ave * 10**(0.4*ext*(sci_secz)) / exptimes[:, None] + rss._data *= sens_ave * 10 ** (0.4 * ext * (sci_secz)) / exptimes[:, None] + rss._error *= sens_ave * 10 ** (0.4 * ext * (sci_secz)) / exptimes[:, None] + rss._sky *= sens_ave * 10 ** (0.4 * ext * (sci_secz)) / exptimes[:, None] + rss._sky_error *= sens_ave * 10 ** (0.4 * ext * (sci_secz)) / exptimes[:, None] log.info(f"writing output file in {os.path.basename(out_rss)}") rss.writeFitsData(out_rss) return rss + def fluxcal_Gaia(camera, in_rss, plot=True, GAIA_CACHE_DIR=None): - ''' + """ Flux calibrate LVM data using the 12 spectra of stars observed through the Spec telescope. - Uses Gaia BP-RP spectra for calibration. To be replaced or extended by using fitted stellar + Uses Gaia BP-RP spectra for calibration. To be replaced or extended by using fitted stellar atmmospheres. - ''' - GAIA_CACHE_DIR = './' if GAIA_CACHE_DIR is None else GAIA_CACHE_DIR + """ + GAIA_CACHE_DIR = "./" if GAIA_CACHE_DIR is None else GAIA_CACHE_DIR log.info(f"Using Gaia CACHE DIR '{GAIA_CACHE_DIR}'") - + # load input RSS log.info(f"loading input RSS file '{os.path.basename(in_rss)}'") rss = RSS() @@ -199,8 +213,10 @@ def fluxcal_Gaia(camera, in_rss, plot=True, GAIA_CACHE_DIR=None): colnames = [f"{std_fib[:-3]}SEN" for std_fib in rss._header["STD*FIB"]] if len(colnames) == 0: NSTD = 15 - colnames = [f"STD{i}SEN" for i in range(1, NSTD+1)] - res = Table(np.full(w.size, np.nan, dtype=list(zip(colnames, ["f8"]*len(colnames))))) + colnames = [f"STD{i}SEN" for i in range(1, NSTD + 1)] + res = Table( + np.full(w.size, np.nan, dtype=list(zip(colnames, ["f8"] * len(colnames)))) + ) mean, rms = np.full(w.size, np.nan), np.full(w.size, np.nan) # set dummy sensitivity array in RSS object @@ -213,59 +229,67 @@ def fluxcal_Gaia(camera, in_rss, plot=True, GAIA_CACHE_DIR=None): log.warning("no standard star metadata found, skipping sensitivity measurement") rss.writeFitsData(in_rss) return res, mean, rms, rss - + # early stop if not standards exposed in current spectrograph if len(stds) == 0: - log.warning(f"no standard star acquired/exposed in spectrograph {sci_spec}, skipping sensitivity measurement") + log.warning( + f"no standard star acquired/exposed in spectrograph {sci_spec}, skipping sensitivity measurement" + ) rss.writeFitsData(in_rss) return res, mean, rms, rss # load extinction curve # Note that we assume a constant extinction curve here! 
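[Note: since the same constant LCO extinction curve is applied in both routines, the correction can be factored out. A minimal sketch follows; the helper name is hypothetical, and the two-column wavelength / k(lambda) layout of lco_extinction.txt is assumed from the reads above:

    import os
    import numpy as np

    def extinction_factor(wave, secz, curve_path=None):
        # multiplicative factor 10**(0.4 * k(lambda) * airmass) on the grid `wave`
        if curve_path is None:
            curve_path = os.path.join(os.getenv("LVMCORE_DIR", "."), "etc", "lco_extinction.txt")
        lext, k = np.genfromtxt(curve_path, unpack=True)
        return 10 ** (0.4 * np.interp(wave, lext, k) * secz)

    # usage with hypothetical arrays: flux_corrected = flux * extinction_factor(w, sci_secz)
]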
- txt = np.genfromtxt(os.getenv('LVMCORE_DIR')+'/etc/lco_extinction.txt') - lext, ext = txt[:,0], txt[:,1] + txt = np.genfromtxt(os.getenv("LVMCORE_DIR") + "/etc/lco_extinction.txt") + lext, ext = txt[:, 0], txt[:, 1] ext = np.interp(w, lext, ext) if plot: plt.subplot fig1 = plt.figure(1) - frame1 = fig1.add_axes((.1,.3,.8,.6)) + frame1 = fig1.add_axes((0.1, 0.3, 0.8, 0.6)) frame1.set_xticklabels([]) # load the sky masks m = skyMethod.get_sky_mask_uves(w, width=3) m2 = None - if camera[0] == 'z': + if camera[0] == "z": m2 = skyMethod.get_z_continuum_mask(w) # iterate over standard stars, derive sensitivity curve for each for s in stds: - nn, fiber, gaia_id, exptime, secz = s # unpack standard star tuple - + nn, fiber, gaia_id, exptime, secz = s # unpack standard star tuple + # find the fiber with our spectrum of that Gaia star, if it is not in the current spectrograph, continue - select = (slitmap["orig_ifulabel"] == fiber) + select = slitmap["orig_ifulabel"] == fiber fibidx = np.where(select)[0] - log.info(f"standard fiber '{fiber}', index '{fibidx}', star '{gaia_id}', exptime '{exptime:.2f}', secz '{secz:.2f}'") + log.info( + f"standard fiber '{fiber}', index '{fibidx}', star '{gaia_id}', exptime '{exptime:.2f}', secz '{secz:.2f}'" + ) # load Gaia BP-RP spectrum from cache, or download from webapp try: - gw, gf = ancillary_func.retrive_gaia_star(gaia_id, GAIA_CACHE_DIR=GAIA_CACHE_DIR) - stdflux = np.interp(w, gw, gf) # interpolate to our wavelength grid + gw, gf = ancillary_func.retrive_gaia_star( + gaia_id, GAIA_CACHE_DIR=GAIA_CACHE_DIR + ) + stdflux = np.interp(w, gw, gf) # interpolate to our wavelength grid except ancillary_func.GaiaStarNotFound as e: log.warning(e) continue - + # divide by our exptime for that standard - spec = rss._data[fibidx[0],:]/exptime - + spec = rss._data[fibidx[0], :] / exptime + # interpolate over bright sky lines - spec = ancillary_func.interpolate_mask(w, spec, m, fill_value='extrapolate') - if camera[0] == 'z': - spec = ancillary_func.interpolate_mask(w, spec, ~m2, fill_value='extrapolate') - + spec = ancillary_func.interpolate_mask(w, spec, m, fill_value="extrapolate") + if camera[0] == "z": + spec = ancillary_func.interpolate_mask( + w, spec, ~m2, fill_value="extrapolate" + ) + # correct for extinction - spec *= 10**(0.4*ext*secz) + spec *= 10 ** (0.4 * ext * secz) # TODO: fit continuum to instrumental std spectrum (stdflux) and normalize # TODO: mask telluric absorption lines from stdflux @@ -273,44 +297,63 @@ def fluxcal_Gaia(camera, in_rss, plot=True, GAIA_CACHE_DIR=None): # TODO: downgrade best fit template to instrumental LSF and calculate sensitivity curve (after lifting telluric mask) # divide to find sensitivity and smooth - sens = stdflux/spec + sens = stdflux / spec wgood, sgood = filter_channel(w, sens, 2) - s = interpolate.make_smoothing_spline(wgood, sgood, lam=1e4) + s = interpolate.make_smoothing_spline(wgood, sgood, lam=1e4) res[f"STD{nn}SEN"] = s(w).astype(np.float32) # caluculate SDSS g band magnitudes for QC mAB_std = np.round(ancillary_func.spec_to_LVM_mAB(camera, w, stdflux), 2) - mAB_obs = np.round(ancillary_func.spec_to_LVM_mAB(camera, w[np.isfinite(spec)], spec[np.isfinite(spec)]), 2) + mAB_obs = np.round( + ancillary_func.spec_to_LVM_mAB( + camera, w[np.isfinite(spec)], spec[np.isfinite(spec)] + ), + 2, + ) # update input file header label = camera[0].upper() rss.setHdrValue(f"STD{nn}{label}AB", mAB_std, f"Gaia AB mag in {label}-band") rss.setHdrValue(f"STD{nn}{label}IN", mAB_obs, f"Obs AB mag in {label}-band") - log.info(f"AB 
mag in LVM_{camera[0]}: Gaia {mAB_std:.2f}, instrumental {mAB_obs:.2f}") + log.info( + f"AB mag in LVM_{camera[0]}: Gaia {mAB_std:.2f}, instrumental {mAB_obs:.2f}" + ) if plot: - plt.plot(wgood, sgood, 'r.', markersize=4) + plt.plot(wgood, sgood, "r.", markersize=4) plt.plot(w, res[f"STD{nn}SEN"], linewidth=0.5) - #plt.ylim(0,0.1e-11) + # plt.ylim(0,0.1e-11) rms = biweight_scale(res.to_pandas().values, axis=1, ignore_nan=True) mean = biweight_location(res.to_pandas().values, axis=1, ignore_nan=True) if plot: - plt.ylabel('sensitivity [(ergs/s/cm^2/A) / (e-/s/A)]') - plt.xlabel('wavelength [A]') + plt.ylabel("sensitivity [(ergs/s/cm^2/A) / (e-/s/A)]") + plt.xlabel("wavelength [A]") plt.ylim(1e-14, 0.1e-11) plt.semilogy() - fig1.add_axes((.1,.1,.8,.2)) - plt.plot([w[0],w[-1]], [0.05, 0.05], color='k', linewidth=1, linestyle='dotted') - plt.plot([w[0],w[-1]], [-0.05, -0.05], color='k', linewidth=1, linestyle='dotted') - plt.plot([w[0],w[-1]], [0.1, 0.1], color='k', linewidth=1, linestyle='dashed') - plt.plot([w[0],w[-1]], [-0.1, -0.1], color='k', linewidth=1, linestyle='dashed') - plt.plot(w, rms/mean) - plt.plot(w, -rms/mean) + fig1.add_axes((0.1, 0.1, 0.8, 0.2)) + plt.plot( + [w[0], w[-1]], [0.05, 0.05], color="k", linewidth=1, linestyle="dotted" + ) + plt.plot( + [w[0], w[-1]], [-0.05, -0.05], color="k", linewidth=1, linestyle="dotted" + ) + plt.plot([w[0], w[-1]], [0.1, 0.1], color="k", linewidth=1, linestyle="dashed") + plt.plot( + [w[0], w[-1]], [-0.1, -0.1], color="k", linewidth=1, linestyle="dashed" + ) + plt.plot(w, rms / mean) + plt.plot(w, -rms / mean) plt.ylim(-0.2, 0.2) - plt.ylabel('relative residuals') - plt.xlabel('wavelength [A]') - save_fig(plt.gcf(), product_path=in_rss, to_display=False, figure_path="qa", label="fluxcal") + plt.ylabel("relative residuals") + plt.xlabel("wavelength [A]") + save_fig( + plt.gcf(), + product_path=in_rss, + to_display=False, + figure_path="qa", + label="fluxcal", + ) # update sensitivity extension rss.set_fluxcal(fluxcal=res) @@ -318,62 +361,67 @@ def fluxcal_Gaia(camera, in_rss, plot=True, GAIA_CACHE_DIR=None): return res, mean, rms, rss + def retrieve_header_stars(rss): - ''' + """ Retrieve fiber, Gaia ID, exposure time and airmass for the 12 standard stars in the header. return a list of tuples of the above quatities. 
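[Note: the only derived quantity in that tuple is the airmass, obtained by transforming the header RA/Dec to the LCO horizontal frame at the acquisition time; a self-contained example with made-up coordinates and timestamp:

    import astropy.units as u
    from astropy.coordinates import AltAz, EarthLocation, SkyCoord
    from astropy.time import Time

    lco = EarthLocation(lat=-29.009 * u.deg, lon=-70.6887 * u.deg, height=2800 * u.m)
    star = SkyCoord(150.0, -35.0, unit="deg")   # placeholder for the STDnRA/STDnDE header values
    t0 = Time("2023-11-29T03:00:00")            # placeholder for the STDnT0 header value
    secz = star.transform_to(AltAz(obstime=t0, location=lco)).secz.value
]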
- ''' - lco = EarthLocation(lat=-29.008999964*u.deg, lon=-70.688663912*u.deg, height=2800*u.m) + """ + lco = EarthLocation( + lat=-29.008999964 * u.deg, lon=-70.688663912 * u.deg, height=2800 * u.m + ) h = rss._header - slitmap = rss._slitmap[rss._slitmap["spectrographid"] == int(h['SPEC'][-1])] + slitmap = rss._slitmap[rss._slitmap["spectrographid"] == int(h["SPEC"][-1])] # retrieve the data for the 12 standards from the header stddata = [] for i in range(12): - stdi = 'STD'+str(i+1) - if h[stdi+'ACQ'] and h[stdi+'FIB'] in slitmap['orig_ifulabel']: - gaia_id = h[stdi+'ID'] - if gaia_id==None: - log.warning(f'{stdi} acquired but Gaia ID is {gaia_id}') + stdi = "STD" + str(i + 1) + if h[stdi + "ACQ"] and h[stdi + "FIB"] in slitmap["orig_ifulabel"]: + gaia_id = h[stdi + "ID"] + if gaia_id is None: + log.warning(f"{stdi} acquired but Gaia ID is {gaia_id}") continue - fiber = h[stdi+'FIB'] - obstime = Time(h[stdi+'T0']) - exptime = h[stdi+'EXP'] - c = SkyCoord(float(h[stdi+'RA']), float(h[stdi+'DE']), unit="deg") - stdT = c.transform_to(AltAz(obstime=obstime,location=lco)) + fiber = h[stdi + "FIB"] + obstime = Time(h[stdi + "T0"]) + exptime = h[stdi + "EXP"] + c = SkyCoord(float(h[stdi + "RA"]), float(h[stdi + "DE"]), unit="deg") + stdT = c.transform_to(AltAz(obstime=obstime, location=lco)) secz = stdT.secz.value - #print(gid, fib, et, secz) - stddata.append((i+1, fiber, gaia_id, exptime, secz)) + # print(gid, fib, et, secz) + stddata.append((i + 1, fiber, gaia_id, exptime, secz)) return stddata + def mean_absolute_deviation(vals): - ''' + """ Robust estimate of RMS - see https://en.wikipedia.org/wiki/Median_absolute_deviation - ''' + """ mval = np.nanmedian(vals) - rms = 1.4826*np.nanmedian(np.abs(vals-mval)) + rms = 1.4826 * np.nanmedian(np.abs(vals - mval)) return mval, rms - #ok=np.abs(vals-mval)<4*rms + # ok=np.abs(vals-mval)<4*rms + def butter_lowpass_filter(data, cutoff_freq, nyq_freq, order=4): normal_cutoff = float(cutoff_freq) / nyq_freq - b, a = signal.butter(order, normal_cutoff, btype='lowpass') + b, a = signal.butter(order, normal_cutoff, btype="lowpass") y = signal.filtfilt(b, a, data) return y + def filter_channel(w, f, k=3): c = np.where(np.isfinite(f)) s = butter_lowpass_filter(f[c], 0.01, 2) res = s - f[c] - #plt.plot(w[c], f[c], 'k.') - #plt.plot(w[c], s, 'b-') + # plt.plot(w[c], f[c], 'k.') + # plt.plot(w[c], s, 'b-') mres, rms = mean_absolute_deviation(res) - good = np.where(np.abs(res-mres) Date: Wed, 29 Nov 2023 16:41:43 -0500 Subject: [PATCH 05/18] lint and format of gmos and vimos --- python/lvmdrp/functions/gmosMethod.py | 303 +++++++++++++------------ python/lvmdrp/functions/vimosMethod.py | 266 +++++++++++----------- 2 files changed, 287 insertions(+), 282 deletions(-) diff --git a/python/lvmdrp/functions/gmosMethod.py b/python/lvmdrp/functions/gmosMethod.py index 96e66fbf..9dbdb4ab 100644 --- a/python/lvmdrp/functions/gmosMethod.py +++ b/python/lvmdrp/functions/gmosMethod.py @@ -1,12 +1,13 @@ import os - +import numpy from astropy.io import fits as pyfits from lvmdrp.core.header import Header from lvmdrp.core.image import Image -from lvmdrp.functions.headerMethod import * -from lvmdrp.functions.imageMethod import * -from lvmdrp.functions.rssMethod import * +from lvmdrp.functions import skyMethod as sky +from lvmdrp.functions import imageMethod as im +from lvmdrp.functions import rssMethod as rss +from lvmdrp.functions import fluxCalMethod as flux description = "Provides Methods to reduce GMOS data" @@ -38,7 +39,7 @@ def createCCDfromArchive_drp( CCD = 
numpy.zeros((4608, 6144), dtype=numpy.float32) CCD_err = numpy.zeros((4608, 6144), dtype=numpy.float32) CCD_mask = numpy.zeros((4608, 6144), dtype="bool") - if single == True: + if single is True: sections = sections / 3 if master_bias is not None: hdu_bias = pyfits.open(master_bias, do_not_scale_image_data=True, memmap=False) @@ -61,7 +62,7 @@ def createCCDfromArchive_drp( temp_dat = hdu[i].header["DATASEC"].replace("[", "").replace("]", "").split(",") DATAsec_x = [int(temp_dat[0].split(":")[0]) - 1, int(temp_dat[0].split(":")[1])] DATAsec_y = [int(temp_dat[1].split(":")[0]) - 1, int(temp_dat[1].split(":")[1])] - img = hdu[i].data[DATAsec_y[0] : DATAsec_y[1], DATAsec_x[0] : DATAsec_x[1]] + img = hdu[i].data[DATAsec_y[0]: DATAsec_y[1], DATAsec_x[0]: DATAsec_x[1]] temp_bias = ( hdu[i].header["BIASSEC"].replace("[", "").replace("]", "").split(",") @@ -75,7 +76,7 @@ def createCCDfromArchive_drp( int(temp_bias[1].split(":")[1]), ) bias = numpy.median( - hdu[i].data[BIASsec_y[0] : BIASsec_y[1], BIASsec_x[0] : BIASsec_x[1]] + hdu[i].data[BIASsec_y[0]: BIASsec_y[1], BIASsec_x[0]: BIASsec_x[1]] ) gain = float(hdu[i].header["gain"]) rdnoise = float(hdu[i].header["rdnoise"]) @@ -85,125 +86,125 @@ def createCCDfromArchive_drp( # gain = hdu_bias[i].header['GAINORIG'] # rdnoise = hdu_bias[i].header['RONORIG'] bias_bias = numpy.median( - img_bias[BIASsec_y[0] : BIASsec_y[1], BIASsec_x[0] : BIASsec_x[1]] + img_bias[BIASsec_y[0]: BIASsec_y[1], BIASsec_x[0]: BIASsec_x[1]] ) bias = img_bias[ - DATAsec_y[0] : DATAsec_y[1], DATAsec_x[0] : DATAsec_x[1] + DATAsec_y[0]: DATAsec_y[1], DATAsec_x[0]: DATAsec_x[1] ] - (bias_bias - bias) if master_bias is not None: img[(img - bias) <= 0] = bias[(img - bias) <= 0] else: img[(img - bias) <= 0] = bias - CCD[CCDsec_y[0] : CCDsec_y[1], CCDsec_x[0] : CCDsec_x[1]] = (img - bias) * gain - CCD_err[CCDsec_y[0] : CCDsec_y[1], CCDsec_x[0] : CCDsec_x[1]] = numpy.sqrt( + CCD[CCDsec_y[0]: CCDsec_y[1], CCDsec_x[0]: CCDsec_x[1]] = (img - bias) * gain + CCD_err[CCDsec_y[0]: CCDsec_y[1], CCDsec_x[0]: CCDsec_x[1]] = numpy.sqrt( (img - bias) * gain + rdnoise**2 ) select_nan = numpy.isnan(CCD_err) CCD_err[select_nan] = rdnoise if mask_saturated: select = img == saturate_value - CCD_mask[CCDsec_y[0] : CCDsec_y[1], CCDsec_x[0] : CCDsec_x[1]] = select + CCD_mask[CCDsec_y[0]: CCDsec_y[1], CCDsec_x[0]: CCDsec_x[1]] = select if splits[0] == 0: if not mask_saturated: CCD1_out = Image( - data=CCD[:, 0 : 2046 / bins[0] + 1], - error=CCD_err[:, 0 : 0 : 2046 / bins[0] + 1], + data=CCD[:, 0: 2046 / bins[0] + 1], + error=CCD_err[:, 0: 0: 2046 / bins[0] + 1], header=hdr, ) else: CCD1_out = Image( - data=CCD[:, 0 : 2046 / bins[0] + 1], - error=CCD_err[:, 0 : 2046 / bins[0] + 1], - mask=CCD_mask[:, 0 : 2046 / bins[0] + 1], + data=CCD[:, 0: 2046 / bins[0] + 1], + error=CCD_err[:, 0: 2046 / bins[0] + 1], + mask=CCD_mask[:, 0: 2046 / bins[0] + 1], header=hdr, ) else: if not mask_saturated: CCD1_out = Image( - data=CCD[:, splits[0] / bins[0] : 2046 / bins[0] + 1], - error=CCD_err[:, splits[0] / bins[0] : 2046 / bins[0] + 1], + data=CCD[:, splits[0] / bins[0]: 2046 / bins[0] + 1], + error=CCD_err[:, splits[0] / bins[0]: 2046 / bins[0] + 1], header=hdr, ) else: CCD1_out = Image( - data=CCD[:, splits[0] / bins[0] : 2046 / bins[0] + 1], - error=CCD_err[:, splits[0] / bins[0] : 2046 / bins[0] + 1], - mask=CCD_mask[:, splits[0] / bins[0] : 2046 / bins[0] + 1], + data=CCD[:, splits[0] / bins[0]: 2046 / bins[0] + 1], + error=CCD_err[:, splits[0] / bins[0]: 2046 / bins[0] + 1], + mask=CCD_mask[:, splits[0] / bins[0]: 
2046 / bins[0] + 1], header=hdr, ) CCD1_out.writeFitsData(prefix + ".CCD1.fits") if splits[1] == 0 and splits[2] == 0: if not mask_saturated: CCD2_out = Image( - data=CCD[:, 2048 / bins[0] : 4094 / bins[0] + 1], - error=CCD_err[:, 2048 / bins[0] : 4094 / bins[0] + 1], + data=CCD[:, 2048 / bins[0]: 4094 / bins[0] + 1], + error=CCD_err[:, 2048 / bins[0]: 4094 / bins[0] + 1], header=hdr, ) else: CCD2_out = Image( - data=CCD[:, 2048 / bins[0] : 4094 / bins[0] + 1], - error=CCD_err[:, 2048 / bins[0] : 4094 / bins[0] + 1], - mask=CCD_mask[:, 2048 / bins[0] : 4094 / bins[0] + 1], + data=CCD[:, 2048 / bins[0]: 4094 / bins[0] + 1], + error=CCD_err[:, 2048 / bins[0]: 4094 / bins[0] + 1], + mask=CCD_mask[:, 2048 / bins[0]: 4094 / bins[0] + 1], header=hdr, ) CCD2_out.writeFitsData(prefix + ".CCD2.fits") else: if not mask_saturated: CCD2_out = Image( - data=CCD[:, 2048 : 2048 + splits[1]], - error=CCD_err[:, 2048 : 2048 + splits[1]], + data=CCD[:, 2048: 2048 + splits[1]], + error=CCD_err[:, 2048: 2048 + splits[1]], header=hdr, ) else: CCD2_out = Image( - data=CCD[:, 2048 : 2048 + splits[1]], - error=CCD_err[:, 2048 : 2048 + splits[1]], - mask=CCD_mask[:, 2048 : 2048 + splits[1]], + data=CCD[:, 2048: 2048 + splits[1]], + error=CCD_err[:, 2048: 2048 + splits[1]], + mask=CCD_mask[:, 2048: 2048 + splits[1]], header=hdr, ) CCD2_out.writeFitsData(prefix + ".CCD2L.fits") if not mask_saturated: CCD2_out = Image( - data=CCD[:, 2048 + splits[2] : 4095], - error=CCD_err[:, 2048 + splits[2] : 4095], + data=CCD[:, 2048 + splits[2]: 4095], + error=CCD_err[:, 2048 + splits[2]: 4095], header=hdr, ) else: CCD2_out = Image( - data=CCD[:, 2048 + splits[2] : 4095], - error=CCD_err[:, 2048 + splits[2] : 4095], - mask=CCD_mask[:, 2048 + splits[2] : 4095], + data=CCD[:, 2048 + splits[2]: 4095], + error=CCD_err[:, 2048 + splits[2]: 4095], + mask=CCD_mask[:, 2048 + splits[2]: 4095], header=hdr, ) CCD2_out.writeFitsData(prefix + ".CCD2R.fits") if splits[3] == 0: if not mask_saturated: CCD3_out = Image( - data=CCD[:, 4096 / bins[0] : 6142 / bins[0] + 1], - error=CCD_err[:, 4096 / bins[0] : 6142 / bins[0] + 1], + data=CCD[:, 4096 / bins[0]: 6142 / bins[0] + 1], + error=CCD_err[:, 4096 / bins[0]: 6142 / bins[0] + 1], header=hdr, ) else: CCD3_out = Image( - data=CCD[:, 4096 / bins[0] : 6142 / bins[0] + 1], - error=CCD_err[:, 4096 / bins[0] : 6142 / bins[0] + 1], - mask=CCD_mask[:, 4096 / bins[0] : 6142 / bins[0] + 1], + data=CCD[:, 4096 / bins[0]: 6142 / bins[0] + 1], + error=CCD_err[:, 4096 / bins[0]: 6142 / bins[0] + 1], + mask=CCD_mask[:, 4096 / bins[0]: 6142 / bins[0] + 1], header=hdr, ) else: if not mask_saturated: CCD3_out = Image( - data=CCD[:, 4096 / bins[0] : 4096 / bins[0] + splits[3]], - error=CCD_err[:, 4096 / bins[0] : 4096 / bins[0] + splits[3]], + data=CCD[:, 4096 / bins[0]: 4096 / bins[0] + splits[3]], + error=CCD_err[:, 4096 / bins[0]: 4096 / bins[0] + splits[3]], header=hdr, ) else: CCD3_out = Image( - data=CCD[:, 4096 / bins[0] : 4096 / bins[0] + splits[3]], - error=CCD_err[:, 4096 / bins[0] : 4096 / bins[0] + splits[3]], - mask=CCD_mask[:, 4096 / bins[0] : 4096 / bins[0] + splits[3]], + data=CCD[:, 4096 / bins[0]: 4096 / bins[0] + splits[3]], + error=CCD_err[:, 4096 / bins[0]: 4096 / bins[0] + splits[3]], + mask=CCD_mask[:, 4096 / bins[0]: 4096 / bins[0] + splits[3]], header=hdr, ) CCD3_out.writeFitsData(prefix + ".CCD3.fits") @@ -301,7 +302,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") ) if arc != "": createCCDfromArchive_drp(arc, "ARC", master_bias=master_bias, 
splits=splits) - findPeaksAuto_drp( + im.find_peaks_auto( "FLAT.CCD1.fits", "peaks.CCD1", nfibers=750, @@ -310,7 +311,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") median_box=20, verbose=0, ) - findPeaksAuto_drp( + im.find_peaks_auto( "FLAT.CCD2L.fits", "peaks.CCD2L", nfibers=750, @@ -319,7 +320,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") median_box=20, verbose=0, ) - findPeaksMaster2_drp( + im.findPeaksMaster2_drp( "FLAT.CCD2R.fits", "%s/master_peaks.BLUE_slit_2019" % (gmos_calib), "peaks.CCD2R", @@ -329,7 +330,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") median_box=20, verbose=0, ) - findPeaksMaster2_drp( + im.findPeaksMaster2_drp( "FLAT.CCD3.fits", "%s/master_peaks.BLUE_slit_2019" % (gmos_calib), "peaks.CCD3", @@ -339,7 +340,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") median_box=20, verbose=0, ) - tracePeaks_drp( + im.trace_peaks( "FLAT.CCD1.fits", "peaks.CCD1", "tjunk.CCD1.trc.fits", @@ -350,7 +351,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") median_cross=1, threshold_peak=400, ) - tracePeaks_drp( + im.trace_peaks( "FLAT.CCD2L.fits", "peaks.CCD2L", "tjunk.CCD2L.trc.fits", @@ -361,7 +362,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") threshold_peak=400, median_cross=1, ) - tracePeaks_drp( + im.trace_peaks( "FLAT.CCD2R.fits", "peaks.CCD2R", "tjunk.CCD2R.trc.fits", @@ -372,7 +373,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") median_cross=1, threshold_peak=400, ) - tracePeaks_drp( + im.trace_peaks( "FLAT.CCD3.fits", "peaks.CCD3", "tjunk.CCD3.trc.fits", @@ -384,7 +385,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") threshold_peak=400, ) for i in range(len(ccds)): - subtractStraylight_drp( + im.subtractStraylight_drp( "FLAT.%s.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "FLAT.%s.back.fits" % (ccds[i]), @@ -395,7 +396,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") minfit=20, maxfit=-10, ) - traceFWHM_drp( + im.traceFWHM_drp( "FLAT.%s.stray.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "tjunk.%s.fwhm.fits" % (ccds[i]), @@ -407,7 +408,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") threshold_flux=2000, ) if arc != "": - LACosmic_drp( + im.LACosmic_drp( "ARC.%s.fits" % (ccds[i]), "ARC.%s.cosmic.fits" % (ccds[i]), sigma_det=5.0, @@ -420,7 +421,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") increase_radius=1, parallel=2, ) - subtractStraylight_drp( + im.subtractStraylight_drp( "ARC.%s.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "ARC.%s.back.fits" % (ccds[i]), @@ -431,14 +432,14 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") minfit=20, maxfit=-10, ) - extractSpec_drp( + im.extract_spectra( "ARC.%s.cosmic.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "ARC.%s.ms.fits" % (ccds[i]), method="aperture", aperture=5, ) - detWaveSolution_drp( + rss.determine_wavelength_solution( "ARC.%s.ms.fits" % (ccds[i]), "ARC.%s" % (ccds[i]), "%s/arc.%s.%s.txt" % (gmos_calib, ccds[i], setup), @@ -450,7 +451,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") aperture=20, ) if fiberflat == 1: - extractSpec_drp( + im.extract_spectra( "FLAT.%s.stray.fits" % (ccds[i]), "tjunk.%s.trc.fits" % 
(ccds[i]), "FLAT.%s.ms.fits" % (ccds[i]), @@ -458,16 +459,16 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), parallel=1, ) - createPixTable_drp( + rss.create_pixel_table( "FLAT.%s.ms.fits" % (ccds[i]), "FLAT.%s.rss.fits" % (ccds[i]), "ARC.%s.disp.fits" % (ccds[i]), "ARC.%s.res.fits" % (ccds[i]), ) if fiberflat == 1: - glueRSS_drp("FLAT.CCD1.rss.fits,FLAT.CCD2L.rss.fits", "FLAT_red.rss.fits") - glueRSS_drp("FLAT.CCD2R.rss.fits,FLAT.CCD3.rss.fits", "FLAT_blue.rss.fits") - resampleWave_drp( + rss.glueRSS_drp("FLAT.CCD1.rss.fits,FLAT.CCD2L.rss.fits", "FLAT_red.rss.fits") + rss.glueRSS_drp("FLAT.CCD2R.rss.fits,FLAT.CCD3.rss.fits", "FLAT_blue.rss.fits") + rss.resample_wavelength( "FLAT_blue.rss.fits", "FLAT_blue.disp_cor.fits", start_wave=start_wave, @@ -477,7 +478,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") method="linear", parallel=1, ) - resampleWave_drp( + rss.resample_wavelength( "FLAT_red.rss.fits", "FLAT_red.disp_cor.fits", start_wave=start_wave, @@ -486,10 +487,10 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") err_sim=0, method="linear", ) - mergeRSS_drp( + rss.mergeRSS_drp( "FLAT_red.disp_cor.fits,FLAT_blue.disp_cor.fits", "FLAT.disp_cor.fits" ) - createFiberFlat_drp("FLAT.disp_cor.fits", "FIBERFLAT.fits", clip="0.25,2.0") + rss.create_fiberflat("FLAT.disp_cor.fits", "FIBERFLAT.fits", clip="0.25,2.0") elif IFU_mask == "IFU-R" or IFU_mask == "IFU-B": ccds = numpy.array(["CCD1", "CCD2", "CCD3"]) @@ -550,12 +551,12 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") elif reduce_ccd == "CCD3": indices = [2] for i in indices: - addCCDMask_drp( + im.addCCDMask_drp( "FLAT.%s.fits" % (ccds[i]), "%s/MASK.%s.Hamamatsu.fits" % (gmos_calib, ccds[i]), ) if instrument == "GMOS-N": - findPeaksAuto_drp( + im.find_peaks_auto( "FLAT.%s.fits" % (ccds[i]), "peaks.%s" % (ccds[i]), nfibers=750, @@ -566,7 +567,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") ) else: # findPeaksOffset_drp('FLAT.%s.fits'%(ccds[i]), '%s/master_peaks.RED_slit'%(gmos_calib), 'peaks.%s'%(ccds[i]), threshold=7000, slice=slice_CCD[i],median_cross=1, median_box=20) - findPeaksMaster_drp( + im.findPeaksMaster_drp( "FLAT.%s.fits" % (ccds[i]), "%s/master_peaks.RED_slit" % (gmos_calib), "peaks.%s" % (ccds[i]), @@ -574,20 +575,20 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") median_cross=1, median_box=10, ) - # tracePeaks_drp('FLAT.%s.fits'%(ccds[i]), 'peaks.%s'%(ccds[i]), 'tjunk.%s.trc.fits'%(ccds[i]), poly_disp='-5', steps=20/bins[1], max_diff=1,threshold_peak=50, median_box=50, verbose=1) - # subtractStraylight_drp('FLAT.%s.fits' % (ccds[i]), 'tjunk.%s.trc.fits' % (ccds[i]), 'FLAT.%s.back.fits' % (ccds[i]), 'FLAT.%s.stray.fits' % (ccds[i]), aperture=10, poly_cross=6, smooth_disp=70/bins[0],smooth_gauss=10) - # traceFWHM_drp('FLAT.%s.stray.fits'%(ccds[i]), 'tjunk.%s.trc.fits'%(ccds[i]), 'tjunk.%s.fwhm.fits'%(ccds[i]), blocks=16, steps=50/bins[0], poly_disp=-5, init_fwhm=3, clip='1.0,8.0', threshold_flux=500) + # im.trace_peaks('FLAT.%s.fits'%(ccds[i]), 'peaks.%s'%(ccds[i]), 'tjunk.%s.trc.fits'%(ccds[i]), poly_disp='-5', steps=20/bins[1], max_diff=1,threshold_peak=50, median_box=50, verbose=1) + # im.subtractStraylight_drp('FLAT.%s.fits' % (ccds[i]), 'tjunk.%s.trc.fits' % (ccds[i]), 'FLAT.%s.back.fits' % (ccds[i]), 'FLAT.%s.stray.fits' % (ccds[i]), aperture=10, poly_cross=6, 
smooth_disp=70/bins[0],smooth_gauss=10) + # im.traceFWHM_drp('FLAT.%s.stray.fits'%(ccds[i]), 'tjunk.%s.trc.fits'%(ccds[i]), 'tjunk.%s.fwhm.fits'%(ccds[i]), blocks=16, steps=50/bins[0], poly_disp=-5, init_fwhm=3, clip='1.0,8.0', threshold_flux=500) if arc != "": - # LACosmic_drp('ARC.%s.fits'%(ccds[i]), 'ARC.%s.cosmic.fits'%(ccds[i]), sigma_det=5.0, flim=2.0, iter=4, error_box='20,3', replace_box='20,3', rdnoise=3.5,sig_gauss='1.4,1.4', increase_radius=1, parallel=2) - # addCCDMask_drp('ARC.%s.cosmic.fits'%(ccds[i]),'%s/MASK.%s.Hamamatsu.fits'%(gmos_calib,ccds[i])) - extractSpec_drp( + # im.LACosmic_drp('ARC.%s.fits'%(ccds[i]), 'ARC.%s.cosmic.fits'%(ccds[i]), sigma_det=5.0, flim=2.0, iter=4, error_box='20,3', replace_box='20,3', rdnoise=3.5,sig_gauss='1.4,1.4', increase_radius=1, parallel=2) + # im.addCCDMask_drp('ARC.%s.cosmic.fits'%(ccds[i]),'%s/MASK.%s.Hamamatsu.fits'%(gmos_calib,ccds[i])) + im.extract_spectra( "ARC.%s.cosmic.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "ARC.%s.ms.fits" % (ccds[i]), method="aperture", aperture=5, ) - detWaveSolution_drp( + rss.determine_wavelength_solution( "ARC.%s.ms.fits" % (ccds[i]), "ARC.%s" % (ccds[i]), "%s/arc.%s.%s.txt" % (gmos_calib, ccds[i], setup), @@ -599,7 +600,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") rel_flux_limits="0.1,6.0", ) if fiberflat == 1: - extractSpec_drp( + im.extract_spectra( "FLAT.%s.stray.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "FLAT.%s.ms.fits" % (ccds[i]), @@ -607,7 +608,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), parallel=1, ) - createPixTable_drp( + rss.create_pixel_table( "FLAT.%s.ms.fits" % (ccds[i]), "FLAT.%s.rss.fits" % (ccds[i]), "ARC.%s.disp.fits" % (ccds[i]), @@ -616,9 +617,9 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") if fiberflat == 1: if reduce_ccd == "ALL": - glueRSS_drp("FLAT.CCD1.rss.fits,FLAT.CCD2.rss.fits", "FLAT.rss.fits") + rss.glueRSS_drp("FLAT.CCD1.rss.fits,FLAT.CCD2.rss.fits", "FLAT.rss.fits") - resampleWave_drp( + rss.resample_wavelength( "FLAT.rss.fits", "FLAT.disp_cor.fits", start_wave=start_wave, @@ -630,7 +631,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") parallel=1, ) else: - resampleWave_drp( + rss.resample_wavelength( "FLAT.%s.rss.fits" % (reduce_ccd), "FLAT.disp_cor.fits", start_wave=start_wave, @@ -641,7 +642,7 @@ def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL") method="linear", parallel=1, ) - createFiberFlat_drp( + rss.create_fiberflat( "FLAT.disp_cor.fits", "FIBERFLAT.fits", smooth_median=smooth_median_flat, @@ -685,16 +686,16 @@ def reduceSTD_drp( smooth_poly = -40 elif grating == "R150+_G5308" and centwave == 730.0: splits = "0,700,1200,1900" - setup = "2R150_730" + # setup = "2R150_730" start_wave = 4800 end_wave = 9900 disp_pix = 2.0 ccds = ["CCD1", "CCD2L", "CCD2R", "CCD3"] - steps = [40, 20, 20, 40] + # steps = [40, 20, 20, 40] createCCDfromArchive_drp(std, "STD", master_bias=master_bias, splits=splits) for i in range(len(ccds)): - LACosmic_drp( + im.LACosmic_drp( "STD.%s.fits" % (ccds[i]), "STD.%s.cosmic.fits" % (ccds[i]), sigma_det=5.0, @@ -707,12 +708,12 @@ def reduceSTD_drp( increase_radius=1, parallel=2, ) - addCCDMask_drp( + im.addCCDMask_drp( "STD.%s.cosmic.fits" % (ccds[i]), "%s/MASK.%s.Hamamatsu.fits" % (gmos_calib, ccds[i]), ) if int(straylight) == 1: - subtractStraylight_drp( + im.subtractStraylight_drp( 
"STD.%s.cosmic.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "STD.%s.back.fits" % (ccds[i]), @@ -723,7 +724,7 @@ def reduceSTD_drp( minfit=20, maxfit=10, ) - extractSpec_drp( + im.extract_spectra( "STD.%s.cosmic.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "STD.%s.ms.fits" % (ccds[i]), @@ -732,7 +733,7 @@ def reduceSTD_drp( parallel=1, ) else: - extractSpec_drp( + im.extract_spectra( "STD.%s.stray.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "STD.%s.ms.fits" % (ccds[i]), @@ -740,15 +741,15 @@ def reduceSTD_drp( fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), parallel=1, ) - createPixTable_drp( + rss.create_pixel_table( "STD.%s.ms.fits" % (ccds[i]), "STD.%s.rss.fits" % (ccds[i]), "ARC.%s.disp.fits" % (ccds[i]), "ARC.%s.res.fits" % (ccds[i]), ) - glueRSS_drp("STD.CCD1.rss.fits,STD.CCD2L.rss.fits", "STD_red.rss.fits") - glueRSS_drp("STD.CCD2R.rss.fits,STD.CCD3.rss.fits", "STD_blue.rss.fits") - resampleWave_drp( + rss.glueRSS_drp("STD.CCD1.rss.fits,STD.CCD2L.rss.fits", "STD_red.rss.fits") + rss.glueRSS_drp("STD.CCD2R.rss.fits,STD.CCD3.rss.fits", "STD_blue.rss.fits") + rss.resample_wavelength( "STD_blue.rss.fits", "STD_blue.disp_cor.fits", start_wave=start_wave, @@ -758,7 +759,7 @@ def reduceSTD_drp( method="linear", parallel=1, ) - resampleWave_drp( + rss.resample_wavelength( "STD_red.rss.fits", "STD_red.disp_cor.fits", start_wave=start_wave, @@ -767,26 +768,26 @@ def reduceSTD_drp( err_sim=0, method="linear", ) - mergeRSS_drp( + rss.mergeRSS_drp( "STD_red.disp_cor.fits,STD_blue.disp_cor.fits", "STD.disp_cor.fits" ) - correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "FIBERFLAT.fits") - includePosTab_drp("STD.flat.fits", "%s/GMOS_2slit_pt.txt" % (gmos_calib)) - splitFibers_drp( + rss.correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "FIBERFLAT.fits") + rss.includePosTab_drp("STD.flat.fits", "%s/GMOS_2slit_pt.txt" % (gmos_calib)) + rss.splitFibers_drp( "STD.flat.fits", "STD.obj_red.fits,STD.sky_red.fits,STD.obj_blue.fits,STD.sky_blue.fits", "O_R,S_R,O_B,S_B", ) - constructSkySpec_drp("STD.sky_red.fits", "STD.skyspec_red.fits", nsky=200) - constructSkySpec_drp("STD.sky_blue.fits", "STD.skyspec_blue.fits", nsky=200) - subtractSkySpec_drp( + sky.constructSkySpec_drp("STD.sky_red.fits", "STD.skyspec_red.fits", nsky=200) + sky.constructSkySpec_drp("STD.sky_blue.fits", "STD.skyspec_blue.fits", nsky=200) + sky.subtractSkySpec_drp( "STD.obj_red.fits", "STD.sobj_red.fits", "STD.skyspec_red.fits" ) - subtractSkySpec_drp( + sky.subtractSkySpec_drp( "STD.obj_blue.fits", "STD.sobj_blue.fits", "STD.skyspec_blue.fits" ) - mergeRSS_drp("STD.sobj_red.fits,STD.sobj_blue.fits", "STD.sobj.fits") - createCube_drp( + rss.mergeRSS_drp("STD.sobj_red.fits,STD.sobj_blue.fits", "STD.sobj.fits") + rss.createCube_drp( "STD.sobj.fits", "STD.cube.fits", mode="drizzle", resolution=0.2, parallel=1 ) elif IFU_mask == "IFU-R" or IFU_mask == "IFU-B": @@ -801,7 +802,7 @@ def reduceSTD_drp( ) smooth_poly = -15 elif grating == "R400+_G5325" and centwave == 690.0 and IFU_mask == "IFU-R": - setup = "1RR400_690" + # setup = "1RR400_690" if reduce_ccd == "ALL": start_wave = 4600 end_wave = 9000 @@ -825,7 +826,7 @@ def reduceSTD_drp( indices = [2] for i in indices: # print(i) - LACosmic_drp( + im.LACosmic_drp( "STD.%s.fits" % (ccds[i]), "STD.%s.cosmic.fits" % (ccds[i]), sigma_det=5.0, @@ -838,12 +839,12 @@ def reduceSTD_drp( increase_radius=1, parallel=2, ) - addCCDMask_drp( + im.addCCDMask_drp( "STD.%s.cosmic.fits" % (ccds[i]), "%s/MASK.%s.Hamamatsu.fits" % (gmos_calib, ccds[i]), ) if int(straylight) == 1: - 
subtractStraylight_drp( + im.subtractStraylight_drp( "STD.%s.cosmic.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "STD.%s.back.fits" % (ccds[i]), @@ -853,7 +854,7 @@ def reduceSTD_drp( smooth_disp=70, smooth_gauss=10, ) - extractSpec_drp( + im.extract_spectra( "STD.%s.stray.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "STD.%s.ms.fits" % (ccds[i]), @@ -862,7 +863,7 @@ def reduceSTD_drp( parallel=1, ) else: - extractSpec_drp( + im.extract_spectra( "STD.%s.cosmic.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "STD.%s.ms.fits" % (ccds[i]), @@ -870,17 +871,17 @@ def reduceSTD_drp( fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), parallel=1, ) - createPixTable_drp( + rss.create_pixel_table( "STD.%s.ms.fits" % (ccds[i]), "STD.%s.rss.fits" % (ccds[i]), "ARC.%s.disp.fits" % (ccds[i]), "ARC.%s.res.fits" % (ccds[i]), ) if reduce_ccd == "ALL": - glueRSS_drp( + rss.glueRSS_drp( "STD.CCD1.rss.fits,STD.CCD2.rss.fits,STD.CCD3.rss.fits", "STD.rss.fits" ) - resampleWave_drp( + rss.resample_wavelength( "STD.rss.fits", "STD.disp_cor.fits", start_wave=start_wave, @@ -890,7 +891,7 @@ def reduceSTD_drp( method="linear", ) else: - resampleWave_drp( + rss.resample_wavelength( "STD.%s.rss.fits" % (ccds[i]), "STD.disp_cor.fits", start_wave=start_wave, @@ -899,20 +900,20 @@ def reduceSTD_drp( err_sim=0, method="linear", ) - correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "FIBERFLAT.fits") + rss.correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "FIBERFLAT.fits") if IFU_mask == "IFU-R": - includePosTab_drp("STD.flat.fits", "%s/GMOS_1slitR_pt.txt" % (gmos_calib)) - splitFibers_drp("STD.flat.fits", "STD.obj.fits,STD.sky.fits", "O_R,S_R") + rss.includePosTab_drp("STD.flat.fits", "%s/GMOS_1slitR_pt.txt" % (gmos_calib)) + rss.splitFibers_drp("STD.flat.fits", "STD.obj.fits,STD.sky.fits", "O_R,S_R") elif IFU_mask == "IFU-B": - includePosTab_drp("STD.flat.fits", "%s/GMOS_1slitB_pt.txt" % (gmos_calib)) - splitFibers_drp("STD.flat.fits", "STD.obj.fits,STD.sky.fits", "O_B,S_B") - constructSkySpec_drp("STD.sky.fits", "STD.skyspec_red.fits", nsky=200) - subtractSkySpec_drp("STD.obj.fits", "STD.sobj.fits", "STD.skyspec_red.fits") - createCube_drp( + rss.includePosTab_drp("STD.flat.fits", "%s/GMOS_1slitB_pt.txt" % (gmos_calib)) + rss.splitFibers_drp("STD.flat.fits", "STD.obj.fits,STD.sky.fits", "O_B,S_B") + sky.constructSkySpec_drp("STD.sky.fits", "STD.skyspec_red.fits", nsky=200) + sky.subtractSkySpec_drp("STD.obj.fits", "STD.sobj.fits", "STD.skyspec_red.fits") + rss.createCube_drp( "STD.obj.fits", "STD.cube.fits", mode="drizzle", resolution=0.2, parallel=1 ) if ref_star != "": - createSensFunction_drp( + flux.createSensFunction_drp( "STD.sobj.fits", "ratio.txt", "%s/%s" % (gmos_calib, ref_star), @@ -1017,7 +1018,7 @@ def reduceObject_drp( elif reduce_ccd == "CCD3": indices = [2] for i in indices: - LACosmic_drp( + im.LACosmic_drp( "OBJ.%s.fits" % (ccds[i]), "OBJ.%s.cosmic.fits" % (ccds[i]), sigma_det=5.0, @@ -1031,12 +1032,12 @@ def reduceObject_drp( parallel=2, ) if instrument == "GMOS-S": - addCCDMask_drp( + im.addCCDMask_drp( "OBJ.%s.cosmic.fits" % (ccds[i]), "%s/MASK.%s.Hamamatsu.fits" % (gmos_calib, ccds[i]), ) if sky_line_list != "": - offsetTrace_drp( + im.offsetTrace_drp( "OBJ.%s.cosmic.fits" % (ccds[i]), "tjunk.%s.trc.fits" % (ccds[i]), "ARC.%s.disp.fits" % (ccds[i]), @@ -1045,19 +1046,19 @@ def reduceObject_drp( blocks="10", size="30", ) - correctTraceMask_drp( + rss.correctTraceMask_drp( "tjunk.%s.trc.fits" % (ccds[i]), "tjunk.%s.trc_temp.fits" % (ccds[i]), "offsetTrace_%s.log" % (ccds[i]), 
"OBJ.%s.cosmic.fits" % (ccds[i]), - poly_smooth=flexure_order, + poly_smooth=flexure_correct, ) else: os.system( "cp tjunk.%s.trc.fits tjunk.%s.trc_temp.fits" % (ccds[i], ccds[i]) ) if straylight == 1: - subtractStraylight_drp( + im.subtractStraylight_drp( "OBJ.%s.cosmic.fits" % (ccds[i]), "tjunk.%s.trc_temp.fits" % (ccds[i]), "OBJ.%s.back.fits" % (ccds[i]), @@ -1067,7 +1068,7 @@ def reduceObject_drp( smooth_disp=70, smooth_gauss=15, ) - extractSpec_drp( + im.extract_spectra( "OBJ.%s.stray.fits" % (ccds[i]), "tjunk.%s.trc_temp.fits" % (ccds[i]), "OBJ.%s.ms.fits" % (ccds[i]), @@ -1076,7 +1077,7 @@ def reduceObject_drp( parallel=3, ) else: - extractSpec_drp( + im.extract_spectra( "OBJ.%s.cosmic.fits" % (ccds[i]), "tjunk.%s.trc_temp.fits" % (ccds[i]), "OBJ.%s.ms.fits" % (ccds[i]), @@ -1084,21 +1085,21 @@ def reduceObject_drp( fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), parallel=2, ) - createPixTable_drp( + rss.create_pixel_table( "OBJ.%s.ms.fits" % (ccds[i]), "OBJ.%s.rss.fits" % (ccds[i]), "ARC.%s.disp.fits" % (ccds[i]), "ARC.%s.res.fits" % (ccds[i]), ) if sky_line_list != "": - checkPixTable_drp( + rss.checkPixTable_drp( "OBJ.%s.rss.fits" % (ccds[i]), sky_line_list, "offsetWave_%s.log" % (ccds[i]), aperture="12", ) if float(res_fwhm) != 0.0: - matchResolution_drp( + rss.matchResolution_drp( "OBJ.%s.rss.fits" % (ccds[i]), "OBJ.%s.rss.fits" % (ccds[i]), res_fwhm, @@ -1106,9 +1107,9 @@ def reduceObject_drp( ) if IFU_mask == "IFU-2": - glueRSS_drp("OBJ.CCD1.rss.fits,OBJ.CCD2L.rss.fits", "OBJ_red.rss.fits") - glueRSS_drp("OBJ.CCD2R.rss.fits,OBJ.CCD3.rss.fits", "OBJ_blue.rss.fits") - resampleWave_drp( + rss.glueRSS_drp("OBJ.CCD1.rss.fits,OBJ.CCD2L.rss.fits", "OBJ_red.rss.fits") + rss.glueRSS_drp("OBJ.CCD2R.rss.fits,OBJ.CCD3.rss.fits", "OBJ_blue.rss.fits") + rss.resample_wavelength( "OBJ_blue.rss.fits", "OBJ_blue.disp_cor.fits", start_wave=start_wave, @@ -1118,7 +1119,7 @@ def reduceObject_drp( method="linear", parallel=1, ) - resampleWave_drp( + rss.resample_wavelength( "OBJ_red.rss.fits", "OBJ_red.disp_cor.fits", start_wave=start_wave, @@ -1127,16 +1128,16 @@ def reduceObject_drp( err_sim=200, method="linear", ) - mergeRSS_drp( + rss.mergeRSS_drp( "OBJ_red.disp_cor.fits,OBJ_blue.disp_cor.fits", "OBJ.disp_cor.fits" ) - includePosTab_drp("OBJ.disp_cor.fits", "%s/GMOS_2slit_pt.txt" % (gmos_calib)) + rss.includePosTab_drp("OBJ.disp_cor.fits", "%s/GMOS_2slit_pt.txt" % (gmos_calib)) elif IFU_mask == "IFU-R": if reduce_ccd == "ALL": - glueRSS_drp( + rss.glueRSS_drp( "OBJ.CCD1.rss.fits,OBJ.CCD2.rss.fits,OBJ.CCD3.rss.fits", "OBJ.rss.fits" ) - resampleWave_drp( + rss.resample_wavelength( "OBJ.rss.fits", "OBJ.disp_cor.fits", start_wave=start_wave, @@ -1146,7 +1147,7 @@ def reduceObject_drp( method="linear", ) else: - resampleWave_drp( + rss.resample_wavelength( "OBJ.%s.rss.fits" % (reduce_ccd), "OBJ.disp_cor.fits", start_wave=start_wave, @@ -1155,14 +1156,14 @@ def reduceObject_drp( err_sim=200, method="linear", ) - includePosTab_drp("OBJ.disp_cor.fits", "%s/GMOS_1slitR_pt.txt" % (gmos_calib)) + rss.includePosTab_drp("OBJ.disp_cor.fits", "%s/GMOS_1slitR_pt.txt" % (gmos_calib)) if fiberflat == 1: - correctFiberFlat_drp("OBJ.disp_cor.fits", "OBJ.flat.fits", "FIBERFLAT.fits") + rss.correctFiberFlat_drp("OBJ.disp_cor.fits", "OBJ.flat.fits", "FIBERFLAT.fits") if flux_calib == 1: if fiberflat == 1: - fluxCalibration_drp( + flux.fluxCalibration_drp( "OBJ.flat.fits", "OBJ.fobj.fits", "ratio.txt", @@ -1174,7 +1175,7 @@ def reduceObject_drp( norm_sb_fib="", ) else: - fluxCalibration_drp( + flux.fluxCalibration_drp( 
"OBJ.disp_cor.fits", "OBJ.fobj.fits", "ratio.txt", @@ -1186,7 +1187,7 @@ def reduceObject_drp( norm_sb_fib="", ) if telluric_cor == 1: - correctTelluric_drp( + flux.correctTelluric_drp( "OBJ.fobj.fits", "OBJ.fobj.fits", "telluric_template.fits", diff --git a/python/lvmdrp/functions/vimosMethod.py b/python/lvmdrp/functions/vimosMethod.py index c9206d4f..c9acb582 100644 --- a/python/lvmdrp/functions/vimosMethod.py +++ b/python/lvmdrp/functions/vimosMethod.py @@ -4,10 +4,12 @@ from astropy.io import fits as pyfits from lvmdrp.core.spectrum1d import Spectrum1D -from lvmdrp.functions.headerMethod import * -from lvmdrp.functions.imageMethod import * -from lvmdrp.functions.rssMethod import * -from lvmdrp.functions.specialMethod import * +from lvmdrp.functions import headerMethod as head +from lvmdrp.functions import imageMethod as im +from lvmdrp.functions import rssMethod as rss +from lvmdrp.functions import specialMethod as spec +from lvmdrp.functions import fluxCalMethod as flux +from lvmdrp.functions import skyMethod as sky description = "Provides Methods to reduce VIMOS data" @@ -28,32 +30,32 @@ def renameFiles_drp(year): def createBIAS_drp(night): night = int(night) os.system("ls VIMOS_SPEC_BIAS%03d_*_B.1.fits* > combine_BIAS.B1" % (night)) - combineImages_drp("combine_BIAS.B1", "BIAS_B.1.fits", method="median") + im.combineImages_drp("combine_BIAS.B1", "BIAS_B.1.fits", method="median") os.system("rm combine_BIAS.B1") os.system("ls VIMOS_SPEC_BIAS%03d_*_A.2.fits* > combine_BIAS.A2" % (night)) - combineImages_drp("combine_BIAS.A2", "BIAS_A.2.fits", method="median") + im.combineImages_drp("combine_BIAS.A2", "BIAS_A.2.fits", method="median") os.system("rm combine_BIAS.A2") os.system("ls VIMOS_SPEC_BIAS%03d_*_A.3.fits* > combine_BIAS.A3" % (night)) - combineImages_drp("combine_BIAS.A3", "BIAS_A.3.fits", method="median") + im.combineImages_drp("combine_BIAS.A3", "BIAS_A.3.fits", method="median") os.system("rm combine_BIAS.A3") os.system("ls VIMOS_SPEC_BIAS%03d_*_B.4.fits* > combine_BIAS.B4" % (night)) - combineImages_drp("combine_BIAS.B4", "BIAS_B.4.fits", method="median") + im.combineImages_drp("combine_BIAS.B4", "BIAS_B.4.fits", method="median") os.system("rm combine_BIAS.B4") def combineLAMP_drp(night): night = int(night) os.system("ls VIMOS_IFU_LAMP%03d_*_B.1.fits* > combine_LAMP.B1" % (night)) - combineImages_drp("combine_LAMP.B1", "LAMP_B.1.fits", method="median") + im.combineImages_drp("combine_LAMP.B1", "LAMP_B.1.fits", method="median") os.system("rm combine_LAMP.B1") os.system("ls VIMOS_IFU_LAMP%03d_*_A.2.fits* > combine_LAMP.A2" % (night)) - combineImages_drp("combine_LAMP.A2", "LAMP_A.2.fits", method="median") + im.combineImages_drp("combine_LAMP.A2", "LAMP_A.2.fits", method="median") os.system("rm combine_LAMP.A2") os.system("ls VIMOS_IFU_LAMP%03d_*_A.3.fits* > combine_LAMP.A3" % (night)) - combineImages_drp("combine_LAMP.A3", "LAMP_A.3.fits", method="median") + im.combineImages_drp("combine_LAMP.A3", "LAMP_A.3.fits", method="median") os.system("rm combine_LAMP.A3") os.system("ls VIMOS_IFU_LAMP%03d_*_B.4.fits* > combine_LAMP.B4" % (night)) - combineImages_drp("combine_LAMP.B4", "LAMP_B.4.fits", method="median") + im.combineImages_drp("combine_LAMP.B4", "LAMP_B.4.fits", method="median") os.system("rm combine_LAMP.B4") @@ -97,10 +99,10 @@ def combineTwilight_drp(night, numbers): % (night, int(list[i])) ) - combineImages_drp("combine_LAMP.B1", "LAMP_B.1.fits", method="mean") - combineImages_drp("combine_LAMP.A2", "LAMP_A.2.fits", method="mean") - combineImages_drp("combine_LAMP.A3", 
"LAMP_A.3.fits", method="mean") - combineImages_drp("combine_LAMP.B4", "LAMP_B.4.fits", method="mean") + im.combineImages_drp("combine_LAMP.B1", "LAMP_B.1.fits", method="mean") + im.combineImages_drp("combine_LAMP.A2", "LAMP_A.2.fits", method="mean") + im.combineImages_drp("combine_LAMP.A3", "LAMP_A.3.fits", method="mean") + im.combineImages_drp("combine_LAMP.B4", "LAMP_B.4.fits", method="mean") os.system("rm combine_LAMP.B1") os.system("rm combine_LAMP.A2") os.system("rm combine_LAMP.A3") @@ -132,7 +134,7 @@ def prepareCalib_drp( disp_wave = float(disp_wave) fiberflat = int(fiberflat) fiberflat_wave = int(fiberflat_wave) - subtractBias_drp( + im.subtractBias_drp( "LAMP_%s.fits" % (chip), "LAMP_%s.sub.fits" % (chip), "BIAS_%s.fits" % (chip), @@ -143,12 +145,12 @@ def prepareCalib_drp( subtract_light="1", ) if CCD_mask != "": - addCCDMask_drp( + im.addCCDMask_drp( "LAMP_%s.sub.fits" % (chip), "%s/CCDMASK_%s_%s.fits" % (vimos_calib, chip, CCD_mask), ) if setup != "" and setup == "orange": - findPeaksMaster2_drp( + im.findPeaksMaster2_drp( "LAMP_%s.sub.fits" % (chip), peaks_ref, "peaks_%s.txt" % (chip), @@ -158,7 +160,7 @@ def prepareCalib_drp( verbose=0, ) elif setup != "" and setup == "blue": - findPeaksMaster2_drp( + im.findPeaksMaster2_drp( "LAMP_%s.sub.fits" % (chip), peaks_ref, "peaks_%s.txt" % (chip), @@ -168,7 +170,7 @@ def prepareCalib_drp( verbose=0, ) else: - findPeaksMaster2_drp( + im.findPeaksMaster2_drp( "LAMP_%s.sub.fits" % (chip), peaks_ref, "peaks_%s.txt" % (chip), @@ -177,7 +179,7 @@ def prepareCalib_drp( verbose=0, ) - tracePeaks_drp( + im.trace_peaks( "LAMP_%s.sub.fits" % (chip), "peaks_%s.txt" % (chip), "tjunk_%s.trc.fits" % (chip), @@ -189,7 +191,7 @@ def prepareCalib_drp( verbose=0, ) if trace_master == "": - subtractStraylight_drp( + im.subtractStraylight_drp( "LAMP_%s.sub.fits" % (chip), "tjunk_%s.trc.fits" % (chip), "LAMP_%s.back.fits" % (chip), @@ -200,7 +202,7 @@ def prepareCalib_drp( smooth_disp=30, parallel=parallel, ) - traceFWHM_drp( + im.traceFWHM_drp( "LAMP_%s.stray.fits" % (chip), "tjunk_%s.trc.fits" % (chip), "tjunk.fwhm_%s.fits" % (chip), @@ -213,7 +215,7 @@ def prepareCalib_drp( parallel=parallel, ) else: - matchMasterTrace_drp( + spec.matchMasterTrace_drp( "tjunk_%s.trc.fits" % (chip), "master_%s.trc.fits" % (chip), "tjunk_%s.trc.fits" % (chip), @@ -223,7 +225,7 @@ def prepareCalib_drp( start_pix=trace_master[0], end_pix=trace_master[1], ) - subtractStraylight_drp( + im.subtractStraylight_drp( "LAMP_%s.sub.fits" % (chip), "tjunk_%s.trc.fits" % (chip), "LAMP_%s.back.fits" % (chip), @@ -236,7 +238,7 @@ def prepareCalib_drp( ) os.system("cp master.fwhm_%s.fits tjunk.fwhm_%s.fits" % (chip, chip)) if fiberflat_wave == 1: - extractSpec_drp( + im.extract_spectra( "WAVE_%s.sub.fits" % (chip), "tjunk_%s.trc.fits" % (chip), "WAVE_%s.ms.fits" % (chip), @@ -245,7 +247,7 @@ def prepareCalib_drp( disp_axis="y", parallel=parallel, ) - detWaveSolution_drp( + rss.determine_wavelength_solution( "WAVE_%s.ms.fits" % (chip), "WAVE_%s" % (chip), ARC_ref, @@ -260,7 +262,7 @@ def prepareCalib_drp( verbose="1", ) else: - extractSpec_drp( + im.extract_spectra( "WAVE_%s.sub.fits" % (chip), "tjunk_%s.trc.fits" % (chip), "WAVE_%s.ms.fits" % (chip), @@ -269,7 +271,7 @@ def prepareCalib_drp( disp_axis="y", parallel=parallel, ) - detWaveSolution_drp( + rss.determine_wavelength_solution( "WAVE_%s.ms.fits" % (chip), "WAVE_%s" % (chip), ARC_ref, @@ -281,13 +283,13 @@ def prepareCalib_drp( rel_flux_limits="0.1,6.0", verbose="0", ) - createPixTable_drp( + rss.create_pixel_table( 
"WAVE_%s.ms.fits" % (chip), "WAVE_%s.rss.fits" % (chip), "WAVE_%s.disp.fits" % (chip), "WAVE_%s.res.fits" % (chip), ) - resampleWave_drp( + rss.resample_wavelength( "WAVE_%s.rss.fits" % (chip), "WAVE_%s.disp_cor.fits" % (chip), start_wave=start_wave, @@ -297,7 +299,7 @@ def prepareCalib_drp( parallel=parallel, ) if fiberflat == 1: - extractSpec_drp( + im.extract_spectra( "LAMP_%s.stray.fits" % (chip), "tjunk_%s.trc.fits" % (chip), "LAMP_%s.ms.fits" % (chip), @@ -306,13 +308,13 @@ def prepareCalib_drp( disp_axis="y", parallel=parallel, ) - createPixTable_drp( + rss.create_pixel_table( "LAMP_%s.ms.fits" % (chip), "LAMP_%s.rss.fits" % (chip), "WAVE_%s.disp.fits" % (chip), "WAVE_%s.res.fits" % (chip), ) - resampleWave_drp( + rss.resample_wavelength( "LAMP_%s.rss.fits" % (chip), "LAMP_%s.disp_cor.fits" % (chip), start_wave=start_wave, @@ -323,7 +325,7 @@ def prepareCalib_drp( parallel=parallel, ) if fiberflat == 1 or fiberflat_wave == 1: - includePosTab_drp( + rss.includePosTab_drp( "LAMP_%s.disp_cor.fits" % (chip), "%s/vimos_HR_%s_pt.txt" % (vimos_calib, chip), ) @@ -348,10 +350,10 @@ def prepareObject_drp( parallel="1", ): # if CCD_mask!='': - # addCCDMask_drp('%s_%s.cosmic.fits'%(name_obj,chip),'%s/CCDMASK_%s_%s.fits'%(vimos_calib,chip,CCD_mask)) + # im.addCCDMask_drp('%s_%s.cosmic.fits'%(name_obj,chip),'%s/CCDMASK_%s_%s.fits'%(vimos_calib,chip,CCD_mask)) if sky_line_list != "": # offsetTrace_drp('%s_%s.cosmic.fits'%(name_obj, chip), 'tjunk_%s.trc.fits'%(chip), 'WAVE_%s.disp.fits'%(chip), sky_line_list, 'offsetTrace_%s.log'%(chip), blocks='10', disp_axis='y', size='30') - correctTraceMask_drp( + rss.correctTraceMask_drp( "tjunk_%s.trc.fits" % (chip), "tjunk_%s_temp.trc.fits" % (chip), "offsetTrace_%s.log" % (chip), @@ -362,7 +364,7 @@ def prepareObject_drp( os.system("cp tjunk_%s.trc.fits tjunk_%s_temp.trc.fits" % (chip, chip)) if straylight: - subtractStraylight_drp( + im.subtractStraylight_drp( "%s_%s.cosmic.fits" % (name_obj, chip), "tjunk_%s_temp.trc.fits" % (chip), "%s_%s.back.fits" % (name_obj, chip), @@ -378,7 +380,7 @@ def prepareObject_drp( "cp %s_%s.cosmic.fits %s_%s.stray.fits" % (name_obj, chip, name_obj, chip) ) - extractSpec_drp( + im.extract_spectra( "%s_%s.stray.fits" % (name_obj, chip), "tjunk_%s_temp.trc.fits" % (chip), "%s_%s.ms.fits" % (name_obj, chip), @@ -388,14 +390,14 @@ def prepareObject_drp( parallel=parallel, ) - createPixTable_drp( + rss.create_pixel_table( "%s_%s.ms.fits" % (name_obj, chip), "%s_%s.pix_tab.fits" % (name_obj, chip), "WAVE_%s.disp.fits" % (chip), "WAVE_%s.res.fits" % (chip), ) - addHvelcorHdr_drp( + head.addHvelcorHdr_drp( "%s_%s.pix_tab.fits" % (name_obj, chip), "HVEL_COR", RAKey="RA", @@ -414,13 +416,13 @@ def prepareObject_drp( HVEL_key = "" if sky_line_list != "": - checkPixTable_drp( + rss.checkPixTable_drp( "%s_%s.pix_tab.fits" % (name_obj, chip), sky_line_list, "offsetWave_%s.log" % (chip), aperture="12", ) - correctPixTable_drp( + rss.correctPixTable_drp( "%s_%s.pix_tab.fits" % (name_obj, chip), "%s_%s.pix_tab.fits" % (name_obj, chip), "offsetWave_%s.log" % (chip), @@ -430,13 +432,13 @@ def prepareObject_drp( poly_disp="5", ) if float(resolution_fwhm) != 0.0: - matchResolution_drp( + rss.matchResolution_drp( "%s_%s.pix_tab.fits" % (name_obj, chip), "%s_%s.res.fits" % (name_obj, chip), resolution_fwhm, parallel=parallel, ) - resampleWave_drp( + rss.resample_wavelength( "%s_%s.res.fits" % (name_obj, chip), "%s_%s.disp_cor.fits" % (name_obj, chip), start_wave=start_wave, @@ -448,7 +450,7 @@ def prepareObject_drp( parallel=parallel, ) else: - 
resampleWave_drp( + rss.resample_wavelength( "%s_%s.pix_tab.fits" % (name_obj, chip), "%s_%s.disp_cor.fits" % (name_obj, chip), start_wave=start_wave, @@ -459,7 +461,7 @@ def prepareObject_drp( correctHvel=HVEL_key, parallel=parallel, ) - includePosTab_drp( + rss.includePosTab_drp( "%s_%s.disp_cor.fits" % (name_obj, chip), "%s/vimos_HR_%s_pt.txt" % (vimos_calib, chip), ) @@ -479,7 +481,7 @@ def reduceCalibMR_drp( fiberflat = int(fiberflat) fiberflat_wave = int(fiberflat_wave) - image = loadImage("LAMP_B.1.fits") + image = im.loadImage("LAMP_B.1.fits") date = image.getHdrValue("ESO OBS START").split("T")[0] year = int(date.split("-")[0]) @@ -509,7 +511,7 @@ def reduceCalibMR_drp( "VIMOS_IFU_WAVE%03d" % (int(night)) in file_name and chips[i] in file_name ): - subtractBias_drp( + im.subtractBias_drp( file_name, "WAVE_%s.sub.fits" % (chips[i]), "BIAS_%s.fits" % (chips[i]), @@ -576,11 +578,11 @@ def reduceCalibMR_drp( ) if fiberflat == 1: - mergeRSS_drp( + rss.mergeRSS_drp( "LAMP_B.1.disp_cor.fits,LAMP_A.2.disp_cor.fits,LAMP_A.3.disp_cor.fits,LAMP_B.4.disp_cor.fits", "LAMP.disp_cor.fits", ) - createFiberFlat_drp("LAMP.disp_cor.fits", "fiberflat.fits", valid="1200,1600") + rss.create_fiberflat("LAMP.disp_cor.fits", "fiberflat.fits", valid="1200,1600") def reduceCalibHR_drp( @@ -603,7 +605,7 @@ def reduceCalibHR_drp( fiberflat = int(fiberflat) fiberflat_wave = int(fiberflat_wave) - image = loadImage("LAMP_B.1.fits") + image = im.loadImage("LAMP_B.1.fits") date = image.getHdrValue("ESO OBS START").split("T")[0] year = int(date.split("-")[0]) if date > "2012-04-15" and setup == "blue": @@ -659,7 +661,7 @@ def reduceCalibHR_drp( "VIMOS_IFU_WAVE%03d" % (int(night)) in file_name and chips[i] in file_name ): - subtractBias_drp( + im.subtractBias_drp( file_name, "WAVE_%s.sub.fits" % (chips[i]), "BIAS_%s.fits" % (chips[i]), @@ -670,11 +672,11 @@ def reduceCalibHR_drp( ) if setup == "orange": - set = "O" + ss = "O" elif setup == "blue": - set = "B" + ss = "B" elif setup == "red": - set = "R" + ss = "R" if parallel == "auto": cpus = cpu_count() else: @@ -697,7 +699,7 @@ def reduceCalibHR_drp( boundaries_y[i], "%s/master_VIMOS_%s_%s" % (vimos_calib, chips[i], peaks_guess), "%s/ref_lines_ARC_VIMOS_HR%s_%s%s" - % (vimos_calib, set, chips[i], wave_guess), + % (vimos_calib, ss, chips[i], wave_guess), wave_start, wave_end, wave_disp, @@ -727,7 +729,7 @@ def reduceCalibHR_drp( boundaries_y[i], "%s/master_VIMOS_%s_%s" % (vimos_calib, chips[i], peaks_guess), "%s/ref_lines_ARC_VIMOS_HR%s_%s%s" - % (vimos_calib, set, chips[i], wave_guess), + % (vimos_calib, ss, chips[i], wave_guess), wave_start, wave_end, wave_disp, @@ -745,17 +747,17 @@ def reduceCalibHR_drp( merge_flat = "" for i in range(len(chips)): merge_flat = merge_flat + "LAMP_%s.disp_cor.fits," % (chips[i]) - mergeRSS_drp(merge_flat[:-1], "LAMP.disp_cor.fits") + rss.mergeRSS_drp(merge_flat[:-1], "LAMP.disp_cor.fits") if setup == "orange": if len(chips) > 2: - createFiberFlat_drp( + rss.create_fiberflat( "LAMP.disp_cor.fits", "fiberflat.fits", valid="800,1200", clip="0.3,1.7", ) else: - createFiberFlat_drp( + rss.create_fiberflat( "LAMP.disp_cor.fits", "fiberflat.fits", valid="400,800", @@ -763,31 +765,31 @@ def reduceCalibHR_drp( ) elif setup == "blue" and date < "2012-04-15": if len(chips) > 2: - createFiberFlat_drp( + rss.create_fiberflat( "LAMP.disp_cor.fits", "fiberflat.fits", valid="800,1200", clip="0.2,4.0", ) else: - createFiberFlat_drp( + rss.create_fiberflat( "LAMP.disp_cor.fits", "fiberflat.fits", valid="400,800", clip="0.3,1.7", ) elif setup == 
"blue" and date >= "2012-04-15": - createFiberFlat_drp("LAMP.disp_cor.fits", "fiberflat.fits", clip="0.3,1.7") + rss.create_fiberflat("LAMP.disp_cor.fits", "fiberflat.fits", clip="0.3,1.7") elif setup == "red": if len(chips) > 2: - createFiberFlat_drp( + rss.create_fiberflat( "LAMP.disp_cor.fits", "fiberflat.fits", valid="800,1200", clip="0.2,4.0", ) else: - createFiberFlat_drp( + rss.create_fiberflat( "LAMP.disp_cor.fits", "fiberflat.fits", valid="400,800", @@ -816,14 +818,14 @@ def reduceObjectMR_drp( night = int(night) flexure_correct = int(flexure_correct) straylight = int(straylight) - correctHVEL = bool(int(correct_HVEL)) + #correctHVEL = bool(int(correct_HVEL)) chips = ["B.1", "A.2", "A.3", "B.4"] try: - image = loadImage( + image = im.loadImage( "VIMOS_IFU_OBS%03d_%04d_%s.fits.gz" % (night, object_nr, "B.1") ) except IOError: - image = loadImage("VIMOS_IFU_OBS%03d_%04d_%s.fits" % (night, object_nr, "B.1")) + image = im.loadImage("VIMOS_IFU_OBS%03d_%04d_%s.fits" % (night, object_nr, "B.1")) date = image.getHdrValue("ESO OBS START").split("T")[0] year = int(date.split("-")[0]) @@ -854,7 +856,7 @@ def reduceObjectMR_drp( for i in range(len(chips)): try: - subtractBias_drp( + im.subtractBias_drp( "VIMOS_IFU_OBS%03d_%04d_%s.fits.gz" % (night, object_nr, chips[i]), "%s_%s.sub.fits" % (name_obj, chips[i]), "BIAS_%s.fits" % (chips[i]), @@ -864,7 +866,7 @@ def reduceObjectMR_drp( rdnoise="ESO DET OUT1 RON", ) except IOError: - subtractBias_drp( + im.subtractBias_drp( "VIMOS_IFU_OBS%03d_%04d_%s.fits" % (night, object_nr, chips[i]), "%s_%s.sub.fits" % (name_obj, chips[i]), "BIAS_%s.fits" % (chips[i]), @@ -873,7 +875,7 @@ def reduceObjectMR_drp( gain="ESO DET OUT1 CONAD", rdnoise="ESO DET OUT1 RON", ) - LACosmic_drp( + im.LACosmic_drp( "%s_%s.sub.fits" % (name_obj, chips[i]), "%s_%s.cosmic.fits" % (name_obj, chips[i]), sigma_det="5.0", @@ -941,17 +943,17 @@ def reduceObjectMR_drp( 4, ) - expandHdrKeys_drp("%s_B.1.disp_cor.fits" % (name_obj), "CCD1") - expandHdrKeys_drp("%s_A.2.disp_cor.fits" % (name_obj), "CCD2") - expandHdrKeys_drp("%s_A.3.disp_cor.fits" % (name_obj), "CCD3") - expandHdrKeys_drp("%s_B.4.disp_cor.fits" % (name_obj), "CCD4") - mergeRSS_drp( + head.expandHdrKeys_drp("%s_B.1.disp_cor.fits" % (name_obj), "CCD1") + head.expandHdrKeys_drp("%s_A.2.disp_cor.fits" % (name_obj), "CCD2") + head.expandHdrKeys_drp("%s_A.3.disp_cor.fits" % (name_obj), "CCD3") + head.expandHdrKeys_drp("%s_B.4.disp_cor.fits" % (name_obj), "CCD4") + rss.mergeRSS_drp( "%s_B.1.disp_cor.fits,%s_A.2.disp_cor.fits,%s_A.3.disp_cor.fits,%s_B.4.disp_cor.fits" % (name_obj, name_obj, name_obj, name_obj), "%s.disp_cor.fits" % (name_obj), ) if fiberflat == 1: - correctFiberFlat_drp( + rss.correctFiberFlat_drp( "%s.disp_cor.fits" % (name_obj), "%s.flat.fits" % (name_obj), "fiberflat.fits", @@ -959,7 +961,7 @@ def reduceObjectMR_drp( if flux_calib == 1: if fiberflat == 1: - fluxCalibration_drp( + flux.fluxCalibration_drp( "%s.flat.fits" % (name_obj), "%s.fobj.fits" % (name_obj), "ratio.txt", @@ -971,7 +973,7 @@ def reduceObjectMR_drp( norm_sb_fib="", ) # else: - fluxCalibration_drp( + flux.fluxCalibration_drp( "%s.disp_cor.fits" % (name_obj), "%s.fobj.fits" % (name_obj), "ratio.txt", @@ -983,7 +985,7 @@ def reduceObjectMR_drp( norm_sb_fib="", ) if telluric_cor == 1: - correctTelluric_drp( + flux.correctTelluric_drp( "%s.fobj.fits" % (name_obj), "%s.fobj.fits" % (name_obj), "telluric_template.fits", @@ -1025,7 +1027,7 @@ def reduceObjectHR_drp( flux_calib = int(flux_calib) telluric_cor = int(telluric_cor) - image = 
loadImage("VIMOS_IFU_OBS%03d_%04d_%s.fits.gz" % (night, object_nr, "B.1")) + image = im.loadImage("VIMOS_IFU_OBS%03d_%04d_%s.fits.gz" % (night, object_nr, "B.1")) date = image.getHdrValue("ESO OBS START").split("T")[0] year = int(date.split("-")[0]) if year == 2009: @@ -1043,7 +1045,7 @@ def reduceObjectHR_drp( boundaries_y = ["1,4096", "1,4096", "1,4096", "1,4096"] if setup == "orange": - set = "O" + # ss = "O" if flexure_correct == 1: sky_line_list = "5577.34,6300.30,6863.97,7276.42" flexure_order = 2 @@ -1051,7 +1053,7 @@ def reduceObjectHR_drp( sky_line_list = "" flexure_order = 0 elif setup == "blue" and date < "2012-04-15": - set = "B" + # ss = "B" if flexure_correct == 1: sky_line_list = "5577.34" flexure_order = 0 @@ -1060,12 +1062,12 @@ def reduceObjectHR_drp( flexure_order = 0 elif setup == "blue" and date >= "2012-04-15": - set = "B" + # ss = "B" sky_line_list = "" flexure_order = 0 elif setup == "red": - set = "R" + # ss = "R" if flexure_correct == 1: sky_line_list = "6863.97,7276.42,7913.72,8344.61" flexure_order = 2 @@ -1074,7 +1076,7 @@ def reduceObjectHR_drp( flexure_order = 0 for i in range(len(chips)): - subtractBias_drp( + im.subtractBias_drp( "VIMOS_IFU_OBS%03d_%04d_%s.fits.gz" % (night, object_nr, chips[i]), "%s_%s.sub.fits" % (name_obj, chips[i]), "BIAS_%s.fits" % (chips[i]), @@ -1083,7 +1085,7 @@ def reduceObjectHR_drp( gain="ESO DET OUT1 CONAD", rdnoise="ESO DET OUT1 RON", ) - LACosmic_drp( + im.LACosmic_drp( "%s_%s.sub.fits" % (name_obj, chips[i]), "%s_%s.cosmic.fits" % (name_obj, chips[i]), sigma_det="5.0", @@ -1152,13 +1154,13 @@ def reduceObjectHR_drp( 4, ) for i in range(len(chips)): - expandHdrKeys_drp( + head.expandHdrKeys_drp( "%s_%s.disp_cor.fits" % (name_obj, chips[i]), "CCD%s" % (chips[i].split(".")[1]), exclude="ESO TEL AIRM START,EXPTIME", ) if flux_calib == 1: - fluxCalibration_drp( + flux.fluxCalibration_drp( "%s_%s.disp_cor.fits" % (name_obj, chips[i]), "%s_%s.fobj.fits" % (name_obj, chips[i]), "ratio.txt", @@ -1175,7 +1177,7 @@ def reduceObjectHR_drp( % (name_obj, chips[i], name_obj, chips[i]) ) if telluric_cor == 1: - correctTelluric_drp( + flux.correctTelluric_drp( "%s.fobj.fits" % (name_obj), "%s.fobj.fits" % (name_obj), "telluric_template.fits", @@ -1184,10 +1186,10 @@ def reduceObjectHR_drp( merge_obj = "" for i in range(len(chips)): merge_obj = merge_obj + "%s_%s.fobj.fits," % (name_obj, chips[i]) - mergeRSS_drp(merge_obj[:-1], "%s.fobj.fits" % (name_obj)) + rss.mergeRSS_drp(merge_obj[:-1], "%s.fobj.fits" % (name_obj)) if fiberflat == 1: - correctFiberFlat_drp( + rss.correctFiberFlat_drp( "%s.fobj.fits" % (name_obj), "%s.flat.fits" % (name_obj), "fiberflat.fits" ) @@ -1207,29 +1209,30 @@ def reduceStdMR_drp( parallel="auto", ): chips = ["B.1", "A.2", "A.3", "B.4"] - if year >= 2013: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1300,3450", "1360,3510", "1165,3215", "900,3100"] - else: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1300,3450", "1340,3490", "1150,3300", "900,3150"] sky_line_list = "5577.34,6300.30,6863.97,7276.42,7750.65,8344.61,8885.85" night = int(night) std_nr = int(std_nr) straylight = int(straylight) - image = loadImage("VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr, "B.1")) + image = im.loadImage("VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr, "B.1")) date = image.getHdrValue("ESO OBS START").split("T")[0] year = int(date.split("-")[0]) + if year >= 2013: + boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] + boundaries_y = 
["1300,3450", "1360,3510", "1165,3215", "900,3100"] + else: + boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] + boundaries_y = ["1300,3450", "1340,3490", "1150,3300", "900,3150"] + if year == 2009: CCD_mask = "2009" else: CCD_mask = "" for i in range(len(chips)): - subtractBias_drp( + im.subtractBias_drp( "VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr + i, chips[i]), "%s_%s.sub.fits" % ("STD" + str(i + 1), chips[i]), "BIAS_%s.fits" % (chips[i]), @@ -1238,7 +1241,7 @@ def reduceStdMR_drp( gain="ESO DET OUT1 CONAD", rdnoise="ESO DET OUT1 RON", ) - LACosmic_drp( + im.LACosmic_drp( "%s_%s.sub.fits" % ("STD" + str(i + 1), chips[i]), "%s_%s.cosmic.fits" % ("STD" + str(i + 1), chips[i]), sigma_det="5.0", @@ -1306,38 +1309,39 @@ def reduceStdMR_drp( 4, ) - mergeRSS_drp( + rss.mergeRSS_drp( "STD1_B.1.disp_cor.fits,STD2_A.2.disp_cor.fits,STD3_A.3.disp_cor.fits,STD4_B.4.disp_cor.fits", "STD.disp_cor.fits", ) - correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "fiberflat.fits") + rss.correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "fiberflat.fits") - splitFibers_drp( + rss.splitFibers_drp( "STD.flat.fits", "STD1.flat.fits,STD2.flat.fits,STD3.flat.fits,STD4.flat.fits", "QD1,QD2,QD3,QD4", ) std_ratios = [] for i in range(len(chips)): - constructSkySpec_drp( + sky.constructSkySpec_drp( "STD%d.flat.fits" % (i + 1), "STD%d.sky_spec.fits" % (i + 1), clip_sigma=0.0, nsky=150, ) - subtractSkySpec_drp( + sky.subtractSkySpec_drp( "STD%d.flat.fits" % (i + 1), "STD%d.sobj.fits" % (i + 1), "STD%d.sky_spec.fits" % (i + 1), ) - copyHdrKey_drp( + head.copyHdrKey_drp( "VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr + i, chips[i]), "STD%d.sobj.fits" % (i + 1), "ESO TEL AIRM START", ) + std_telluric = [] if ref_star != "": - createSensFunction_drp( + flux.createSensFunction_drp( "STD%d.sobj.fits" % (i + 1), "ratio_%d.txt" % (i + 1), ref_star, @@ -1410,7 +1414,7 @@ def reduceStdHR_drp( std_nr = int(std_nr) straylight = int(straylight) - image = loadImage("VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr, "B.1")) + image = im.loadImage("VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr, "B.1")) date = image.getHdrValue("ESO OBS START").split("T")[0] year = int(date.split("-")[0]) @@ -1429,7 +1433,7 @@ def reduceStdHR_drp( CCD_mask = "" for i in range(len(chips)): - subtractBias_drp( + im.subtractBias_drp( "VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr + int(chips[i].split(".")[1]) - 1, chips[i]), "%s_%s.sub.fits" % ("STD" + str(i + 1), chips[i]), @@ -1439,7 +1443,7 @@ def reduceStdHR_drp( gain="ESO DET OUT1 CONAD", rdnoise="ESO DET OUT1 RON", ) - LACosmic_drp( + im.LACosmic_drp( "%s_%s.sub.fits" % ("STD" + str(i + 1), chips[i]), "%s_%s.cosmic.fits" % ("STD" + str(i + 1), chips[i]), sigma_det="5.0", @@ -1510,38 +1514,38 @@ def reduceStdHR_drp( merge_obj = "" for i in range(len(chips)): merge_obj = merge_obj + "STD%d_%s.disp_cor.fits," % (i + 1, chips[i]) - mergeRSS_drp(merge_obj[:-1], "STD.disp_cor.fits") - correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "fiberflat.fits") + rss.mergeRSS_drp(merge_obj[:-1], "STD.disp_cor.fits") + rss.correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "fiberflat.fits") QDs = "" files = "" for i in range(len(chips)): QDs = QDs + "QD%s," % (chips[i].split(".")[1]) files = files + "STD%s.flat.fits," % (chips[i].split(".")[1]) - splitFibers_drp("STD.flat.fits", files[:-1], QDs[:-1]) + rss.splitFibers_drp("STD.flat.fits", files[:-1], QDs[:-1]) std_ratios = [] std_telluric = [] for i in range(len(chips)): - 
constructSkySpec_drp( + sky.constructSkySpec_drp( "STD%s.flat.fits" % (chips[i].split(".")[1]), "STD%s.sky_spec.fits" % (chips[i].split(".")[1]), clip_sigma=0.0, nsky=70, non_neg=0, ) - subtractSkySpec_drp( + sky.subtractSkySpec_drp( "STD%s.flat.fits" % (chips[i].split(".")[1]), "STD%s.sobj.fits" % (chips[i].split(".")[1]), "STD%s.sky_spec.fits" % (chips[i].split(".")[1]), ) - copyHdrKey_drp( + head.copyHdrKey_drp( "VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr + i, chips[i]), "STD%s.sobj.fits" % (chips[i].split(".")[1]), "ESO TEL AIRM START", ) if ref_star != "": - createSensFunction_drp( + flux.createSensFunction_drp( "STD%s.sobj.fits" % (chips[i].split(".")[1]), "ratio_%s.txt" % (chips[i].split(".")[1]), ref_star, @@ -1596,25 +1600,25 @@ def subtractSkyField_drp( clip_sigma=0.0, nsky=200, ): - splitFibers_drp( + rss.splitFibers_drp( object_in, "obj_QD1.fits,obj_QD2.fits,obj_QD3.fits,obj_QD4.fits", "QD1,QD2,QD3,QD4", ) - splitFibers_drp( + rss.splitFibers_drp( sky_field, "sky_QD1.fits,sky_QD2.fits,sky_QD3.fits,sky_QD4.fits", "QD1,QD2,QD3,QD4", ) for i in range(4): - constructSkySpec_drp( + sky.constructSkySpec_drp( "sky_QD%d.fits" % (i + 1), "sky_spec_QD%d.fits" % (i + 1), clip_sigma=clip_sigma, filter=vimos_calib + "R_Johnson.txt,0,1", nsky=nsky, ) - subtractSkySpec_drp( + sky.subtractSkySpec_drp( "obj_QD%d.fits" % (i + 1), "sobj_QD%d.fits" % (i + 1), "sky_spec_QD%d.fits" % (i + 1), @@ -1622,35 +1626,35 @@ def subtractSkyField_drp( scale_ind=scale_ind, scale_region=scale_region, ) - copyHdrKey_drp( + head.copyHdrKey_drp( "sky_QD%d.fits" % (i + 1), "sobj_QD%d.fits" % (i + 1), "hierarch PIPE NSKY FIB", ) - copyHdrKey_drp( + head.copyHdrKey_drp( "sky_QD%d.fits" % (i + 1), "sobj_QD%d.fits" % (i + 1), "hierarch PIPE SKY MEAN", ) - copyHdrKey_drp( + head.copyHdrKey_drp( "sky_QD%d.fits" % (i + 1), "sobj_QD%d.fits" % (i + 1), "hierarch PIPE SKY MIN", ) - copyHdrKey_drp( + head.copyHdrKey_drp( "sky_QD%d.fits" % (i + 1), "sobj_QD%d.fits" % (i + 1), "hierarch PIPE SKY MAX", ) - copyHdrKey_drp( + head.copyHdrKey_drp( "sky_QD%d.fits" % (i + 1), "sobj_QD%d.fits" % (i + 1), "hierarch PIPE SKY RMS", ) - expandHdrKeys_drp( + head.expandHdrKeys_drp( "sobj_QD%d.fits" % (i + 1), "CCD%d" % (i + 1), "PIPE NSKY FIB,PIPE SKY MEAN,PIPE SKY MIN,PIPE SKY MAX,PIPE SKY RMS,PIPE SKY SCALE", ) - mergeRSS_drp("sobj_QD1.fits,sobj_QD2.fits,sobj_QD3.fits,sobj_QD4.fits", object_out) + rss.mergeRSS_drp("sobj_QD1.fits,sobj_QD2.fits,sobj_QD3.fits,sobj_QD4.fits", object_out) os.system("rm *QD?.fits") From ef158ccb678275db626f71c87385f0540041e9c7 Mon Sep 17 00:00:00 2001 From: Brian Cherinka Date: Wed, 29 Nov 2023 17:00:56 -0500 Subject: [PATCH 06/18] linting and formatting --- python/lvmdrp/core/fluxcal.py | 46 +++++++++--------- python/lvmdrp/functions/cubeMethod.py | 68 ++++++++++++++------------- 2 files changed, 58 insertions(+), 56 deletions(-) diff --git a/python/lvmdrp/core/fluxcal.py b/python/lvmdrp/core/fluxcal.py index fe72bbdd..cd693de3 100644 --- a/python/lvmdrp/core/fluxcal.py +++ b/python/lvmdrp/core/fluxcal.py @@ -339,13 +339,13 @@ def cholesky_solve(a, b): kd = bw - 1 - ### first round + # first round spot = np.linspace(kd) + 1 for j in range(0, n - 1): b[j] = b[j] / a[0, j] b[j + spot] = b[j + spot] - b[j] * a[spot, j] - #### second round + # second round spot = kd - np.linspace(kd) for j in range(n - 1, 0, -1): @@ -415,10 +415,10 @@ def bsplvn(bkpt, nord, x, ileft): imj = ileft - j deltam[:, j] = x - bkpt[imj] vmprev = 0.0 - for l in range(0, j): - vm = vnikx[:, l] / (deltap[:, l] + deltam[:, j - l]) - 
vnikx[:, l] = vm * deltap[:, l] + vmprev - vmprev = vm * deltam[:, j - l] + for num in range(0, j): + vm = vnikx[:, num] / (deltap[:, num] + deltam[:, j - num]) + vnikx[:, num] = vm * deltap[:, num] + vmprev + vmprev = vm * deltam[:, j - num] j = j + 1 vnikx[:, j] = vmprev @@ -430,7 +430,7 @@ def bsplvn(bkpt, nord, x, ileft): def bspline_action(x, sset, x2=None): if not isinstance(sset, dict): print("Please send in a proper B-spline structure") - return -1, lower, upper + return -1 # , lower, upper npoly = 1 nx = len(x) @@ -441,7 +441,7 @@ def bspline_action(x, sset, x2=None): if x2 is not None: if len(x2) != nx: print("dimensions do not match between x and x2") - return -1, lower, upper + return -1 # , lower, upper if "npoly" in sset.keys(): npoly = sset["npoly"] @@ -450,7 +450,7 @@ def bspline_action(x, sset, x2=None): goodbk = np.where(sset["bkmask"] != 0) nbkpt = goodbk.size if nbkpt < 2 * nord: - return -2, lower, upper + return -2 # , lower, upper n = nbkpt - nord gb = sset["fullbkpt"][goodbk] @@ -586,8 +586,8 @@ def bspline_valu(x, sset, x2=None, action=None, upper=None, lower=None): ict = upper[i] - lower[i] + 1 if ict > 0: - yfit[lower[i] : upper[i]] = ( - goodcoeff[i * npoly + spot] @ action[lower[i] : upper[i], :] + yfit[lower[i]: upper[i]] = ( + goodcoeff[i * npoly + spot] @ action[lower[i]: upper[i], :] ) yy = yfit @@ -1133,8 +1133,8 @@ def spflux_medianfilt(loglam, objflux, objivar, width, **kwargs): # which will force the ratio of the two to be unity. hwidth = np.ceil((width - 1) / 2.0) thisback[0:hwidth] = objflux[0:hwidth, ispec] - thisback[npix - 1 - hwidth : npix - 1] = objflux[ - npix - 1 - hwidth : npix - 1, ispec + thisback[npix - 1 - hwidth: npix - 1] = objflux[ + npix - 1 - hwidth: npix - 1, ispec ] czero2 = np.where(thisback == 0)[0] count2 = czero2.size @@ -1202,11 +1202,11 @@ def spflux_bestmodel( # # NOTE: what is dslgpsize? if template == "kurucz": - _, kindx, dslgpsize = spflux_read_kurucz() ##Yanping test + _, kindx, dslgpsize = spflux_read_kurucz() # Yanping test elif template == "munari": - _, kindx, dslgpsize = spflux_read_munari() ##Yanping added + _, kindx, dslgpsize = spflux_read_munari() # Yanping added elif template == "BOSZ": - _, kindx, dslgpsize = spflux_read_bosz() ##Yanping added + _, kindx, dslgpsize = spflux_read_bosz() # Yanping added else: print( "Flux calibration templates has to be specified and be one of the three: 'kurucz','munari', 'BOSZ'." @@ -1277,15 +1277,15 @@ def spflux_bestmodel( if template == "kurucz": modflux, kindx, dslgpsize = spflux_read_kurucz( loglam - np.log10(1 + zpeak), dispimg - ) ##Yanping test + ) # Yanping test elif template == "munari": modflux, kindx, dslgpsize = spflux_read_munari( loglam - np.log10(1 + zpeak), dispimg - ) ##Yanping added + ) # Yanping added elif template == "BOSZ": modflux, kindx, dslgpsize = spflux_read_bosz( loglam - np.log10(1 + zpeak), dispimg - ) ##Yanping added + ) # Yanping added else: print( "Flux calibration templates has to be specified and be one of the three: 'kurucz','munari', 'BOSZ'." 
@@ -1493,10 +1493,10 @@ def spflux_bspline( ) outmask1 = 0 - if disp is not None: - x2 = disp[isort] - else: - pass + # if disp is not None: + # x2 = disp[isort] + # else: + # pass # BUG: this is actually done by bspline_iterfit sset = BSpline(*fullbkpt) diff --git a/python/lvmdrp/functions/cubeMethod.py b/python/lvmdrp/functions/cubeMethod.py index d4b398e7..d5b15326 100644 --- a/python/lvmdrp/functions/cubeMethod.py +++ b/python/lvmdrp/functions/cubeMethod.py @@ -6,12 +6,15 @@ try: import pylab from matplotlib import pyplot as plt -except: +except ImportError: pass from copy import deepcopy from scipy import stats -from lvmdrp.core.cube import Cube +from lvdrp.core import fit_profile +from lvmdrp.core.cube import Cube, loadCube +from lvmdrp.core.image import Image +from lvmdrp.core.rss import RSS from lvmdrp.core.passband import PassBand from lvmdrp.core.spectrum1d import Spectrum1D from lvmdrp.external import ancillary_func @@ -147,7 +150,7 @@ def measureDARPeak_drp( cube = Cube() cube.loadFitsData(cube_in) # kernel = numpy.ones((coadd,1,1),dtype='uint8') - coadd_cube = cube.medianFilter(coadd) + # coadd_cube = cube.medianFilter(coadd) select_slice = numpy.arange(cube._res_elements) % steps == 0 select_slice[0] = True @@ -158,7 +161,7 @@ def measureDARPeak_drp( cent_x = numpy.zeros(len(slices)) cent_y = numpy.zeros(len(slices)) - ref = int(numpy.rint(cube._res_elements / 2.0)) + # ref = int(numpy.rint(cube._res_elements / 2.0)) collapsed_img = cube.collapseCube("median", start_wave, end_wave) cent_guess = collapsed_img.centreMax( cent_x=collapsed_img._dim[1] / 2.0, @@ -167,7 +170,7 @@ def measureDARPeak_drp( ) m = 0 - plot = False + # plot = False for i in slices: if verbose == 1: print(i) @@ -180,13 +183,13 @@ def measureDARPeak_drp( m += 1 spec_x = Spectrum1D(data=cent_x, wave=wave) - poly_x = spec_x.smoothPoly( - order=smooth_poly, start_wave=start_wave, end_wave=end_wave - ) + # poly_x = spec_x.smoothPoly( + # order=smooth_poly, start_wave=start_wave, end_wave=end_wave + # ) spec_y = Spectrum1D(data=cent_y, wave=wave) - poly_y = spec_y.smoothPoly( - order=smooth_poly, start_wave=start_wave, end_wave=end_wave - ) + # poly_y = spec_y.smoothPoly( + # order=smooth_poly, start_wave=start_wave, end_wave=end_wave + # ) if start_wave is not None and end_wave is not None: select = numpy.logical_and(wave >= start_wave, wave <= end_wave) @@ -208,8 +211,8 @@ def measureDARPeak_drp( spec2_y = Spectrum1D(data=cent_y[select][select2], wave=wave[select][select2]) spec2_x = Spectrum1D(data=cent_x[select][select2], wave=wave[select][select2]) - poly_x = spec2_x.smoothPoly(order=smooth_poly, ref_base=cube._wave) - poly_y = spec2_y.smoothPoly(order=smooth_poly, ref_base=cube._wave) + #poly_x = spec2_x.smoothPoly(order=smooth_poly, ref_base=cube._wave) + #poly_y = spec2_y.smoothPoly(order=smooth_poly, ref_base=cube._wave) if fibers is None: fibers = cube._dim_x * cube._dim_y @@ -343,7 +346,7 @@ def fitDARPeak_drp( cube = Cube() cube.loadFitsData(cube_in) # kernel = numpy.ones((coadd,1,1),dtype='uint8') - coadd_cube = cube.medianFilter(coadd) + # coadd_cube = cube.medianFilter(coadd) select_slice = numpy.arange(cube._res_elements) % steps == 0 select_slice[0] = True @@ -354,7 +357,7 @@ def fitDARPeak_drp( cent_x = numpy.zeros(len(slices)) cent_y = numpy.zeros(len(slices)) - ref = int(numpy.rint(cube._res_elements / 2.0)) + #ref = int(numpy.rint(cube._res_elements / 2.0)) collapsed_img = cube.collapseCube("median", start_wave, end_wave) cent_guess = collapsed_img.centreMax( cent_x=collapsed_img._dim[1] / 
2.0, @@ -363,7 +366,7 @@ def fitDARPeak_drp( ) m = 0 - plot = False + #plot = False for i in slices: if verbose == 1: print(i) @@ -376,13 +379,13 @@ def fitDARPeak_drp( m += 1 spec_x = Spectrum1D(data=cent_x, wave=wave) - poly_x = spec_x.smoothPoly( - order=smooth_poly, start_wave=start_wave, end_wave=end_wave - ) + # poly_x = spec_x.smoothPoly( + # order=smooth_poly, start_wave=start_wave, end_wave=end_wave + # ) spec_y = Spectrum1D(data=cent_y, wave=wave) - poly_y = spec_y.smoothPoly( - order=smooth_poly, start_wave=start_wave, end_wave=end_wave - ) + # poly_y = spec_y.smoothPoly( + # order=smooth_poly, start_wave=start_wave, end_wave=end_wave + # ) if start_wave is not None and end_wave is not None: select = numpy.logical_and(wave >= start_wave, wave <= end_wave) @@ -404,8 +407,8 @@ def fitDARPeak_drp( spec2_y = Spectrum1D(data=cent_y[select][select2], wave=wave[select][select2]) spec2_x = Spectrum1D(data=cent_x[select][select2], wave=wave[select][select2]) - poly_x = spec2_x.smoothPoly(order=smooth_poly, ref_base=cube._wave) - poly_y = spec2_y.smoothPoly(order=smooth_poly, ref_base=cube._wave) + # poly_x = spec2_x.smoothPoly(order=smooth_poly, ref_base=cube._wave) + # poly_y = spec2_y.smoothPoly(order=smooth_poly, ref_base=cube._wave) if fibers is None: fibers = cube._dim_x * cube._dim_y @@ -634,12 +637,12 @@ def matchCubeAperSpec_drp( ratio = spec_ref_resamp / spec_in_resamp # if verbose==1: # pylab.plot(ratio._wave, ratio._data, '-k') - out_par = ratio.smoothPoly( - order=poly_correct, - start_wave=start_wave, - end_wave=end_wave, - ref_base=cube1._wave, - ) + # out_par = ratio.smoothPoly( + # order=poly_correct, + # start_wave=start_wave, + # end_wave=end_wave, + # ref_base=cube1._wave, + # ) new_cube = cube1 * ratio new_cube.writeFitsData(cube_out) if verbose == 1 or outfig != "": @@ -957,7 +960,7 @@ def combineCubes_drp(incubes, outcube, method="mean", replace_error="1e10"): error_img[j, :, :] = cubes[j]._error[i, :, :] image[j, :, :] = cubes[j]._data[i, :, :] image2[j, :, :] = cubes[j]._data[i, :, :] - select = mask_img == True + select = mask_img is True image[select] = 0 error_img[select] = 0 good_pix = numpy.sum(numpy.logical_not(select), 0) @@ -970,8 +973,7 @@ def combineCubes_drp(incubes, outcube, method="mean", replace_error="1e10"): data[i, select_bad] = numpy.sum(image2, 0)[select_bad] / len(incubes) if error is not None: error[i, select_good] = numpy.sqrt( - numpy.sum(error_img**2, 0)[select_good] - / good_pix[select_good] ** 2 + numpy.sum(error_img**2, 0)[select_good] / good_pix[select_good] ** 2 ) error[i, select_bad] = replace_error if mask is not None: From a76c88d6542b4fabe433deed870bab1b4043a2f1 Mon Sep 17 00:00:00 2001 From: Brian Cherinka Date: Wed, 29 Nov 2023 17:01:34 -0500 Subject: [PATCH 07/18] turning on linting in commit workflow --- .github/workflows/test.yaml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 67a602b5..27e89542 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -47,10 +47,10 @@ jobs: pip install wheel pip install .[dev] - # - name: Lint with ruff - # run: | - # pip install ruff - # ruff . + - name: Lint with ruff + run: | + pip install ruff + ruff . 
- name: Test with pytest run: | From bf9a65914c1de6b48ee363ae819097a343555db8 Mon Sep 17 00:00:00 2001 From: Brian Cherinka Date: Wed, 29 Nov 2023 17:03:57 -0500 Subject: [PATCH 08/18] more linting --- docs/nb/astro.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/nb/astro.py b/docs/nb/astro.py index 3d748f18..32084750 100644 --- a/docs/nb/astro.py +++ b/docs/nb/astro.py @@ -4,21 +4,21 @@ Space Telescope Science Institute -Synopsis: +Synopsis: Thse are a series of subroutines/utilites that are intended to be of general use for astronomy applications -Description: +Description: Notes: Standards - do not include anything here that is specific to one project or file format that is not very generic. These routines should be as bulletproof as possible - + History: 090208 ksl Coding begun -120129 ksl Added routines having to do with images +120129 ksl Added routines having to do with images """ @@ -93,13 +93,13 @@ def radec2hms(ra="225.2", dec="-17.35", ra_format="", dec_format=""): """ - if isinstance(ra, float) == False: + if isinstance(ra, float) is False: try: ra = eval(ra) except TypeError: ra = float(ra) - if isinstance(dec, float) == False: + if isinstance(dec, float) is False: try: dec = eval(dec) except TypeError: From cb8404e3a374c01c293d0493cd86945380c9f65d Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 7 Dec 2023 09:07:50 -0300 Subject: [PATCH 09/18] implementing extendData method --- python/lvmdrp/core/rss.py | 69 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 69 insertions(+) diff --git a/python/lvmdrp/core/rss.py b/python/lvmdrp/core/rss.py index e7b471b9..6f59f9a0 100644 --- a/python/lvmdrp/core/rss.py +++ b/python/lvmdrp/core/rss.py @@ -765,6 +765,75 @@ def getSpec(self, fiber): return spec + def extendData(self, new_wave): + """Extends data, error, mask, and sky to new wavelength array + + Given a new wavelength array `new_wave`, this function extends + the data, error, mask, and sky arrays to the new wavelength array, + filling in the new pixels with NaNs. 
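+        The mask is padded with False, and the LSF (`_inst_fwhm`), sky, and
+        sky error arrays are extended in the same way when present. For
+        example, extending from wave=[4000, 4001] to
+        new_wave=[3999, 4000, 4001, 4002] adds a leading and a trailing
+        column filled with NaN.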
+ + Parameters + ---------- + new_wave : array-like + New wavelength array to extend to + + Returns + ------- + self : RSS + Returns self with extended arrays + """ + if self._wave is None: + raise ValueError("No wavelength array found in RSS object") + + if self._data is None: + raise ValueError("No data array found in RSS object") + + if len(new_wave) == 0: + raise ValueError("New wavelength array is empty") + + # find positions in new wavelength array that contain self._wave + ipix, fpix = numpy.searchsorted(new_wave, self._wave[[0, -1]], side="left") + + # define new arrays filled with NaNs + new_data = numpy.full((self._data.shape[0], new_wave.size), numpy.nan, dtype=numpy.float32) + new_data[:, ipix:fpix+1] = self._data + if self._error is not None: + new_error = numpy.full((self._error.shape[0], new_wave.size), numpy.nan, dtype=numpy.float32) + new_error[:, ipix:fpix+1] = self._error + else: + new_error = None + if self._mask is not None: + new_mask = numpy.full((self._mask.shape[0], new_wave.size), False, dtype=bool) + new_mask[:, ipix:fpix+1] = self._mask + else: + new_mask = None + if self._sky is not None: + new_sky = numpy.full((self._sky.shape[0], new_wave.size), numpy.nan, dtype=numpy.float32) + new_sky[:, ipix:fpix+1] = self._sky + else: + new_sky = None + if self._sky_error is not None: + new_sky_error = numpy.full((self._sky_error.shape[0], new_wave.size), numpy.nan, dtype=numpy.float32) + new_sky_error[:, ipix:fpix+1] = self._sky_error + else: + new_sky_error = None + if self._inst_fwhm is not None: + new_inst_fwhm = numpy.full((self._inst_fwhm.shape[0], new_wave.size), numpy.nan, dtype=numpy.float32) + new_inst_fwhm[:, ipix:fpix+1] = self._inst_fwhm + else: + new_inst_fwhm = None + + # set new arrays + self._data = new_data + self._error = new_error + self._mask = new_mask + self._sky = new_sky + self._sky_error = new_sky_error + self._inst_fwhm = new_inst_fwhm + self._wave = new_wave + + return self + def combineRSS(self, rss_in, method="mean", replace_error=1e10): dim = rss_in[0]._data.shape data = numpy.zeros((len(rss_in), dim[0], dim[1]), dtype=numpy.float32) From 6149d6d49597976c99aa6eafa257b47ef5d7e1e1 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 7 Dec 2023 09:08:10 -0300 Subject: [PATCH 10/18] skipping extra interpolation by default --- python/lvmdrp/functions/rssMethod.py | 56 ++++++++++++++++++---------- 1 file changed, 36 insertions(+), 20 deletions(-) diff --git a/python/lvmdrp/functions/rssMethod.py b/python/lvmdrp/functions/rssMethod.py index 27b42244..6ded788a 100644 --- a/python/lvmdrp/functions/rssMethod.py +++ b/python/lvmdrp/functions/rssMethod.py @@ -3098,28 +3098,44 @@ def join_spec_channels(in_rsss: List[str], out_rss: str, use_weights: bool = Tru [rss.apply_pixelmask() for rss in rsss] # get wavelengths - log.info("computing best wavelength array") + log.info("merging wavelength arrays") waves = [rss._wave for rss in rsss] - # compute the combined wavelengths - new_wave = wave_little_interpol(waves) + new_wave = numpy.unique(numpy.concatenate(waves)) sampling = numpy.diff(new_wave) - log.info(f"new wavelength sampling: min = {sampling.min():.2f}, max = {sampling.max():.2f}") - - # define interpolators - log.info("interpolating RSS data in new wavelength array") - fluxes_f = [interpolate.interp1d(rss._wave, rss._data, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - errors_f = [interpolate.interp1d(rss._wave, rss._error, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - masks_f = 
[interpolate.interp1d(rss._wave, rss._mask, axis=1, kind="nearest", bounds_error=False, fill_value=0) for rss in rsss] - lsfs_f = [interpolate.interp1d(rss._wave, rss._inst_fwhm, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - sky_f = [interpolate.interp1d(rss._wave, rss._sky, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - sky_error_f = [interpolate.interp1d(rss._wave, rss._sky_error, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - # evaluate interpolators - fluxes = numpy.asarray([f(new_wave).astype("float32") for f in fluxes_f]) - errors = numpy.asarray([f(new_wave).astype("float32") for f in errors_f]) - masks = numpy.asarray([f(new_wave).astype("uint8") for f in masks_f]) - lsfs = numpy.asarray([f(new_wave).astype("float32") for f in lsfs_f]) - skies = numpy.asarray([f(new_wave).astype("float32") for f in sky_f]) - sky_errors = numpy.asarray([f(new_wave).astype("float32") for f in sky_error_f]) + + # optionally interpolate if the merged wavelengths are not monotonic + if numpy.all(numpy.isclose(sampling, sampling[0])): + log.info(f"current wavelength sampling: min = {sampling.min():.2f}, max = {sampling.max():.2f}") + # extend rss._data to new_wave filling with NaNs + rsss = [rss.extendData(new_wave) for rss in rsss] + fluxes = numpy.asarray([rss._data for rss in rsss]) + errors = numpy.asarray([rss._error for rss in rsss]) + masks = numpy.asarray([rss._mask for rss in rsss]) + lsfs = numpy.asarray([rss._inst_fwhm for rss in rsss]) + skies = numpy.asarray([rss._sky for rss in rsss]) + sky_errors = numpy.asarray([rss._sky_error for rss in rsss]) + else: + log.warning("merged wavelengths are not monotonic, interpolation needed") + # compute the combined wavelengths + new_wave = wave_little_interpol(waves) + sampling = numpy.diff(new_wave) + log.info(f"new wavelength sampling: min = {sampling.min():.2f}, max = {sampling.max():.2f}") + + # define interpolators + log.info("interpolating RSS data in new wavelength array") + fluxes_f = [interpolate.interp1d(rss._wave, rss._data, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] + errors_f = [interpolate.interp1d(rss._wave, rss._error, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] + masks_f = [interpolate.interp1d(rss._wave, rss._mask, axis=1, kind="nearest", bounds_error=False, fill_value=0) for rss in rsss] + lsfs_f = [interpolate.interp1d(rss._wave, rss._inst_fwhm, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] + sky_f = [interpolate.interp1d(rss._wave, rss._sky, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] + sky_error_f = [interpolate.interp1d(rss._wave, rss._sky_error, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] + # evaluate interpolators + fluxes = numpy.asarray([f(new_wave).astype("float32") for f in fluxes_f]) + errors = numpy.asarray([f(new_wave).astype("float32") for f in errors_f]) + masks = numpy.asarray([f(new_wave).astype("uint8") for f in masks_f]) + lsfs = numpy.asarray([f(new_wave).astype("float32") for f in lsfs_f]) + skies = numpy.asarray([f(new_wave).astype("float32") for f in sky_f]) + sky_errors = numpy.asarray([f(new_wave).astype("float32") for f in sky_error_f]) # define weights for channel combination vars = errors ** 2 From 8bd9cb41482041e13a52899c1856755a80674184 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 7 Dec 2023 09:46:04 -0300 Subject: [PATCH 11/18] refactoring channel combination into a RSS class method & removing _chain_join 
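
Move the brz channel stitching out of rssMethod.join_spec_channels and into a
new RSS.from_channels classmethod, and drop the old _chain_join helper. A
minimal sketch of the intended call pattern (file names are illustrative
only):

    from lvmdrp.core.rss import RSS, loadRSS

    # one exposure, one spectrograph, three camera frames
    rss_b = loadRSS("lvm-b1-science.fits")
    rss_r = loadRSS("lvm-r1-science.fits")
    rss_z = loadRSS("lvm-z1-science.fits")

    # combine with inverse-variance weights across the channel overlaps
    brz = RSS.from_channels(rss_b, rss_r, rss_z, use_weights=True)
    brz.writeFitsData("lvm-brz1-science.fits")

join_spec_channels keeps its file-path interface and now simply loads the
three per-channel RSS files and delegates the combination to the classmethod.
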
--- python/lvmdrp/core/rss.py | 117 +++++++++++++++++++++++++-- python/lvmdrp/functions/rssMethod.py | 77 +----------------- 2 files changed, 112 insertions(+), 82 deletions(-) diff --git a/python/lvmdrp/core/rss.py b/python/lvmdrp/core/rss.py index 6f59f9a0..bfb0a6b0 100644 --- a/python/lvmdrp/core/rss.py +++ b/python/lvmdrp/core/rss.py @@ -1,6 +1,7 @@ import os import numpy import bottleneck as bn +from scipy import interpolate from astropy.io import fits as pyfits from astropy.wcs import WCS from astropy.table import Table @@ -13,7 +14,7 @@ from lvmdrp.core.fiberrows import FiberRows from lvmdrp.core.header import Header from lvmdrp.core.positionTable import PositionTable -from lvmdrp.core.spectrum1d import Spectrum1D +from lvmdrp.core.spectrum1d import Spectrum1D, wave_little_interpol def _read_pixwav_map(lamp: str, camera: str, pixels=None, waves=None): @@ -77,14 +78,114 @@ def _read_pixwav_map(lamp: str, camera: str, pixels=None, waves=None): return pixwav_map_path, ref_fiber, pixels, waves, use_line -def _chain_join(b, r, z): - ii = [i for i in [b, r, z] if i] - x = ii[0] - for e in ii[1:]: - x = x.coaddSpec(e) - return x - class RSS(FiberRows): + + @classmethod + def from_channels(cls, rss_b, rss_r, rss_z, use_weights=True): + """Stitch together RSS channels into a single RSS object + + Parameters + ---------- + rss_b : RSS + RSS object for the blue channel + rss_r : RSS + RSS object for the red channel + rss_z : RSS + RSS object for the z channel + use_weights : bool, optional + whether to use weights for channel combination, by default True + + Returns + ------- + RSS + """ + + rsss = [rss_b, rss_r, rss_z] + + # get wavelengths + log.info("merging wavelength arrays") + waves = [rss._wave for rss in rsss] + new_wave = numpy.unique(numpy.concatenate(waves)) + sampling = numpy.diff(new_wave) + + # optionally interpolate if the merged wavelengths are not monotonic + if numpy.all(numpy.isclose(sampling, sampling[0])): + log.info(f"current wavelength sampling: min = {sampling.min():.2f}, max = {sampling.max():.2f}") + # extend rss._data to new_wave filling with NaNs + rsss = [rss.extendData(new_wave) for rss in rsss] + fluxes = numpy.asarray([rss._data for rss in rsss]) + errors = numpy.asarray([rss._error for rss in rsss]) + masks = numpy.asarray([rss._mask for rss in rsss]) + lsfs = numpy.asarray([rss._inst_fwhm for rss in rsss]) + skies = numpy.asarray([rss._sky for rss in rsss]) + sky_errors = numpy.asarray([rss._sky_error for rss in rsss]) + else: + log.warning("merged wavelengths are not monotonic, interpolation needed") + # compute the combined wavelengths + new_wave = wave_little_interpol(waves) + sampling = numpy.diff(new_wave) + log.info(f"new wavelength sampling: min = {sampling.min():.2f}, max = {sampling.max():.2f}") + + # define interpolators + log.info("interpolating RSS data in new wavelength array") + fluxes_f = [interpolate.interp1d(rss._wave, rss._data, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] + errors_f = [interpolate.interp1d(rss._wave, rss._error, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] + masks_f = [interpolate.interp1d(rss._wave, rss._mask, axis=1, kind="nearest", bounds_error=False, fill_value=0) for rss in rsss] + lsfs_f = [interpolate.interp1d(rss._wave, rss._inst_fwhm, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] + sky_f = [interpolate.interp1d(rss._wave, rss._sky, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] + sky_error_f = [interpolate.interp1d(rss._wave, 
rss._sky_error, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] + # evaluate interpolators + fluxes = numpy.asarray([f(new_wave).astype("float32") for f in fluxes_f]) + errors = numpy.asarray([f(new_wave).astype("float32") for f in errors_f]) + masks = numpy.asarray([f(new_wave).astype("uint8") for f in masks_f]) + lsfs = numpy.asarray([f(new_wave).astype("float32") for f in lsfs_f]) + skies = numpy.asarray([f(new_wave).astype("float32") for f in sky_f]) + sky_errors = numpy.asarray([f(new_wave).astype("float32") for f in sky_error_f]) + + # define weights for channel combination + vars = errors ** 2 + log.info("combining channel data") + if use_weights: + weights = 1.0 / vars + weights = weights / bn.nansum(weights, axis=0)[None] + + new_data = bn.nansum(fluxes * weights, axis=0) + new_inst_fwhm = bn.nansum(lsfs * weights, axis=0) + new_error = numpy.sqrt(bn.nansum(vars, axis=0)) + new_mask = numpy.sum(masks, axis=0).astype("bool") + new_sky = bn.nansum(skies * weights, axis=0) + new_sky_error = numpy.sqrt(bn.nansum(sky_errors ** 2 * weights ** 2, axis=0)) + else: + # channel-combine RSS data + new_data = bn.nanmean(fluxes, axis=0) + new_inst_fwhm = bn.nanmean(lsfs, axis=0) + new_error = numpy.sqrt(bn.nanmean(vars, axis=0)) + new_mask = numpy.sum(masks, axis=0).astype("bool") + new_sky = bn.nansum(skies, axis=0) + new_sky_error = numpy.sqrt(bn.nanmean(sky_errors ** 2, axis=0)) + + # create RSS + new_hdr = rsss[0]._header.copy() + for rss in rsss[1:]: + new_hdr.update(rss._header) + new_hdr["NAXIS1"] = new_data.shape[1] + new_hdr["NAXIS2"] = new_data.shape[0] + new_hdr["CCD"] = ",".join([rss._header["CCD"][0] for rss in rsss]) + wcs = WCS(new_hdr) + wcs.spectral.wcs.cdelt[0] = new_wave[1] - new_wave[0] + wcs.spectral.wcs.crval[0] = new_wave[0] + new_hdr.update(wcs.to_header()) + return RSS( + data=new_data, + error=new_error, + mask=new_mask, + wave=new_wave, + inst_fwhm=new_inst_fwhm, + sky=new_sky, + sky_error=new_sky_error, + header=new_hdr + ) + @classmethod def from_spectra1d( cls, diff --git a/python/lvmdrp/functions/rssMethod.py b/python/lvmdrp/functions/rssMethod.py index 6ded788a..90a03836 100644 --- a/python/lvmdrp/functions/rssMethod.py +++ b/python/lvmdrp/functions/rssMethod.py @@ -30,7 +30,7 @@ from lvmdrp.core.passband import PassBand from lvmdrp.core.plot import plt, create_subplots, save_fig, plot_wavesol_residuals, plot_wavesol_coeffs from lvmdrp.core.rss import RSS, _read_pixwav_map, loadRSS -from lvmdrp.core.spectrum1d import Spectrum1D, wave_little_interpol, _spec_from_lines, _cross_match +from lvmdrp.core.spectrum1d import Spectrum1D, _spec_from_lines, _cross_match from lvmdrp.external import ancillary_func from lvmdrp.utils import flatten from lvmdrp import log @@ -3097,80 +3097,9 @@ def join_spec_channels(in_rsss: List[str], out_rss: str, use_weights: bool = Tru # set masked pixels to NaN [rss.apply_pixelmask() for rss in rsss] - # get wavelengths - log.info("merging wavelength arrays") - waves = [rss._wave for rss in rsss] - new_wave = numpy.unique(numpy.concatenate(waves)) - sampling = numpy.diff(new_wave) + # combine channels + new_rss = RSS.from_channels(*rsss, use_weights=use_weights) - # optionally interpolate if the merged wavelengths are not monotonic - if numpy.all(numpy.isclose(sampling, sampling[0])): - log.info(f"current wavelength sampling: min = {sampling.min():.2f}, max = {sampling.max():.2f}") - # extend rss._data to new_wave filling with NaNs - rsss = [rss.extendData(new_wave) for rss in rsss] - fluxes = numpy.asarray([rss._data 
for rss in rsss]) - errors = numpy.asarray([rss._error for rss in rsss]) - masks = numpy.asarray([rss._mask for rss in rsss]) - lsfs = numpy.asarray([rss._inst_fwhm for rss in rsss]) - skies = numpy.asarray([rss._sky for rss in rsss]) - sky_errors = numpy.asarray([rss._sky_error for rss in rsss]) - else: - log.warning("merged wavelengths are not monotonic, interpolation needed") - # compute the combined wavelengths - new_wave = wave_little_interpol(waves) - sampling = numpy.diff(new_wave) - log.info(f"new wavelength sampling: min = {sampling.min():.2f}, max = {sampling.max():.2f}") - - # define interpolators - log.info("interpolating RSS data in new wavelength array") - fluxes_f = [interpolate.interp1d(rss._wave, rss._data, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - errors_f = [interpolate.interp1d(rss._wave, rss._error, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - masks_f = [interpolate.interp1d(rss._wave, rss._mask, axis=1, kind="nearest", bounds_error=False, fill_value=0) for rss in rsss] - lsfs_f = [interpolate.interp1d(rss._wave, rss._inst_fwhm, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - sky_f = [interpolate.interp1d(rss._wave, rss._sky, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - sky_error_f = [interpolate.interp1d(rss._wave, rss._sky_error, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - # evaluate interpolators - fluxes = numpy.asarray([f(new_wave).astype("float32") for f in fluxes_f]) - errors = numpy.asarray([f(new_wave).astype("float32") for f in errors_f]) - masks = numpy.asarray([f(new_wave).astype("uint8") for f in masks_f]) - lsfs = numpy.asarray([f(new_wave).astype("float32") for f in lsfs_f]) - skies = numpy.asarray([f(new_wave).astype("float32") for f in sky_f]) - sky_errors = numpy.asarray([f(new_wave).astype("float32") for f in sky_error_f]) - - # define weights for channel combination - vars = errors ** 2 - log.info("combining channel data") - if use_weights: - weights = 1.0 / vars - weights = weights / bn.nansum(weights, axis=0)[None] - - new_data = bn.nansum(fluxes * weights, axis=0) - new_inst_fwhm = bn.nansum(lsfs * weights, axis=0) - new_error = numpy.sqrt(bn.nansum(vars, axis=0)) - new_mask = numpy.sum(masks, axis=0).astype("bool") - new_sky = bn.nansum(skies * weights, axis=0) - new_sky_error = numpy.sqrt(bn.nansum(sky_errors ** 2 * weights ** 2, axis=0)) - else: - # channel-combine RSS data - new_data = bn.nanmean(fluxes, axis=0) - new_inst_fwhm = bn.nanmean(lsfs, axis=0) - new_error = numpy.sqrt(bn.nanmean(vars, axis=0)) - new_mask = numpy.sum(masks, axis=0).astype("bool") - new_sky = bn.nansum(skies, axis=0) - new_sky_error = numpy.sqrt(bn.nanmean(sky_errors ** 2, axis=0)) - - # create RSS - new_hdr = rsss[0]._header.copy() - for rss in rsss[1:]: - new_hdr.update(rss._header) - new_hdr["NAXIS1"] = new_data.shape[1] - new_hdr["NAXIS2"] = new_data.shape[0] - new_hdr["CCD"] = ",".join([rss._header["CCD"][0] for rss in rsss]) - wcs = WCS(new_hdr) - wcs.spectral.wcs.cdelt[0] = new_wave[1] - new_wave[0] - wcs.spectral.wcs.crval[0] = new_wave[0] - new_hdr.update(wcs.to_header()) - new_rss = RSS(data=new_data, error=new_error, mask=new_mask, wave=new_wave, inst_fwhm=new_inst_fwhm, sky=new_sky, sky_error=new_sky_error, header=new_hdr) # write output RSS if out_rss is not None: log.info(f"writing output RSS to {os.path.basename(out_rss)}") From 2f014afcc3f93ceda978dfe685277843ecc20d98 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 7 
Dec 2023 10:09:16 -0300 Subject: [PATCH 12/18] fixing docstrings --- python/lvmdrp/core/rss.py | 2 +- python/lvmdrp/functions/rssMethod.py | 13 ++++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/python/lvmdrp/core/rss.py b/python/lvmdrp/core/rss.py index bfb0a6b0..a3738cd2 100644 --- a/python/lvmdrp/core/rss.py +++ b/python/lvmdrp/core/rss.py @@ -93,7 +93,7 @@ def from_channels(cls, rss_b, rss_r, rss_z, use_weights=True): rss_z : RSS RSS object for the z channel use_weights : bool, optional - whether to use weights for channel combination, by default True + use inverse variance weights for channel combination, by default True Returns ------- diff --git a/python/lvmdrp/functions/rssMethod.py b/python/lvmdrp/functions/rssMethod.py index 90a03836..c1a27bd2 100644 --- a/python/lvmdrp/functions/rssMethod.py +++ b/python/lvmdrp/functions/rssMethod.py @@ -3072,18 +3072,21 @@ def DAR_registerSDSS_drp( def join_spec_channels(in_rsss: List[str], out_rss: str, use_weights: bool = True): - """combine the given RSS list through the overlaping wavelength range + """Stitch together the three RSS channels (brz) into a single RSS. - Run once per exposure, for one spectrograph at a time. - in_rss is a list of 3 files, one for each channel, for a given - exposure and spectrograph id. + Given a list of three rss files (one per channel), this function + stitches them together into a single RSS file. The output RSS file + will have the same number of fibers as the input RSS files, but + the wavelength range will be the union of the wavelength ranges + of the input RSS files. Parameters ---------- in_rsss : array_like list of RSS file paths for each spectrograph channel out_rss : str - output RSS file path + use_weights : bool, optional + use inverse variance weights for channel combination, by default True Returns ------- From 8783e58af1f5e71fdf341b47683e1adc39e19806 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 7 Dec 2023 11:21:06 -0300 Subject: [PATCH 13/18] single lopp in channel combination --- python/lvmdrp/core/rss.py | 60 +++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 22 deletions(-) diff --git a/python/lvmdrp/core/rss.py b/python/lvmdrp/core/rss.py index a3738cd2..82e6248a 100644 --- a/python/lvmdrp/core/rss.py +++ b/python/lvmdrp/core/rss.py @@ -87,9 +87,9 @@ def from_channels(cls, rss_b, rss_r, rss_z, use_weights=True): Parameters ---------- rss_b : RSS - RSS object for the blue channel + RSS object for the b channel rss_r : RSS - RSS object for the red channel + RSS object for the r channel rss_z : RSS RSS object for the z channel use_weights : bool, optional @@ -98,6 +98,7 @@ def from_channels(cls, rss_b, rss_r, rss_z, use_weights=True): Returns ------- RSS + RSS object with data from all three channels """ rsss = [rss_b, rss_r, rss_z] @@ -112,13 +113,21 @@ def from_channels(cls, rss_b, rss_r, rss_z, use_weights=True): if numpy.all(numpy.isclose(sampling, sampling[0])): log.info(f"current wavelength sampling: min = {sampling.min():.2f}, max = {sampling.max():.2f}") # extend rss._data to new_wave filling with NaNs - rsss = [rss.extendData(new_wave) for rss in rsss] - fluxes = numpy.asarray([rss._data for rss in rsss]) - errors = numpy.asarray([rss._error for rss in rsss]) - masks = numpy.asarray([rss._mask for rss in rsss]) - lsfs = numpy.asarray([rss._inst_fwhm for rss in rsss]) - skies = numpy.asarray([rss._sky for rss in rsss]) - sky_errors = numpy.asarray([rss._sky_error for rss in rsss]) + fluxes, errors, masks, lsfs, skies, 
sky_errors = [], [], [], [], [], [] + for rss in rsss: + rss = rss.extendData(new_wave) + fluxes.append(rss._data) + errors.append(rss._error) + masks.append(rss._mask) + lsfs.append(rss._inst_fwhm) + skies.append(rss._sky) + sky_errors.append(rss._sky_error) + fluxes = numpy.asarray(fluxes) + errors = numpy.asarray(errors) + masks = numpy.asarray(masks) + lsfs = numpy.asarray(lsfs) + skies = numpy.asarray(skies) + sky_errors = numpy.asarray(sky_errors) else: log.warning("merged wavelengths are not monotonic, interpolation needed") # compute the combined wavelengths @@ -128,19 +137,26 @@ def from_channels(cls, rss_b, rss_r, rss_z, use_weights=True): # define interpolators log.info("interpolating RSS data in new wavelength array") - fluxes_f = [interpolate.interp1d(rss._wave, rss._data, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - errors_f = [interpolate.interp1d(rss._wave, rss._error, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - masks_f = [interpolate.interp1d(rss._wave, rss._mask, axis=1, kind="nearest", bounds_error=False, fill_value=0) for rss in rsss] - lsfs_f = [interpolate.interp1d(rss._wave, rss._inst_fwhm, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - sky_f = [interpolate.interp1d(rss._wave, rss._sky, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - sky_error_f = [interpolate.interp1d(rss._wave, rss._sky_error, axis=1, bounds_error=False, fill_value=numpy.nan) for rss in rsss] - # evaluate interpolators - fluxes = numpy.asarray([f(new_wave).astype("float32") for f in fluxes_f]) - errors = numpy.asarray([f(new_wave).astype("float32") for f in errors_f]) - masks = numpy.asarray([f(new_wave).astype("uint8") for f in masks_f]) - lsfs = numpy.asarray([f(new_wave).astype("float32") for f in lsfs_f]) - skies = numpy.asarray([f(new_wave).astype("float32") for f in sky_f]) - sky_errors = numpy.asarray([f(new_wave).astype("float32") for f in sky_error_f]) + fluxes, errors, masks, lsfs, skies, sky_errors = [], [], [], [], [], [] + for rss in rsss: + f = interpolate.interp1d(rss._wave, rss._data, axis=1, bounds_error=False, fill_value=numpy.nan) + fluxes.append(f(new_wave).astype("float32")) + f = interpolate.interp1d(rss._wave, rss._error, axis=1, bounds_error=False, fill_value=numpy.nan) + errors.append(f(new_wave).astype("float32")) + f = interpolate.interp1d(rss._wave, rss._mask, axis=1, kind="nearest", bounds_error=False, fill_value=0) + masks.append(f(new_wave).astype("uint8")) + f = interpolate.interp1d(rss._wave, rss._inst_fwhm, axis=1, bounds_error=False, fill_value=numpy.nan) + lsfs.append(f(new_wave).astype("float32")) + f = interpolate.interp1d(rss._wave, rss._sky, axis=1, bounds_error=False, fill_value=numpy.nan) + skies.append(f(new_wave).astype("float32")) + f = interpolate.interp1d(rss._wave, rss._sky_error, axis=1, bounds_error=False, fill_value=numpy.nan) + sky_errors.append(f(new_wave).astype("float32")) + fluxes = numpy.asarray(fluxes) + errors = numpy.asarray(errors) + masks = numpy.asarray(masks) + lsfs = numpy.asarray(lsfs) + skies = numpy.asarray(skies) + sky_errors = numpy.asarray(sky_errors) # define weights for channel combination vars = errors ** 2 From 3492eaaca5026ad251a6b75f309ea6d7f3985993 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 7 Dec 2023 11:22:47 -0300 Subject: [PATCH 14/18] refactoring spectrograph stacking --- python/lvmdrp/core/rss.py | 109 +++++++++++++++++++- python/lvmdrp/functions/rssMethod.py | 146 ++++++--------------------- 
python/lvmdrp/functions/run_drp.py | 4 +- 3 files changed, 142 insertions(+), 117 deletions(-) diff --git a/python/lvmdrp/core/rss.py b/python/lvmdrp/core/rss.py index 82e6248a..4122a2f3 100644 --- a/python/lvmdrp/core/rss.py +++ b/python/lvmdrp/core/rss.py @@ -12,7 +12,7 @@ from lvmdrp.core.apertures import Aperture from lvmdrp.core.cube import Cube from lvmdrp.core.fiberrows import FiberRows -from lvmdrp.core.header import Header +from lvmdrp.core.header import Header, combineHdr from lvmdrp.core.positionTable import PositionTable from lvmdrp.core.spectrum1d import Spectrum1D, wave_little_interpol @@ -80,6 +80,113 @@ def _read_pixwav_map(lamp: str, camera: str, pixels=None, waves=None): class RSS(FiberRows): + @classmethod + def from_spectrographs(cls, rss_sp1, rss_sp2, rss_sp3): + """Stacks together RSS objects from the three spectrographs + + Parameters + ---------- + rss_sp1 : RSS + RSS object for spectrograph 1 + rss_sp2 : RSS + RSS object for spectrograph 2 + rss_sp3 : RSS + RSS object for spectrograph 3 + + Returns + ------- + RSS + RSS object with data from all three spectrographs + """ + # load and stack each extension + hdrs = [] + rsss = [rss_sp1, rss_sp2, rss_sp3] + for i in range(len(rsss)): + rss = rsss[i] + if i == 0: + data_out = rss._data + if rss._error is not None: + error_out = rss._error + if rss._mask is not None: + mask_out = rss._mask + if rss._wave is not None: + wave_out = rss._wave + if rss._inst_fwhm is not None: + fwhm_out = rss._inst_fwhm + if rss._sky is not None: + sky_out = rss._sky + if rss._sky_error is not None: + sky_error_out = rss._sky_error + if rss._header is not None: + hdrs.append(Header(rss.getHeader())) + if rss._fluxcal is not None: + fluxcal_out = rss._fluxcal + else: + data_out = numpy.concatenate((data_out, rss._data), axis=0) + if rss._wave is not None: + if len(wave_out.shape) == 2 and len(rss._wave.shape) == 2: + wave_out = numpy.concatenate((wave_out, rss._wave), axis=0) + elif len(wave_out.shape) == 1 and len(rss._wave.shape) == 1 and numpy.isclose(wave_out, rss._wave).all(): + wave_out = wave_out + else: + raise ValueError(f"Cannot concatenate wavelength arrays of different shapes: {wave_out.shape} and {rss._wave.shape} or inhomogeneous wavelength arrays") + else: + wave_out = None + if rss._inst_fwhm is not None: + if len(fwhm_out.shape) == 2 and len(rss._inst_fwhm.shape) == 2: + fwhm_out = numpy.concatenate((fwhm_out, rss._inst_fwhm), axis=0) + elif len(fwhm_out.shape) == 1 and len(rss._inst_fwhm.shape) == 1 and numpy.isclose(fwhm_out, rss._inst_fwhm).all(): + fwhm_out = fwhm_out + else: + raise ValueError(f"Cannot concatenate FWHM arrays of different shapes: {fwhm_out.shape} and {rss._inst_fwhm.shape} or inhomogeneous FWHM arrays") + else: + fwhm_out = None + if rss._error is not None: + error_out = numpy.concatenate((error_out, rss._error), axis=0) + else: + error_out = None + if rss._mask is not None: + mask_out = numpy.concatenate((mask_out, rss._mask), axis=0) + else: + mask_out = None + if rss._sky is not None: + sky_out = numpy.concatenate((sky_out, rss._sky), axis=0) + else: + sky_out = None + if rss._sky_error is not None: + sky_error_out = numpy.concatenate((sky_error_out, rss._sky_error), axis=0) + else: + sky_error_out = None + if rss._header is not None: + hdrs.append(Header(rss.getHeader())) + if rss._fluxcal is not None: + f = fluxcal_out.to_pandas() + fluxcal_out = Table.from_pandas(f.combine_first(rss._fluxcal.to_pandas())) + else: + fluxcal_out = None + + # update header + if len(hdrs) > 0: + hdr_out = 
combineHdr(hdrs) + else: + hdr_out = None + + # update slitmap + slitmap_out = rss._slitmap + + return cls( + data=data_out, + error=error_out, + mask=mask_out, + wave=wave_out, + inst_fwhm=fwhm_out, + sky=sky_out, + sky_error=sky_error_out, + header=hdr_out._header, + slitmap=slitmap_out, + fluxcal=fluxcal_out, + ) + @classmethod def from_channels(cls, rss_b, rss_r, rss_z, use_weights=True): """Stitch together RSS channels into a single RSS object diff --git a/python/lvmdrp/functions/rssMethod.py b/python/lvmdrp/functions/rssMethod.py index c1a27bd2..580a0384 100644 --- a/python/lvmdrp/functions/rssMethod.py +++ b/python/lvmdrp/functions/rssMethod.py @@ -1716,120 +1716,6 @@ def combineRSS_drp(in_rsss, out_rss, method="mean"): combined_rss.writeFitsData(out_rss) -def stack_rss(in_rsss: List[str], out_rss: str, axis: int = 0) -> RSS: - """stacks a list of RSS objects along a given axis - - Parameters - ---------- - in_rsss : List[str] - list of RSS file paths - out_rss : str - output RSS file path - axis : int, optional - axis along which to stack the RSS objects, by default 0 - - Returns - ------- - RSS - stacked RSS object - """ - - # load and stack each extension - log.info(f"stacking frames in {','.join([os.path.basename(in_rss) for in_rss in in_rsss])} along axis {axis}") - hdrs = [] - for i in range(len(in_rsss)): - rss = loadRSS(in_rsss[i]) - if i == 0: - data_out = rss._data - if rss._error is not None: - error_out = rss._error - if rss._mask is not None: - mask_out = rss._mask - if rss._wave is not None: - wave_out = rss._wave - if rss._inst_fwhm is not None: - fwhm_out = rss._inst_fwhm - if rss._sky is not None: - sky_out = rss._sky - if rss._sky_error is not None: - sky_error_out = rss._sky_error - if rss._header is not None: - hdrs.append(Header(rss.getHeader())) - if rss._fluxcal is not None: - fluxcal_out = rss._fluxcal - else: - data_out = numpy.concatenate((data_out, rss._data), axis=axis) - if rss._wave is not None: - if len(wave_out.shape) == 2 and len(rss._wave.shape) == 2: - wave_out = numpy.concatenate((wave_out, rss._wave), axis=axis) - elif len(wave_out.shape) == 1 and len(rss._wave.shape) == 1 and numpy.isclose(wave_out, rss._wave).all(): - wave_out = wave_out - else: - raise ValueError(f"Cannot concatenate wavelength arrays of different shapes: {wave_out.shape} and {rss._wave.shape} or inhomogeneous wavelength arrays") - else: - wave_out = None - if rss._inst_fwhm is not None: - if len(fwhm_out.shape) == 2 and len(rss._inst_fwhm.shape) == 2: - fwhm_out = numpy.concatenate((fwhm_out, rss._inst_fwhm), axis=axis) - elif len(fwhm_out.shape) == 1 and len(rss._inst_fwhm.shape) == 1 and numpy.isclose(fwhm_out, rss._inst_fwhm).all(): - fwhm_out = fwhm_out - else: - raise ValueError(f"Cannot concatenate FWHM arrays of different shapes: {fwhm_out.shape} and {rss._inst_fwhm.shape} or inhomogeneous FWHM arrays") - else: - fwhm_out = None - if rss._error is not None: - error_out = numpy.concatenate((error_out, rss._error), axis=axis) - else: - error_out = None - if rss._mask is not None: - mask_out = numpy.concatenate((mask_out, rss._mask), axis=axis) - else: - mask_out = None - if rss._sky is not None: - sky_out = numpy.concatenate((sky_out, rss._sky), axis=axis) - else: - sky_out = None - if rss._sky_error is not None: - sky_error_out = numpy.concatenate((sky_error_out, rss._sky_error), axis=axis) - else: - sky_error_out = None - if rss._header is not None: - hdrs.append(Header(rss.getHeader())) - if rss._fluxcal is not None: - f = fluxcal_out.to_pandas() - fluxcal_out = 
Table.from_pandas(f.combine_first(rss._fluxcal.to_pandas())) - else: - fluxcal_out = None - - # update header - log.info("updating header") - if len(hdrs) > 0: - hdr_out = combineHdr(hdrs) - else: - hdr_out = None - - # update slitmap - slitmap_out = rss._slitmap - - # write output - log.info(f"writing stacked RSS to {os.path.basename(out_rss)}") - rss_out = RSS( - wave=wave_out, - data=data_out, - error=error_out, - mask=mask_out, - inst_fwhm=fwhm_out, - sky=sky_out, - sky_error=sky_error_out, - header=hdr_out.getHeader(), - slitmap=slitmap_out, - fluxcal=fluxcal_out - ) - rss_out.writeFitsData(out_rss) - - return rss_out - - def apertureFluxRSS_drp( in_rss, center_x, center_y, arc_radius, hdr_prefix, flux_type="mean,3900,4600" ): @@ -3071,6 +2957,38 @@ def DAR_registerSDSS_drp( plt.show() +def stack_spectrographs(in_rsss: List[str], out_rss: str) -> RSS: + """Stacks the given RSS list spectrograph-wise + + Given a list of RSS files, this function stacks them spectrograph-wise + (i.e. the RSS objects are stacked along the fiber ID axis). The output + RSS object will have the full set of fibers (i.e. 1944). + + Parameters + ---------- + in_rsss : List[str] + list of RSS file paths + out_rss : str + output RSS file path + + Returns + ------- + RSS + stacked RSS object + """ + + rsss = [loadRSS(in_rss) for in_rss in in_rsss] + + log.info(f"stacking frames in {','.join([os.path.basename(in_rss) for in_rss in in_rsss])} along fiber ID axis") + rss_out = RSS.from_spectrographs(*rsss) + + # write output + log.info(f"writing stacked RSS to {os.path.basename(out_rss)}") + rss_out.writeFitsData(out_rss) + + return rss_out + + def join_spec_channels(in_rsss: List[str], out_rss: str, use_weights: bool = True): """Stitch together the three RSS channels (brz) into a single RSS. 
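The two functions introduced above are meant to be chained: stack_spectrographs (backed by RSS.from_spectrographs) first merges the per-spectrograph RSS files of one channel along the fiber ID axis, and join_spec_channels then stitches the stacked b, r and z frames in wavelength; the run_drp.py hunk below wires up the first step. A minimal usage sketch, assuming hypothetical file names and that the per-camera extracted RSS files already exist on disk:

from lvmdrp.functions.rssMethod import stack_spectrographs, join_spec_channels

expnum = 1234  # hypothetical exposure number
stacked_paths = []
for channel in ("b", "r", "z"):
    # one extracted RSS per spectrograph (1, 2, 3) for this channel; paths are placeholders
    in_paths = [f"lvm-object-{channel}{spec}-{expnum:08d}.fits" for spec in (1, 2, 3)]
    out_path = f"lvm-object-{channel}-{expnum:08d}.fits"
    stack_spectrographs(in_paths, out_path)  # stack along the fiber ID axis (1944 fibers out)
    stacked_paths.append(out_path)

# stitch the three channel-stacked frames into one spectrum per fiber,
# weighting the overlap regions by inverse variance
join_spec_channels(stacked_paths, f"lvm-object-brz-{expnum:08d}.fits", use_weights=True)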
diff --git a/python/lvmdrp/functions/run_drp.py b/python/lvmdrp/functions/run_drp.py index cbfe628c..66a96d38 100644 --- a/python/lvmdrp/functions/run_drp.py +++ b/python/lvmdrp/functions/run_drp.py @@ -23,7 +23,7 @@ find_peaks_auto, trace_peaks, extract_spectra) from lvmdrp.functions.rssMethod import (determine_wavelength_solution, create_pixel_table, - resample_wavelength, join_spec_channels, stack_rss) + resample_wavelength, join_spec_channels, stack_spectrographs) from lvmdrp.utils.metadata import (get_frames_metadata, get_master_metadata, extract_metadata, get_analog_groups, match_master_metadata, create_master_path) from lvmdrp.utils.convert import tileid_grp @@ -1102,7 +1102,7 @@ def combine_spectrographs(tileid: int, mjd: int, channel: str, expnum: int) -> R kind='', camera=channel, imagetype="object", expnum=expnum) # combine RSS files along fiber ID direction - return stack_rss(hsci_paths, frame_path, axis=0) + return stack_spectrographs(hsci_paths, frame_path) def stack_ext(files: list, ext: Union[int, str] = 0) -> np.array: From b89fc35560afa3bbf0f4d0a7f6c4a736be94c93f Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 11 Jan 2024 10:22:40 -0300 Subject: [PATCH 15/18] removing deprecated modules --- python/lvmdrp/functions/gmosMethod.py | 1195 ----------------- python/lvmdrp/functions/vimosMethod.py | 1660 ------------------------ 2 files changed, 2855 deletions(-) delete mode 100644 python/lvmdrp/functions/gmosMethod.py delete mode 100644 python/lvmdrp/functions/vimosMethod.py diff --git a/python/lvmdrp/functions/gmosMethod.py b/python/lvmdrp/functions/gmosMethod.py deleted file mode 100644 index 9dbdb4ab..00000000 --- a/python/lvmdrp/functions/gmosMethod.py +++ /dev/null @@ -1,1195 +0,0 @@ -import os -import numpy -from astropy.io import fits as pyfits - -from lvmdrp.core.header import Header -from lvmdrp.core.image import Image -from lvmdrp.functions import skyMethod as sky -from lvmdrp.functions import imageMethod as im -from lvmdrp.functions import rssMethod as rss -from lvmdrp.functions import fluxCalMethod as flux - - -description = "Provides Methods to reduce GMOS data" -gmos_calib_n = os.path.dirname(__file__) + "/../../config/GMOS-N/" -gmos_calib_s = os.path.dirname(__file__) + "/../../config/GMOS-S/" - - -def createCCDfromArchive_drp( - infile, - prefix, - master_bias=None, - splits="0,0,0,0", - single=False, - mask_saturated=True, - saturate_value=65535, -): - single = bool(single) - splits = numpy.array(splits.split(",")).astype(numpy.int16) - hdu = pyfits.open(infile, do_not_scale_image_data=False, memmap=False) - hdr = hdu[0].header - bins = numpy.array(hdu[1].header["CCDSUM"].split()).astype("int") - if "Hamamatsu" in hdr["DETECTOR"]: - sections = 12 - CCD = numpy.zeros((4224 / bins[1], 6144 / bins[0]), dtype=numpy.float32) - CCD_err = numpy.zeros((4224 / bins[1], 6144 / bins[0]), dtype=numpy.float32) - CCD_mask = numpy.zeros((4224 / bins[1], 6144 / bins[0]), dtype="bool") - else: - sections = 6 - CCD = numpy.zeros((4608, 6144), dtype=numpy.float32) - CCD_err = numpy.zeros((4608, 6144), dtype=numpy.float32) - CCD_mask = numpy.zeros((4608, 6144), dtype="bool") - if single is True: - sections = sections / 3 - if master_bias is not None: - hdu_bias = pyfits.open(master_bias, do_not_scale_image_data=True, memmap=False) - - for i in range(1, sections + 1): - temp_sec = hdu[i].header["DETSEC"].replace("[", "").replace("]", "").split(",") - CCDsec_x = [ - (int(temp_sec[0].split(":")[0]) - 1) / bins[0], - int(temp_sec[0].split(":")[1]) / bins[0], - ] - 
CCDsec_y = [ - (int(temp_sec[1].split(":")[0]) - 1) / bins[1], - int(temp_sec[1].split(":")[1]) / bins[1], - ] - if CCDsec_x[0] < 0: - CCDsec_x[0] = 0 - if CCDsec_y[0] < 0: - CCDsec_y[0] = 0 - - temp_dat = hdu[i].header["DATASEC"].replace("[", "").replace("]", "").split(",") - DATAsec_x = [int(temp_dat[0].split(":")[0]) - 1, int(temp_dat[0].split(":")[1])] - DATAsec_y = [int(temp_dat[1].split(":")[0]) - 1, int(temp_dat[1].split(":")[1])] - img = hdu[i].data[DATAsec_y[0]: DATAsec_y[1], DATAsec_x[0]: DATAsec_x[1]] - - temp_bias = ( - hdu[i].header["BIASSEC"].replace("[", "").replace("]", "").split(",") - ) - BIASsec_x = ( - int(temp_bias[0].split(":")[0]) - 1, - int(temp_bias[0].split(":")[1]), - ) - BIASsec_y = ( - int(temp_bias[1].split(":")[0]) - 1, - int(temp_bias[1].split(":")[1]), - ) - bias = numpy.median( - hdu[i].data[BIASsec_y[0]: BIASsec_y[1], BIASsec_x[0]: BIASsec_x[1]] - ) - gain = float(hdu[i].header["gain"]) - rdnoise = float(hdu[i].header["rdnoise"]) - # print(infile,gain,rdnoise) - if master_bias is not None: - img_bias = hdu_bias[i].data - # gain = hdu_bias[i].header['GAINORIG'] - # rdnoise = hdu_bias[i].header['RONORIG'] - bias_bias = numpy.median( - img_bias[BIASsec_y[0]: BIASsec_y[1], BIASsec_x[0]: BIASsec_x[1]] - ) - bias = img_bias[ - DATAsec_y[0]: DATAsec_y[1], DATAsec_x[0]: DATAsec_x[1] - ] - (bias_bias - bias) - if master_bias is not None: - img[(img - bias) <= 0] = bias[(img - bias) <= 0] - else: - img[(img - bias) <= 0] = bias - CCD[CCDsec_y[0]: CCDsec_y[1], CCDsec_x[0]: CCDsec_x[1]] = (img - bias) * gain - CCD_err[CCDsec_y[0]: CCDsec_y[1], CCDsec_x[0]: CCDsec_x[1]] = numpy.sqrt( - (img - bias) * gain + rdnoise**2 - ) - select_nan = numpy.isnan(CCD_err) - CCD_err[select_nan] = rdnoise - if mask_saturated: - select = img == saturate_value - CCD_mask[CCDsec_y[0]: CCDsec_y[1], CCDsec_x[0]: CCDsec_x[1]] = select - - if splits[0] == 0: - if not mask_saturated: - CCD1_out = Image( - data=CCD[:, 0: 2046 / bins[0] + 1], - error=CCD_err[:, 0: 0: 2046 / bins[0] + 1], - header=hdr, - ) - else: - CCD1_out = Image( - data=CCD[:, 0: 2046 / bins[0] + 1], - error=CCD_err[:, 0: 2046 / bins[0] + 1], - mask=CCD_mask[:, 0: 2046 / bins[0] + 1], - header=hdr, - ) - else: - if not mask_saturated: - CCD1_out = Image( - data=CCD[:, splits[0] / bins[0]: 2046 / bins[0] + 1], - error=CCD_err[:, splits[0] / bins[0]: 2046 / bins[0] + 1], - header=hdr, - ) - else: - CCD1_out = Image( - data=CCD[:, splits[0] / bins[0]: 2046 / bins[0] + 1], - error=CCD_err[:, splits[0] / bins[0]: 2046 / bins[0] + 1], - mask=CCD_mask[:, splits[0] / bins[0]: 2046 / bins[0] + 1], - header=hdr, - ) - CCD1_out.writeFitsData(prefix + ".CCD1.fits") - if splits[1] == 0 and splits[2] == 0: - if not mask_saturated: - CCD2_out = Image( - data=CCD[:, 2048 / bins[0]: 4094 / bins[0] + 1], - error=CCD_err[:, 2048 / bins[0]: 4094 / bins[0] + 1], - header=hdr, - ) - else: - CCD2_out = Image( - data=CCD[:, 2048 / bins[0]: 4094 / bins[0] + 1], - error=CCD_err[:, 2048 / bins[0]: 4094 / bins[0] + 1], - mask=CCD_mask[:, 2048 / bins[0]: 4094 / bins[0] + 1], - header=hdr, - ) - CCD2_out.writeFitsData(prefix + ".CCD2.fits") - else: - if not mask_saturated: - CCD2_out = Image( - data=CCD[:, 2048: 2048 + splits[1]], - error=CCD_err[:, 2048: 2048 + splits[1]], - header=hdr, - ) - else: - CCD2_out = Image( - data=CCD[:, 2048: 2048 + splits[1]], - error=CCD_err[:, 2048: 2048 + splits[1]], - mask=CCD_mask[:, 2048: 2048 + splits[1]], - header=hdr, - ) - CCD2_out.writeFitsData(prefix + ".CCD2L.fits") - if not mask_saturated: - CCD2_out = Image( 
- data=CCD[:, 2048 + splits[2]: 4095], - error=CCD_err[:, 2048 + splits[2]: 4095], - header=hdr, - ) - else: - CCD2_out = Image( - data=CCD[:, 2048 + splits[2]: 4095], - error=CCD_err[:, 2048 + splits[2]: 4095], - mask=CCD_mask[:, 2048 + splits[2]: 4095], - header=hdr, - ) - CCD2_out.writeFitsData(prefix + ".CCD2R.fits") - if splits[3] == 0: - if not mask_saturated: - CCD3_out = Image( - data=CCD[:, 4096 / bins[0]: 6142 / bins[0] + 1], - error=CCD_err[:, 4096 / bins[0]: 6142 / bins[0] + 1], - header=hdr, - ) - else: - CCD3_out = Image( - data=CCD[:, 4096 / bins[0]: 6142 / bins[0] + 1], - error=CCD_err[:, 4096 / bins[0]: 6142 / bins[0] + 1], - mask=CCD_mask[:, 4096 / bins[0]: 6142 / bins[0] + 1], - header=hdr, - ) - - else: - if not mask_saturated: - CCD3_out = Image( - data=CCD[:, 4096 / bins[0]: 4096 / bins[0] + splits[3]], - error=CCD_err[:, 4096 / bins[0]: 4096 / bins[0] + splits[3]], - header=hdr, - ) - else: - CCD3_out = Image( - data=CCD[:, 4096 / bins[0]: 4096 / bins[0] + splits[3]], - error=CCD_err[:, 4096 / bins[0]: 4096 / bins[0] + splits[3]], - mask=CCD_mask[:, 4096 / bins[0]: 4096 / bins[0] + splits[3]], - header=hdr, - ) - CCD3_out.writeFitsData(prefix + ".CCD3.fits") - return bins - - -def combineBias_drp(file_list, file_out): - files = open(file_list, "r") - lines = files.readlines() - hdu = pyfits.open(lines[0][:-1], do_not_scale_image_data=True, memmap=False) - hdulist = [pyfits.PrimaryHDU()] - for j in range(1, len(hdu)): - for i in range(len(lines)): - hdu = pyfits.open(lines[i][:-1], do_not_scale_image_data=True, memmap=False) - if i == 0: - dim = hdu[j].data.shape - frames = numpy.zeros((len(lines), dim[0], dim[1]), dtype=numpy.float32) - frames[i, :, :] = hdu[j].data - - mean = numpy.median(frames, 0) - hdu_out = pyfits.ImageHDU(mean) - hdu_out.header = hdu[j].header - hdulist.append(hdu_out) - hdu = pyfits.HDUList(hdulist) - hdu.writeto(file_out, overwrite=True) - - -def reduceCalib_drp(trace, master_bias, arc="", fiberflat="1", reduce_ccd="ALL"): - fiberflat = int(fiberflat) - hdr_trace = Header() - hdr_trace.loadFitsHeader(trace) - IFU_mask = hdr_trace.getHdrValue("MASKNAME") - grating = hdr_trace.getHdrValue("GRATING") - centwave = float(hdr_trace.getHdrValue("CENTWAVE")) - instrument = hdr_trace.getHdrValue("INSTRUME") - # print(grating, centwave,IFU_mask) - if instrument == "GMOS-N": - gmos_calib = gmos_calib_n - elif instrument == "GMOS-S": - gmos_calib = gmos_calib_s - if IFU_mask == "IFU-2": - if grating == "R400+_G5305" and centwave == 800.0: - splits = "500,1000,1760,0" - slice1 = 500 - slice2L = 400 - slice2R = 250 - slice3 = 200 - setup = "2R400_800" - poly_disp = [-5, -5, -2, -5] - poly_fwhm = ["-3,-3", "-3,-3", "-3,-2", "-3,-3"] - start_wave = 7000 - end_wave = 8500 - disp_pix = 0.7 - elif grating == "R400+_G5305" and centwave == 760.0: - splits = "70,400,1200,1730" - slice1 = 500 - slice2L = 200 - slice2R = 250 - slice3 = 200 - setup = "2R400_760" - poly_disp = [-5, -2, -5, -5] - poly_fwhm = ["-3,-3", "-3,-2", "-3,-3", "-3,-3"] - start_wave = 7000 - end_wave = 8500 - disp_pix = 0.7 - elif grating == "R150+_G5308" and centwave == 730.0: - splits = "0,700,1200,1900" - slice1 = 200 - slice2L = 100 - slice2R = 200 - slice3 = 200 - setup = "2R150_730" - poly_disp = [-5, -2, -2, -5] - poly_fwhm = ["-3,-3", "-2,-2", "-2,-2", "-3,-3"] - start_wave = 4800 - end_wave = 9900 - disp_pix = 2.0 - elif grating == "R150+_G5308" and centwave == 760.0: - splits = "0,800,1250,1900" - slice1 = 200 - slice2L = 100 - slice2R = 200 - slice3 = 200 - setup = "2R150_760" - 
poly_disp = [-5, -2, -2, -4] - poly_fwhm = ["-3,-3", "-2,-2", "-2,-2", "-3,-3"] - start_wave = 5100 - end_wave = 9900 - disp_pix = 2.0 - ccds = ["CCD1", "CCD2L", "CCD2R", "CCD3"] - steps = [40, 20, 20, 40] - - bins = createCCDfromArchive_drp( - trace, "FLAT", master_bias=master_bias, splits=splits - ) - if arc != "": - createCCDfromArchive_drp(arc, "ARC", master_bias=master_bias, splits=splits) - im.find_peaks_auto( - "FLAT.CCD1.fits", - "peaks.CCD1", - nfibers=750, - slice=slice1, - median_cross=1, - median_box=20, - verbose=0, - ) - im.find_peaks_auto( - "FLAT.CCD2L.fits", - "peaks.CCD2L", - nfibers=750, - slice=slice2L, - median_cross=1, - median_box=20, - verbose=0, - ) - im.findPeaksMaster2_drp( - "FLAT.CCD2R.fits", - "%s/master_peaks.BLUE_slit_2019" % (gmos_calib), - "peaks.CCD2R", - threshold=10000, - slice=slice2R, - median_cross=1, - median_box=20, - verbose=0, - ) - im.findPeaksMaster2_drp( - "FLAT.CCD3.fits", - "%s/master_peaks.BLUE_slit_2019" % (gmos_calib), - "peaks.CCD3", - threshold=10000, - slice=slice3, - median_cross=1, - median_box=20, - verbose=0, - ) - im.trace_peaks( - "FLAT.CCD1.fits", - "peaks.CCD1", - "tjunk.CCD1.trc.fits", - poly_disp="-5", - steps=50, - coadd=20, - max_diff=1, - median_cross=1, - threshold_peak=400, - ) - im.trace_peaks( - "FLAT.CCD2L.fits", - "peaks.CCD2L", - "tjunk.CCD2L.trc.fits", - poly_disp="-2", - steps=20, - coadd=30, - max_diff=1, - threshold_peak=400, - median_cross=1, - ) - im.trace_peaks( - "FLAT.CCD2R.fits", - "peaks.CCD2R", - "tjunk.CCD2R.trc.fits", - poly_disp="-2", - steps=20, - coadd=30, - max_diff=1, - median_cross=1, - threshold_peak=400, - ) - im.trace_peaks( - "FLAT.CCD3.fits", - "peaks.CCD3", - "tjunk.CCD3.trc.fits", - poly_disp="-5", - steps=50, - coadd=20, - max_diff=1, - median_cross=1, - threshold_peak=400, - ) - for i in range(len(ccds)): - im.subtractStraylight_drp( - "FLAT.%s.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "FLAT.%s.back.fits" % (ccds[i]), - "FLAT.%s.stray.fits" % (ccds[i]), - aperture=14, - poly_cross=6, - smooth_disp=80, - minfit=20, - maxfit=-10, - ) - im.traceFWHM_drp( - "FLAT.%s.stray.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "tjunk.%s.fwhm.fits" % (ccds[i]), - blocks=16, - steps=steps[i], - poly_disp=-5, - init_fwhm=3, - clip="1.0,4.0", - threshold_flux=2000, - ) - if arc != "": - im.LACosmic_drp( - "ARC.%s.fits" % (ccds[i]), - "ARC.%s.cosmic.fits" % (ccds[i]), - sigma_det=5.0, - flim=2.0, - iter=4, - error_box="20,3", - replace_box="20,3", - rdnoise=3.5, - sig_gauss="1.4,1.4", - increase_radius=1, - parallel=2, - ) - im.subtractStraylight_drp( - "ARC.%s.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "ARC.%s.back.fits" % (ccds[i]), - "ARC.%s.stray.fits" % (ccds[i]), - aperture=14, - poly_cross=6, - smooth_disp=70, - minfit=20, - maxfit=-10, - ) - im.extract_spectra( - "ARC.%s.cosmic.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "ARC.%s.ms.fits" % (ccds[i]), - method="aperture", - aperture=5, - ) - rss.determine_wavelength_solution( - "ARC.%s.ms.fits" % (ccds[i]), - "ARC.%s" % (ccds[i]), - "%s/arc.%s.%s.txt" % (gmos_calib, ccds[i], setup), - poly_dispersion=poly_disp[i], - poly_fwhm=poly_fwhm[i], - flux_min=100.0, - fwhm_max=8.0, - rel_flux_limits="0.1,6.0", - aperture=20, - ) - if fiberflat == 1: - im.extract_spectra( - "FLAT.%s.stray.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "FLAT.%s.ms.fits" % (ccds[i]), - method="optimal", - fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), - parallel=1, - ) - rss.create_pixel_table( - "FLAT.%s.ms.fits" % (ccds[i]), - 
"FLAT.%s.rss.fits" % (ccds[i]), - "ARC.%s.disp.fits" % (ccds[i]), - "ARC.%s.res.fits" % (ccds[i]), - ) - if fiberflat == 1: - rss.glueRSS_drp("FLAT.CCD1.rss.fits,FLAT.CCD2L.rss.fits", "FLAT_red.rss.fits") - rss.glueRSS_drp("FLAT.CCD2R.rss.fits,FLAT.CCD3.rss.fits", "FLAT_blue.rss.fits") - rss.resample_wavelength( - "FLAT_blue.rss.fits", - "FLAT_blue.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - err_sim=0, - method="linear", - parallel=1, - ) - rss.resample_wavelength( - "FLAT_red.rss.fits", - "FLAT_red.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - err_sim=0, - method="linear", - ) - rss.mergeRSS_drp( - "FLAT_red.disp_cor.fits,FLAT_blue.disp_cor.fits", "FLAT.disp_cor.fits" - ) - rss.create_fiberflat("FLAT.disp_cor.fits", "FIBERFLAT.fits", clip="0.25,2.0") - - elif IFU_mask == "IFU-R" or IFU_mask == "IFU-B": - ccds = numpy.array(["CCD1", "CCD2", "CCD3"]) - splits = "0,0,0,0" - - if grating == "R400+_G5305" and centwave == 700.0 and IFU_mask == "IFU-R": - setup = "1RR400_700" - slice_CCD = [1000, 1000, 100] - poly_disp = [-6, -6, -6] - poly_fwhm = ["-2,-3", "-2,-3", "-2,-3"] - start_wave = 5800 - end_wave = 9000 - disp_pix = 0.7 - - elif grating == "R400+_G5325" and centwave == 690.0 and IFU_mask == "IFU-R": - setup = "1RR400_690" - slice_CCD = [700, 350, 200] - poly_disp = [-4, -4, -4] - poly_fwhm = ["-2,-3", "-2,-3", "-2,-3"] - smooth_median_flat = 100 - if reduce_ccd == "ALL": - start_wave = 6300 - end_wave = 9000 - elif reduce_ccd == "CCD2": - start_wave = 6150 - end_wave = 7600 - disp_pix = 0.7 - - elif grating == "R400+_G5305" and centwave == 710.0 and IFU_mask == "IFU-R": - setup = "1RR400_710" - slice_CCD = [700, 350, 200] - poly_disp = [-4, -4, -4] - poly_fwhm = ["-2,-3", "-2,-3", "-2,-3"] - start_wave = 5800 - end_wave = 9000 - disp_pix = 0.7 - elif grating == "B600+_G5307" and centwave == 625.0 and IFU_mask == "IFU-R": - setup = "1RB600_625" - slice_CCD = [2000, 2000, 2000] - - poly_disp = [-3, -3, -3] - poly_fwhm = ["-2,-3", "-2,-3", "-2,-3"] - start_wave = 4670 - end_wave = 7850 - disp_pix = 0.7 - - bins = createCCDfromArchive_drp( - trace, "FLAT", master_bias=master_bias, splits=splits - ) - if arc != "": - createCCDfromArchive_drp(arc, "ARC", master_bias=master_bias, splits=splits) - if reduce_ccd == "ALL": - indices = [0, 1, 2] - elif reduce_ccd == "CCD1": - indices = [0] - elif reduce_ccd == "CCD2": - indices = [1] - elif reduce_ccd == "CCD3": - indices = [2] - for i in indices: - im.addCCDMask_drp( - "FLAT.%s.fits" % (ccds[i]), - "%s/MASK.%s.Hamamatsu.fits" % (gmos_calib, ccds[i]), - ) - if instrument == "GMOS-N": - im.find_peaks_auto( - "FLAT.%s.fits" % (ccds[i]), - "peaks.%s" % (ccds[i]), - nfibers=750, - slice=slice_CCD[i] / bins[0], - median_cross=1, - median_box=20, - verbose=0, - ) - else: - # findPeaksOffset_drp('FLAT.%s.fits'%(ccds[i]), '%s/master_peaks.RED_slit'%(gmos_calib), 'peaks.%s'%(ccds[i]), threshold=7000, slice=slice_CCD[i],median_cross=1, median_box=20) - im.findPeaksMaster_drp( - "FLAT.%s.fits" % (ccds[i]), - "%s/master_peaks.RED_slit" % (gmos_calib), - "peaks.%s" % (ccds[i]), - slice=slice_CCD[i] / bins[0], - median_cross=1, - median_box=10, - ) - # im.trace_peaks('FLAT.%s.fits'%(ccds[i]), 'peaks.%s'%(ccds[i]), 'tjunk.%s.trc.fits'%(ccds[i]), poly_disp='-5', steps=20/bins[1], max_diff=1,threshold_peak=50, median_box=50, verbose=1) - # im.subtractStraylight_drp('FLAT.%s.fits' % (ccds[i]), 'tjunk.%s.trc.fits' % (ccds[i]), 'FLAT.%s.back.fits' % (ccds[i]), 'FLAT.%s.stray.fits' % 
(ccds[i]), aperture=10, poly_cross=6, smooth_disp=70/bins[0],smooth_gauss=10) - # im.traceFWHM_drp('FLAT.%s.stray.fits'%(ccds[i]), 'tjunk.%s.trc.fits'%(ccds[i]), 'tjunk.%s.fwhm.fits'%(ccds[i]), blocks=16, steps=50/bins[0], poly_disp=-5, init_fwhm=3, clip='1.0,8.0', threshold_flux=500) - if arc != "": - # im.LACosmic_drp('ARC.%s.fits'%(ccds[i]), 'ARC.%s.cosmic.fits'%(ccds[i]), sigma_det=5.0, flim=2.0, iter=4, error_box='20,3', replace_box='20,3', rdnoise=3.5,sig_gauss='1.4,1.4', increase_radius=1, parallel=2) - # im.addCCDMask_drp('ARC.%s.cosmic.fits'%(ccds[i]),'%s/MASK.%s.Hamamatsu.fits'%(gmos_calib,ccds[i])) - im.extract_spectra( - "ARC.%s.cosmic.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "ARC.%s.ms.fits" % (ccds[i]), - method="aperture", - aperture=5, - ) - rss.determine_wavelength_solution( - "ARC.%s.ms.fits" % (ccds[i]), - "ARC.%s" % (ccds[i]), - "%s/arc.%s.%s.txt" % (gmos_calib, ccds[i], setup), - poly_dispersion=poly_disp[i], - poly_fwhm=poly_fwhm[i], - flux_min=40.0, - fwhm_max=8.0, - aperture=20 / bins[0], - rel_flux_limits="0.1,6.0", - ) - if fiberflat == 1: - im.extract_spectra( - "FLAT.%s.stray.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "FLAT.%s.ms.fits" % (ccds[i]), - method="optimal", - fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), - parallel=1, - ) - rss.create_pixel_table( - "FLAT.%s.ms.fits" % (ccds[i]), - "FLAT.%s.rss.fits" % (ccds[i]), - "ARC.%s.disp.fits" % (ccds[i]), - "ARC.%s.res.fits" % (ccds[i]), - ) - - if fiberflat == 1: - if reduce_ccd == "ALL": - rss.glueRSS_drp("FLAT.CCD1.rss.fits,FLAT.CCD2.rss.fits", "FLAT.rss.fits") - - rss.resample_wavelength( - "FLAT.rss.fits", - "FLAT.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - compute_densities=1, - err_sim=0, - method="linear", - parallel=1, - ) - else: - rss.resample_wavelength( - "FLAT.%s.rss.fits" % (reduce_ccd), - "FLAT.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - compute_densities=1, - err_sim=0, - method="linear", - parallel=1, - ) - rss.create_fiberflat( - "FLAT.disp_cor.fits", - "FIBERFLAT.fits", - smooth_median=smooth_median_flat, - clip="0.25,2.0", - ) - - -def reduceSTD_drp( - std, - master_bias, - fiberflat="1", - ref_star="", - straylight="0", - mask_telluric="", - reduce_ccd="ALL", -): - hdr_std = Header() - hdr_std.loadFitsHeader(std) - IFU_mask = hdr_std.getHdrValue("MASKNAME") - grating = hdr_std.getHdrValue("GRATING") - centwave = float(hdr_std.getHdrValue("CENTWAVE")) - instrument = hdr_std.getHdrValue("INSTRUME") - if instrument == "GMOS-N": - gmos_calib = gmos_calib_n - elif instrument == "GMOS-S": - gmos_calib = gmos_calib_s - if IFU_mask == "IFU-2": - if grating == "R400+_G5305" and centwave == 800.0: - splits = "500,1000,1760,0" - start_wave = 7000 - end_wave = 8500 - disp_pix = 0.7 - mask_wave = "7150,7330,7530,7700,8120,8270,8350,8450" - smooth_poly = -15 - elif grating == "R400+_G5305" and centwave == 760.0: - splits = "70,400,1200,1730" - start_wave = 7000 - end_wave = 8500 - disp_pix = 0.7 - mask_wave = "7130,7260,7550,7700,7930,8050,8120,8270" - smooth_poly = -40 - elif grating == "R150+_G5308" and centwave == 730.0: - splits = "0,700,1200,1900" - # setup = "2R150_730" - start_wave = 4800 - end_wave = 9900 - disp_pix = 2.0 - ccds = ["CCD1", "CCD2L", "CCD2R", "CCD3"] - # steps = [40, 20, 20, 40] - - createCCDfromArchive_drp(std, "STD", master_bias=master_bias, splits=splits) - for i in range(len(ccds)): - im.LACosmic_drp( - "STD.%s.fits" % (ccds[i]), - "STD.%s.cosmic.fits" % (ccds[i]), - 
sigma_det=5.0, - flim=2.0, - iter=4, - error_box="20,3", - replace_box="20,3", - rdnoise=3.5, - sig_gauss="1.4,1.4", - increase_radius=1, - parallel=2, - ) - im.addCCDMask_drp( - "STD.%s.cosmic.fits" % (ccds[i]), - "%s/MASK.%s.Hamamatsu.fits" % (gmos_calib, ccds[i]), - ) - if int(straylight) == 1: - im.subtractStraylight_drp( - "STD.%s.cosmic.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "STD.%s.back.fits" % (ccds[i]), - "STD.%s.stray.fits" % (ccds[i]), - aperture=14, - poly_cross=6, - smooth_disp=70, - minfit=20, - maxfit=10, - ) - im.extract_spectra( - "STD.%s.cosmic.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "STD.%s.ms.fits" % (ccds[i]), - method="optimal", - fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), - parallel=1, - ) - else: - im.extract_spectra( - "STD.%s.stray.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "STD.%s.ms.fits" % (ccds[i]), - method="optimal", - fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), - parallel=1, - ) - rss.create_pixel_table( - "STD.%s.ms.fits" % (ccds[i]), - "STD.%s.rss.fits" % (ccds[i]), - "ARC.%s.disp.fits" % (ccds[i]), - "ARC.%s.res.fits" % (ccds[i]), - ) - rss.glueRSS_drp("STD.CCD1.rss.fits,STD.CCD2L.rss.fits", "STD_red.rss.fits") - rss.glueRSS_drp("STD.CCD2R.rss.fits,STD.CCD3.rss.fits", "STD_blue.rss.fits") - rss.resample_wavelength( - "STD_blue.rss.fits", - "STD_blue.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - err_sim=0, - method="linear", - parallel=1, - ) - rss.resample_wavelength( - "STD_red.rss.fits", - "STD_red.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - err_sim=0, - method="linear", - ) - rss.mergeRSS_drp( - "STD_red.disp_cor.fits,STD_blue.disp_cor.fits", "STD.disp_cor.fits" - ) - rss.correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "FIBERFLAT.fits") - rss.includePosTab_drp("STD.flat.fits", "%s/GMOS_2slit_pt.txt" % (gmos_calib)) - rss.splitFibers_drp( - "STD.flat.fits", - "STD.obj_red.fits,STD.sky_red.fits,STD.obj_blue.fits,STD.sky_blue.fits", - "O_R,S_R,O_B,S_B", - ) - sky.constructSkySpec_drp("STD.sky_red.fits", "STD.skyspec_red.fits", nsky=200) - sky.constructSkySpec_drp("STD.sky_blue.fits", "STD.skyspec_blue.fits", nsky=200) - sky.subtractSkySpec_drp( - "STD.obj_red.fits", "STD.sobj_red.fits", "STD.skyspec_red.fits" - ) - sky.subtractSkySpec_drp( - "STD.obj_blue.fits", "STD.sobj_blue.fits", "STD.skyspec_blue.fits" - ) - rss.mergeRSS_drp("STD.sobj_red.fits,STD.sobj_blue.fits", "STD.sobj.fits") - rss.createCube_drp( - "STD.sobj.fits", "STD.cube.fits", mode="drizzle", resolution=0.2, parallel=1 - ) - elif IFU_mask == "IFU-R" or IFU_mask == "IFU-B": - ccds = ["CCD1", "CCD2", "CCD3"] - splits = "0,0,0,0" - if grating == "R400+_G5305" and centwave == 700.0 and IFU_mask == "IFU-R": - start_wave = 5800 - end_wave = 9000 - disp_pix = 0.7 - mask_wave = ( - "6200,6350,6500,6600,6860,7050,7150,7330,7570,7760,8120,8350,8650,8730" - ) - smooth_poly = -15 - elif grating == "R400+_G5325" and centwave == 690.0 and IFU_mask == "IFU-R": - # setup = "1RR400_690" - if reduce_ccd == "ALL": - start_wave = 4600 - end_wave = 9000 - elif reduce_ccd == "CCD2": - start_wave = 6140 - end_wave = 7600 - disp_pix = 0.7 - mask_wave = ( - "6200,6350,6500,6600,6860,7050,7150,7330,7570,7760,8120,8350,8650,8730" - ) - smooth_poly = -15 - - createCCDfromArchive_drp(std, "STD", master_bias=master_bias, splits=splits) - if reduce_ccd == "ALL": - indices = [0, 1, 2] - elif reduce_ccd == "CCD1": - indices = [0] - elif reduce_ccd == "CCD2": - indices = [1] - elif reduce_ccd == 
"CCD3": - indices = [2] - for i in indices: - # print(i) - im.LACosmic_drp( - "STD.%s.fits" % (ccds[i]), - "STD.%s.cosmic.fits" % (ccds[i]), - sigma_det=5.0, - flim=1.3, - iter=4, - error_box="20,3", - replace_box="20,3", - rdnoise=3.5, - sig_gauss="3,3", - increase_radius=1, - parallel=2, - ) - im.addCCDMask_drp( - "STD.%s.cosmic.fits" % (ccds[i]), - "%s/MASK.%s.Hamamatsu.fits" % (gmos_calib, ccds[i]), - ) - if int(straylight) == 1: - im.subtractStraylight_drp( - "STD.%s.cosmic.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "STD.%s.back.fits" % (ccds[i]), - "STD.%s.stray.fits" % (ccds[i]), - aperture=10, - poly_cross=6, - smooth_disp=70, - smooth_gauss=10, - ) - im.extract_spectra( - "STD.%s.stray.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "STD.%s.ms.fits" % (ccds[i]), - method="optimal", - fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), - parallel=1, - ) - else: - im.extract_spectra( - "STD.%s.cosmic.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "STD.%s.ms.fits" % (ccds[i]), - method="optimal", - fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), - parallel=1, - ) - rss.create_pixel_table( - "STD.%s.ms.fits" % (ccds[i]), - "STD.%s.rss.fits" % (ccds[i]), - "ARC.%s.disp.fits" % (ccds[i]), - "ARC.%s.res.fits" % (ccds[i]), - ) - if reduce_ccd == "ALL": - rss.glueRSS_drp( - "STD.CCD1.rss.fits,STD.CCD2.rss.fits,STD.CCD3.rss.fits", "STD.rss.fits" - ) - rss.resample_wavelength( - "STD.rss.fits", - "STD.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - err_sim=0, - method="linear", - ) - else: - rss.resample_wavelength( - "STD.%s.rss.fits" % (ccds[i]), - "STD.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - err_sim=0, - method="linear", - ) - rss.correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "FIBERFLAT.fits") - if IFU_mask == "IFU-R": - rss.includePosTab_drp("STD.flat.fits", "%s/GMOS_1slitR_pt.txt" % (gmos_calib)) - rss.splitFibers_drp("STD.flat.fits", "STD.obj.fits,STD.sky.fits", "O_R,S_R") - elif IFU_mask == "IFU-B": - rss.includePosTab_drp("STD.flat.fits", "%s/GMOS_1slitB_pt.txt" % (gmos_calib)) - rss.splitFibers_drp("STD.flat.fits", "STD.obj.fits,STD.sky.fits", "O_B,S_B") - sky.constructSkySpec_drp("STD.sky.fits", "STD.skyspec_red.fits", nsky=200) - sky.subtractSkySpec_drp("STD.obj.fits", "STD.sobj.fits", "STD.skyspec_red.fits") - rss.createCube_drp( - "STD.obj.fits", "STD.cube.fits", mode="drizzle", resolution=0.2, parallel=1 - ) - if ref_star != "": - flux.createSensFunction_drp( - "STD.sobj.fits", - "ratio.txt", - "%s/%s" % (gmos_calib, ref_star), - airmass="AIRMASS", - exptime="EXPTIME", - coadd=200, - extinct_curve="Paranal", - out_star="star.txt", - mask_wave=mask_wave, - mask_telluric=mask_telluric, - smooth_poly=smooth_poly, - verbose=1, - ) - - -def reduceObject_drp( - obj, - master_bias, - res_fwhm="0.0", - fiberflat="1", - straylight="1", - flux_calib="1", - telluric_cor="1", - flexure_correct="1", - reduce_ccd="ALL", -): - straylight = int(straylight) - fiberflat = int(fiberflat) - flux_calib = int(fiberflat) - telluric_cor = int(telluric_cor) - flexure_correct = int(flexure_correct) - res_fwhm = float(res_fwhm) - - hdr_obj = Header() - hdr_obj.loadFitsHeader(obj) - IFU_mask = hdr_obj.getHdrValue("MASKNAME") - grating = hdr_obj.getHdrValue("GRATING") - centwave = float(hdr_obj.getHdrValue("CENTWAVE")) - instrument = hdr_obj.getHdrValue("INSTRUME") - # print(grating, centwave,IFU_mask) - if instrument == "GMOS-N": - gmos_calib = gmos_calib_n - elif instrument == "GMOS-S": - gmos_calib = 
gmos_calib_s - if IFU_mask == "IFU-2": - if grating == "R400+_G5305" and centwave == 800.0: - splits = "500,1000,1760,0" - start_wave = 7000 - end_wave = 8500 - disp_pix = 0.7 - sky_line_list = "" - elif grating == "R400+_G5305" and centwave == 760.0: - splits = "70,400,1200,1730" - start_wave = 7000 - end_wave = 8500 - disp_pix = 0.7 - sky_line_list = "" - elif grating == "R150+_G5308" and centwave == 730.0: - splits = "0,700,1200,1900" - start_wave = 4800 - end_wave = 9900 - disp_pix = 2.0 - sky_line_list = "" - elif grating == "R150+_G5308" and centwave == 760.0: - splits = "0,800,1250,1900" - start_wave = 5100 - end_wave = 9900 - disp_pix = 2.0 - sky_line_list = "" - ccds = ["CCD1", "CCD2L", "CCD2R", "CCD3"] - elif IFU_mask == "IFU-R" or IFU_mask == "IFU-B": - ccds = ["CCD1", "CCD2", "CCD3"] - splits = "0,0,0,0" - if grating == "R400+_G5305" and centwave == 700.0 and IFU_mask == "IFU-R": - start_wave = 5800 - end_wave = 9000 - disp_pix = 0.7 - sky_line_list = "" - elif grating == "R400+_G5325" and centwave == 690.0 and IFU_mask == "IFU-R": - disp_pix = 0.7 - splits = "0,0,0,900" - sky_line_list = "" - if reduce_ccd == "ALL": - start_wave = 6300 - end_wave = 9000 - elif reduce_ccd == "CCD2": - start_wave = 6150 - end_wave = 7600 - elif grating == "R400+_G5305" and centwave == 710.0 and IFU_mask == "IFU-R": - start_wave = 5800 - end_wave = 9000 - disp_pix = 0.7 - sky_line_list = "" - - createCCDfromArchive_drp(obj, "OBJ", master_bias=master_bias, splits=splits) - if reduce_ccd == "ALL": - indices = numpy.arange(len(ccds)) - elif reduce_ccd == "CCD1": - indices = [0] - elif reduce_ccd == "CCD2": - indices = [1] - elif reduce_ccd == "CCD3": - indices = [2] - for i in indices: - im.LACosmic_drp( - "OBJ.%s.fits" % (ccds[i]), - "OBJ.%s.cosmic.fits" % (ccds[i]), - sigma_det=5.0, - flim=2.0, - iter=4, - error_box="20,3", - replace_box="20,3", - rdnoise=3.5, - sig_gauss="1.4,1.4", - increase_radius=1, - parallel=2, - ) - if instrument == "GMOS-S": - im.addCCDMask_drp( - "OBJ.%s.cosmic.fits" % (ccds[i]), - "%s/MASK.%s.Hamamatsu.fits" % (gmos_calib, ccds[i]), - ) - if sky_line_list != "": - im.offsetTrace_drp( - "OBJ.%s.cosmic.fits" % (ccds[i]), - "tjunk.%s.trc.fits" % (ccds[i]), - "ARC.%s.disp.fits" % (ccds[i]), - sky_line_list, - "offsetTrace_%s.log" % (ccds[i]), - blocks="10", - size="30", - ) - rss.correctTraceMask_drp( - "tjunk.%s.trc.fits" % (ccds[i]), - "tjunk.%s.trc_temp.fits" % (ccds[i]), - "offsetTrace_%s.log" % (ccds[i]), - "OBJ.%s.cosmic.fits" % (ccds[i]), - poly_smooth=flexure_correct, - ) - else: - os.system( - "cp tjunk.%s.trc.fits tjunk.%s.trc_temp.fits" % (ccds[i], ccds[i]) - ) - if straylight == 1: - im.subtractStraylight_drp( - "OBJ.%s.cosmic.fits" % (ccds[i]), - "tjunk.%s.trc_temp.fits" % (ccds[i]), - "OBJ.%s.back.fits" % (ccds[i]), - "OBJ.%s.stray.fits" % (ccds[i]), - aperture=14, - poly_cross=6, - smooth_disp=70, - smooth_gauss=15, - ) - im.extract_spectra( - "OBJ.%s.stray.fits" % (ccds[i]), - "tjunk.%s.trc_temp.fits" % (ccds[i]), - "OBJ.%s.ms.fits" % (ccds[i]), - method="optimal", - fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), - parallel=3, - ) - else: - im.extract_spectra( - "OBJ.%s.cosmic.fits" % (ccds[i]), - "tjunk.%s.trc_temp.fits" % (ccds[i]), - "OBJ.%s.ms.fits" % (ccds[i]), - method="optimal", - fwhm="tjunk.%s.fwhm.fits" % (ccds[i]), - parallel=2, - ) - rss.create_pixel_table( - "OBJ.%s.ms.fits" % (ccds[i]), - "OBJ.%s.rss.fits" % (ccds[i]), - "ARC.%s.disp.fits" % (ccds[i]), - "ARC.%s.res.fits" % (ccds[i]), - ) - if sky_line_list != "": - rss.checkPixTable_drp( - 
"OBJ.%s.rss.fits" % (ccds[i]), - sky_line_list, - "offsetWave_%s.log" % (ccds[i]), - aperture="12", - ) - if float(res_fwhm) != 0.0: - rss.matchResolution_drp( - "OBJ.%s.rss.fits" % (ccds[i]), - "OBJ.%s.rss.fits" % (ccds[i]), - res_fwhm, - parallel=4, - ) - - if IFU_mask == "IFU-2": - rss.glueRSS_drp("OBJ.CCD1.rss.fits,OBJ.CCD2L.rss.fits", "OBJ_red.rss.fits") - rss.glueRSS_drp("OBJ.CCD2R.rss.fits,OBJ.CCD3.rss.fits", "OBJ_blue.rss.fits") - rss.resample_wavelength( - "OBJ_blue.rss.fits", - "OBJ_blue.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - err_sim=200, - method="linear", - parallel=1, - ) - rss.resample_wavelength( - "OBJ_red.rss.fits", - "OBJ_red.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - err_sim=200, - method="linear", - ) - rss.mergeRSS_drp( - "OBJ_red.disp_cor.fits,OBJ_blue.disp_cor.fits", "OBJ.disp_cor.fits" - ) - rss.includePosTab_drp("OBJ.disp_cor.fits", "%s/GMOS_2slit_pt.txt" % (gmos_calib)) - elif IFU_mask == "IFU-R": - if reduce_ccd == "ALL": - rss.glueRSS_drp( - "OBJ.CCD1.rss.fits,OBJ.CCD2.rss.fits,OBJ.CCD3.rss.fits", "OBJ.rss.fits" - ) - rss.resample_wavelength( - "OBJ.rss.fits", - "OBJ.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - err_sim=200, - method="linear", - ) - else: - rss.resample_wavelength( - "OBJ.%s.rss.fits" % (reduce_ccd), - "OBJ.disp_cor.fits", - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_pix, - err_sim=200, - method="linear", - ) - rss.includePosTab_drp("OBJ.disp_cor.fits", "%s/GMOS_1slitR_pt.txt" % (gmos_calib)) - - if fiberflat == 1: - rss.correctFiberFlat_drp("OBJ.disp_cor.fits", "OBJ.flat.fits", "FIBERFLAT.fits") - - if flux_calib == 1: - if fiberflat == 1: - flux.fluxCalibration_drp( - "OBJ.flat.fits", - "OBJ.fobj.fits", - "ratio.txt", - "AIRMASS", - "EXPTIME", - extinct_curve="Paranal", - ref_units="1e-16", - target_units="1e-16", - norm_sb_fib="", - ) - else: - flux.fluxCalibration_drp( - "OBJ.disp_cor.fits", - "OBJ.fobj.fits", - "ratio.txt", - "AIRMASS", - "EXPTIME", - extinct_curve="Paranal", - ref_units="1e-16", - target_units="1e-16", - norm_sb_fib="", - ) - if telluric_cor == 1: - flux.correctTelluric_drp( - "OBJ.fobj.fits", - "OBJ.fobj.fits", - "telluric_template.fits", - airmass="AIRMASS", - ) diff --git a/python/lvmdrp/functions/vimosMethod.py b/python/lvmdrp/functions/vimosMethod.py deleted file mode 100644 index c9acb582..00000000 --- a/python/lvmdrp/functions/vimosMethod.py +++ /dev/null @@ -1,1660 +0,0 @@ -import os -from multiprocessing import Pool, cpu_count - -from astropy.io import fits as pyfits - -from lvmdrp.core.spectrum1d import Spectrum1D -from lvmdrp.functions import headerMethod as head -from lvmdrp.functions import imageMethod as im -from lvmdrp.functions import rssMethod as rss -from lvmdrp.functions import specialMethod as spec -from lvmdrp.functions import fluxCalMethod as flux -from lvmdrp.functions import skyMethod as sky - - -description = "Provides Methods to reduce VIMOS data" - -vimos_calib = os.path.dirname(__file__) + "/../../config/VIMOS/" - - -def renameFiles_drp(year): - dir = os.listdir(".") - for f in dir: - if "VIMOS." 
in f and ".fits" in f and str(year) in f: - # print(f) - hdr = pyfits.getheader(f, 0, ignore_missing_end=True) - orig_name = hdr["ORIGFILE"] - os.system("mv %s %s" % (f, orig_name)) - - -def createBIAS_drp(night): - night = int(night) - os.system("ls VIMOS_SPEC_BIAS%03d_*_B.1.fits* > combine_BIAS.B1" % (night)) - im.combineImages_drp("combine_BIAS.B1", "BIAS_B.1.fits", method="median") - os.system("rm combine_BIAS.B1") - os.system("ls VIMOS_SPEC_BIAS%03d_*_A.2.fits* > combine_BIAS.A2" % (night)) - im.combineImages_drp("combine_BIAS.A2", "BIAS_A.2.fits", method="median") - os.system("rm combine_BIAS.A2") - os.system("ls VIMOS_SPEC_BIAS%03d_*_A.3.fits* > combine_BIAS.A3" % (night)) - im.combineImages_drp("combine_BIAS.A3", "BIAS_A.3.fits", method="median") - os.system("rm combine_BIAS.A3") - os.system("ls VIMOS_SPEC_BIAS%03d_*_B.4.fits* > combine_BIAS.B4" % (night)) - im.combineImages_drp("combine_BIAS.B4", "BIAS_B.4.fits", method="median") - os.system("rm combine_BIAS.B4") - - -def combineLAMP_drp(night): - night = int(night) - os.system("ls VIMOS_IFU_LAMP%03d_*_B.1.fits* > combine_LAMP.B1" % (night)) - im.combineImages_drp("combine_LAMP.B1", "LAMP_B.1.fits", method="median") - os.system("rm combine_LAMP.B1") - os.system("ls VIMOS_IFU_LAMP%03d_*_A.2.fits* > combine_LAMP.A2" % (night)) - im.combineImages_drp("combine_LAMP.A2", "LAMP_A.2.fits", method="median") - os.system("rm combine_LAMP.A2") - os.system("ls VIMOS_IFU_LAMP%03d_*_A.3.fits* > combine_LAMP.A3" % (night)) - im.combineImages_drp("combine_LAMP.A3", "LAMP_A.3.fits", method="median") - os.system("rm combine_LAMP.A3") - os.system("ls VIMOS_IFU_LAMP%03d_*_B.4.fits* > combine_LAMP.B4" % (night)) - im.combineImages_drp("combine_LAMP.B4", "LAMP_B.4.fits", method="median") - os.system("rm combine_LAMP.B4") - - -def combineTwilight_drp(night, numbers): - list = numbers.split(",") - night = int(night) - - for i in range(len(list)): - if i == 0: - os.system( - "ls VIMOS_IFU_TwFlats%03d_%04d_B.1.* > combine_LAMP.B1" - % (night, int(list[i])) - ) - os.system( - "ls VIMOS_IFU_TwFlats%03d_%04d_A.2.* > combine_LAMP.A2" - % (night, int(list[i])) - ) - os.system( - "ls VIMOS_IFU_TwFlats%03d_%04d_A.3.* > combine_LAMP.A3" - % (night, int(list[i])) - ) - os.system( - "ls VIMOS_IFU_TwFlats%03d_%04d_B.4.* > combine_LAMP.B4" - % (night, int(list[i])) - ) - else: - os.system( - "ls VIMOS_IFU_TwFlats%03d_%04d_B.1.* >> combine_LAMP.B1" - % (night, int(list[i])) - ) - os.system( - "ls VIMOS_IFU_TwFlats%03d_%04d_A.2.* >> combine_LAMP.A2" - % (night, int(list[i])) - ) - os.system( - "ls VIMOS_IFU_TwFlats%03d_%04d_A.3.* >> combine_LAMP.A3" - % (night, int(list[i])) - ) - os.system( - "ls VIMOS_IFU_TwFlats%03d_%04d_B.4.* >> combine_LAMP.B4" - % (night, int(list[i])) - ) - - im.combineImages_drp("combine_LAMP.B1", "LAMP_B.1.fits", method="mean") - im.combineImages_drp("combine_LAMP.A2", "LAMP_A.2.fits", method="mean") - im.combineImages_drp("combine_LAMP.A3", "LAMP_A.3.fits", method="mean") - im.combineImages_drp("combine_LAMP.B4", "LAMP_B.4.fits", method="mean") - os.system("rm combine_LAMP.B1") - os.system("rm combine_LAMP.A2") - os.system("rm combine_LAMP.A3") - os.system("rm combine_LAMP.B4") - - -def prepareCalib_drp( - night, - chip, - boundary_x, - boundary_y, - peaks_ref, - ARC_ref, - start_wave, - end_wave, - disp_wave, - fwhm_max, - setup="", - border="4", - trace_master="", - fiberflat="1", - fiberflat_wave="0", - CCD_mask="", - parallel="1", -): - night = int(night) - start_wave = float(start_wave) - end_wave = float(end_wave) - disp_wave = 
float(disp_wave) - fiberflat = int(fiberflat) - fiberflat_wave = int(fiberflat_wave) - im.subtractBias_drp( - "LAMP_%s.fits" % (chip), - "LAMP_%s.sub.fits" % (chip), - "BIAS_%s.fits" % (chip), - boundary_x=boundary_x, - boundary_y=boundary_y, - gain="ESO DET OUT1 CONAD", - rdnoise="ESO DET OUT1 RON", - subtract_light="1", - ) - if CCD_mask != "": - im.addCCDMask_drp( - "LAMP_%s.sub.fits" % (chip), - "%s/CCDMASK_%s_%s.fits" % (vimos_calib, chip, CCD_mask), - ) - if setup != "" and setup == "orange": - im.findPeaksMaster2_drp( - "LAMP_%s.sub.fits" % (chip), - peaks_ref, - "peaks_%s.txt" % (chip), - disp_axis="y", - slice="2450", - border=border, - verbose=0, - ) - elif setup != "" and setup == "blue": - im.findPeaksMaster2_drp( - "LAMP_%s.sub.fits" % (chip), - peaks_ref, - "peaks_%s.txt" % (chip), - disp_axis="y", - slice="3400", - border=border, - verbose=0, - ) - else: - im.findPeaksMaster2_drp( - "LAMP_%s.sub.fits" % (chip), - peaks_ref, - "peaks_%s.txt" % (chip), - disp_axis="y", - border=border, - verbose=0, - ) - - im.trace_peaks( - "LAMP_%s.sub.fits" % (chip), - "peaks_%s.txt" % (chip), - "tjunk_%s.trc.fits" % (chip), - disp_axis="y", - poly_disp="-5", - steps=30, - threshold_peak=25, - coadd=120, - verbose=0, - ) - if trace_master == "": - im.subtractStraylight_drp( - "LAMP_%s.sub.fits" % (chip), - "tjunk_%s.trc.fits" % (chip), - "LAMP_%s.back.fits" % (chip), - "LAMP_%s.stray.fits" % (chip), - disp_axis="y", - aperture=10, - poly_cross=0, - smooth_disp=30, - parallel=parallel, - ) - im.traceFWHM_drp( - "LAMP_%s.stray.fits" % (chip), - "tjunk_%s.trc.fits" % (chip), - "tjunk.fwhm_%s.fits" % (chip), - disp_axis="y", - blocks=16, - steps=50, - poly_disp=-5, - init_fwhm=3, - clip="1,6.0", - parallel=parallel, - ) - else: - spec.matchMasterTrace_drp( - "tjunk_%s.trc.fits" % (chip), - "master_%s.trc.fits" % (chip), - "tjunk_%s.trc.fits" % (chip), - split="", - poly_cross=1, - poly_disp=1, - start_pix=trace_master[0], - end_pix=trace_master[1], - ) - im.subtractStraylight_drp( - "LAMP_%s.sub.fits" % (chip), - "tjunk_%s.trc.fits" % (chip), - "LAMP_%s.back.fits" % (chip), - "LAMP_%s.stray.fits" % (chip), - disp_axis="y", - aperture=15, - poly_cross=0, - smooth_disp=30, - parallel=parallel, - ) - os.system("cp master.fwhm_%s.fits tjunk.fwhm_%s.fits" % (chip, chip)) - if fiberflat_wave == 1: - im.extract_spectra( - "WAVE_%s.sub.fits" % (chip), - "tjunk_%s.trc.fits" % (chip), - "WAVE_%s.ms.fits" % (chip), - method="optimal", - fwhm="tjunk.fwhm_%s.fits" % (chip), - disp_axis="y", - parallel=parallel, - ) - rss.determine_wavelength_solution( - "WAVE_%s.ms.fits" % (chip), - "WAVE_%s" % (chip), - ARC_ref, - poly_dispersion="-5", - poly_fwhm="-3,-5", - flux_min="20.0", - aperture="10", - fwhm_max=str(fwhm_max), - fiberflat="%.2f,%.2f,%.2f,LAMP_%s.disp_cor" - % (start_wave, end_wave, disp_wave, chip), - rel_flux_limits="0.3,4.0", - verbose="1", - ) - else: - im.extract_spectra( - "WAVE_%s.sub.fits" % (chip), - "tjunk_%s.trc.fits" % (chip), - "WAVE_%s.ms.fits" % (chip), - method="aperture", - aperture=5, - disp_axis="y", - parallel=parallel, - ) - rss.determine_wavelength_solution( - "WAVE_%s.ms.fits" % (chip), - "WAVE_%s" % (chip), - ARC_ref, - poly_dispersion="-5", - poly_fwhm="-3,-5", - flux_min="20.0", - aperture="10", - fwhm_max=str(fwhm_max), - rel_flux_limits="0.1,6.0", - verbose="0", - ) - rss.create_pixel_table( - "WAVE_%s.ms.fits" % (chip), - "WAVE_%s.rss.fits" % (chip), - "WAVE_%s.disp.fits" % (chip), - "WAVE_%s.res.fits" % (chip), - ) - rss.resample_wavelength( - "WAVE_%s.rss.fits" % (chip), 
- "WAVE_%s.disp_cor.fits" % (chip), - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_wave, - err_sim=0, - parallel=parallel, - ) - if fiberflat == 1: - im.extract_spectra( - "LAMP_%s.stray.fits" % (chip), - "tjunk_%s.trc.fits" % (chip), - "LAMP_%s.ms.fits" % (chip), - method="optimal", - fwhm="tjunk.fwhm_%s.fits" % (chip), - disp_axis="y", - parallel=parallel, - ) - rss.create_pixel_table( - "LAMP_%s.ms.fits" % (chip), - "LAMP_%s.rss.fits" % (chip), - "WAVE_%s.disp.fits" % (chip), - "WAVE_%s.res.fits" % (chip), - ) - rss.resample_wavelength( - "LAMP_%s.rss.fits" % (chip), - "LAMP_%s.disp_cor.fits" % (chip), - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_wave, - err_sim=0, - compute_densities=1, - parallel=parallel, - ) - if fiberflat == 1 or fiberflat_wave == 1: - rss.includePosTab_drp( - "LAMP_%s.disp_cor.fits" % (chip), - "%s/vimos_HR_%s_pt.txt" % (vimos_calib, chip), - ) - - -def prepareObject_drp( - name_obj, - night, - object, - chip, - boundary_x, - boundary_y, - sky_line_list, - start_wave, - end_wave, - disp_wave, - resolution_fwhm, - flexure_order, - CCD_mask="", - correct_HVEL=False, - straylight=True, - parallel="1", -): - # if CCD_mask!='': - # im.addCCDMask_drp('%s_%s.cosmic.fits'%(name_obj,chip),'%s/CCDMASK_%s_%s.fits'%(vimos_calib,chip,CCD_mask)) - if sky_line_list != "": - # offsetTrace_drp('%s_%s.cosmic.fits'%(name_obj, chip), 'tjunk_%s.trc.fits'%(chip), 'WAVE_%s.disp.fits'%(chip), sky_line_list, 'offsetTrace_%s.log'%(chip), blocks='10', disp_axis='y', size='30') - rss.correctTraceMask_drp( - "tjunk_%s.trc.fits" % (chip), - "tjunk_%s_temp.trc.fits" % (chip), - "offsetTrace_%s.log" % (chip), - "%s_%s.cosmic.fits" % (name_obj, chip), - poly_smooth=flexure_order, - ) - else: - os.system("cp tjunk_%s.trc.fits tjunk_%s_temp.trc.fits" % (chip, chip)) - - if straylight: - im.subtractStraylight_drp( - "%s_%s.cosmic.fits" % (name_obj, chip), - "tjunk_%s_temp.trc.fits" % (chip), - "%s_%s.back.fits" % (name_obj, chip), - "%s_%s.stray.fits" % (name_obj, chip), - disp_axis="y", - aperture=10, - poly_cross=0, - smooth_disp=30, - parallel=parallel, - ) - else: - os.system( - "cp %s_%s.cosmic.fits %s_%s.stray.fits" % (name_obj, chip, name_obj, chip) - ) - - im.extract_spectra( - "%s_%s.stray.fits" % (name_obj, chip), - "tjunk_%s_temp.trc.fits" % (chip), - "%s_%s.ms.fits" % (name_obj, chip), - method="optimal", - fwhm="tjunk.fwhm_%s.fits" % (chip), - disp_axis="y", - parallel=parallel, - ) - - rss.create_pixel_table( - "%s_%s.ms.fits" % (name_obj, chip), - "%s_%s.pix_tab.fits" % (name_obj, chip), - "WAVE_%s.disp.fits" % (chip), - "WAVE_%s.res.fits" % (chip), - ) - - head.addHvelcorHdr_drp( - "%s_%s.pix_tab.fits" % (name_obj, chip), - "HVEL_COR", - RAKey="RA", - RAUnit="d", - DECKey="DEC", - ObsLongKey="ESO TEL GEOLON", - LongSignFlip=1, - ObsLatKey="ESO TEL GEOLAT", - ObsAltKey="ESO TEL GEOELEV", - ModJulKey="MJD-OBS", - extension="0", - ) - if correct_HVEL: - HVEL_key = "HVEL_COR" - else: - HVEL_key = "" - - if sky_line_list != "": - rss.checkPixTable_drp( - "%s_%s.pix_tab.fits" % (name_obj, chip), - sky_line_list, - "offsetWave_%s.log" % (chip), - aperture="12", - ) - rss.correctPixTable_drp( - "%s_%s.pix_tab.fits" % (name_obj, chip), - "%s_%s.pix_tab.fits" % (name_obj, chip), - "offsetWave_%s.log" % (chip), - "%s_%s.pix_tab.fits" % (name_obj, chip), - smooth_poly_cross="1", - smooth_poly_disp=flexure_order, - poly_disp="5", - ) - if float(resolution_fwhm) != 0.0: - rss.matchResolution_drp( - "%s_%s.pix_tab.fits" % (name_obj, chip), - "%s_%s.res.fits" % 
(name_obj, chip), - resolution_fwhm, - parallel=parallel, - ) - rss.resample_wavelength( - "%s_%s.res.fits" % (name_obj, chip), - "%s_%s.disp_cor.fits" % (name_obj, chip), - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_wave, - err_sim="500", - compute_densities=1, - correctHvel=HVEL_key, - parallel=parallel, - ) - else: - rss.resample_wavelength( - "%s_%s.pix_tab.fits" % (name_obj, chip), - "%s_%s.disp_cor.fits" % (name_obj, chip), - start_wave=start_wave, - end_wave=end_wave, - disp_pix=disp_wave, - err_sim="500", - compute_densities=1, - correctHvel=HVEL_key, - parallel=parallel, - ) - rss.includePosTab_drp( - "%s_%s.disp_cor.fits" % (name_obj, chip), - "%s/vimos_HR_%s_pt.txt" % (vimos_calib, chip), - ) - - -def reduceCalibMR_drp( - night, - fiberflat="1", - fiberflat_wave="0", - trace_master="", - wave_start=4880, - wave_end=9300, - wave_disp=2.0, - parallel="auto", -): - chips = ["B.1", "A.2", "A.3", "B.4"] - fiberflat = int(fiberflat) - fiberflat_wave = int(fiberflat_wave) - - image = im.loadImage("LAMP_B.1.fits") - date = image.getHdrValue("ESO OBS START").split("T")[0] - year = int(date.split("-")[0]) - - if year >= 2013: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1300,3450", "1360,3510", "1165,3215", "900,3100"] - peaks_guess = "peaks_2013.txt" - CCD_mask = "" - border = 1 - elif year >= 2007: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1300,3450", "1340,3490", "1150,3300", "900,3150"] - peaks_guess = "peaks.txt" - CCD_mask = "" - border = 1 - else: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1300,3450", "1283,3433", "1133,3283", "920,3170"] - peaks_guess = "peaks_vearly.txt" - CCD_mask = "" - border = 1 - - for i in range(len(chips)): - dir = os.listdir(".") - for file_name in dir: - if ( - "VIMOS_IFU_WAVE%03d" % (int(night)) in file_name - and chips[i] in file_name - ): - im.subtractBias_drp( - file_name, - "WAVE_%s.sub.fits" % (chips[i]), - "BIAS_%s.fits" % (chips[i]), - boundary_x=boundaries_x[i], - boundary_y=boundaries_y[i], - gain="ESO DET OUT1 CONAD", - rdnoise="ESO DET OUT1 RON", - ) - - if parallel == "auto": - cpus = cpu_count() - else: - cpus = int(parallel) - if cpus > 1: - pool = Pool(cpus) - result = [] - for i in range(len(chips)): - result.append( - pool.apply_async( - prepareCalib_drp, - args=( - night, - chips[i], - boundaries_x[i], - boundaries_y[i], - "%s/master_VIMOS_%s_%s" % (vimos_calib, chips[i], peaks_guess), - "%s/ref_lines_ARC_VIMOS_MR_%s.txt" % (vimos_calib, chips[i]), - wave_start, - wave_end, - wave_disp, - 15, - "", - border, - trace_master, - fiberflat, - fiberflat_wave, - CCD_mask, - 1, - ), - ) - ) - pool.close() - pool.join() - else: - for i in range(len(chips)): - prepareCalib_drp( - night, - chips[i], - boundaries_x[i], - boundaries_y[i], - "%s/master_VIMOS_%s_%s" % (vimos_calib, chips[i], peaks_guess), - "%s/ref_lines_ARC_VIMOS_MR_%s.txt" % (vimos_calib, chips[i]), - wave_start, - wave_end, - wave_disp, - 15, - "", - border, - trace_master, - fiberflat, - fiberflat_wave, - CCD_mask, - "auto", - ) - - if fiberflat == 1: - rss.mergeRSS_drp( - "LAMP_B.1.disp_cor.fits,LAMP_A.2.disp_cor.fits,LAMP_A.3.disp_cor.fits,LAMP_B.4.disp_cor.fits", - "LAMP.disp_cor.fits", - ) - rss.create_fiberflat("LAMP.disp_cor.fits", "fiberflat.fits", valid="1200,1600") - - -def reduceCalibHR_drp( - night, - fiberflat="1", - fiberflat_wave="0", - master_trace="0", - wave_start=4880, - wave_end=9300, - wave_disp=2.0, - setup="orange", - 
except_chip="", - parallel="auto", -): - chips = ["B.1", "A.2", "A.3", "B.4"] - if except_chip != "": - except_chips = except_chip.split(",") - for i in range(len(except_chips)): - chips = [x for x in chips if x != except_chips[i]] - fiberflat = int(fiberflat) - fiberflat_wave = int(fiberflat_wave) - - image = im.loadImage("LAMP_B.1.fits") - date = image.getHdrValue("ESO OBS START").split("T")[0] - year = int(date.split("-")[0]) - if date > "2012-04-15" and setup == "blue": - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - # boundaries_y=['1950,4096', '1950,4096', '1800,4096', '1600,4096'] - boundaries_y = ["1410,4096", "1410,4096", "1410,4096", "1410,4096"] - trace_limits = [[840, 2680], [840, 2680], [690, 2680], [490, 2680]] - else: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1,4096", "1,4096", "1,4096", "1,4096"] - if year == 2009: - peaks_guess = "peaks_2009.txt" - wave_guess = "_2009.txt" - CCD_mask = "2009" - border = 0 - elif year >= 2016: - peaks_guess = "peaks_2016.txt" - wave_guess = "_2012.txt" - CCD_mask = "" - border = 1 - elif year >= 2011: - peaks_guess = "peaks.txt" - wave_guess = "_2012.txt" - CCD_mask = "" - border = 1 - elif year >= 2010: - peaks_guess = "peaks.txt" - wave_guess = ".txt" - CCD_mask = "" - border = 4 - elif year >= 2008: - peaks_guess = "peaks_early.txt" - wave_guess = "_early.txt" - CCD_mask = "" - border = 4 - elif year <= 2004: - peaks_guess = "peaks_2004.txt" - wave_guess = "_2004.txt" - CCD_mask = "2004" - border = 3 - else: - peaks_guess = "peaks_vearly.txt" - wave_guess = "_vearly.txt" - CCD_mask = "" - border = 4 - if date > "2012-04-15" and setup == "blue": - wave_guess = "_new.txt" - - for i in range(len(chips)): - dir = os.listdir(".") - for file_name in dir: - if ( - "VIMOS_IFU_WAVE%03d" % (int(night)) in file_name - and chips[i] in file_name - ): - im.subtractBias_drp( - file_name, - "WAVE_%s.sub.fits" % (chips[i]), - "BIAS_%s.fits" % (chips[i]), - boundary_x=boundaries_x[i], - boundary_y=boundaries_y[i], - gain="ESO DET OUT1 CONAD", - rdnoise="ESO DET OUT1 RON", - ) - - if setup == "orange": - ss = "O" - elif setup == "blue": - ss = "B" - elif setup == "red": - ss = "R" - if parallel == "auto": - cpus = cpu_count() - else: - cpus = int(parallel) - if cpus > 1: - pool = Pool(cpus) - result = [] - for i in range(len(chips)): - if int(int(master_trace) == 1): - trace_limit = trace_limits[i] - else: - trace_limit = "" - result.append( - pool.apply_async( - prepareCalib_drp, - args=( - night, - chips[i], - boundaries_x[i], - boundaries_y[i], - "%s/master_VIMOS_%s_%s" % (vimos_calib, chips[i], peaks_guess), - "%s/ref_lines_ARC_VIMOS_HR%s_%s%s" - % (vimos_calib, ss, chips[i], wave_guess), - wave_start, - wave_end, - wave_disp, - 10.0, - setup, - border, - trace_limit, - fiberflat, - fiberflat_wave, - CCD_mask, - 1, - ), - ) - ) - pool.close() - pool.join() - else: - for i in range(len(chips)): - if int(master_trace == 1): - trace_limit = trace_limits[i] - else: - trace_limit = "" - prepareCalib_drp( - night, - chips[i], - boundaries_x[i], - boundaries_y[i], - "%s/master_VIMOS_%s_%s" % (vimos_calib, chips[i], peaks_guess), - "%s/ref_lines_ARC_VIMOS_HR%s_%s%s" - % (vimos_calib, ss, chips[i], wave_guess), - wave_start, - wave_end, - wave_disp, - 8, - setup, - border, - trace_limit, - fiberflat, - fiberflat_wave, - CCD_mask, - "auto", - ) - - if fiberflat == 1 or fiberflat_wave == 1: - merge_flat = "" - for i in range(len(chips)): - merge_flat = merge_flat + "LAMP_%s.disp_cor.fits," % (chips[i]) - 
rss.mergeRSS_drp(merge_flat[:-1], "LAMP.disp_cor.fits") - if setup == "orange": - if len(chips) > 2: - rss.create_fiberflat( - "LAMP.disp_cor.fits", - "fiberflat.fits", - valid="800,1200", - clip="0.3,1.7", - ) - else: - rss.create_fiberflat( - "LAMP.disp_cor.fits", - "fiberflat.fits", - valid="400,800", - clip="0.3,1.7", - ) - elif setup == "blue" and date < "2012-04-15": - if len(chips) > 2: - rss.create_fiberflat( - "LAMP.disp_cor.fits", - "fiberflat.fits", - valid="800,1200", - clip="0.2,4.0", - ) - else: - rss.create_fiberflat( - "LAMP.disp_cor.fits", - "fiberflat.fits", - valid="400,800", - clip="0.3,1.7", - ) - elif setup == "blue" and date >= "2012-04-15": - rss.create_fiberflat("LAMP.disp_cor.fits", "fiberflat.fits", clip="0.3,1.7") - elif setup == "red": - if len(chips) > 2: - rss.create_fiberflat( - "LAMP.disp_cor.fits", - "fiberflat.fits", - valid="800,1200", - clip="0.2,4.0", - ) - else: - rss.create_fiberflat( - "LAMP.disp_cor.fits", - "fiberflat.fits", - valid="400,800", - clip="0.2,4.0", - ) - - -def reduceObjectMR_drp( - night, - name_obj, - object_nr, - wave_start=4880, - wave_end=8500, - wave_disp=2.0, - res_fwhm=7.5, - A_V=0.15, - fiberflat="1", - flux_calib="1", - telluric_cor="1", - flexure_correct="1", - straylight="1", - correct_HVEL=False, - parallel="auto", -): - object_nr = int(object_nr) - night = int(night) - flexure_correct = int(flexure_correct) - straylight = int(straylight) - #correctHVEL = bool(int(correct_HVEL)) - chips = ["B.1", "A.2", "A.3", "B.4"] - try: - image = im.loadImage( - "VIMOS_IFU_OBS%03d_%04d_%s.fits.gz" % (night, object_nr, "B.1") - ) - except IOError: - image = im.loadImage("VIMOS_IFU_OBS%03d_%04d_%s.fits" % (night, object_nr, "B.1")) - date = image.getHdrValue("ESO OBS START").split("T")[0] - year = int(date.split("-")[0]) - - if year == 2009: - CCD_mask = "2009" - else: - CCD_mask = "" - if year >= 2013: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1300,3450", "1360,3510", "1165,3215", "900,3100"] - elif year >= 2007: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1300,3450", "1340,3490", "1150,3300", "900,3150"] - else: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1300,3450", "1283,3433", "1133,3283", "920,3170"] - - if flexure_correct == 1: - sky_line_list = "5577.34,6300.30,6863.97,7276.42,7750.65,8344.61,8885.85" - flexure_order = 2 - else: - sky_line_list = "" - flexure_order = 0 - - fiberflat = int(fiberflat) - flux_calib = int(flux_calib) - telluric_cor = int(telluric_cor) - - for i in range(len(chips)): - try: - im.subtractBias_drp( - "VIMOS_IFU_OBS%03d_%04d_%s.fits.gz" % (night, object_nr, chips[i]), - "%s_%s.sub.fits" % (name_obj, chips[i]), - "BIAS_%s.fits" % (chips[i]), - boundary_x=boundaries_x[i], - boundary_y=boundaries_y[i], - gain="ESO DET OUT1 CONAD", - rdnoise="ESO DET OUT1 RON", - ) - except IOError: - im.subtractBias_drp( - "VIMOS_IFU_OBS%03d_%04d_%s.fits" % (night, object_nr, chips[i]), - "%s_%s.sub.fits" % (name_obj, chips[i]), - "BIAS_%s.fits" % (chips[i]), - boundary_x=boundaries_x[i], - boundary_y=boundaries_y[i], - gain="ESO DET OUT1 CONAD", - rdnoise="ESO DET OUT1 RON", - ) - im.LACosmic_drp( - "%s_%s.sub.fits" % (name_obj, chips[i]), - "%s_%s.cosmic.fits" % (name_obj, chips[i]), - sigma_det="5.0", - flim="1.3", - iter="3", - error_box="1,20", - replace_box="1,20", - rdnoise="ESO DET OUT1 RON", - increase_radius="1", - parallel=parallel, - ) - - if parallel == "auto": - cpus = cpu_count() - else: - cpus = 
int(parallel) - if cpus > 1: - pool = Pool(cpus) - result = [] - for i in range(len(chips)): - result.append( - pool.apply_async( - prepareObject_drp, - args=( - name_obj, - night, - object_nr, - chips[i], - boundaries_x[i], - boundaries_y[i], - sky_line_list, - wave_start, - wave_end, - wave_disp, - res_fwhm, - flexure_order, - CCD_mask, - correct_HVEL, - bool(straylight), - 1, - ), - ) - ) - pool.close() - pool.join() - - else: - for i in range(len(chips)): - prepareObject_drp( - name_obj, - night, - object_nr, - chips[i], - boundaries_x[i], - boundaries_y[i], - sky_line_list, - wave_start, - wave_end, - wave_disp, - res_fwhm, - flexure_order, - CCD_mask, - correct_HVEL, - bool(straylight), - 4, - ) - - head.expandHdrKeys_drp("%s_B.1.disp_cor.fits" % (name_obj), "CCD1") - head.expandHdrKeys_drp("%s_A.2.disp_cor.fits" % (name_obj), "CCD2") - head.expandHdrKeys_drp("%s_A.3.disp_cor.fits" % (name_obj), "CCD3") - head.expandHdrKeys_drp("%s_B.4.disp_cor.fits" % (name_obj), "CCD4") - rss.mergeRSS_drp( - "%s_B.1.disp_cor.fits,%s_A.2.disp_cor.fits,%s_A.3.disp_cor.fits,%s_B.4.disp_cor.fits" - % (name_obj, name_obj, name_obj, name_obj), - "%s.disp_cor.fits" % (name_obj), - ) - if fiberflat == 1: - rss.correctFiberFlat_drp( - "%s.disp_cor.fits" % (name_obj), - "%s.flat.fits" % (name_obj), - "fiberflat.fits", - ) - - if flux_calib == 1: - if fiberflat == 1: - flux.fluxCalibration_drp( - "%s.flat.fits" % (name_obj), - "%s.fobj.fits" % (name_obj), - "ratio.txt", - "CCD1 ESO TEL AIRM START", - "CCD1 EXPTIME", - extinct_curve="Paranal", - ref_units="1e-16", - target_units="1e-16", - norm_sb_fib="", - ) - # else: - flux.fluxCalibration_drp( - "%s.disp_cor.fits" % (name_obj), - "%s.fobj.fits" % (name_obj), - "ratio.txt", - "CCD1 ESO TEL AIRM START", - "CCD1 EXPTIME", - extinct_curve="Paranal", - ref_units="1e-16", - target_units="1e-16", - norm_sb_fib="", - ) - if telluric_cor == 1: - flux.correctTelluric_drp( - "%s.fobj.fits" % (name_obj), - "%s.fobj.fits" % (name_obj), - "telluric_template.fits", - airmass="CCD1 ESO TEL AIRM START", - ) - - -def reduceObjectHR_drp( - night, - name_obj, - object_nr, - wave_start=4880, - wave_end=8500, - wave_disp=2.0, - res_fwhm=7.5, - A_V=0.15, - fiberflat="1", - flux_calib="1", - telluric_cor="1", - flexure_correct="0", - straylight="1", - setup="orange", - correct_HVEL=False, - except_chip="", - parallel="auto", -): - chips = ["B.1", "A.2", "A.3", "B.4"] - if except_chip != "": - except_chips = except_chip.split(",") - for i in range(len(except_chips)): - chips = [x for x in chips if x != except_chips[i]] - - object_nr = int(object_nr) - night = int(night) - flexure_correct = int(flexure_correct) - straylight = int(straylight) - - fiberflat = int(fiberflat) - flux_calib = int(flux_calib) - telluric_cor = int(telluric_cor) - - image = im.loadImage("VIMOS_IFU_OBS%03d_%04d_%s.fits.gz" % (night, object_nr, "B.1")) - date = image.getHdrValue("ESO OBS START").split("T")[0] - year = int(date.split("-")[0]) - if year == 2009: - CCD_mask = "2009" - elif year <= 2004: - CCD_mask = "2004" - else: - CCD_mask = "" - - if setup == "blue" and date >= "2012-04-15": - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1410,4096", "1410,4096", "1410,4096", "1410,4096"] - else: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1,4096", "1,4096", "1,4096", "1,4096"] - - if setup == "orange": - # ss = "O" - if flexure_correct == 1: - sky_line_list = "5577.34,6300.30,6863.97,7276.42" - flexure_order = 2 - else: - sky_line_list = "" 
- flexure_order = 0 - elif setup == "blue" and date < "2012-04-15": - # ss = "B" - if flexure_correct == 1: - sky_line_list = "5577.34" - flexure_order = 0 - else: - sky_line_list = "" - flexure_order = 0 - - elif setup == "blue" and date >= "2012-04-15": - # ss = "B" - sky_line_list = "" - flexure_order = 0 - - elif setup == "red": - # ss = "R" - if flexure_correct == 1: - sky_line_list = "6863.97,7276.42,7913.72,8344.61" - flexure_order = 2 - else: - sky_line_list = "" - flexure_order = 0 - - for i in range(len(chips)): - im.subtractBias_drp( - "VIMOS_IFU_OBS%03d_%04d_%s.fits.gz" % (night, object_nr, chips[i]), - "%s_%s.sub.fits" % (name_obj, chips[i]), - "BIAS_%s.fits" % (chips[i]), - boundary_x=boundaries_x[i], - boundary_y=boundaries_y[i], - gain="ESO DET OUT1 CONAD", - rdnoise="ESO DET OUT1 RON", - ) - im.LACosmic_drp( - "%s_%s.sub.fits" % (name_obj, chips[i]), - "%s_%s.cosmic.fits" % (name_obj, chips[i]), - sigma_det="5.0", - flim="1.3", - iter="3", - error_box="1,20", - replace_box="1,20", - rdnoise="ESO DET OUT1 RON", - increase_radius="1", - parallel="1", - ) - - if parallel == "auto": - cpus = cpu_count() - else: - cpus = int(parallel) - - if cpus > 1: - pool = Pool(cpus) - result = [] - for i in range(len(chips)): - result.append( - pool.apply_async( - prepareObject_drp, - args=( - name_obj, - night, - object_nr, - chips[i], - boundaries_x[i], - boundaries_y[i], - sky_line_list, - wave_start, - wave_end, - wave_disp, - res_fwhm, - flexure_order, - CCD_mask, - correct_HVEL, - bool(straylight), - 1, - ), - ) - ) - pool.close() - pool.join() - - else: - for i in range(len(chips)): - prepareObject_drp( - name_obj, - night, - object_nr, - chips[i], - boundaries_x[i], - boundaries_y[i], - sky_line_list, - wave_start, - wave_end, - wave_disp, - res_fwhm, - flexure_order, - CCD_mask, - correct_HVEL, - bool(straylight), - 4, - ) - for i in range(len(chips)): - head.expandHdrKeys_drp( - "%s_%s.disp_cor.fits" % (name_obj, chips[i]), - "CCD%s" % (chips[i].split(".")[1]), - exclude="ESO TEL AIRM START,EXPTIME", - ) - if flux_calib == 1: - flux.fluxCalibration_drp( - "%s_%s.disp_cor.fits" % (name_obj, chips[i]), - "%s_%s.fobj.fits" % (name_obj, chips[i]), - "ratio.txt", - "ESO TEL AIRM START", - "EXPTIME", - extinct_curve="Paranal", - ref_units="1e-16", - target_units="1e-16", - norm_sb_fib="", - ) - else: - os.system( - "%s_%s.disp_cor.fits %s_%s.fobj.fits" - % (name_obj, chips[i], name_obj, chips[i]) - ) - if telluric_cor == 1: - flux.correctTelluric_drp( - "%s.fobj.fits" % (name_obj), - "%s.fobj.fits" % (name_obj), - "telluric_template.fits", - airmass="ESO TEL AIRM START", - ) - merge_obj = "" - for i in range(len(chips)): - merge_obj = merge_obj + "%s_%s.fobj.fits," % (name_obj, chips[i]) - rss.mergeRSS_drp(merge_obj[:-1], "%s.fobj.fits" % (name_obj)) - - if fiberflat == 1: - rss.correctFiberFlat_drp( - "%s.fobj.fits" % (name_obj), "%s.flat.fits" % (name_obj), "fiberflat.fits" - ) - - -def reduceStdMR_drp( - night, - std_nr, - wave_start="4880", - wave_end="9300", - wave_disp="2.0", - res_fwhm="0.0", - ref_star="", - A_V=0.15, - mask_wave="6850,6960,7480,7800", - mask_telluric="", - straylight="1", - parallel="auto", -): - chips = ["B.1", "A.2", "A.3", "B.4"] - - sky_line_list = "5577.34,6300.30,6863.97,7276.42,7750.65,8344.61,8885.85" - night = int(night) - std_nr = int(std_nr) - straylight = int(straylight) - - image = im.loadImage("VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr, "B.1")) - date = image.getHdrValue("ESO OBS START").split("T")[0] - year = int(date.split("-")[0]) - 
- if year >= 2013: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1300,3450", "1360,3510", "1165,3215", "900,3100"] - else: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1300,3450", "1340,3490", "1150,3300", "900,3150"] - - if year == 2009: - CCD_mask = "2009" - else: - CCD_mask = "" - - for i in range(len(chips)): - im.subtractBias_drp( - "VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr + i, chips[i]), - "%s_%s.sub.fits" % ("STD" + str(i + 1), chips[i]), - "BIAS_%s.fits" % (chips[i]), - boundary_x=boundaries_x[i], - boundary_y=boundaries_y[i], - gain="ESO DET OUT1 CONAD", - rdnoise="ESO DET OUT1 RON", - ) - im.LACosmic_drp( - "%s_%s.sub.fits" % ("STD" + str(i + 1), chips[i]), - "%s_%s.cosmic.fits" % ("STD" + str(i + 1), chips[i]), - sigma_det="5.0", - flim="1.3", - iter="3", - error_box="1,20", - replace_box="1,20", - rdnoise="ESO DET OUT1 RON", - increase_radius="1", - parallel=parallel, - ) - - if parallel == "auto": - cpus = cpu_count() - else: - cpus = int(parallel) - - if cpus > 1: - pool = Pool(cpus) - result = [] - for i in range(len(chips)): - result.append( - pool.apply_async( - prepareObject_drp, - args=( - "STD" + str(i + 1), - night, - std_nr + i, - chips[i], - boundaries_x[i], - boundaries_y[i], - sky_line_list, - wave_start, - wave_end, - wave_disp, - res_fwhm, - 2, - CCD_mask, - False, - bool(straylight), - 1, - ), - ) - ) - pool.close() - pool.join() - else: - for i in range(len(chips)): - prepareObject_drp( - "STD" + str(i + 1), - night, - std_nr + i, - chips[i], - boundaries_x[i], - boundaries_y[i], - sky_line_list, - wave_start, - wave_end, - wave_disp, - res_fwhm, - 2, - CCD_mask, - False, - bool(straylight), - 4, - ) - - rss.mergeRSS_drp( - "STD1_B.1.disp_cor.fits,STD2_A.2.disp_cor.fits,STD3_A.3.disp_cor.fits,STD4_B.4.disp_cor.fits", - "STD.disp_cor.fits", - ) - rss.correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "fiberflat.fits") - - rss.splitFibers_drp( - "STD.flat.fits", - "STD1.flat.fits,STD2.flat.fits,STD3.flat.fits,STD4.flat.fits", - "QD1,QD2,QD3,QD4", - ) - std_ratios = [] - for i in range(len(chips)): - sky.constructSkySpec_drp( - "STD%d.flat.fits" % (i + 1), - "STD%d.sky_spec.fits" % (i + 1), - clip_sigma=0.0, - nsky=150, - ) - sky.subtractSkySpec_drp( - "STD%d.flat.fits" % (i + 1), - "STD%d.sobj.fits" % (i + 1), - "STD%d.sky_spec.fits" % (i + 1), - ) - head.copyHdrKey_drp( - "VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr + i, chips[i]), - "STD%d.sobj.fits" % (i + 1), - "ESO TEL AIRM START", - ) - - std_telluric = [] - if ref_star != "": - flux.createSensFunction_drp( - "STD%d.sobj.fits" % (i + 1), - "ratio_%d.txt" % (i + 1), - ref_star, - airmass="ESO TEL AIRM START", - exptime="EXPTIME", - coadd=200, - extinct_curve="Paranal", - out_star="star_%d.txt" % (i + 1), - mask_wave=mask_wave, - mask_telluric=mask_telluric, - smooth_poly=-12, - ) - std_ratios.append(Spectrum1D()) - std_ratios[i].loadTxtData("ratio_%s.txt" % (chips[i].split(".")[1])) - if mask_telluric != "": - os.system( - "cp telluric_spec.fits telluric_spec_%s.fits" - % (chips[i].split(".")[1]) - ) - std_telluric.append(Spectrum1D()) - std_telluric[i].loadFitsData( - "telluric_spec_%s.fits" % (chips[i].split(".")[1]) - ) - - if ref_star != "": - for i in range(len(std_ratios)): - if i == 0: - mean_std_ratio = std_ratios[0] - else: - mean_std_ratio += std_ratios[i] - - if mask_telluric != "": - if i == 0: - mean_telluric = std_telluric[0] - else: - mean_telluric += std_telluric[i] - - mean_std_ratio = 
mean_std_ratio / len(std_ratios) - mean_std_ratio.writeTxtData("ratio.txt") - if mask_telluric != "": - mean_telluric = mean_telluric / len(std_telluric) - mean_telluric.writeFitsData("telluric.fits") - - -def reduceStdHR_drp( - night, - std_nr, - wave_start="4880", - wave_end="9300", - wave_disp="2.0", - res_fwhm="0.0", - ref_star="", - setup=None, - A_V=0.15, - mask_wave="", - mask_telluric="", - smooth_poly=-12, - except_chip="", - straylight="1", - parallel="auto", -): - chips = ["B.1", "A.2", "A.3", "B.4"] - - if except_chip != "": - except_chips = except_chip.split(",") - for i in range(len(except_chips)): - chips = [x for x in chips if x != except_chips[i]] - sky_line_list = "" - night = int(night) - std_nr = int(std_nr) - straylight = int(straylight) - - image = im.loadImage("VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr, "B.1")) - date = image.getHdrValue("ESO OBS START").split("T")[0] - - year = int(date.split("-")[0]) - - if date > "2012-04-15" and setup == "blue": - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1410,4096", "1410,4096", "1410,4096", "1410,4096"] - else: - boundaries_x = ["51,2098", "51,2098", "51,2098", "51,2098"] - boundaries_y = ["1,4096", "1,4096", "1,4096", "1,4096"] - if year == 2009: - CCD_mask = "2009" - elif year <= 2004: - CCD_mask = "2004" - else: - CCD_mask = "" - - for i in range(len(chips)): - im.subtractBias_drp( - "VIMOS_IFU_STD%03d_%04d_%s.fits.gz" - % (night, std_nr + int(chips[i].split(".")[1]) - 1, chips[i]), - "%s_%s.sub.fits" % ("STD" + str(i + 1), chips[i]), - "BIAS_%s.fits" % (chips[i]), - boundary_x=boundaries_x[i], - boundary_y=boundaries_y[i], - gain="ESO DET OUT1 CONAD", - rdnoise="ESO DET OUT1 RON", - ) - im.LACosmic_drp( - "%s_%s.sub.fits" % ("STD" + str(i + 1), chips[i]), - "%s_%s.cosmic.fits" % ("STD" + str(i + 1), chips[i]), - sigma_det="5.0", - flim="1.3", - iter="3", - error_box="1,20", - replace_box="1,20", - rdnoise="ESO DET OUT1 RON", - increase_radius="1", - parallel="1", - ) - - if parallel == "auto": - cpus = cpu_count() - else: - cpus = int(parallel) - - if cpus > 1: - pool = Pool(cpus) - result = [] - for i in range(len(chips)): - result.append( - pool.apply_async( - prepareObject_drp, - args=( - "STD" + str(i + 1), - night, - std_nr + i, - chips[i], - boundaries_x[i], - boundaries_y[i], - sky_line_list, - wave_start, - wave_end, - wave_disp, - res_fwhm, - 2, - CCD_mask, - False, - bool(straylight), - 1, - ), - ) - ) - pool.close() - pool.join() - else: - for i in range(len(chips)): - prepareObject_drp( - "STD" + str(i + 1), - night, - std_nr + i, - chips[i], - boundaries_x[i], - boundaries_y[i], - sky_line_list, - wave_start, - wave_end, - wave_disp, - res_fwhm, - 2, - CCD_mask, - False, - bool(straylight), - 4, - ) - - merge_obj = "" - for i in range(len(chips)): - merge_obj = merge_obj + "STD%d_%s.disp_cor.fits," % (i + 1, chips[i]) - rss.mergeRSS_drp(merge_obj[:-1], "STD.disp_cor.fits") - rss.correctFiberFlat_drp("STD.disp_cor.fits", "STD.flat.fits", "fiberflat.fits") - - QDs = "" - files = "" - for i in range(len(chips)): - QDs = QDs + "QD%s," % (chips[i].split(".")[1]) - files = files + "STD%s.flat.fits," % (chips[i].split(".")[1]) - rss.splitFibers_drp("STD.flat.fits", files[:-1], QDs[:-1]) - std_ratios = [] - std_telluric = [] - for i in range(len(chips)): - sky.constructSkySpec_drp( - "STD%s.flat.fits" % (chips[i].split(".")[1]), - "STD%s.sky_spec.fits" % (chips[i].split(".")[1]), - clip_sigma=0.0, - nsky=70, - non_neg=0, - ) - sky.subtractSkySpec_drp( - "STD%s.flat.fits" % 
(chips[i].split(".")[1]), - "STD%s.sobj.fits" % (chips[i].split(".")[1]), - "STD%s.sky_spec.fits" % (chips[i].split(".")[1]), - ) - head.copyHdrKey_drp( - "VIMOS_IFU_STD%03d_%04d_%s.fits.gz" % (night, std_nr + i, chips[i]), - "STD%s.sobj.fits" % (chips[i].split(".")[1]), - "ESO TEL AIRM START", - ) - - if ref_star != "": - flux.createSensFunction_drp( - "STD%s.sobj.fits" % (chips[i].split(".")[1]), - "ratio_%s.txt" % (chips[i].split(".")[1]), - ref_star, - airmass="ESO TEL AIRM START", - exptime="EXPTIME", - coadd=200, - extinct_curve="Paranal", - out_star="star_%s.txt" % (chips[i].split(".")[1]), - mask_wave=mask_wave, - mask_telluric=mask_telluric, - smooth_poly=smooth_poly, - ) - std_ratios.append(Spectrum1D()) - std_ratios[i].loadTxtData("ratio_%s.txt" % (chips[i].split(".")[1])) - if mask_telluric != "": - os.system( - "cp telluric_spec.fits telluric_spec_%s.fits" - % (chips[i].split(".")[1]) - ) - std_telluric.append(Spectrum1D()) - std_telluric[i].loadFitsData( - "telluric_spec_%s.fits" % (chips[i].split(".")[1]) - ) - - if ref_star != "": - for i in range(len(std_ratios)): - if i == 0: - mean_std_ratio = std_ratios[0] - else: - mean_std_ratio += std_ratios[i] - - if mask_telluric != "": - if i == 0: - mean_telluric = std_telluric[0] - else: - mean_telluric += std_telluric[i] - - mean_std_ratio = mean_std_ratio / len(std_ratios) - mean_std_ratio.writeTxtData("ratio.txt") - if mask_telluric != "": - mean_telluric = mean_telluric / len(std_telluric) - mean_telluric.writeFitsData("telluric.fits") - - -def subtractSkyField_drp( - object_in, - object_out, - sky_field, - factor, - scale_region="", - scale_ind=1, - clip_sigma=0.0, - nsky=200, -): - rss.splitFibers_drp( - object_in, - "obj_QD1.fits,obj_QD2.fits,obj_QD3.fits,obj_QD4.fits", - "QD1,QD2,QD3,QD4", - ) - rss.splitFibers_drp( - sky_field, - "sky_QD1.fits,sky_QD2.fits,sky_QD3.fits,sky_QD4.fits", - "QD1,QD2,QD3,QD4", - ) - for i in range(4): - sky.constructSkySpec_drp( - "sky_QD%d.fits" % (i + 1), - "sky_spec_QD%d.fits" % (i + 1), - clip_sigma=clip_sigma, - filter=vimos_calib + "R_Johnson.txt,0,1", - nsky=nsky, - ) - sky.subtractSkySpec_drp( - "obj_QD%d.fits" % (i + 1), - "sobj_QD%d.fits" % (i + 1), - "sky_spec_QD%d.fits" % (i + 1), - factor=factor, - scale_ind=scale_ind, - scale_region=scale_region, - ) - head.copyHdrKey_drp( - "sky_QD%d.fits" % (i + 1), - "sobj_QD%d.fits" % (i + 1), - "hierarch PIPE NSKY FIB", - ) - head.copyHdrKey_drp( - "sky_QD%d.fits" % (i + 1), - "sobj_QD%d.fits" % (i + 1), - "hierarch PIPE SKY MEAN", - ) - head.copyHdrKey_drp( - "sky_QD%d.fits" % (i + 1), - "sobj_QD%d.fits" % (i + 1), - "hierarch PIPE SKY MIN", - ) - head.copyHdrKey_drp( - "sky_QD%d.fits" % (i + 1), - "sobj_QD%d.fits" % (i + 1), - "hierarch PIPE SKY MAX", - ) - head.copyHdrKey_drp( - "sky_QD%d.fits" % (i + 1), - "sobj_QD%d.fits" % (i + 1), - "hierarch PIPE SKY RMS", - ) - head.expandHdrKeys_drp( - "sobj_QD%d.fits" % (i + 1), - "CCD%d" % (i + 1), - "PIPE NSKY FIB,PIPE SKY MEAN,PIPE SKY MIN,PIPE SKY MAX,PIPE SKY RMS,PIPE SKY SCALE", - ) - rss.mergeRSS_drp("sobj_QD1.fits,sobj_QD2.fits,sobj_QD3.fits,sobj_QD4.fits", object_out) - os.system("rm *QD?.fits") From ad12f8f424d3253eb70b2c82f5f2c668e1b7a60e Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 11 Jan 2024 10:23:54 -0300 Subject: [PATCH 16/18] removing half-ported ILD functions --- python/lvmdrp/core/fluxcal.py | 998 ---------------------------------- 1 file changed, 998 deletions(-) diff --git a/python/lvmdrp/core/fluxcal.py b/python/lvmdrp/core/fluxcal.py index 
cd693de3..1ba1c1c7 100644 --- a/python/lvmdrp/core/fluxcal.py +++ b/python/lvmdrp/core/fluxcal.py @@ -18,1008 +18,10 @@ from scipy.interpolate import BSpline, interp1d, splrep from scipy.ndimage import median_filter from scipy.optimize import minimize, nnls -from scipy.special import eval_chebyc, eval_legendre f99_ext = F99(Rv=3.1) - -# UTILS FUNCTIONS --------------------------------------------------------------------------------- - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/misc/djs_laxisnum.pro -def djs_laxisnum(dimens, iaxis=None): - # - # Need one parameter - # - # IF N_PARAMS() LT 1 THEN BEGIN - # PRINT, 'Syntax - result = djs_laxisnum( dimens, [iaxis= ] )' - # RETURN, -1 - # ENDIF - - if iaxis is None: - iaxis = 0 - - ndimen = len(dimens) - naxis = np.int(dimens) # convert to type LONG - - if iaxis >= ndimen: - print("Invalid axis selection!") - return -1 - - result = np.zeros(naxis, dtype=int) - - if ndimen == 1: - result[:] = 0 - elif ndimen == 2: - if iaxis == 0: - for ii in range(0, naxis[0] - 1): - result[ii, :] = ii - elif iaxis == 1: - for ii in range(0, naxis[1] - 1): - result[:, ii] = ii - elif ndimen == 3: - if iaxis == 0: - for ii in range(0, naxis[0] - 1): - result[ii, :, :] = ii - elif iaxis == 1: - for ii in range(0, naxis[1] - 1): - result[:, ii, :] = ii - elif iaxis == 2: - for ii in range(0, naxis[2] - 1): - result[:, :, ii] = ii - else: - print(ndimen, " dimensions not supported!") - result = -1 - - return result - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/math/djs_reject.pro -def djs_reject( - ydata, - ymodel, - outmask=None, - inmask=None, - sigma=None, - invvar=None, - upper=None, - lower=None, - maxdev=None, - maxrej=None, - groupsize=None, - groupdim=None, - sticky=None, - groupbadpix=None, - grow=None, -): - # if (n_params() LT 2 OR NOT arg_present(outmask)) then begin - # print, 'Syntax: qdone = djs_reject(ydata, ymodel, outmask=, [ inmask=, $' - # print, ' sigma=, invvar=, upper=, lower=, maxdev=, grow= $' - # print, ' maxrej=, groupsize=, groupdim=, /sticky, /groupbadpix] )' - # return, 1 - # endif - - ndata = len(ydata) - if ndata == 0: - print("No data points") - if ndata != len(ydata): - print("Dimensions of YDATA and YMODEL do not agree") - - if inmask is not None: - if ndata != len(inmask): - print("Dimensions of YDATA and INMASK do not agree") - - if maxrej is not None: - if groupdim is not None: - if len(maxrej) != len(groupdim): - print("MAXREJ and GROUPDIM must have same number of elements!") - if groupsize is not None: - if len(maxrej) != len(groupsize): - print("MAXREJ and GROUPSIZE must have same number of elements!") - else: - groupsize = ndata - - # ---------- - # Create OUTMASK, setting =1 for good data - - if outmask is not None: - if ndata != len(outmask): - print("Dimensions of YDATA and OUTMASK do not agree") - else: - outmask = np.ones_like(ydata, dtype=bool) - - if ymodel is None: - if inmask is not None: - outmask[:] = inmask - return 0 - - if sigma is not None and invvar is not None: - print("Cannot set both SIGMA and INVVAR") - - if sigma is None and invvar is None: - if inmask is not None: - igood = np.where(inmask & outmask)[0] - else: - igood = np.where(outmask) - ngood = igood.size - - if ngood > 1: - sigma = np.std(ydata[igood] - ymodel[igood]) # scalar value - else: - sigma = 0 - - # if (n_elements(sigma) NE 1 AND n_elements(sigma) NE ndata) then $ - # message, 'Invalid number of elements for SIGMA' - - ydiff = ydata - ymodel - - # 
---------- - # The working array is BADNESS, which is set to zero for good points - # (or points already rejected), and positive values for bad points. - # The values determine just how bad a point is, either corresponding - # to the number of SIGMA above or below the fit, or to the number of - # multiples of MAXDEV away from the fit. - - badness = 0.0 * outmask - - # ---------- - # Decide how bad a point is according to LOWER - - if lower is not None: - if sigma is not None: - qbad = ydiff < (-lower * sigma) - badness = (((-ydiff / (sigma + (sigma == 0))) > 0) * qbad) + badness - else: - qbad = ydiff * np.sqrt(invvar) < (-lower) - badness = (((-ydiff * np.sqrt(invvar)) > 0) * qbad) + badness - - # ---------- - # Decide how bad a point is according to UPPER - - if upper is not None: - if sigma is not None: - qbad = ydiff > (upper * sigma) - badness = (((ydiff / (sigma + (sigma == 0))) > 0) * qbad) + badness - else: - qbad = ydiff * np.sqrt(invvar) > upper - badness = (((ydiff * np.sqrt(invvar)) > 0) * qbad) + badness - - # ---------- - # Decide how bad a point is according to MAXDEV - - if maxdev is not None: - qbad = np.abs(ydiff) > maxdev - badness = (np.abs(ydiff) / maxdev * qbad) + badness - - # ---------- - # Do not consider rejecting points that are already rejected by INMASK. - # Do not consider rejecting points that are already rejected by OUTMASK - # if /STICKY is set. - - if inmask is not None: - badness = badness * inmask - if sticky is not None: - badness = badness * outmask - - # ---------- - # Reject a maximum of MAXREJ (additional) points in all the data, - # or in each group as specified by GROUPSIZE, and optionally along - # each dimension specified by GROUPDIM. - - if maxrej is not None: - # Loop over each dimension of GROUPDIM (or loop once if not set) - for iloop in range(0, (len(groupdim) > 1) - 1): - # Assign an index number in this dimension to each data point - if len(groupdim) > 0: - yndim = len(ydata.shape) - if groupdim[iloop] > yndim: - print("GROUPDIM is larger than number of dimensions for YDATA") - dimnum = djs_laxisnum(ydata.shape, iaxis=groupdim[iloop] - 1) - else: - dimnum = 0 - - # Loop over each vector specified by GROUPDIM. For ex, if - # this is a 2-D array with GROUPDIM=1, then loop over each - # column of the data. If GROUPDIM=2, then loop over each row. - # If GROUPDIM is not set, then use all whole image. - for ivec in range(0, np.max(dimnum)): - if dimnum is not None: - indx = np.where(dimnum == ivec)[0] - else: - indx = np.linspace(ndata) - - # Within this group of points, break it down into groups - # of points specified by GROUPSIZE (if set). - nin = len(indx) - - if groupbadpix is not None: - goodtemp = badness == 0 - groups_lower = np.where([1, goodtemp] - goodtemp != 1)[0] - groups_upper = np.where([goodtemp[1:], 1] - goodtemp == 1)[0] - ngroups = len(groups_lower) - else: - if groupsize is None: - ngroups = 1 - groups_lower = 0 - groups_upper = nin - 1 - else: - ngroups = nin / groupsize + 1 - groups_lower = np.linspace(ngroups) * groupsize - groups_upper = ( - (np.linspace(ngroups) + 1) * groupsize < nin - ) - 1 - - for igroup in range(0, ngroups - 1): - i1 = groups_lower[igroup] - i2 = groups_upper[igroup] - nii = i2 - i1 + 1 - - # Need the test that i1 NE -1 below to prevent a crash condition, - # but why is it that we ever get groups w/out any points? - if nii > 0 and i1 != -1: - jj = indx[i1:i2] - # Test if too many points rejected in this group... 
- if np.sum(badness[jj] != 0) > maxrej[iloop]: - isort = np.argsort(badness[jj]) - # Make the following points good again... - badness[jj[isort[0 : nii - maxrej[iloop] - 1]]] = 0 - i1 = i1 + groupsize[iloop] - - # ---------- - # Now modify OUTMASK, rejecting points specified by INMASK=0, - # OUTMASK=0 (if /STICKY is set), or BADNESS>0. - - newmask = badness == 0 - if grow is not None: - rejects = np.where(newmask == 0)[0] - if rejects.size != 0: - for jj in range(1, grow): - newmask[(rejects - jj) > 0] = 0 - newmask[(rejects + jj) < (ndata - 1)] = 0 - - if inmask is not None: - newmask = newmask & inmask - if sticky is not None: - newmask = newmask & outmask - - # Set QDONE if the input OUTMASK is identical to the output OUTMASK - qdone = np.sum(newmask != outmask) == 0 - outmask = newmask - - return qdone - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/cholesky_band.pro -def cholesky_band(lower, mininf=None, verbose=False): - if mininf is None: - mininf = 0.0 - # compute cholesky decomposition of banded matrix - # lower[bandwidth, n] n is the number of linear equations - - # I'm doing lower cholesky decomposition from lapack, spbtf2.f - - bw = lower.shape[0] - n = lower.shape[1] - bw - - negative = np.where(lower[0, 0 : n - 1] <= mininf)[0] - if negative.size != 0: - if verbose: - print("bad entries") - print(negative) - return negative - - kn = bw - 1 - spot = 1 + np.linspace(kn) - bi = np.linspace(kn) - for i in range(1, kn - 1): - bi = [bi, np.linspace(kn - i) + (kn + 1) * i] - - for j in range(0, n - 1): - lower[0, j] = np.sqrt(lower[0, j]) - lower[spot, j] = lower[spot, j] / lower[0, j] - x = lower[spot, j] - - if np.all(np.isnif(x)): - if verbose: - print("NaN found in cholesky_band") - return j - - hmm = np.transpose(x) @ x - here = bi + (j + 1) * bw - lower[here] = lower[here] - hmm[bi] - - return -1 - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/cholesky_solve.pro -def cholesky_solve(a, b): - bw = a.shape[0] - n = b.shape[1] - bw - - kd = bw - 1 - - # first round - spot = np.linspace(kd) + 1 - for j in range(0, n - 1): - b[j] = b[j] / a[0, j] - b[j + spot] = b[j + spot] - b[j] * a[spot, j] - - # second round - - spot = kd - np.linspace(kd) - for j in range(n - 1, 0, -1): - b[j] = (b[j] - np.sum(a[spot, j] * b[j + spot])) / a[0, j] - - return -1 - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/intrv.pro -def intrv(x, fullbkpt, nbkptord): - nx = len(x) - nbkpt = len(fullbkpt) - n = nbkpt - nbkptord - - indx = np.zeros(nx, dtype=int) - - ileft = nbkptord - 1 - for i in range(0, nx - 1): - while x[i] > fullbkpt[ileft + 1] and ileft < n - 1: - ileft = ileft + 1 - indx[i] = ileft - - return indx - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/bsplvn.pro -def bsplvn(bkpt, nord, x, ileft): - # - # Conversion of slatec utility bsplvn, used only by efc - # - # parameter index is not passed, as efc always calls with 1 - # treat x as array for all values between ileft and ileft+1 - ## - - nx = len(x) - - # - # This is to break up really HUGE arrays into manageable chunks - # - if nx > 12000000: - lower = 0 - upper = 6399999 - vnikx = np.zeros(nord) @ x - - while lower < nx: - # splog, lower, upper, nx - vnikx[lower:upper, :] = bsplvn( - bkpt, nord, x[lower:upper], ileft[lower:upper] - ) - lower = upper + 1 - upper = (upper + 6400000) < (nx - 1) - - return vnikx - - vnikx = np.zeros(nord) @ x - deltap = 
vnikx - deltam = vnikx - vmprev = x * 0 - vm = x * 0 - - j = 0 - vnikx[:, 0] = 1.0 - - while j < nord - 1: - ipj = ileft + j + 1 - deltap[:, j] = bkpt[ipj] - x - imj = ileft - j - deltam[:, j] = x - bkpt[imj] - vmprev = 0.0 - for num in range(0, j): - vm = vnikx[:, num] / (deltap[:, num] + deltam[:, j - num]) - vnikx[:, num] = vm * deltap[:, num] + vmprev - vmprev = vm * deltam[:, j - num] - - j = j + 1 - vnikx[:, j] = vmprev - - return vnikx - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/bspline_action.pro -def bspline_action(x, sset, x2=None): - if not isinstance(sset, dict): - print("Please send in a proper B-spline structure") - return -1 # , lower, upper - - npoly = 1 - nx = len(x) - - # - # Check for the existence of x2 - # - if x2 is not None: - if len(x2) != nx: - print("dimensions do not match between x and x2") - return -1 # , lower, upper - - if "npoly" in sset.keys(): - npoly = sset["npoly"] - - nord = sset["nord"] - goodbk = np.where(sset["bkmask"] != 0) - nbkpt = goodbk.size - if nbkpt < 2 * nord: - return -2 # , lower, upper - n = nbkpt - nord - - gb = sset["fullbkpt"][goodbk] - - bw = npoly * nord - action = np.zeros(bw) @ x - - lower = np.zeros(n - nord + 1) - upper = np.zeros(n - nord + 1) - 1 - - indx = intrv(x, gb, nord) - - bf1 = bsplvn(gb, nord, x, indx) - action = bf1 - - # -------------------------------------------------------------- - # sneaky way to calculate upper and lower indices when - # x is sorted - # - aa = np.unique(indx, return_index=True)[1] - upper[indx[aa] - nord + 1] = aa - - rindx = indx[::-1] - bb = np.unique(rindx, return_index=True)[1] - lower[rindx[bb] - nord + 1] = nx - bb - 1 - - # --------------------------------------------------------------- - # just attempt this if 2d fit is required - # - if x2 is not None: - x2norm = 2.0 * (x2[:] - sset["xmin"]) / (sset["xmax"] - sset["xmin"]) - 1.0 - if sset["funcname"] == "poly": - temppoly = np.ones(npoly) @ (x2norm * 0.0 + 1.0) - for i in range(1, npoly - 1): - temppoly[:, i] = temppoly[:, i - 1] * x2norm - if sset["funcname"] == "poly1": - temppoly = np.ones(npoly) @ x2norm - for i in range(1, npoly - 1): - temppoly[:, i] = temppoly[:, i - 1] * x2norm - if sset["funcname"] == "chebyshev": - temppoly = eval_chebyc(npoly - 1, x2norm) - if sset["funcname"] == "legendre": - temppoly = eval_legendre(npoly - 1, x2norm) - else: - temppoly = eval_legendre(npoly - 1, x2norm) - - action = np.zeros((nx, bw)) - counter = -1 - for ii in range(0, nord - 1): - for jj in range(0, npoly - 1): - counter = counter + 1 - action[:, counter] = bf1[:, ii] * temppoly[:, jj] - - return action, lower, upper - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/bspline_maskpoints.pro -def bspline_maskpoints(sset, errb, npoly=None): - if npoly is None: - npoly = 1 - - goodbk = np.where(sset["bkmask"] != 0)[0] - nbkpt = goodbk.size - nord = sset["nord"] - - if nbkpt <= 2 * nord: - return -2 - - hmm = errb[np.unique(errb / npoly, return_index=True)[1]] / npoly - n = nbkpt - nord - - if np.where(hmm >= n)[0].size != 0: - return -2 - - test = np.zeros(nbkpt, dtype=int) - for jj in range(-np.ceil(nord / 2.0), (nord / 2.0) - 1): - inside = (((hmm + jj) > 0) + nord) < (n - 1) - test[inside] = 1 - - maskthese = np.where(test == 1)[0] - - if maskthese.size != 0: - return -2 - - reality = goodbk[maskthese] - if np.sum(sset["bkmask"][reality]) == 0: - return -2 - - sset["bkmask"][reality] = 0 - return -1 - - -# NOTE: taken from 
https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/bspline_valu.pro -def bspline_valu(x, sset, x2=None, action=None, upper=None, lower=None): - nx = len(x) - mask = np.zeros(nx, dtype=int) - - if not isinstance(sset, dict): - print("Please send in a proper B-spline structure") - return np.zeros_like(x), mask - - xsort = np.argsort(x) - npoly = 1 - xwork = x[xsort] - - if x2 is not None: - if "npoly" in sset.keys(): - npoly = sset["npoly"] - x2work = x2[xsort] - else: - x2work = 0 - - if action is None: - action, lower, upper = bspline_action(xwork, sset, x2=x2work) - - yfit = x * 0.0 - nord = sset["nord"] - bw = npoly * nord - - spot = np.linspace(bw) - goodbk = np.where(sset["bkmask"] != 0) - nbkpt = goodbk.size - coeffbk = np.where(sset["bkmask"][nord:] != 0) - n = nbkpt - nord - - sc = len(sset["coeff"]) - if sc[0] == 2: - goodcoeff = sset["coeff"][:, coeffbk] - else: - goodcoeff = sset["coeff"][coeffbk] - - for i in range(0, n - nord): - ict = upper[i] - lower[i] + 1 - - if ict > 0: - yfit[lower[i]: upper[i]] = ( - goodcoeff[i * npoly + spot] @ action[lower[i]: upper[i], :] - ) - - yy = yfit - yy[xsort] = yfit - - mask[:] = 1 - gb = sset["fullbkpt"][goodbk] - - outside = np.where((x < gb[nord - 1]) | (x > gb[n]))[0] - if outside.size != 0: - mask[outside] = 0 - - hmm = np.where(goodbk[1:] - goodbk > 2)[0] - nhmm = hmm.size - for jj in range(0, nhmm - 1): - inside = np.where( - (x >= sset["fullbkpt"][goodbk[hmm[jj]]]) - & (x <= sset["fullbkpt"][goodbk[hmm[jj] + 1] - 1]) - ) - if inside.size != 0: - mask[inside] = 0 - - return yy, mask - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/create_bsplineset.pro -def create_bsplineset(fullbkpt, nord, npoly=None): - numbkpt = len(fullbkpt) - numcoeff = numbkpt - nord - - if npoly is None: - sset = { - "fullbkpt": fullbkpt, - "bkmask": np.ones(numbkpt, dtype=bool), - "nord": int(nord), - "coeff": np.zeros(numcoeff), - "icoeff": np.zeros(numcoeff), - } - else: - sset = { - "fullbkpt": fullbkpt, - "bkmask": np.ones(numbkpt, dtype=bool), - "nord": int(nord), - "xmin": 0.0, - "xmax": 1.0, - "funcname": "legendre", - "npoly": int(npoly), - "coeff": np.zeros((npoly, numcoeff)), - "icoeff": np.zeros((npoly, numcoeff)), - } - - return sset - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/bspline_bkpts.pro -def bspline_bkpts( - x, - nord, - bkpt=None, - bkspace=None, - nbkpts=None, - everyn=None, - silent=True, - bkspread=None, - placed=None, -): - nx = len(x) - - if bkpt is None: - range = np.max(x) - np.min(x) - startx = np.min(x) - if placed is not None: - w = np.where(placed >= startx and placed <= startx + range)[0] - cnt = w.size - nbkpts = cnt - if nbkpts < 2: - nbkpts = 2 - tempbkspace = np.double(range / (float(nbkpts - 1))) - bkpt = (np.linspace(nbkpts)) * tempbkspace + startx - else: - bkpt = placed[w] - elif bkspace is not None: - nbkpts = int(range / float(bkspace)) + 1 - if nbkpts < 2: - nbkpts = 2 - tempbkspace = np.double(range / (float(nbkpts - 1))) - bkpt = (np.linspace(nbkpts)) * tempbkspace + startx - elif nbkpts is not None: - nbkpts = int(nbkpts) - if nbkpts < 2: - nbkpts = 2 - tempbkspace = np.double(range / (float(nbkpts - 1))) - bkpt = (np.linspace(nbkpts)) * tempbkspace + startx - elif everyn is not None: - nbkpts = (nx / everyn) > 1 - if nbkpts == 1: - xspot = [0] - else: - xspot = np.linspace(nbkpts) * (nx / (nbkpts - 1)) - bkpt = x[xspot] - else: - print("No information for bkpts") - - bkpt = float(bkpt) - - if 
np.min(x) < np.min(bkpt): - spot = np.argmin(bkpt) - if not silent: - print("Lowest breakpoint does not cover lowest x value: changing") - bkpt[spot] = min(x) - - if np.max(x) > np.max(bkpt): - spot = np.argmax(bkpt) - if not silent: - print("highest breakpoint does not cover highest x value, changing") - bkpt[spot] = max(x) - - nshortbkpt = len(bkpt) - fullbkpt = bkpt - - if bkspread is None: - bkspread = 1.0 - if nshortbkpt == 1: - bkspace = bkspread - else: - bkspace = (bkpt[1] - bkpt[0]) * bkspread - - for i in range(1, nord - 1): - fullbkpt = [bkpt[0] - bkspace * i, fullbkpt, bkpt[nshortbkpt - 1] + bkspace * i] - - return fullbkpt, bkpt - - -# NOTE: taken from https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/bspline_fit.pro -def bspline_fit( - xdata, ydata, invvar, sset, fullbkpt=None, x2=None, npoly=None, nord=None -): - if nord is None: - nord = 4 - - if not isinstance(sset, dict): - sset = create_bsplineset(fullbkpt, nord, npoly=npoly) - - if "npoly" in sset: - npoly = sset["npoly"] - if npoly is None: - npoly = 1 - - goodbk = np.where(sset["bkmask"][nord:] != 0) - nbkpt = goodbk.size - - nord = sset["nord"] - - if nbkpt < nord: - yfit = np.zeros_like(ydata) - return -2, sset, yfit - - nn = nbkpt - nfull = nn * npoly - bw = npoly * nord # this is the bandwidth - - # The next line is REQUIRED to fill a1 - - a1, lower, upper = bspline_action(xdata, sset, x2=x2) - - a2 = a1 * (np.ones(bw) @ invvar) - - alpha = np.zeros((bw, nfull + bw), dtype=np.double) - beta = np.zeros(nfull + bw, dtype=np.double) - - bi = np.linspace(bw) - bo = np.linspace(bw) - for i in range(1, bw - 1): - bi = [bi, np.linspace(bw - i) + (bw + 1) * i] - for i in range(1, bw - 1): - bo = [bo, np.linspace(bw - i) + bw * i] - - for i in range(0, nn - nord): - itop = i * npoly - ibottom = (itop < nfull + bw) - 1 - - ict = upper[i] - lower[i] + 1 - - if ict > 0: - work = a2[lower[i] : upper[i], :] @ np.transpose(a1[lower[i] : upper[i], :]) - wb = a2[lower[i] : upper[i], :] @ ydata[lower[i] : upper[i]] - - alpha[bo + itop * bw] = alpha[bo + itop * bw] + work[bi] - beta[itop:ibottom] = beta[itop:ibottom] + wb - - # Drop break points where minimal influence is located - - min_influence = 1.0e-10 * np.sum(invvar) / nfull - - # This call to cholesky_band operates on alpha and changes contents - - errb = cholesky_band(alpha, mininf=min_influence) - - if errb[0] != -1: - yfit, _ = bspline_valu(xdata, sset, x2=x2, action=a1, upper=upper, lower=lower) - return bspline_maskpoints(sset, errb, npoly), sset, yfit - - # this changes beta to contain the solution - - errs = cholesky_solve(alpha, beta) - if errs[0] != -1: - yfit, _ = bspline_valu(xdata, sset, x2=x2, action=a1, upper=upper, lower=lower) - return bspline_maskpoints(sset, errs, npoly), sset, yfit - - sc = len(sset["coeff"]) - if sc[0] == 2: - sset["icoeff"][:, goodbk] = np.reshape( - alpha[0, np.linspace(nfull)], (npoly, nn) - ) - sset["coeff"][:, goodbk] = np.reshape(beta[np.linspace(nfull)], (npoly, nn)) - else: - sset.icoeff[goodbk] = alpha[0, np.linspace(nfull)] - sset.coeff[goodbk] = beta[np.linspace(nfull)] - - yfit, _ = bspline_valu(xdata, sset, x2=x2, action=a1, upper=upper, lower=lower) - - return 0, sset, yfit - - -# NOTE: taken from: https://svn.sdss.org/public/repo/sdss/idlutils/tags/v5_5_36/pro/bspline/bspline_iterfit.pro -def bspline_iterfit( - xdata, - ydata, - invvar=None, - nord=None, - x2=None, - npoly=None, - xmin=None, - xmax=None, - bkpt=None, - oldset=None, - maxiter=None, - upper=None, - lower=None, - requiren=None, - 
fullbkpt=None, - funcname=None, - **kwargs, -): - # ---------- - # Check dimensions of inputs - - nx = len(xdata) - if len(ydata) != nx: - print("Dimensions of XDATA and YDATA do not agree") - - if nord is None: - nord = 4 - if upper is None: - upper = 5 - if lower is None: - lower = 5 - - if invvar is not None: - if len(invvar) != nx: - print("Dimensions of XDATA and INVVAR do not agree") - - if x2 is not None: - if len(x2) != nx: - print("Dimensions of X and X2 do not agree") - if npoly is None: - npoly = 2 - - if maxiter is None: - maxiter = 10 - - yfit = np.zeros_like(ydata) # Default return values - - if invvar is None: - var = np.std(ydata) ** 2 - if var == 0: - var = 1 - invvar = np.zeros_like(ydata) + 1.0 / var - - if len(invvar) == 1: - outmask = np.asarray([True]) - else: - outmask = np.ones_like(invvar, dtype=bool) - - xsort = np.argsort(xdata) - maskwork = (outmask * (invvar > 0)).astype(bool)[xsort] - these = np.where(maskwork)[0] - nthese = these.size - - # ---------- - # Determine the break points and create output structure - - if oldset is not None: - sset = oldset - sset["bkmask"] = 1 - sset["coeff"] = 0 - tags = oldset.keys() - if "xmin" in tags and x2 is None: - print("X2 must be set to be consistent with OLDSET") - - else: - if nthese == 0: - print("No valid data points") - fullbkpt = 0 - return None, outmask, fullbkpt, yfit - - if fullbkpt is None: - fullbkpt = bspline_bkpts( - xdata[xsort[these]], nord=nord, bkpt=bkpt, **kwargs - ) - - sset = create_bsplineset(fullbkpt, nord, npoly=npoly) - - if nthese < nord: - print("Number of good data points fewer the NORD") - return sset, outmask, fullbkpt, yfit - - # ---------- - # Condition the X2 dependent variable by the XMIN, XMAX values. - # This will typically put X2NORM in the domain [-1,1]. - - if x2 is not None: - if xmin is None: - xmin = np.min(x2) - if xmax is None: - xmax = np.max(x2) - if xmin == xmax: - xmax = xmin + 1 - sset["xmin"] = xmin - sset["xmax"] = xmax - - if funcname is not None: - sset["funcname"] = funcname - - # ---------- - # It's okay now if the data fall outside breakpoint regions, the - # fit is just set to zero outside. - - # ---------- - # Sort the data so that X is in ascending order. - - xwork = xdata[xsort] - ywork = ydata[xsort] - invwork = invvar[xsort] - if x2 is not None: - x2work = x2[xsort] - - # ---------- - # Iterate spline fit - - iiter = 0 - error = 0 - - qdone = 0 - while ((error != 0) or (qdone == 0)) and iiter < maxiter: - ngood = np.sum(maskwork) - goodbk = np.where(sset["bkmask"] != 0)[0] - ngb = goodbk.size - - if ngood < 1 or goodbk[0] != -1: - sset["coeff"] = 0 - iiter = maxiter + 1 # End iterations - else: - if requiren is not None: - # Locate where there are two break points in a row with no good - # data points in between, and drop (mask) one of those break points. - # The first break point is kept. - i = 0 - while xwork[i] < sset["fullbkpt"][goodbk[nord]] and i < nx - 1: - i = i + 1 - - ct = 0 - for ileft in range(nord, ngb - nord): - while ( - xwork[i] >= sset["fullbkpt"][goodbk[ileft]] - and xwork[i] < sset["fullbkpt"][goodbk[ileft + 1]] - and i < nx - 1 - ): - ct = ct + (invwork[i] * maskwork[i] > 0) - i = i + 1 - - if ct >= requiren: - ct = 0 - else: - sset["bkmask"][goodbk[ileft]] = 0 - - # Do the fit. 
Return values for ERROR are as follows: - # 0 if fit is good - # -1 if all break points are masked - # -2 if everything is screwed - error = bspline_fit( - xwork, - ywork, - invwork * maskwork, - sset, - x2=x2work, - yfit=yfit, - nord=nord, - **kwargs, - ) - - iiter = iiter + 1 - - inmask = maskwork - - if error == -2: - # All break points have been dropped. - return sset, outmask, fullbkpt, yfit - elif error == 0: - # Iterate the fit -- next rejection iteration. - qdone = djs_reject( - ywork, - yfit, - invvar=invwork, - inmask=inmask, - outmask=maskwork, - upper=upper, - lower=lower, - **kwargs, - ) - - # ---------- - # Re-sort the output arrays OUTMASK and YFIT to agree with the input data. - - outmask[xsort] = maskwork - - temp = yfit - yfit[xsort] = temp - - return sset, outmask, fullbkpt, yfit - - # ------------------------------------------------------------------------------------------------- From 0d8eda53b5220e90817342c611380b7cb8cbd4b1 Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 11 Jan 2024 10:33:31 -0300 Subject: [PATCH 17/18] further cleaning & moving low-level functions to fluxcal module --- python/lvmdrp/core/fluxcal.py | 730 ++--------------------- python/lvmdrp/functions/fluxCalMethod.py | 65 +- 2 files changed, 63 insertions(+), 732 deletions(-) diff --git a/python/lvmdrp/core/fluxcal.py b/python/lvmdrp/core/fluxcal.py index 1ba1c1c7..87f64fad 100644 --- a/python/lvmdrp/core/fluxcal.py +++ b/python/lvmdrp/core/fluxcal.py @@ -6,678 +6,72 @@ # @License: BSD 3-Clause # @Copyright: SDSS-V LVM -from collections import namedtuple - -import matplotlib.pyplot as plt - -# FROM THE MANGA DRP CODE ------------------------------------------------------------------------- import numpy as np -from dust_extinction.parameter_averages import F99 -from pydl.pydlspec2d.spec2d import filter_thru -from pydl.pydlutils.sdss import sdss_flagval -from scipy.interpolate import BSpline, interp1d, splrep -from scipy.ndimage import median_filter -from scipy.optimize import minimize, nnls - - -f99_ext = F99(Rv=3.1) - -# ------------------------------------------------------------------------------------------------- - - -def spflux_masklines(loglam, hwidth=None, stellar=True, telluric=True): - """Returns a mask""" - - if hwidth is None: - # TODO: set this to whatever is equivalent to 400 km/s - hwidth = 5.7e-4 # Default is to mask +/- 5.7 pix = 400 km/sec - - # initialize the mask array for wavelengths - mask = np.zeros_like(loglam.size, dtype=bool) - - if stellar: - starwave = [ - # 3692.6 , # H-16 - # 3698.2 , # H-15 - 3704.9, # H-14 - # 3707.1 , # Ca-II - 3713.0, # H-13 - 3723.0, # H-12 - 3735.4, # H-11 - # 3738.0 , # Ca-II - 3751.2, # H-10 - 3771.7, # H-9 - 3799.0, # H-8 - 3836.5, # H-7 - 3890.2, # H-6 - 3934.8, # Ca_k - 3969.6, # Ca_H - 3971.2, # H-5 - 4102.9, # H-delta - 4300.0, # G-band - 4305.0, # G-band - 4310.0, # more G-band - 4341.7, # H-gamma - 4862.7, # H-beta - # 4687.1 , # He II - 5168.8, # Mg I - 5174.1, # Mg I - 5185.0, # Mg I - 5891.6, # Na I - 5897.6, # Na I - 6564.6, # H-alpha - 8500.4, # Ca II - 8544.4, # Ca II - 8664.5, # Ca II - 8752.9, # H I - 8865.3, # H I - # RY: commented out the following three lines as these are in telluric absorption regions. - # If we mask them, we get bad bspline interpolation in these regions. - # They are not useful for stellar fitting anyway as they are in telluric regions. 
- # 9017.8 , # H I - # 9232.2 , # H I Pa-6 - # 9548.8 , # H I Pa-5 - 10052.6, - ] # H I (Pa-delta) - # airtovac, starwave # commented out because wavelengths of features have already been set in vacuum.RY Jul 13, 2015 - - for i in range(len(starwave)): - mask = mask | ( - loglam - > np.log10(starwave[i]) - hwidth & loglam - < np.log10(starwave[i]) + hwidth - ) - - if telluric: - tellwave1 = [6842.0, 7146.0, 7588.0, 8105.0, 8910.0] - tellwave2 = [6980.0, 7390.0, 7730.0, 8440.0, 9880.0] - for i in range(len(tellwave1)): - mask = mask | ( - loglam > np.log10(tellwave1[i]) & loglam < np.log10(tellwave2[i]) - ) - - return mask - - -def spflux_medianfilt(loglam, objflux, objivar, width, **kwargs): - dims = objflux.shape - ndim = len(dims) - npix = dims[0] - if ndim == 1: - nspec = 1 - else: - nspec = dims[1] - - # ---------- - # Loop over each spectrum - - medflux = np.zeros_like(objflux) - if objivar is not None: - newivar = np.zeros_like(objivar) - for ispec in range(nspec): - # For the median-filter, ignore points near stellar absorp. features, - # but keep points near telluric bands. - qgood = np.logical_not( - spflux_masklines( - loglam[:, ispec], stellar=True, telluric=False, hwidth=8.0e-4 - ) - ) - - # Median-filter, but skipping masked points - igood = np.where(qgood)[0] - ngood = igood.size - thisback = np.zeros(npix) - if ngood > 1: - thisback[igood] = median_filter(objflux[igood, ispec], size=width, **kwargs) - thisback = np.interp(loglam, loglam[~qgood], thisback[~qgood]) - - # Force the ends of the background to be the same as the spectrum, - # which will force the ratio of the two to be unity. - hwidth = np.ceil((width - 1) / 2.0) - thisback[0:hwidth] = objflux[0:hwidth, ispec] - thisback[npix - 1 - hwidth: npix - 1] = objflux[ - npix - 1 - hwidth: npix - 1, ispec - ] - czero2 = np.where(thisback == 0)[0] - count2 = czero2.size - if count2 > 0: - thisback[czero2] = 1.0 - medflux[:, ispec] = objflux[:, ispec] / thisback - if objivar is not None: - newivar[:, ispec] = objivar[:, ispec] * thisback**2 - - return medflux, newivar - - -def spflux_bestmodel( - loglam, objflux, objivar, dispimg, plottitle="", template="kurucz" -): - filtsz = 99 # the size of the window used in median-filter the spectra. - cspeed = 2.99792458e5 - - dims = objflux.shape - ndim = len(dims) - npix = dims[0] - if ndim == 1: - nspec = 1 - else: - nspec = dims[1] - - # ---------- - # Median-filter the object fluxes - - medflux, medivar = spflux_medianfilt( - loglam, objflux, objivar, width=filtsz, mode="reflect" - ) - sqivar = np.sqrt(medivar) - - # ---------- - # Mask out the telluric bands - - sqivar = sqivar * np.logical_not( - spflux_masklines(loglam, telluric=True, stellar=False) - ) - - # ---------- - # Load the Kurucz models into memory - - # TODO: define functions spflux_read_x to read stellar spectra models - # CALLING SEQUENCE: - # spflux_read_bosz - # modelflux = spflux_read_bosz( loglam, dispimg, [ iselect=, - # kindx_return= ,dslgpsize=dslgpsize ] ) - # - # INPUTS: - # loglam - Log10 wavelengths (vacuum Angstroms) [NPIX] - # dispimg - Dispersion image, in units of pixels [NPIX] - # - # OPTIONAL INPUTS: - # iselect - If set, then only return these model numbers# default to - # returning all models - # - # OUTPUTS: - # modelflux - Model fluxes [NPIX,NMODEL] - # - # OPTIONAL OUTPUTS: - # kindx_return- Structure with model parameters for each model - # thekfile- Return which reference file was used (does NOT set which TO use!) - # - # NOTE: what is dslgpsize? 
- if template == "kurucz": - _, kindx, dslgpsize = spflux_read_kurucz() # Yanping test - elif template == "munari": - _, kindx, dslgpsize = spflux_read_munari() # Yanping added - elif template == "BOSZ": - _, kindx, dslgpsize = spflux_read_bosz() # Yanping added - else: - print( - "Flux calibration templates has to be specified and be one of the three: 'kurucz','munari', 'BOSZ'." - ) - - nmodel = len(kindx) - - # ---------- - # Fit the redshift just by using a canonical model - - ifud = np.where(kindx.teff == 6000 & kindx.g == 4 & kindx.feh == -1.5)[0] - if ifud.size == 0: - print("Could not find fiducial model!") - nshift = ( - np.ceil(1000.0 / cspeed / np.log(10.0) / dslgpsize / 2) * 2 - ) # set this to cover +/-500 km/s - logshift = (-nshift / 2.0 + np.arange(nshift)) * dslgpsize ##Yanping test - chivec = np.zeros(nshift) - for ishift in range(nshift): - if template == "kurucz": - modflux, kindx, dslgpsize = spflux_read_kurucz( - loglam - logshift[ishift], dispimg, iselect=ifud - ) ##Yanping test - elif template == "munari": - modflux, kindx, dslgpsize = spflux_read_munari( - loglam - logshift[ishift], dispimg, iselect=ifud - ) ##Yanping added - elif template == "BOSZ": - modflux, kindx, dslgpsize = spflux_read_bosz( - loglam - logshift[ishift], dispimg, iselect=ifud - ) ##Yanping added - else: - print( - "Flux calibration templates has to be specified and be one of the three: 'kurucz','munari', 'BOSZ'." - ) - - # Median-filter this model - medmodel, _ = spflux_medianfilt(loglam, modflux, width=filtsz, mode="reflect") - for ispec in range(nspec): - # NOTE: originally used computechi2 - chivec[ishift] = chivec[ishift] + nnls( - medflux[:, ispec] / sqivar[:, ispec], - medmodel[:, ispec] / sqivar[:, ispec], - ) - - zshift = 10**logshift - 1 # Convert log-lambda shift to redshift - # NOTE: originally used find_nminima - result = minimize(interp1d(zshift, chivec), x0=0) - zpeak = result.x - print("Best-fit velocity for std star = ", zpeak * cspeed, " km/s") - if result.status != 0: - print("Warning: Error code ", result.status, " fitting std star") - # Warning messages - if np.isnan(chivec).any(): - if (medivar < 0).any(): - print( - "There are negative ivar values causing chi-square to be NaN or the likes." - ) - else: - print("chi-square are NaN or the likes, but not caused by negative ivar.") - - # ---------- - # Generate the Kurucz models at the specified wavelengths + dispersions, - # using the best-fit redshift - - # modflux = spflux_read_kurucz(loglam-np.log10(1.+zpeak), dispimg) - - if template == "kurucz": - modflux, kindx, dslgpsize = spflux_read_kurucz( - loglam - np.log10(1 + zpeak), dispimg - ) # Yanping test - elif template == "munari": - modflux, kindx, dslgpsize = spflux_read_munari( - loglam - np.log10(1 + zpeak), dispimg - ) # Yanping added - elif template == "BOSZ": - modflux, kindx, dslgpsize = spflux_read_bosz( - loglam - np.log10(1 + zpeak), dispimg - ) # Yanping added - else: - print( - "Flux calibration templates has to be specified and be one of the three: 'kurucz','munari', 'BOSZ'." 
- ) - - # Need to redo median-filter for the data with the correct redshift - medflux, medivar = spflux_medianfilt( - loglam - np.log10(1.0 + zpeak), objflux, objivar, size=filtsz, mode="reflect" - ) - sqivar = np.sqrt(medivar) - # ---------- - # Mask out the telluric bands - sqivar = sqivar * np.logical_not( - spflux_masklines(loglam, telluric=True, stellar=False) - ) - - # ---------- - # Loop through each model, computing the best chi**2 - # as the sum of the best-fit chi**2 to each of the several spectra - # for this same object. Counting only the regions around stellar absorption features. - # We do this after a median-filtering of both the spectra + the models. - - chiarr = np.zeros((nmodel, nspec)) - chivec = np.zeros(nmodel) - medmodelarr = np.zeros_like(modflux) - - mlines = spflux_masklines( - loglam - np.log10(1.0 + zpeak), hwidth=12e-4, stellar=True, telluric=False - ) - linesqivar = sqivar * mlines - linechiarr = np.zeros((nmodel, nspec)) - linechivec = np.zeros(nmodel) +from scipy import signal - for imodel in range(nmodel): - # Median-filter this model - medmodelarr[:, :, imodel] = spflux_medianfilt( - loglam - np.log10(1.0 + zpeak), - modflux[:, :, imodel], - size=filtsz, - mode="reflect", - ) +from astropy.time import Time +from astropy.coordinates import SkyCoord, EarthLocation, AltAz +from astropy import units as u - for ispec in range(nspec): - chiarr[imodel, ispec] = np.sum( - (medflux[:, ispec] - medmodelarr[:, ispec, imodel]) ** 2 - * sqivar[:, ispec] - * sqivar[:, ispec] - ) - linechiarr[imodel, ispec] = np.sum( - (medflux[:, ispec] - medmodelarr[:, ispec, imodel]) ** 2 - * linesqivar[:, ispec] - * linesqivar[:, ispec] - ) - chivec[imodel] = np.sum(chiarr[imodel, :]) - linechivec[imodel] = np.sum(linechiarr[imodel, :]) +from lvmdrp import log - # ---------- - # Return the best-fit model - # Computed both full spectra chi**2 and line chi**2, but use the line chi**2 to select the best model. - ibest = np.argmin(linechivec) - linechi2 = linechivec[ibest] - linedof = np.sum(linesqivar != 0) - print("Best-fit line chi2/DOF = ", linechi2 / (linedof > 1)) - bestflux = modflux[:, :, ibest] - medmodel = spflux_medianfilt( - loglam - np.log10(1.0 + zpeak), bestflux, size=filtsz, mode="reflects" - ) - - # ---------- - # Compute the median S/N for all the spectra of this object, - # and for those data just near the absorp. lines - - indx = np.where(objivar > 0)[0] - ct = indx.size - if ct > 1: - sn_median = np.median(objflux[indx] * np.sqrt(objivar[indx])) - else: - sn_median = 0 - - indx = np.where(mlines)[0] - ct = indx.size - if ct > 1: - linesn_median = np.median(objflux[indx] * np.sqrt(objivar[indx])) - else: - linesn_median = 0.0 - print("Full median S/N = ", sn_median) - print("Line median S/N = ", linesn_median) - - Parameters = namedtuple( - "Parameters", - list(kindx.__dict__.keys()) - + ["IMODEL", "Z", "SN_MEDIAN", "LINESN_MEDIAN", "LINECHI2", "LINEDOF"], - ) - kindx1 = Parameters( - list(kindx.__dict__.values()) - + [ibest, zpeak, float(sn_median), linesn_median, linechi2, linedof] - ) - - # ---------- - # Plot the filtered object spectrum, overplotting the best-fit Kurucz/Munari model ##Yanping edited - - # TODO: implement LVM 3 channels - # Select the observation to plot that has the highest S/N, - # and one that goes blueward of 4000 Ang. 
- snvec = np.sum(objflux * np.sqrt(objivar), 1) * ( - 10 ** loglam[0, :] < 4000 | 10.0 ** loglam[npix - 1, :] < 4000 - ) - iplot = np.argmax(snvec, axis=1) # Best blue exposure - - snvec = np.sum(objflux * np.sqrt(objivar), axis=1) * ( - 10.0 ** loglam[0, :] > 8600 | 10.0 ** loglam[npix - 1, :] > 8600 - ) - jplot = np.argmax(snvec, axis=1) # Best red exposure - - csize = 0.85 - _, ax = plt.subplots() - ax.set_xlim(3840.0, 4120.0) - ax.set_ylim(0.0, 1.4) - ax.set_ylabel("Wavelength [Ang]") - ax.set_ylabel("Normalized Flux") - ax.set_title(plottitle) - ax.plot(10 ** loglam[:, iplot], medflux[:, iplot]) - ax.plot(10 ** loglam[:, iplot], medmodel[:, iplot], color="red") - - ax.text(3860, 1.25, kindx1.model, fontsize=csize, transform=ax.transAxes) - ax.text( - 4000, - 0.2, - f"Lines \chi^2/DOF={linechi2/(linedof>1):.2f}", - fontsize=csize, - transform=ax.transAxes, - ) - ax.text( - 3860, - 0.1, - f"Fe/H={kindx1.feh:.1f}, T_{{eff}}={kindx1.teff:.0f}, g={kindx1.g:.1f}, cz={zpeak*cspeed:.0f}", - fontsize=csize, - transform=ax.transAxes, - ) - - _, ax = plt.subplots() - ax.set_xlim(8440.0, 9160.0) - ax.set_ylim(0.0, 1.4) - ax.set_ylabel("Wavelength [Ang]") - ax.set_ylabel("Normalized Flux") - ax.set_title(plottitle) - ax.plot(10 ** loglam[:, jplot], medflux[:, jplot]) - ax.plot(10 ** loglam[:, jplot], medmodel[:, jplot], color="red") - - return bestflux, kindx1 - - -def spflux_goodfiber(pixmask): - qgood = ( - "NOPLUG" not in pixmask - and "BADTRACE" not in pixmask - and "BADFLAT" not in pixmask - and "BADARC" not in pixmask - and "MANYBADCOLUMNS" not in pixmask - and "NEARWHOPPER" not in pixmask - and "MANYREJECTED" not in pixmask - ) - return qgood - - -def spflux_bspline( - loglam, - mratio, - mrativar, - inmask=None, - return_outmask=True, - everyn=10, - disp=None, - hwidth=None, - mask_stellar=True, - mask_telluric=True, -): - # TODO: figure out which default for everyn is best - isort = np.argsort(loglam) - nord = 3 - if hwidth is None: - hwidth = 12.0e-4 - - if inmask is None: - # Choose the break points using the EVERYN option, but masking - # out more pixels near stellar features and/or telluric just when selecting them. - mask1 = np.logical_not( - spflux_masklines( - loglam, hwidth=hwidth, stellar=mask_stellar, telluric=mask_telluric - ) - ) - else: - mask1 = np.logical_not(inmask) - - ii = np.where((mrativar[isort] > 0) & mask1[isort]) - # BUG: this is actually done by bspline_bkpts - fullbkpt = splrep( - loglam[isort[ii]], - mratio[isort[ii]], - w=np.sqrt(mrativar[isort[ii]]), - k=nord, - t=loglam[isort[ii]][np.arange(everyn, dtype=int)], - quiet=1, - ) - - outmask1 = 0 - # if disp is not None: - # x2 = disp[isort] - # else: - # pass - - # BUG: this is actually done by bspline_iterfit - sset = BSpline(*fullbkpt) - - if np.max(sset.c) == 0: - print("B-spline fit failed!!") - - if return_outmask: - outmask = np.zeros_like(loglam) - outmask[isort] = outmask1 - return sset, outmask - - return sset - - -def typingmodule( - objflux, - loglam, - objivar, - dispimg, - sfd_ebv, - psfmag, - plottitle="", - template="kurucz", - thekfile="", - targetflag=None, -): - npix, nspec = objflux.shape - # ---------- - # For each star, find the best-fit model. 
-
-    unreddenfactor = 1 / f99_ext.extinguish(10.0**loglam, Ebv=1.0)
-
-    # !p.multi = [0,1,2]
-    modflux = np.zeros_like(objflux)
-    # Find the best-fit model -- evaluated for each exposure [NPIX,NEXP]
-    thismodel, kindx = spflux_bestmodel(
-        loglam,
-        objflux * unreddenfactor**sfd_ebv,
-        objivar / unreddenfactor ** (2 * sfd_ebv),
-        plottitle=plottitle,
-        template=template,
+def retrieve_header_stars(rss):
+    """
+    Retrieve fiber, Gaia ID, exposure time and airmass for the 12 standard stars in the header.
+    Return a list of tuples of the above quantities.
+    """
+    lco = EarthLocation(
+        lat=-29.008999964 * u.deg, lon=-70.688663912 * u.deg, height=2800 * u.m
     )
+    h = rss._header
+    slitmap = rss._slitmap[rss._slitmap["spectrographid"] == int(h["SPEC"][-1])]
+    # retrieve the data for the 12 standards from the header
+    stddata = []
+    for i in range(12):
+        stdi = "STD" + str(i + 1)
+        if h[stdi + "ACQ"] and h[stdi + "FIB"] in slitmap["orig_ifulabel"]:
+            gaia_id = h[stdi + "ID"]
+            if gaia_id is None:
+                log.warning(f"{stdi} acquired but Gaia ID is {gaia_id}")
+                continue
+            fiber = h[stdi + "FIB"]
+            obstime = Time(h[stdi + "T0"])
+            exptime = h[stdi + "EXP"]
+            c = SkyCoord(float(h[stdi + "RA"]), float(h[stdi + "DE"]), unit="deg")
+            stdT = c.transform_to(AltAz(obstime=obstime, location=lco))
+            secz = stdT.secz.value
+            # print(gid, fib, et, secz)
+            stddata.append((i + 1, fiber, gaia_id, exptime, secz))
+    return stddata
+
+
+def mean_absolute_deviation(vals):
+    """
+    Robust estimate of RMS
+    - see https://en.wikipedia.org/wiki/Median_absolute_deviation
+    """
+    mval = np.nanmedian(vals)
+    rms = 1.4826 * np.nanmedian(np.abs(vals - mval))
+    return mval, rms
+    # ok=np.abs(vals-mval)<4*rms
+
+
+def butter_lowpass_filter(data, cutoff_freq, nyq_freq, order=4):
+    normal_cutoff = float(cutoff_freq) / nyq_freq
+    b, a = signal.butter(order, normal_cutoff, btype="lowpass")
+    y = signal.filtfilt(b, a, data)
+    return y
+
+
+def filter_channel(w, f, k=3):
+    c = np.where(np.isfinite(f))
+    s = butter_lowpass_filter(f[c], 0.01, 2)
+    res = s - f[c]
+    # plt.plot(w[c], f[c], 'k.')
+    # plt.plot(w[c], s, 'b-')
+    mres, rms = mean_absolute_deviation(res)
+    good = np.where(np.abs(res - mres) < k * rms)
+    # plt.plot(w[c][good], f[c][good], 'r.', markersize=5)
+    return w[c][good], f[c][good]
 
-    # Also evaluate this model over a big wavelength range [3000,11000] Ang.
- # BUG: verify these hard-coded values - tmploglam = 3.4771e0 + np.linspace(5644) * 1e-4 - tmpdispimg = np.ones_like(tmploglam) # initializing this resolution vector - bluedispimg = tmpdispimg - reddispimg = tmpdispimg - bside = np.where(tmploglam < np.log10(6300.0)) - rside = np.where(tmploglam > np.log10(5900.0)) - middle = np.where((tmploglam < np.log10(6300.0)) & (tmploglam > np.log10(5900.0))) - bluedispimg[bside] = np.interp(tmploglam[bside], dispimg[:, 0], loglam[:, 0]) - reddispimg[rside] = np.interp(tmploglam[rside], dispimg[:, 1], loglam[:, 1]) - tmpdispimg[bside] = bluedispimg[bside] - tmpdispimg[rside] = reddispimg[rside] - tmpdispimg[middle] = (bluedispimg[middle] + reddispimg[middle]) / 2.0 - - # tmpflux = spflux_read_kurucz(tmploglam-np.log10(1+kindx.z), tmpdispimg, - # iselect=kindx.imodel) - - if template == "kurucz": - tmpflux = spflux_read_kurucz( - tmploglam - np.log10(1 + kindx.z), - tmpdispimg, - iselect=kindx.imodel, - thekfile=thekfile, - ) - elif template == "munari": - tmpflux = spflux_read_munari( - tmploglam - np.log10(1 + kindx.z), - tmpdispimg, - iselect=kindx.imodel, - thekfile=thekfile, - ) - elif template == "BOSZ": - tmpflux = spflux_read_bosz( - tmploglam - np.log10(1 + kindx.z), - tmpdispimg, - iselect=kindx.imodel, - thekfile=thekfile, - ) - else: - print("Template is not specified correctly.") - - # The returned models are redshifted, but not fluxed or - # reddened. Do that now... we compare data vs. model reddened. - # extcurve1 = ext_odonnell(10.**loglam, 3.1) - # extinct,10.**loglam,extcurve1,/ccm,Rv=3.1 # extcurve1 is A_lambda for Av=1 - # thismodel = thismodel * 10.**(-extcurve1 * 3.1 * sfd_ebv / 2.5) - reddenfactor = f99_ext.extinguish(10**loglam, Ebv=1.0) - # fluxreddened contain the reddening vector for E(B-V)=1.0 - thismodel = thismodel * reddenfactor**sfd_ebv - # extcurve2 = ext_odonnell(10.**tmploglam, 3.1) - # extinct,10.**tmploglam,extcurve2,/ccm,Rv=3.1 - # tmpflux = tmpflux * 10.**(-extcurve2 * 3.1 * sfd_ebv / 2.5) - reddenfactor2 = f99_ext.extinguish(10**tmploglam, Ebv=1.0) - tmpflux = tmpflux * reddenfactor2**sfd_ebv - - # Now integrate the apparent magnitude for this spectrum, - # The units of FTHRU are such that m = -2.5*np.log10(FTHRU) + (48.6-2.5*17) - # Note that these computed magnitudes, THISMAG, should be equivalent - # to THISINDX.MAG in the case of no reddening. - wavevec = 10e0**tmploglam - flambda2fnu = wavevec**2 / 2.99792e18 - - photometry = "sdss" # Both APASS and SDSS are using sdss photometry system - if ( - targetflag - and sdss_flagval("MANGA_TARGET2", ["STELLIB_PS1", "STD_PS1_COM"]) != 0 - ): - photometry = "ps1" - if targetflag and sdss_flagval("MANGA_TARGET2", "STELLIB_GAIA") != 0: - photometry = "gaiadr1" - if targetflag and sdss_flagval("MANGA_TARGET2", "STELLIB_GAIADR2") != 0: - photometry = "gaiadr2" - - if photometry == "ps1": - fthru = filter_thru( - tmpflux * flambda2fnu, - waveimg=wavevec, - toair=True, - filternames=["ps1_g.txt", "ps1_r.txt", "ps1_i.txt", "ps1_z.txt"], - ) - thismag = -2.5 * np.log10(fthru) - (48.6 - 2.5 * 17) - thismag = np.asarray([[-999.0], [thismag]]) - elif photometry == "gaiadr1": - # Using the passbands from Gaia DR2 to compute the mag, which is not quite appropriate for DR1. 
Better to avoid using gaiadr1 - fthru = filter_thru( - tmpflux * flambda2fnu, - waveimg=wavevec, - toair=True, - filternames=["gaia_G.dat"], - ) - thismag = -2.5 * np.log10(fthru) - (48.6 - 2.5 * 17) - elif photometry == "gaiadr2": - fthru = filter_thru( - tmpflux * flambda2fnu, - waveimg=wavevec, - toair=True, - filternames=["gaia_G.dat", "gaia_BP.dat", "gaia_RP.dat"], - ) - thismag = -2.5 * np.log10(fthru) - (48.6 - 2.5 * 17) - # Convert from AB system using revised passband to Vega system in the Gaia DR2 as-released system. The zeropoints are from Evans et al. (2018) - thismag = thismag + np.asarray( - [[25.6884 - 25.7916], [25.3514 - 25.3862], [24.7619 - 25.1162]] - ) - thismag = np.asarray([[-999.0], [thismag], [-999.0]]) - elif photometry == "sdss": # this applies to both SDSS and APASS standards. - fthru = filter_thru(tmpflux * flambda2fnu, waveimg=wavevec, toair=True) - thismag = -2.5 * np.log10(fthru) - (48.6 - 2.5 * 17) - else: - pass - # !!!!!! IMPORTANT !!!!!!!!! - # !!!!!! THIS IS NOT THE ONLY PLACE WHERE THE MODEL SPECTRUM IS NORMALIZED.!!!! - # !!!!!! For MaStar plates, we renormalize them again after putting in - # individual extinction.!!!!! - - # Compute SCALEFAC = (plugmap flux) / (flux of the model spectrum) - if photometry == "gaiadr1": - scalefac = 10.0 ** ((thismag - psfmag[1]) / 2.5) - kindx.mag[1] = psfmag[1] - elif photometry == "gaiadr2": - scalefac = 10.0 ** ((thismag[1] - psfmag[1]) / 2.5) - kindx.mag[1:3] = (thismag[1:3]).flatten() + psfmag[1] - thismag[1] - elif photometry == "ps1": - scalefac = 10.0 ** ((thismag[2] - psfmag[2]) / 2.5) - kindx.mag[1:4] = (thismag[1:4]).flatten() + psfmag[2] - thismag[2] - elif photometry == "sdss": - scalefac = 10.0 ** ((thismag[2] - psfmag[2]) / 2.5) - kindx.mag = (thismag).flatten() + psfmag[2] - thismag[2] - - thismodel = thismodel * scalefac - - modflux = thismodel - splog, prelog = "" - # !p.multi = 0 - return kindx, modflux - - -# ------------------------------------------------------------------------------------------------- diff --git a/python/lvmdrp/functions/fluxCalMethod.py b/python/lvmdrp/functions/fluxCalMethod.py index 8e8588dc..4ee7b049 100644 --- a/python/lvmdrp/functions/fluxCalMethod.py +++ b/python/lvmdrp/functions/fluxCalMethod.py @@ -43,16 +43,13 @@ import numpy as np from scipy import interpolate from scipy import stats -from scipy import signal -from astropy.time import Time -from astropy.coordinates import SkyCoord, EarthLocation, AltAz from astropy.stats import biweight_location, biweight_scale -from astropy import units as u from astropy.table import Table from lvmdrp.core.rss import RSS, loadRSS from lvmdrp.core.spectrum1d import Spectrum1D +from lvmdrp.core.fluxcal import retrieve_header_stars, filter_channel from lvmdrp.external import ancillary_func from lvmdrp.functions import skyMethod from lvmdrp import log @@ -362,66 +359,6 @@ def fluxcal_Gaia(camera, in_rss, plot=True, GAIA_CACHE_DIR=None): return res, mean, rms, rss -def retrieve_header_stars(rss): - """ - Retrieve fiber, Gaia ID, exposure time and airmass for the 12 standard stars in the header. - return a list of tuples of the above quatities. 
- """ - lco = EarthLocation( - lat=-29.008999964 * u.deg, lon=-70.688663912 * u.deg, height=2800 * u.m - ) - h = rss._header - slitmap = rss._slitmap[rss._slitmap["spectrographid"] == int(h["SPEC"][-1])] - # retrieve the data for the 12 standards from the header - stddata = [] - for i in range(12): - stdi = "STD" + str(i + 1) - if h[stdi + "ACQ"] and h[stdi + "FIB"] in slitmap["orig_ifulabel"]: - gaia_id = h[stdi + "ID"] - if gaia_id is None: - log.warning(f"{stdi} acquired but Gaia ID is {gaia_id}") - continue - fiber = h[stdi + "FIB"] - obstime = Time(h[stdi + "T0"]) - exptime = h[stdi + "EXP"] - c = SkyCoord(float(h[stdi + "RA"]), float(h[stdi + "DE"]), unit="deg") - stdT = c.transform_to(AltAz(obstime=obstime, location=lco)) - secz = stdT.secz.value - # print(gid, fib, et, secz) - stddata.append((i + 1, fiber, gaia_id, exptime, secz)) - return stddata - - -def mean_absolute_deviation(vals): - """ - Robust estimate of RMS - - see https://en.wikipedia.org/wiki/Median_absolute_deviation - """ - mval = np.nanmedian(vals) - rms = 1.4826 * np.nanmedian(np.abs(vals - mval)) - return mval, rms - # ok=np.abs(vals-mval)<4*rms - - -def butter_lowpass_filter(data, cutoff_freq, nyq_freq, order=4): - normal_cutoff = float(cutoff_freq) / nyq_freq - b, a = signal.butter(order, normal_cutoff, btype="lowpass") - y = signal.filtfilt(b, a, data) - return y - - -def filter_channel(w, f, k=3): - c = np.where(np.isfinite(f)) - s = butter_lowpass_filter(f[c], 0.01, 2) - res = s - f[c] - # plt.plot(w[c], f[c], 'k.') - # plt.plot(w[c], s, 'b-') - mres, rms = mean_absolute_deviation(res) - good = np.where(np.abs(res - mres) < k * rms) - # plt.plot(w[c][good], f[c][good], 'r.', markersize=5) - return w[c][good], f[c][good] - - def createSensFunction_drp( in_rss, out_throughput, From dabb67bbe11ab71ddc2424f0523baeebb4ab8cde Mon Sep 17 00:00:00 2001 From: Alfredo Mejia-Narvaez Date: Thu, 11 Jan 2024 10:42:10 -0300 Subject: [PATCH 18/18] fixing typo & removing reference to deprecated module --- python/lvmdrp/functions/__init__.py | 3 +-- python/lvmdrp/functions/cubeMethod.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/python/lvmdrp/functions/__init__.py b/python/lvmdrp/functions/__init__.py index a221b2b4..844c3d5e 100644 --- a/python/lvmdrp/functions/__init__.py +++ b/python/lvmdrp/functions/__init__.py @@ -7,6 +7,5 @@ imageMethod, plotMethod, rssMethod, - specialMethod, - vimosMethod, + specialMethod ) diff --git a/python/lvmdrp/functions/cubeMethod.py b/python/lvmdrp/functions/cubeMethod.py index d5b15326..1359e9ac 100644 --- a/python/lvmdrp/functions/cubeMethod.py +++ b/python/lvmdrp/functions/cubeMethod.py @@ -11,7 +11,7 @@ from copy import deepcopy from scipy import stats -from lvdrp.core import fit_profile +from lvmdrp.core import fit_profile from lvmdrp.core.cube import Cube, loadCube from lvmdrp.core.image import Image from lvmdrp.core.rss import RSS