diff --git a/bin/pypeit_c_enabled b/bin/pypeit_c_enabled
index 8a21ba246b..746e82cdc1 100755
--- a/bin/pypeit_c_enabled
+++ b/bin/pypeit_c_enabled
@@ -13,11 +13,25 @@ else:
     print('Successfully imported bspline C utilities.')
 
 try:
-    from pypeit.bspline.setup_package import extra_compile_args
+
+    # Check whether OpenMP support is enabled by seeing if the bspline
+    # extension was compiled with it.
+    #
+    # The extension_helpers code that is run to figure out OMP support runs
+    # multiple tests to determine compiler version, some of which output to stderr.
+    # To make the output pretty we redirect those to /dev/null (or equivalent)
+    import os
+    import sys
+    devnull_fd = os.open(os.devnull, os.O_WRONLY)
+    os.dup2(devnull_fd, sys.stderr.fileno())
+
+    from pypeit.bspline.setup_package import get_extensions
+    bspline_extension = get_extensions()[0]
 except:
     print("Can't check status of OpenMP support")
 else:
-    if '-fopenmp' in extra_compile_args:
+    # Windows uses -openmp, other environments use -fopenmp
+    if any('openmp' in arg for arg in bspline_extension.extra_compile_args):
         print('OpenMP compiler support detected.')
     else:
         print('OpenMP compiler support not detected. Bspline utilities single-threaded.')
diff --git a/deprecated/datacube.py b/deprecated/datacube.py
index abeb9a1e4c..83e339e87b 100644
--- a/deprecated/datacube.py
+++ b/deprecated/datacube.py
@@ -443,3 +443,71 @@ def make_whitelight_frompixels(all_ra, all_dec, all_wave, all_sci, all_wghts, al
             whitelight_ivar[:, :, ff] = ivar_img.copy()
     return whitelight_Imgs, whitelight_ivar, whitelightWCS
 
+
+def make_sensfunc(ss_file, senspar, blaze_wave=None, blaze_spline=None, grating_corr=False):
+    """
+    Generate the sensitivity function from a standard star DataCube.
+
+    Args:
+        ss_file (:obj:`str`):
+            The relative path and filename of the standard star datacube. It
+            should be in fits format and, for full functionality, should
+            ideally be of the form :class:`~pypeit.coadd3d.DataCube`.
+        senspar (:class:`~pypeit.par.pypeitpar.SensFuncPar`):
+            The parameters required for the sensitivity function computation.
+        blaze_wave (`numpy.ndarray`_, optional):
+            Wavelength array used to construct blaze_spline.
+        blaze_spline (`scipy.interpolate.interp1d`_, optional):
+            Spline representation of the reference blaze function (based on the illumflat).
+        grating_corr (:obj:`bool`, optional):
+            If a grating correction should be performed, set this variable to True.
+
+    Returns:
+        `scipy.interpolate.interp1d`_: Interpolation function that evaluates the
+        sensitivity as a function of wavelength.
+    """
+    # TODO :: This routine has not been updated to the new spec1d plan of passing in a sensfunc object
+    #      :: Probably, this routine should be removed and the functionality moved to the sensfunc object
+    msgs.error("coding error - make_sensfunc is not currently supported. Please contact the developers")
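+    # NOTE :: msgs.error() terminates execution, so the code below is
+    #      :: unreachable and is retained for reference only.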
Please contact the developers") + # Check if the standard star datacube exists + if not os.path.exists(ss_file): + msgs.error("Standard cube does not exist:" + msgs.newline() + ss_file) + msgs.info(f"Loading standard star cube: {ss_file:s}") + # Load the standard star cube and retrieve its RA + DEC + stdcube = fits.open(ss_file) + star_ra, star_dec = stdcube[1].header['CRVAL1'], stdcube[1].header['CRVAL2'] + + # Extract a spectrum of the standard star + wave, Nlam_star, Nlam_ivar_star, gpm_star = extract_standard_spec(stdcube) + + # Extract the information about the blaze + if grating_corr: + blaze_wave_curr, blaze_spec_curr = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data + blaze_spline_curr = interp1d(blaze_wave_curr, blaze_spec_curr, + kind='linear', bounds_error=False, fill_value="extrapolate") + # Perform a grating correction + grat_corr = correct_grating_shift(wave, blaze_wave_curr, blaze_spline_curr, blaze_wave, blaze_spline) + # Apply the grating correction to the standard star spectrum + Nlam_star /= grat_corr + Nlam_ivar_star *= grat_corr ** 2 + + # Read in some information above the standard star + std_dict = flux_calib.get_standard_spectrum(star_type=senspar['star_type'], + star_mag=senspar['star_mag'], + ra=star_ra, dec=star_dec) + # Calculate the sensitivity curve + # TODO :: This needs to be addressed... unify flux calibration into the main PypeIt routines. + msgs.warn("Datacubes are currently flux-calibrated using the UVIS algorithm... this will be deprecated soon") + zeropoint_data, zeropoint_data_gpm, zeropoint_fit, zeropoint_fit_gpm = \ + flux_calib.fit_zeropoint(wave, Nlam_star, Nlam_ivar_star, gpm_star, std_dict, + mask_hydrogen_lines=senspar['mask_hydrogen_lines'], + mask_helium_lines=senspar['mask_helium_lines'], + hydrogen_mask_wid=senspar['hydrogen_mask_wid'], + nresln=senspar['UVIS']['nresln'], + resolution=senspar['UVIS']['resolution'], + trans_thresh=senspar['UVIS']['trans_thresh'], + polyorder=senspar['polyorder'], + polycorrect=senspar['UVIS']['polycorrect'], + polyfunc=senspar['UVIS']['polyfunc']) + wgd = np.where(zeropoint_fit_gpm) + sens = np.power(10.0, -0.4 * (zeropoint_fit[wgd] - flux_calib.ZP_UNIT_CONST)) / np.square(wave[wgd]) + return interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate") diff --git a/doc/calibrations/calibrations.rst b/doc/calibrations/calibrations.rst index 94c5285fd8..33b40837ad 100644 --- a/doc/calibrations/calibrations.rst +++ b/doc/calibrations/calibrations.rst @@ -127,6 +127,7 @@ The primary calibration procedures are, in the order they're performed: flexure wave_calib Slit Alignment (IFU only) + non-linear correction flat_fielding scattlight diff --git a/doc/calibrations/nonlinear.rst b/doc/calibrations/nonlinear.rst new file mode 100644 index 0000000000..31e9a56776 --- /dev/null +++ b/doc/calibrations/nonlinear.rst @@ -0,0 +1,93 @@ + +===================== +Non-Linear correction +===================== + +The non-linear correction is a simple way to correct the non-linear behavior of the +sensor. Imagine a perfect light source that emits photons with a constant count rate. +An ideal detector would be able to measure this count rate independently of the exposure +time. However, in practice, detectors have a non-linear response to the incoming photons. +This means that the count rate measured by the detector is not proportional to the count +rate of the light source. The non-linear correction is a simple way to correct this +non-linear behavior. 
Most modern CCD cameras have a linear response to about 1% over +most of the dynamic range, so this correction is not necessary for most applications. +The correction parameters can be measured by the user, but care should be taken with +how you collect the calibration data, since the internal continuum light source used +for most flat-field calibrations is not a perfect light source. + +At present, the non-linear correction is only implemented for KCWI, which is already very +close to linear over most of the dynamic range of the CCD. If you have collected non-linear +calibration data with another instrument, please contact the developers to see if we can +implement this correction for your instrument. + +Calibrations required +--------------------- + +The non-linear correction requires a set of dedicated calibration data that can be used to measure +the non-linear response of the detector. This calibration data should be collected with +a continuum light source (usually internal to the instrument). We currently recommend that +you turn on the internal lamp, and leave it for 30 minutes to warm up and settle. The lamp +count rate will likely decay over time, so we recommend that you collect a series of exposures +that are bracketed by `reference` exposures of constant exposure time (also known as the +`bracketed repeat exposure` technique). The reference exposures are used to monitor the +temporal decay of the lamp count rate. Here is a screenshot to show the lamp decay as a +function of time. The points are the `reference exposures` and are colour-coded by the +wavelength of the light, and the solid lines are the best-fit exponential + 2D polynomial +decay curves. The bottom panel shows a residual of the fit. Thus, both the relative shape +and the intensity of the lamp decay can be monitored as a function of time. + +.. image:: ../figures/nonlinear_lamp_decay.png + +Between a set of reference exposures, you should +acquire a series of exposures with increasing exposure time. The exposure time should be +increased by a factor of 2 between each exposure. The exposure time should be chosen so that +the count rate is not too high, but also not too low. The counts should extend to at least 10% +of the maximum counts of the detector. The `reference` exposures should have an exposure time +that is close to the middle of the range of exposure times used for the other exposures. It is +often good practice to collect an even number of exposures at each exposure time; some shutters +move in opposite directions for adjacent exposures, and the true exposure time may depend on +the direction that the shutter is moving. + +To determine the non-linearity, we assume that the correction is quadratic, such that the +quadratic term is a small perturbation from linear response. The functional form of the +true count rate is then: + +.. math:: + + C_T \cdot t = C_M \cdot t (1 + b \cdot C_M \cdot t) + +where :math:`C_T` is the true count rate, :math:`C_M` is the measured count rate, :math:`t` +is the exposure time, and :math:`b` is the non-linearity coefficient. +The non-linearity coefficient can be determined by fitting +the measured count rate as a function of the exposure time. That that as the true count rate +tends to zero, the measured count rate tends to zero. The non-linearity coefficient can be +measured for each pixel on the detector, and it is a good idea to consider the non-linearity +coefficient for different amplifiers separately. The above equation can be written as a matrix +equation: + +.. 
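+
+Below is a minimal sketch (not the PypeIt implementation; the variable names are
+illustrative only) of how :math:`C_T` and :math:`b` could be estimated for a single
+pixel by solving the above system in the least-squares sense:
+
+.. code-block:: python
+
+    import numpy as np
+
+    def fit_nonlinearity(counts, exptime):
+        """Estimate the true count rate C_T and the non-linearity coefficient b
+        for one pixel, given the measured counts (C_M * t) of a series of
+        exposures and the corresponding exposure times (t)."""
+        counts = np.asarray(counts, dtype=float)
+        exptime = np.asarray(exptime, dtype=float)
+        # The columns of M are (C_M * t) and (C_M * t)^2; the right-hand side is t
+        M = np.column_stack((counts, counts**2))
+        x, *_ = np.linalg.lstsq(M, exptime, rcond=None)
+        # x = [1/C_T, b/C_T], so C_T = 1/x[0] and b = x[1]/x[0]
+        return 1.0 / x[0], x[1] / x[0]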
diff --git a/doc/calibrations/slit_tracing.rst b/doc/calibrations/slit_tracing.rst
index 84bbdbd574..af2b1ba969 100644
--- a/doc/calibrations/slit_tracing.rst
+++ b/doc/calibrations/slit_tracing.rst
@@ -369,7 +369,7 @@ case for low-dispersion data, e.g. LRISb 300 grism spectra
 .. code-block:: ini
 
     [calibrations]
-        [[slits]]
+        [[slitedges]]
             smash_range = 0.5,1.
 
 Algorithm
diff --git a/doc/coadd3d.rst b/doc/coadd3d.rst
index 03640ca5e5..5651dd0ee2 100644
--- a/doc/coadd3d.rst
+++ b/doc/coadd3d.rst
@@ -57,6 +57,7 @@ saved as ``BB1245p4238.coadd3d``:
     [reduce]
         [[cube]]
             combine = True
+            align = True
             output_filename = BB1245p4238_datacube.fits
             save_whitelight = True
 
@@ -74,18 +75,39 @@
 If you want to combine all exposures into a single datacube, you need to set ``combine = True``,
 as in the above example, and provide an ``output_filename``. This is very useful if you want to
 combine several standard star exposures into a single datacube for flux calibration, for example.
 
-The spec2d block provides a list of :doc:`out_spec2D` files. You can also specify an optional scale correction
-as part of the spec2d block. This relative scale correction ensures that the relative spectral sensitivity of the
-datacube is constant across the field of view. The spec2d file used for the ``scale_corr`` column should either be a
-twilight or dome flat reduced as a ``science`` frame (see :doc:`spectrographs/keck_kcwi` for a description of what you need to do).
-In order to use this functionality, you should not reduce your science data with a spectral illumination correction.
-In other words, in your :doc:`pypeit_file` file, set the following when you execute :ref:`run-pypeit`:
-
-.. code-block:: ini
-
-    [scienceframe]
-        [[process]]
-            use_specillum = False
+The spec2d block provides a list of :doc:`out_spec2D` files. You can also specify several optional
+corrections as part of the spec2d block, including:
+
+* ``scale_corr``: A relative scale correction file, used to ensure that the relative
+  spectral sensitivity of the datacube is constant across the field of view.
+  The spec2d file used for the ``scale_corr`` column should either be a twilight or dome flat
+  reduced as a ``science`` frame (see :doc:`spectrographs/keck_kcwi` for a description of what
+  you need to do). In order to use this functionality, you should not reduce your science data
+  with a spectral illumination correction. In other words, in your :doc:`pypeit_file` file, set
+  the following when you execute :ref:`run-pypeit`:
+
+  .. code-block:: ini
+
+      [scienceframe]
+          [[process]]
+              use_specillum = False
+
+* ``grating_corr``: A grating correction file that is used to correct the grating relative sensitivity
+  of individual spec2d files. It is unlikely that you will require this correction. This is only required
+  if you are combining spec2d files that have very slightly different wavelength solutions *and* you only
+  have a sensitivity function for one of these setups. Otherwise, if you have a sensitivity function for
+  each setup, you should use the ``sensfile`` option to specify the sensitivity function for each wavelength
+  setup. For further details, see :ref:`coadd3d_gratcorr`.
+* ``skysub_frame``: A sky subtraction frame that is used to remove the sky background of the datacube.
+  For further details, see :ref:`coadd3d_skysub`.
+* ``ra_offset``: A right ascension offset that is used to correct the pointing of the datacube.
+  For further details, see :ref:`coadd3d_offsets`.
+* ``dec_offset``: A declination offset that is used to correct the pointing of the datacube.
+  For further details, see :ref:`coadd3d_offsets`.
+* ``sensfile``: A sensitivity function file that is used to correct the absolute sensitivity of the datacube.
+  The required input file is the sensitivity function, which is generated with the ``pypeit_sensfunc`` script.
+  For further details, see :ref:`coadd3d_fluxing`.
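+
+For example, a ``spec2d`` block that uses some of these optional columns might look like the
+following (the filenames and offset values here are placeholders that you would replace with
+your own):
+
+.. code-block:: ini
+
+    # Read in the data
+    spec2d read
+    filename | sensfile | ra_offset | dec_offset
+    Science/spec2d_frame_01.fits | sens_standard.fits | 0.0 | 0.0
+    Science/spec2d_frame_02.fits | sens_standard.fits | 1.5 | -0.5
+    spec2d end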
 
 run
 ---
 
@@ -96,10 +118,58 @@
 Then run the script:
 
 .. code-block:: console
 
     pypeit_coadd_datacube BB1245p4238.coadd3d -o
 
+There are several recommended steps of the coadd3d process that can be run separately. These are:
+
+#. Step 1 - Create a datacube of your standard star exposures. It is worthwhile noting that the
+   standard star exposures should be reduced with the same setup as the science exposures. The
+   datacube is then used to flux calibrate the science exposures.
+   The datacube is created by running the following command:
+
+   .. code-block:: console
+
+       pypeit_coadd_datacube StandardStarName.coadd3d -o
+
+#. Step 2 - Extract the 1D spectra from the datacube. This is done by running the following command,
+   assuming that the output datacube from the previous step was called ``StandardStarName.fits``.
+   The ``pypeit_extract_datacube`` script will produce an output file called
+   ``spec1d_StandardStarName.fits``:
+
+   .. code-block:: console
+
+       pypeit_extract_datacube StandardStarName.fits -o
+
+   This script is only designed for point sources. Both a boxcar and an optimal extraction are calculated.
+   The boxcar extraction uses a circular aperture with a radius set by the ``boxcar_radius`` parameter.
+   The optimal extraction uses the white light image as the object profile. Also note that the extraction
+   is performed on the sky-subtracted datacube. Therefore, the ``spec1d`` file contains the 1D spectra,
+   including ``BOX_COUNTS_SKY`` and ``OPT_COUNTS_SKY`` columns. These columns do not contain the sky
+   counts, but rather the residual level of the sky within the aperture. This is useful for checking the
+   quality of the sky subtraction.
+
+#. Step 3 - Generate a sensitivity function from the 1D spectra. This is done by running the following
+   command, assuming that the output 1D spectrum from the previous step was called
+   ``spec1d_StandardStarName.fits``. The ``pypeit_sensfunc`` script will produce an output file called
+   ``sens_StandardStarName.fits``:
+
+   .. code-block:: console
+
+       pypeit_sensfunc spec1d_StandardStarName.fits -o sens_StandardStarName.fits
+
+   For further details, see :doc:`_sensitivity_function`.
+
+#. 
Step 4 - Generate a datacube of the science exposures. This is done by running the following command: + + .. code-block:: console + + pypeit_coadd_datacube ScienceName.coadd3d -o + + Note that you will need to specify the sensitivity function file using the ``sensfile`` option in the + :doc:`coadd3d_file` file. For further details, see :ref:`coadd3d_fluxing`. + Combination options =================== -PypeIt currently supports two different methods to convert an spec2d frame into a datacube; +PypeIt currently supports two different methods to convert a spec2d frame into a datacube; these options are called ``subpixel`` (default) and ``NGP`` (which is short for, nearest grid point), and can be set using the following keyword arguments: @@ -114,20 +184,45 @@ into many subpixels, and assigns each subpixel to a voxel of the datacube. Flux but voxels are correlated, and the error spectrum does not account for covariance between adjacent voxels. The subpixellation scale can be separately set in the spatial and spectral direction on the 2D detector. If you would like to change the subpixellation factors from -the default values (5), you can set the ``spec_subpixel`` and ``spat_subpixel`` keywords -as follows: +the default values (5), you can optionally set (one or all of) the ``spec_subpixel``, +``spat_subpixel``, and ``slice_subpixel`` parameters as follows: .. code-block:: ini [reduce] [[cube]] method = subpixel - spec_subpixel = 8 - spat_subpixel = 10 + spec_subpixel = 3 + spat_subpixel = 7 + slice_subpixel = 10 The total number of subpixels generated for each detector pixel on the spec2d frame is -spec_subpixel x spat_subpixel. The default values (5) divide each spec2d pixel into 25 subpixels -during datacube creation. As an alternative, you can convert the spec2d frames into a datacube +spec_subpixel x spat_subpixel x slice_subpixel. The default values (5) divide each spec2d pixel +into 5x5x5=125 subpixels during datacube creation. +``spec_subpixel`` is the number of subpixels in the spectral +direction (i.e. predominantly detector columns), +``spat_subpixel`` is the number of subpixels in the +spatial direction (i.e. the long axis of each slice; predominantly along detector rows), and +``slice_subpixel`` is the number of times to divide each of the +slices (i.e. the short axis of each slice). Note that all three of these ``subpixel`` +definitions are perpendicular to each other in the datacube. + +While the ``spec_subpixel`` and ``spat_subpixel`` options are somewhat intuitive (i.e. the code is +dividing detector columns and rows into smaller subpixels), the ``slice_subpixel`` option may not be +immediately obvious, so consider the following example. Imagine the long edge of the slice aligned +East-West. The short edge of a single slice will span a Dec difference of 0.35 arcseconds in the case +of the Keck/KCWI Small slicer. ``slice_subpixel`` is effectively dividing this slice width further. +If ``slice_subpixel=7`` then this is creating seven subslices, each of width 0.05 arcseconds. The +importance of this becomes really noticeable when combined with the differential atmospheric +refraction (DAR) correction. Consider two wavelengths that have a relative DAR of 0.15 arcseconds. +In this case, choosing a value of ``slice_subpixel=1`` would shift the relative spatial positions by +0.35 arcseconds (i.e. the difference between adjacent slices) compared to the true difference of 0.15 +arcseconds. 
``slice_subpixel`` divides the flux of the slice into evenly spaced rectangular bins, and +places each of these into a voxel of the final datacube. In the case of a 0.15 arcsecond shift, this +would mean that :math:`3/7` of the flux ends up in one output slice and the remaining :math:`4/7` of +the flux ends up in the adjacent slice. + +As an alternative to the ``subpixel`` method, you can convert the spec2d frames into a datacube with the ``NGP`` method. This algorithm is effectively a 3D histogram. This approach is faster than ``subpixel``, flux is conserved, and voxels are not correlated. However, this option suffers the same downsides as any histogram; the choice of bin sizes can change how the datacube appears. @@ -135,20 +230,42 @@ This algorithm takes each pixel on the spec2d frame and puts the flux of this pi in the datacube. Depending on the binning used, some voxels may be empty (zero flux) while a neighbouring voxel might contain the flux from two spec2d pixels. +.. _coadd3d_fluxing: + Flux calibration ================ If you would like to flux calibrate your datacube, you need to -produce your standard star datacube first, and when generating -the datacube of the science frame you must pass in the name of -the standard star cube in your ``coadd3d`` file as follows: +produce your standard star datacube first. Then extract the spectrum +of the standard star using the ``pypeit_extract_datacube`` script. This +will produce a ``spec1d`` file that you will need to use to generate a +sensitivity function in the usual way (see :doc:`_sensitivity_function`). +Then, when generating the datacube of the science frame you must include +the name of the sensitivity function in your ``coadd3d`` file as follows: .. code-block:: ini [reduce] [[cube]] - standard_cube = standard_star_cube.fits + sensfunc = my_sensfunc.fits + + +Also, an important note for users that wish to combine multiple standard star exposures +into a single datacube: PypeIt currently performs an extinction correction when +generating the sensitivity function; this is perfectly fine for single exposures. +However, if you are combining multiple standard star exposures into a single datacube, +you should note that the extinction correction will be applied at the airmass of the +first standard star exposure listed in the ``coadd3d`` file. This is because the +extinction correction is currently not applied to each individual frame in the datacube. +This control flow will be changed in a future release of PypeIt, but for now, to stay +consistent with the current pipeline, the extinction correction is done in the sensitivity +function algorithms, with the caveat that the standard star exposures are assumed to have +similar airmasses. If you have standard star exposures with significantly different +airmasses, then you should use just one of these exposures to generate the sensitivity +function. + +.. _coadd3d_skysub: Sky Subtraction =============== @@ -172,19 +289,20 @@ then you can specify the ``skysub_frame`` in the ``spec2d`` block of the above. If you have dedicated sky frames, then it is generally recommended to reduce these frames as if they are regular science frames, but add the following keyword arguments at the top of your -:doc:`pypeit_file`: +:doc:`coadd3d_file`: .. code-block:: ini [reduce] [[skysub]] - joint_fit = True - user_regions = : + user_regions = 5:95 [flexure] spec_method = slitcen -This ensures that all pixels in the slit are used to generate a -complete model of the sky. 
+This ensures that the innermost 90 percent of pixels in each slit are +used to generate a model of the sky. + +.. _coadd3d_gratcorr: Grating correction ================== @@ -193,19 +311,33 @@ The grating correction is needed if any of the data are recorded with even a very slightly different setup (e.g. data taken on two different nights with the same *intended* wavelength coverage, but the grating angle of the two nights were slightly different). -This is also needed if your standard star observations were taken -with a slightly different setup. This correction requires that you +You can avoid this correction if you generate a sensitivity function +for each of the setups. However, if you have not done this, then +the grating correction is needed. This correction requires that you have taken calibrations (i.e. flatfields) with the two different -setups. By default, the grating correction will be applied, but it -can be disabled by setting the following keyword argument in your -``coadd3d`` file: +setups, and uses the relative sensitivity of the two flatfields to +estimate the sensitivity correction. By default, the grating correction +will not be applied. If you want to apply the grating correction, you +will need to specify the relative path+file of the Flat calibration +file for each spec2d file. You will need to specify a ``grating_corr`` +file for each science frame, in the ``spec2d`` block of the +``.coadd3d`` file: .. code-block:: ini - [reduce] - [[cube]] - grating_corr = False + # Read in the data + spec2d read + filename | grating_corr + Science/spec2d_scienceframe_01.fits | Calibrations/Flat_A_0_DET01.fits + Science/spec2d_scienceframe_02.fits | Calibrations/Flat_B_1_DET01.fits + spec2d end +If all spec2d files were reduced with the same Flat calibration file, +then you do not need to specify the grating correction file. Also, if you +generate a sensitivity function for each spec2d file, then you do not need +to specify the grating correction file. The grating correction file is only +needed if you have one sensitivity function for all spec2d files, even though +the spec2d files were acquired with different grating angles. Astrometric correction ====================== @@ -224,12 +356,16 @@ file: [[cube]] astrometric = False +If a :doc:`calibrations/align` frame is not available, then the astrometric +correction will be based on the slit edges. White light image ================= A white light image can be generated for the combined frame, or -for each individual frame if ``combine=False``, by setting the following +for each individual frame if ``combine=False``, by setting the +``save_whitelight`` keyword argument. You can set the wavelength +range of the white light image by setting the ``whitelight_range`` keyword argument: .. code-block:: ini @@ -237,10 +373,13 @@ keyword argument: [reduce] [[cube]] save_whitelight = True + whitelight_range = 5000,6000 White light images are not produced by default. The output filename for the white light images are given the suffix ``_whitelight.fits``. +.. 
_coadd3d_offsets: + Spatial alignment with different setups ======================================= diff --git a/doc/figures/nonlinear_lamp_decay.png b/doc/figures/nonlinear_lamp_decay.png new file mode 100644 index 0000000000..430b4327f4 Binary files /dev/null and b/doc/figures/nonlinear_lamp_decay.png differ diff --git a/doc/help/run_pypeit.rst b/doc/help/run_pypeit.rst index 60a98ddfc7..2870d8a61a 100644 --- a/doc/help/run_pypeit.rst +++ b/doc/help/run_pypeit.rst @@ -4,7 +4,7 @@ usage: run_pypeit [-h] [-v VERBOSITY] [-r REDUX_PATH] [-m] [-s] [-o] [-c] pypeit_file - ## PypeIt : The Python Spectroscopic Data Reduction Pipeline v1.16.1.dev97+g2ab5988a9 + ## PypeIt : The Python Spectroscopic Data Reduction Pipeline v1.16.1.dev109+g885cb1823 ## ## Available spectrographs include: ## bok_bc, gemini_flamingos1, gemini_flamingos2, gemini_gmos_north_e2v, diff --git a/doc/include/keck_deimos.sorted.rst b/doc/include/keck_deimos.sorted.rst index 828519122f..4e41db2257 100644 --- a/doc/include/keck_deimos.sorted.rst +++ b/doc/include/keck_deimos.sorted.rst @@ -12,17 +12,17 @@ filename | frametype | ra | dec | target | dispname | decker | binning | mjd | airmass | exptime | dispangle | amp | filter1 | lampstat01 | dateobs | utc | frameno | calib DE.20170527.06713.fits | arc,tilt | 57.99999999999999 | 45.0 | DOME PHLAT | 830G | LongMirr | 1,1 | 57900.077631 | 1.41291034 | 1.0 | 8099.98291016 | SINGLE:B | OG550 | Kr Xe Ar Ne | 2017-05-27 | 01:51:53.87 | 30 | 0 d0527_0030.fits.gz | arc,tilt | 57.99999999999999 | 45.0 | DOME PHLAT | 830G | LongMirr | 1,1 | 57900.077631 | 1.41291034 | 1.0 | 8099.98291016 | SINGLE:B | OG550 | Kr Xe Ar Ne | 2017-05-27 | 01:51:53.87 | 30 | 0 - d0527_0031.fits.gz | pixelflat,illumflat,trace | 57.99999999999999 | 45.0 | DOME PHLAT | 830G | LongMirr | 1,1 | 57900.07851 | 1.41291034 | 4.0 | 8099.98291016 | SINGLE:B | OG550 | Qz | 2017-05-27 | 01:53:10.93 | 31 | 0 DE.20170527.06790.fits | pixelflat,illumflat,trace | 57.99999999999999 | 45.0 | DOME PHLAT | 830G | LongMirr | 1,1 | 57900.07851 | 1.41291034 | 4.0 | 8099.98291016 | SINGLE:B | OG550 | Qz | 2017-05-27 | 01:53:10.93 | 31 | 0 - DE.20170527.06864.fits | pixelflat,illumflat,trace | 57.99999999999999 | 45.0 | DOME PHLAT | 830G | LongMirr | 1,1 | 57900.079356 | 1.41291034 | 4.0 | 8099.98291016 | SINGLE:B | OG550 | Qz | 2017-05-27 | 01:54:24.03 | 32 | 0 + d0527_0031.fits.gz | pixelflat,illumflat,trace | 57.99999999999999 | 45.0 | DOME PHLAT | 830G | LongMirr | 1,1 | 57900.07851 | 1.41291034 | 4.0 | 8099.98291016 | SINGLE:B | OG550 | Qz | 2017-05-27 | 01:53:10.93 | 31 | 0 d0527_0032.fits.gz | pixelflat,illumflat,trace | 57.99999999999999 | 45.0 | DOME PHLAT | 830G | LongMirr | 1,1 | 57900.079356 | 1.41291034 | 4.0 | 8099.98291016 | SINGLE:B | OG550 | Qz | 2017-05-27 | 01:54:24.03 | 32 | 0 - d0527_0033.fits.gz | pixelflat,illumflat,trace | 57.99999999999999 | 45.0 | DOME PHLAT | 830G | LongMirr | 1,1 | 57900.080211 | 1.41291034 | 4.0 | 8099.98291016 | SINGLE:B | OG550 | Qz | 2017-05-27 | 01:55:36.93 | 33 | 0 + DE.20170527.06864.fits | pixelflat,illumflat,trace | 57.99999999999999 | 45.0 | DOME PHLAT | 830G | LongMirr | 1,1 | 57900.079356 | 1.41291034 | 4.0 | 8099.98291016 | SINGLE:B | OG550 | Qz | 2017-05-27 | 01:54:24.03 | 32 | 0 DE.20170527.06936.fits | pixelflat,illumflat,trace | 57.99999999999999 | 45.0 | DOME PHLAT | 830G | LongMirr | 1,1 | 57900.080211 | 1.41291034 | 4.0 | 8099.98291016 | SINGLE:B | OG550 | Qz | 2017-05-27 | 01:55:36.93 | 33 | 0 + d0527_0033.fits.gz | pixelflat,illumflat,trace | 57.99999999999999 | 45.0 | 
DOME PHLAT | 830G | LongMirr | 1,1 | 57900.080211 | 1.41291034 | 4.0 | 8099.98291016 | SINGLE:B | OG550 | Qz | 2017-05-27 | 01:55:36.93 | 33 | 0
 DE.20170527.37601.fits | science | 261.0363749999999 | 19.028166666666667 | P261_OFF | 830G | LongMirr | 1,1 | 57900.435131 | 1.03078874 | 1200.0 | 8099.98291016 | SINGLE:B | OG550 | Off | 2017-05-27 | 10:26:41.61 | 80 | 0
-d0527_0081.fits.gz | science | 261.0363749999999 | 19.028166666666667 | P261_OFF | 830G | LongMirr | 1,1 | 57900.449842 | 1.01267696 | 1200.0 | 8099.98291016 | SINGLE:B | OG550 | Off | 2017-05-27 | 10:47:52.92 | 81 | 0
 DE.20170527.38872.fits | science | 261.0363749999999 | 19.028166666666667 | P261_OFF | 830G | LongMirr | 1,1 | 57900.449842 | 1.01267696 | 1200.0 | 8099.98291016 | SINGLE:B | OG550 | Off | 2017-05-27 | 10:47:52.92 | 81 | 0
-DE.20170527.41775.fits | science | 261.0362916666666 | 19.028888888888886 | P261_OFF | 830G | LongMirr | 1,1 | 57900.483427 | 1.00093023 | 1200.0 | 8099.98291016 | SINGLE:B | OG550 | Off | 2017-05-27 | 11:36:15.35 | 83 | 0
+d0527_0081.fits.gz | science | 261.0363749999999 | 19.028166666666667 | P261_OFF | 830G | LongMirr | 1,1 | 57900.449842 | 1.01267696 | 1200.0 | 8099.98291016 | SINGLE:B | OG550 | Off | 2017-05-27 | 10:47:52.92 | 81 | 0
 d0527_0083.fits.gz | science | 261.0362916666666 | 19.028888888888886 | P261_OFF | 830G | LongMirr | 1,1 | 57900.483427 | 1.00093023 | 1200.0 | 8099.98291016 | SINGLE:B | OG550 | Off | 2017-05-27 | 11:36:15.35 | 83 | 0
+DE.20170527.41775.fits | science | 261.0362916666666 | 19.028888888888886 | P261_OFF | 830G | LongMirr | 1,1 | 57900.483427 | 1.00093023 | 1200.0 | 8099.98291016 | SINGLE:B | OG550 | Off | 2017-05-27 | 11:36:15.35 | 83 | 0
 DE.20170527.43045.fits | science | 261.0362916666666 | 19.028888888888886 | P261_OFF | 830G | LongMirr | 1,1 | 57900.498135 | 1.00838805 | 1200.0 | 8099.98291016 | SINGLE:B | OG550 | Off | 2017-05-27 | 11:57:25.35 | 84 | 0
 DE.20170527.44316.fits | science | 261.0362916666666 | 19.028888888888886 | P261_OFF | 830G | LongMirr | 1,1 | 57900.512854 | 1.02377681 | 1200.0 | 8099.98291016 | SINGLE:B | OG550 | Off | 2017-05-27 | 12:18:36.71 | 85 | 0
 DE.20170527.53184.fits | science | 349.99316666666664 | -5.16575 | Feige 110 | 830G | LongMirr | 1,1 | 57900.615484 | 1.42505162 | 45.0 | 8099.98291016 | SINGLE:B | OG550 | Off | 2017-05-27 | 14:46:24.88 | 93 | 0
diff --git a/doc/installing.rst b/doc/installing.rst
index 79e92d9de6..1772a7b8f7 100644
--- a/doc/installing.rst
+++ b/doc/installing.rst
@@ -222,6 +222,30 @@ for x86-64:
 Solutions/Recommendations/Feedback for these installation options are welcome;
 please `Submit an issue`_.
 
+User Installation on Windows
+----------------------------
+
+#. Download `Python for Windows `_.
+
+#. Run the installer.
+
+   * Make sure "Add python.exe to Path" or "Add Python to environment variables" is selected before installing.
+   * If you have Admin privileges, click "Disable path length limit" after the installation succeeds.
+
+#. Download and run the `Visual Studio build tools `_ installer.
+
+   * Only "Desktop Development with C++" needs to be checked.
+   * Click install.
+
+#. Create a virtual environment as in `Setup a clean python environment `__ and install PypeIt as described above.
+
+If running ``python`` on Windows brings up a window for the Microsoft Store you may want to change the application alias.
+This is under ``Settings -> Apps -> App execution aliases`` on Windows 10 and ``Settings -> Apps -> Advanced app settings -> App execution aliases``
+on Windows 11. 
Disable the ``App Installer`` options for the ``python.exe`` and ``python3.exe`` executables. + +An alternative for running under Windows is to install the `Windows Subsystem for Linux (WSL) `_. +This in effect allows you to run PypeIt under Linux under Windows. + ---- .. _data_installation: @@ -518,11 +542,12 @@ will work single-threaded if OpenMP is not available. GCC supports OpenMP out of the box, however the ``clang`` compiler that Apple's XCode provides does not. So for optimal performance on Apple hardware, you will want to install GCC via ``homebrew`` or ``macports`` and specify its use when installing ``pypeit``. For example, if you installed -GCC 12.x via ``homebrew``, you would get ``pypeit`` to use it by doing, for example: +GCC via ``homebrew``, you would get ``pypeit`` to use it by doing, for example: .. code-block:: console - CC=gcc-12 pip install pypeit + $ export CC=/opt/homebrew/bin/gcc + $ pip install pypeit Basically, ``pypeit`` checks the ``CC`` environment variable for what compiler to use so configure that as needed to use your desired compiler. The ``pypeit_c_enabled`` script can be used to check if diff --git a/doc/pypeit_par.rst b/doc/pypeit_par.rst index db0e4ae1c8..d8a7b4a747 100644 --- a/doc/pypeit_par.rst +++ b/doc/pypeit_par.rst @@ -582,21 +582,21 @@ Collate1DPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.Collate1DPar` -========================= =============== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description -========================= =============== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== -``dry_run`` bool .. False If set, the script will display the matching File and Object Ids but will not flux, coadd or archive. -``exclude_serendip`` bool .. False Whether to exclude SERENDIP objects from collating. -``exclude_slit_trace_bm`` list, str .. A list of slit trace bitmask bits that should be excluded. -``flux`` bool .. False If set, the script will flux calibrate using archived sensfuncs before coadding. -``ignore_flux`` bool .. False If set, the script will only coadd non-fluxed spectra even if flux data is present. Otherwise fluxed spectra are coadded if all spec1ds have been fluxed calibrated. -``match_using`` str .. ``ra/dec`` Determines how 1D spectra are matched as being the same object. Must be either 'pixel' or 'ra/dec'. -``outdir`` str .. ``/Users/westfall/Work/packages/pypeit/doc`` The path where all coadded output files and report files will be placed. -``refframe`` str .. .. Perform reference frame correction prior to coadding. Options are: observed, heliocentric, barycentric -``spec1d_outdir`` str .. .. The path where all modified spec1d files are placed. 
These are only created if flux calibration or refframe correction are asked for. -``tolerance`` str, float, int .. 1.0 The tolerance used when comparing the coordinates of objects. If two objects are within this distance from each other, they are considered the same object. If match_using is 'ra/dec' (the default) this is an angular distance. The defaults units are arcseconds but other units supported by astropy.coordinates.Angle can be used (`e.g.`, '0.003d' or '0h1m30s'). If match_using is 'pixel' this is a float. -``wv_rms_thresh`` float .. .. If set, any objects with a wavelength RMS > this value are skipped, else all wavelength RMS values are accepted. -========================= =============== ======= ============================================ ================================================================================================================================================================================================================================================================================================================================================================================================================== +========================= =============== ======= =============================== ================================================================================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +========================= =============== ======= =============================== ================================================================================================================================================================================================================================================================================================================================================================================================================== +``dry_run`` bool .. False If set, the script will display the matching File and Object Ids but will not flux, coadd or archive. +``exclude_serendip`` bool .. False Whether to exclude SERENDIP objects from collating. +``exclude_slit_trace_bm`` list, str .. A list of slit trace bitmask bits that should be excluded. +``flux`` bool .. False If set, the script will flux calibrate using archived sensfuncs before coadding. +``ignore_flux`` bool .. False If set, the script will only coadd non-fluxed spectra even if flux data is present. Otherwise fluxed spectra are coadded if all spec1ds have been fluxed calibrated. +``match_using`` str .. ``ra/dec`` Determines how 1D spectra are matched as being the same object. Must be either 'pixel' or 'ra/dec'. +``outdir`` str .. ``/home/dusty/work/PypeIt/doc`` The path where all coadded output files and report files will be placed. +``refframe`` str .. .. Perform reference frame correction prior to coadding. Options are: observed, heliocentric, barycentric +``spec1d_outdir`` str .. .. The path where all modified spec1d files are placed. These are only created if flux calibration or refframe correction are asked for. +``tolerance`` str, float, int .. 1.0 The tolerance used when comparing the coordinates of objects. If two objects are within this distance from each other, they are considered the same object. 
If match_using is 'ra/dec' (the default) this is an angular distance. The defaults units are arcseconds but other units supported by astropy.coordinates.Angle can be used (`e.g.`, '0.003d' or '0h1m30s'). If match_using is 'pixel' this is a float. +``wv_rms_thresh`` float .. .. If set, any objects with a wavelength RMS > this value are skipped, else all wavelength RMS values are accepted. +========================= =============== ======= =============================== ================================================================================================================================================================================================================================================================================================================================================================================================================== ---- @@ -617,7 +617,7 @@ Key Type Options ``multi_min_SN`` int, float .. 1 Minimum S/N for analyzing sky spectrum for flexure ``spec_maxshift`` int .. 20 Maximum allowed spectral flexure shift in pixels. ``spec_method`` str ``boxcar``, ``slitcen``, ``skip`` ``skip`` Method used to correct for flexure. Use skip for no correction. If slitcen is used, the flexure correction is performed before the extraction of objects (not recommended). Options are: None, boxcar, slitcen, skip -``spectrum`` str .. ``paranal_sky.fits`` Archive sky spectrum to be used for the flexure correction. See ``pypeit/data/sky_spec/`` for a list of available sky spectra. If ``model`` is used, a model sky spectrum will be generated using :func:`~pypeit.wavemodel.nearIR_modelsky` and the spectralresolution of the spectrum to be flexure corrected. +``spectrum`` str .. ``paranal_sky.fits`` Archive sky spectrum to be used for the flexure correction. 
=================== ========== ======================================================== ==================== ====================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== @@ -649,22 +649,22 @@ ReduxPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.ReduxPar` -====================== ============== ======= ============================================ ========================================================================================================================================================================================================================================================================================================================================================================================================== -Key Type Options Default Description -====================== ============== ======= ============================================ ========================================================================================================================================================================================================================================================================================================================================================================================================== -``calwin`` int, float .. 0 The window of time in hours to search for calibration frames for a science frame -``chk_version`` bool .. True If True enforce strict PypeIt version checking to ensure that all files were created with the current version of PypeIt. If set to False, the code will attempt to read out-of-date files and keep going. Beware (!!) that this can lead to unforeseen bugs that either cause the code to crash or lead to erroneous results. I.e., you really need to know what you are doing if you set this to False! -``detnum`` int, list .. .. Restrict reduction to a list of detector indices. In case of mosaic reduction (currently only available for Gemini/GMOS and Keck/DEIMOS) ``detnum`` should be a list of tuples of the detector indices that are mosaiced together. E.g., for Gemini/GMOS ``detnum`` would be ``[(1,2,3)]`` and for Keck/DEIMOS it would be ``[(1, 5), (2, 6), (3, 7), (4, 8)]`` -``ignore_bad_headers`` bool .. False Ignore bad headers (NOT recommended unless you know it is safe). -``maskIDs`` str, int, list .. .. Restrict reduction to a set of slitmask IDs Example syntax -- ``maskIDs = 818006,818015`` This must be used with detnum (for now). -``qadir`` str .. ``QA`` Directory relative to calling directory to write quality assessment files. -``quicklook`` bool .. False Run a quick look reduction? This is usually good if you want to quickly reduce the data (usually at the telescope in real time) to get an initial estimate of the data quality. -``redux_path`` str .. ``/Users/westfall/Work/packages/pypeit/doc`` Path to folder for performing reductions. Default is the current working directory. -``scidir`` str .. ``Science`` Directory relative to calling directory to write science files. -``slitspatnum`` str, list .. .. 
Restrict reduction to a set of slit DET:SPAT values (closest slit is used). Example syntax -- slitspatnum = DET01:175,DET01:205 or MSC02:2234 If you are re-running the code, (i.e. modifying one slit) you *must* have the precise SPAT_ID index. -``sortroot`` str .. .. A filename given to output the details of the sorted files. If None, the default is the root name of the pypeit file. If off, no output is produced. -``spectrograph`` str .. .. Spectrograph that provided the data to be reduced. See :ref:`instruments` for valid options. -====================== ============== ======= ============================================ ========================================================================================================================================================================================================================================================================================================================================================================================================== +====================== ============== ======= =============================== ========================================================================================================================================================================================================================================================================================================================================================================================================== +Key Type Options Default Description +====================== ============== ======= =============================== ========================================================================================================================================================================================================================================================================================================================================================================================================== +``calwin`` int, float .. 0 The window of time in hours to search for calibration frames for a science frame +``chk_version`` bool .. True If True enforce strict PypeIt version checking to ensure that all files were created with the current version of PypeIt. If set to False, the code will attempt to read out-of-date files and keep going. Beware (!!) that this can lead to unforeseen bugs that either cause the code to crash or lead to erroneous results. I.e., you really need to know what you are doing if you set this to False! +``detnum`` int, list .. .. Restrict reduction to a list of detector indices. In case of mosaic reduction (currently only available for Gemini/GMOS and Keck/DEIMOS) ``detnum`` should be a list of tuples of the detector indices that are mosaiced together. E.g., for Gemini/GMOS ``detnum`` would be ``[(1,2,3)]`` and for Keck/DEIMOS it would be ``[(1, 5), (2, 6), (3, 7), (4, 8)]`` +``ignore_bad_headers`` bool .. False Ignore bad headers (NOT recommended unless you know it is safe). +``maskIDs`` str, int, list .. .. Restrict reduction to a set of slitmask IDs Example syntax -- ``maskIDs = 818006,818015`` This must be used with detnum (for now). +``qadir`` str .. ``QA`` Directory relative to calling directory to write quality assessment files. +``quicklook`` bool .. False Run a quick look reduction? This is usually good if you want to quickly reduce the data (usually at the telescope in real time) to get an initial estimate of the data quality. +``redux_path`` str .. 
``/home/dusty/work/PypeIt/doc`` Path to folder for performing reductions. Default is the current working directory. +``scidir`` str .. ``Science`` Directory relative to calling directory to write science files. +``slitspatnum`` str, list .. .. Restrict reduction to a set of slit DET:SPAT values (closest slit is used). Example syntax -- slitspatnum = DET01:175,DET01:205 or MSC02:2234 If you are re-running the code, (i.e. modifying one slit) you *must* have the precise SPAT_ID index. +``sortroot`` str .. .. A filename given to output the details of the sorted files. If None, the default is the root name of the pypeit file. If off, no output is produced. +``spectrograph`` str .. .. Spectrograph that provided the data to be reduced. See :ref:`instruments` for valid options. +====================== ============== ======= =============================== ========================================================================================================================================================================================================================================================================================================================================================================================================== ---- diff --git a/doc/releases/1.16.1dev.rst b/doc/releases/1.16.1dev.rst index 1d3a9915d7..11948f585c 100644 --- a/doc/releases/1.16.1dev.rst +++ b/doc/releases/1.16.1dev.rst @@ -47,6 +47,12 @@ Script Changes - Added ``pypeit_show_pixflat`` script to inspect the (slitless) pixel flat generated during the reduction and stored in ``data/static_calibs/{spectrograph_name}`` +- A new script, called `pypeit_extract_datacube`, allows 1D spectra of point + sources to be extracted from datacubes. +- The sensitivity function is now generated outside of datacube generation. +- The `grating_corr` column is now used to select the correct grating + correction file for each spec2d file when generating the datacube. + Datamodel Changes ----------------- @@ -66,4 +72,7 @@ Bug Fixes - Allow for empty 2D wavecal solution in HDU extension of WaveCalib file - Fix a MAJOR BUT SUBTLE bug in the use of ``numpy.argsort``. When using ``numpy.argsort`` the parameter kind='stable' should be used to ensure that a sorting algorithm more robust - than "quicksort" is used. \ No newline at end of file + than "quicksort" is used. + + + diff --git a/pypeit/alignframe.py b/pypeit/alignframe.py index 9a28491d21..2cfc6b28d8 100644 --- a/pypeit/alignframe.py +++ b/pypeit/alignframe.py @@ -181,8 +181,8 @@ def build_traces(self, show_peaks=False, debug=False): dict: self.align_dict """ # Generate slits - slitid_img_init = self.slits.slit_img(initial=True) - left, right, _ = self.slits.select_edges(initial=True) + slitid_img_init = self.slits.slit_img() + left, right, _ = self.slits.select_edges() align_prof = dict({}) # Go through the slits diff --git a/pypeit/bspline/setup_package.py b/pypeit/bspline/setup_package.py index 415759c0c1..122cff8dc0 100644 --- a/pypeit/bspline/setup_package.py +++ b/pypeit/bspline/setup_package.py @@ -1,50 +1,9 @@ import os import sys -import tempfile, subprocess, shutil from setuptools import Extension +from extension_helpers import add_openmp_flags_if_available -# the most reliable way to check for openmp support in the C compiler is to try to build -# some test code with the -fopenmp flag. openmp provides a big performance boost, but some -# systems, notably apple's version of clang that xcode provides, don't support it out of the box. 
-
-# see http://openmp.org/wp/openmp-compilers/
-omp_test = \
-r"""
-#include 
-#include 
-int main() {
-#pragma omp parallel
-printf("Hello from thread %d, nthreads %d\n", omp_get_thread_num(), omp_get_num_threads());
-}
-"""
-
-def check_for_openmp():
-    tmpdir = tempfile.mkdtemp()
-    curdir = os.getcwd()
-    os.chdir(tmpdir)
-
-    if 'CC' in os.environ:
-        c_compiler = os.environ['CC']
-    else:
-        c_compiler = 'gcc'
-
-    filename = r'test.c'
-    with open(filename, 'w') as file:
-        file.write(omp_test)
-    with open(os.devnull, 'w') as fnull:
-        result = subprocess.call([c_compiler, '-fopenmp', filename],
-                                 stdout=fnull, stderr=fnull)
-
-    os.chdir(curdir)
-    # clean up test code
-    shutil.rmtree(tmpdir)
-
-    # return code from compiler process is 0 if it completed successfully
-    if result == 0:
-        return True
-    else:
-        return False
 
 C_BSPLINE_PKGDIR = os.path.relpath(os.path.dirname(__file__))
 
@@ -55,16 +14,17 @@ def check_for_openmp():
 if not sys.platform.startswith('win'):
     extra_compile_args.append('-fPIC')
 
-if check_for_openmp():
-    extra_compile_args.append('-fopenmp')
-    extra_link_args = ['-fopenmp']
-else:
-    extra_link_args = []
 
 def get_extensions():
-    return [Extension(name='pypeit.bspline._bspline', sources=SRC_FILES,
-                      extra_compile_args=extra_compile_args, language='c',
-                      extra_link_args=extra_link_args,
-                      export_symbols=['bspline_model', 'solution_arrays',
-                                      'cholesky_band', 'cholesky_solve',
-                                      'intrv'])]
+    extension = Extension(name='pypeit.bspline._bspline', sources=SRC_FILES,
+                          extra_compile_args=extra_compile_args, language='c',
+                          export_symbols=['bspline_model', 'solution_arrays',
+                                          'cholesky_band', 'cholesky_solve',
+                                          'intrv'])
+
+    # extension_helpers will check for openmp support by trying to build
+    # some test code with the appropriate flag. openmp provides a big performance boost, but some
+    # systems, notably apple's version of clang that xcode provides, don't support it out of the box.
+
+    add_openmp_flags_if_available(extension)
+    return [extension]
diff --git a/pypeit/calibrations.py b/pypeit/calibrations.py
index a3c755259e..f204bde7ec 100644
--- a/pypeit/calibrations.py
+++ b/pypeit/calibrations.py
@@ -4,6 +4,7 @@
 .. include common links, assuming primary doc root is up one directory
 .. include:: ../include/links.rst
 """
+import os
 from pathlib import Path
 from datetime import datetime
 from copy import deepcopy
@@ -216,6 +217,46 @@ def __init__(self, fitstbl, par, spectrograph, caldir, qadir=None,
         self.success = False
         self.failed_step = None
 
+    def check_calibrations(self, file_list, check_lamps=True):
+        """
+        Check if the input calibration files are consistent with each other.
+        This step is usually needed when combining calibration frames of a given type.
+        This routine currently only prints out warning messages if the calibration files are not consistent.
+
+        Note: The exposure times are currently checked in the combine step, so they are not checked here.
+
+        Parameters
+        ----------
+        file_list : list
+            List of calibration files to check
+        check_lamps : bool, optional
+            Check if the lamp status is the same for all the files. Default is True. 
+ """ + + lampstat = [None] * len(file_list) + # Loop on the files + for ii, ifile in enumerate(file_list): + # Save the lamp status + headarr = deepcopy(self.spectrograph.get_headarr(ifile)) + lampstat[ii] = self.spectrograph.get_lamps_status(headarr) + + # Check that the lamps being combined are all the same + if check_lamps: + if not lampstat[1:] == lampstat[:-1]: + msgs.warn("The following files contain different lamp status") + # Get the longest strings + maxlen = max([len("Filename")] + [len(os.path.split(x)[1]) for x in file_list]) + maxlmp = max([len("Lamp status")] + [len(x) for x in lampstat]) + strout = "{0:" + str(maxlen) + "} {1:s}" + # Print the messages + print(msgs.indent() + '-' * maxlen + " " + '-' * maxlmp) + print(msgs.indent() + strout.format("Filename", "Lamp status")) + print(msgs.indent() + '-' * maxlen + " " + '-' * maxlmp) + for ff, file in enumerate(file_list): + print(msgs.indent() + + strout.format(os.path.split(file)[1], " ".join(lampstat[ff].split("_")))) + print(msgs.indent() + '-' * maxlen + " " + '-' * maxlmp) + def find_calibrations(self, frametype, frameclass): """ Find calibration files and identifiers. @@ -341,6 +382,9 @@ def get_arc(self): # Reset the BPM self.get_bpm(frame=raw_files[0]) + # Perform a check on the files + self.check_calibrations(raw_files) + # Otherwise, create the processed file. msgs.info(f'Preparing a {frame["class"].calib_type} calibration frame.') self.msarc = buildimage.buildimage_fromlist(self.spectrograph, self.det, @@ -384,6 +428,9 @@ def get_tiltimg(self): # Reset the BPM self.get_bpm(frame=raw_files[0]) + # Perform a check on the files + self.check_calibrations(raw_files) + # Otherwise, create the processed file. msgs.info(f'Preparing a {frame["class"].calib_type} calibration frame.') self.mstilt = buildimage.buildimage_fromlist(self.spectrograph, self.det, @@ -433,6 +480,9 @@ def get_align(self): # Reset the BPM self.get_bpm(frame=raw_files[0]) + # Perform a check on the files + self.check_calibrations(raw_files) + # Otherwise, create the processed file. msgs.info(f'Preparing a {frame["class"].calib_type} calibration frame.') msalign = buildimage.buildimage_fromlist(self.spectrograph, self.det, @@ -480,6 +530,9 @@ def get_bias(self): self.msbias = frame['class'].from_file(cal_file, chk_version=self.chk_version) return self.msbias + # Perform a check on the files + self.check_calibrations(raw_files) + # Otherwise, create the processed file. msgs.info(f'Preparing a {frame["class"].calib_type} calibration frame.') self.msbias = buildimage.buildimage_fromlist(self.spectrograph, self.det, @@ -530,6 +583,9 @@ def get_dark(self): # there any reason why creation of the bpm should come after the dark, # or can we change the order? + # Perform a check on the files + self.check_calibrations(raw_files) + # Otherwise, create the processed file. 
self.msdark = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['darkframe'], raw_files, @@ -604,6 +660,9 @@ def get_scattlight(self): # Reset the BPM self.get_bpm(frame=raw_scattlight_files[0]) + # Perform a check on the files + self.check_calibrations(raw_scattlight_files) + binning = self.fitstbl[scatt_idx[0]]['binning'] dispname = self.fitstbl[scatt_idx[0]]['dispname'] scattlightImage = buildimage.buildimage_fromlist(self.spectrograph, self.det, @@ -614,7 +673,7 @@ def get_scattlight(self): spatbin = parse.parse_binning(binning)[1] pad = self.par['scattlight_pad'] // spatbin - offslitmask = self.slits.slit_img(pad=pad, initial=True, flexure=None) == -1 + offslitmask = self.slits.slit_img(pad=pad, flexure=None) == -1 # Get starting parameters for the scattered light model x0, bounds = self.spectrograph.scattered_light_archive(binning, dispname) @@ -983,6 +1042,10 @@ def get_flats(self): if len(raw_pixel_files) > 0: # Reset the BPM self.get_bpm(frame=raw_pixel_files[0]) + + # Perform a check on the files + self.check_calibrations(raw_pixel_files) + msgs.info('Creating pixel-flat calibration frame using files: ') for f in raw_pixel_files: msgs.prindent(f'{Path(f).name}') @@ -995,6 +1058,10 @@ def get_flats(self): if len(raw_lampoff_files) > 0: # Reset the BPM self.get_bpm(frame=raw_lampoff_files[0]) + + # Perform a check on the files + self.check_calibrations(raw_lampoff_files) + msgs.info('Subtracting lamp off flats using files: ') for f in raw_lampoff_files: msgs.prindent(f'{Path(f).name}') @@ -1021,9 +1088,14 @@ def get_flats(self): if not pix_is_illum and len(raw_illum_files) > 0: # Reset the BPM self.get_bpm(frame=raw_illum_files[0]) + + # Perform a check on the files + self.check_calibrations(raw_illum_files) + msgs.info('Creating slit-illumination flat calibration frame using files: ') for f in raw_illum_files: msgs.prindent(f'{Path(f).name}') + illum_flat = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['illumflatframe'], raw_illum_files, dark=self.msdark, bias=self.msbias, scattlight=self.msscattlight, @@ -1033,6 +1105,10 @@ def get_flats(self): for f in raw_lampoff_files: msgs.prindent(f'{Path(f).name}') if lampoff_flat is None: + # Perform a check on the files + self.check_calibrations(raw_lampoff_files) + + # Build the image lampoff_flat = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['lampoffflatsframe'], raw_lampoff_files, @@ -1144,6 +1220,9 @@ def get_slits(self): # Reset the BPM self.get_bpm(frame=raw_trace_files[0]) + # Perform a check on the files + self.check_calibrations(raw_trace_files) + traceImage = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['traceframe'], raw_trace_files, bias=self.msbias, bpm=self.msbpm, @@ -1158,6 +1237,9 @@ def get_slits(self): # Reset the BPM self.get_bpm(frame=raw_trace_files[0]) + # Perform a check on the files + self.check_calibrations(raw_lampoff_files) + lampoff_flat = buildimage.buildimage_fromlist(self.spectrograph, self.det, self.par['lampoffflatsframe'], raw_lampoff_files, dark=self.msdark, diff --git a/pypeit/coadd3d.py b/pypeit/coadd3d.py index deb8d2e79e..5638cb6f77 100644 --- a/pypeit/coadd3d.py +++ b/pypeit/coadd3d.py @@ -15,7 +15,7 @@ import numpy as np from pypeit import msgs -from pypeit import alignframe, datamodel, flatfield, io, spec2dobj, utils +from pypeit import alignframe, datamodel, flatfield, io, sensfunc, spec2dobj, utils from pypeit.core.flexure import calculate_image_phase from pypeit.core import datacube, extract, flux_calib, 
parse from pypeit.spectrographs.util import load_spectrograph @@ -93,7 +93,7 @@ class DataCube(datamodel.DataContainer): 'spectrograph', 'spect_meta', '_ivar', # This is set internally, and should be accessed with self.ivar - '_wcs' # This is set internally, and should be accessed with self.wcs + '_wcs' ] def __init__(self, flux, sig, bpm, wave, PYP_SPEC, blaze_wave, blaze_spec, sensfunc=None, @@ -106,6 +106,7 @@ def __init__(self, flux, sig, bpm, wave, PYP_SPEC, blaze_wave, blaze_spec, sensf # Initialise the internals self._ivar = None self._wcs = None + self.head0 = None # This contains the primary header of the spec2d used to make the datacube def _bundle(self): """ @@ -162,9 +163,12 @@ def to_file(self, ofile, primary_hdr=None, hdr=None, **kwargs): subheader = spectrograph.subheader_for_spec(self.head0, self.head0) else: subheader = {} - # Add em in + # Add them in for key in subheader: primary_hdr[key] = subheader[key] + # Set the exposure time to 1, since datacubes are counts/second. + # This is needed for the sensitivity function calculation + primary_hdr['EXPTIME'] = 1.0 # Do it super(DataCube, self).to_file(ofile, primary_hdr=primary_hdr, hdr=hdr, **kwargs) @@ -175,7 +179,7 @@ def from_file(cls, ifile, verbose=True, chk_version=True, **kwargs): Over-load :func:`~pypeit.datamodel.DataContainer.from_file` to deal with the header - + Args: ifile (:obj:`str`, `Path`_): Fits file with the data to read @@ -191,12 +195,12 @@ def from_file(cls, ifile, verbose=True, chk_version=True, **kwargs): self = cls.from_hdu(hdu, chk_version=chk_version, **kwargs) # Internals self.filename = ifile - self.head0 = hdu[1].header # Actually use the first extension here, since it contains the WCS + self.head0 = hdu[0].header # Meta self.spectrograph = load_spectrograph(self.PYP_SPEC) self.spect_meta = self.spectrograph.parse_spec_header(hdu[0].header) self._ivar = None - self._wcs = None + self._wcs = wcs.WCS(hdu[1].header) return self @property @@ -214,20 +218,35 @@ def ivar(self): self._ivar = utils.inverse(self.sig**2) return self._ivar - @property - def wcs(self): + def extract_spec(self, parset, outname=None, boxcar_radius=None, overwrite=False): """ - Utility function to provide the world coordinate system of the datacube + Extract a spectrum from the datacube - Returns - ------- - self._wcs : `astropy.wcs.WCS`_ - The WCS based on the stored header information. Note that self._wcs should - not be accessed directly, and you should only call self.wcs + Parameters + ---------- + parset : dict + A dictionary containing the :class:`~pypeit.par.pypeitpar.ReducePar` parameters. 
+        outname : str, optional
+            Name of the output file
+        boxcar_radius : float, optional
+            Radius of the circular boxcar (in arcseconds) to use for the extraction
+        overwrite : bool, optional
+            Overwrite any existing files
         """
-        if self._wcs is None:
-            self._wcs = wcs.WCS(self.head0)
-        return self._wcs
+        # Extract the spectrum
+        fwhm = parset['findobj']['find_fwhm'] if parset['extraction']['use_user_fwhm'] else None
+
+        # Datacubes are in counts/second, so set the exposure time to 1
+        exptime = 1.0
+        # TODO :: Avoid transposing these large cubes
+        sobjs = datacube.extract_point_source(self.wave, self.flux.T, self.ivar.T, self.bpm.T, self._wcs,
+                                              exptime=exptime, pypeline=self.spectrograph.pypeline,
+                                              fluxed=self.fluxed, boxcar_radius=boxcar_radius,
+                                              optfwhm=fwhm, whitelight_range=parset['cube']['whitelight_range'])
+
+        # Save the extracted spectrum
+        spec1d_filename = 'spec1d_' + self.filename if outname is None else outname
+        sobjs.write_to_fits(self.head0, spec1d_filename, overwrite=overwrite)
 
 
 class DARcorrection:
@@ -340,9 +359,9 @@ class CoAdd3D:
     """
     # Superclass factory method generates the subclass instance
     @classmethod
-    def get_instance(cls, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offsets=None,
-                     dec_offsets=None, spectrograph=None, det=1, overwrite=False, show=False,
-                     debug=False):
+    def get_instance(cls, spec2dfiles, par, skysub_frame=None, sensfile=None, scale_corr=None, grating_corr=None,
+                     ra_offsets=None, dec_offsets=None, spectrograph=None, det=1,
+                     overwrite=False, show=False, debug=False):
         """
         Instantiate the subclass appropriate for the provided spectrograph.
@@ -357,13 +376,13 @@ def get_instance(cls, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_o
         """
         return next(c for c in cls.__subclasses__()
                     if c.__name__ == (spectrograph.pypeline + 'CoAdd3D'))(
-                        spec2dfiles, par, skysub_frame=skysub_frame, scale_corr=scale_corr,
-                        ra_offsets=ra_offsets, dec_offsets=dec_offsets, spectrograph=spectrograph,
-                        det=det, overwrite=overwrite, show=show, debug=debug)
+                        spec2dfiles, par, skysub_frame=skysub_frame, sensfile=sensfile, scale_corr=scale_corr,
+                        grating_corr=grating_corr, ra_offsets=ra_offsets, dec_offsets=dec_offsets,
+                        spectrograph=spectrograph, det=det, overwrite=overwrite, show=show, debug=debug)
 
-    def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offsets=None,
-                 dec_offsets=None, spectrograph=None, det=None, overwrite=False, show=False,
-                 debug=False):
+    def __init__(self, spec2dfiles, par, skysub_frame=None, sensfile=None, scale_corr=None, grating_corr=None,
+                 ra_offsets=None, dec_offsets=None, spectrograph=None, det=None,
+                 overwrite=False, show=False, debug=False):
         """
         Args:
@@ -378,9 +397,16 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offs
             skysub_frame (:obj:`list`, optional):
                 If not None, this should be a list of frames to use for the sky subtraction of each individual
                 entry of spec2dfiles. It should be the same length as spec2dfiles.
+            sensfile (:obj:`list`, optional):
+                If not None, this should be a list of sensitivity function files to use for each individual
+                entry of spec2dfiles. It should be the same length as spec2dfiles.
             scale_corr (:obj:`list`, optional):
                 If not None, this should be a list of relative scale correction options. It should be the
                 same length as spec2dfiles.
+            grating_corr (:obj:`list`, optional):
+                If not None, this should be a list of `str`, where each element is the relative path to the
+                Flat calibration file that was used to reduce each spec2d file.
It should be the + same length as spec2dfiles. ra_offsets (:obj:`list`, optional): If not None, this should be a list of relative RA offsets of each frame. It should be the same length as spec2dfiles. The units should be degrees. @@ -418,8 +444,12 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offs # Do some quick checks on the input options if skysub_frame is not None and len(skysub_frame) != self.numfiles: msgs.error("The skysub_frame list should be identical length to the spec2dfiles list") + if sensfile is not None and len(sensfile) != self.numfiles: + msgs.error("The sensfile list should be identical length to the spec2dfiles list") if scale_corr is not None and len(scale_corr) != self.numfiles: msgs.error("The scale_corr list should be identical length to the spec2dfiles list") + if grating_corr is not None and len(grating_corr) != self.numfiles: + msgs.error("The grating_corr list should be identical length to the spec2dfiles list") if ra_offsets is not None and len(ra_offsets) != self.numfiles: msgs.error("The ra_offsets list should be identical length to the spec2dfiles list") if dec_offsets is not None and len(dec_offsets) != self.numfiles: @@ -430,8 +460,18 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offs if ra_offsets is not None and dec_offsets is None: msgs.error("If you provide ra_offsets, you must also provide dec_offsets") # Set the frame specific options + self.sensfile = None + if sensfile is None: + # User didn't provide a sensfile for each frame. Check if they provided a single one. + if self.cubepar['sensfile'] is not None: + # User provided a single sensfile. Use this for all frames. + self.sensfile = self.numfiles*[self.cubepar['sensfile']] + else: + # User provided a sensfile for each frame. Use these. + self.sensfile = sensfile self.skysub_frame = skysub_frame self.scale_corr = scale_corr + self.grating_corr = grating_corr self.ra_offsets = list(ra_offsets) if isinstance(ra_offsets, np.ndarray) else ra_offsets self.dec_offsets = list(dec_offsets) if isinstance(dec_offsets, np.ndarray) else dec_offsets # If there is only one frame being "combined" AND there's no reference image, then don't compute the translation. 
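Editor's aside: the length checks in the hunk above all follow one pattern, so a compact illustration may help. The sketch below is not PypeIt code; `normalize_per_frame` and its arguments are hypothetical names used only to show the broadcast-and-validate logic that `CoAdd3D.__init__` applies to each per-frame option (e.g. a single `cubepar['sensfile']` replicated across all frames, or a user-supplied list that must match the number of spec2d files).

# Minimal standalone sketch (illustrative names, not the PypeIt API) of the
# per-frame option handling: broadcast a single global value, validate a
# per-frame list, or fall back to one None per frame.
def normalize_per_frame(option, numfiles, global_value=None):
    """Return a list with exactly one entry per spec2d file."""
    if option is None:
        # e.g. a single sensfile from cubepar['sensfile'] applies to every frame
        return numfiles * [global_value]
    if len(option) != numfiles:
        raise ValueError("The option list should be identical length to the spec2dfiles list")
    return list(option)

# Example usage:
print(normalize_per_frame(None, 3, global_value='sens_star.fits'))
# -> ['sens_star.fits', 'sens_star.fits', 'sens_star.fits']
print(normalize_per_frame(['a.fits', 'b.fits', 'c.fits'], 3))
# -> ['a.fits', 'b.fits', 'c.fits']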
@@ -456,6 +496,8 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offs # Initialise the lists of ra_offsets and dec_offsets self.ra_offsets = [0.0]*self.numfiles self.dec_offsets = [0.0]*self.numfiles + if self.grating_corr is None: + self.grating_corr = [None] * self.numfiles # Check on Spectrograph input if spectrograph is None: @@ -469,7 +511,7 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offs self.ifu_ra, self.ifu_dec = np.array([]), np.array([]) # The RA and Dec at the centre of the IFU, as stored in the header self.all_sci, self.all_ivar, self.all_wave, self.all_slitid, self.all_wghts = [], [], [], [], [] - self.all_tilts, self.all_slits, self.all_align = [], [], [] + self.all_tilts, self.all_slits, self.all_align, self.all_header = [], [], [], [] self.all_wcs, self.all_ra, self.all_dec, self.all_dar = [], [], [], [] self.weights = np.ones(self.numfiles) # Weights to use when combining cubes @@ -506,12 +548,10 @@ def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offs self.check_outputs() # Check the reference cube and image exist, if requested - self.fluxcal = False + self.fluxcal = False if self.sensfile is None else True self.blaze_wave, self.blaze_spec = None, None self.blaze_spline, self.flux_spline = None, None self.flat_splines = dict() # A dictionary containing the splines of the flatfield - if self.cubepar['standard_cube'] is not None: - self.make_sensfunc() # If a reference image has been set, check that it exists if self.cubepar['reference_image'] is not None: @@ -578,22 +618,6 @@ def set_blaze_spline(self, wave_spl, spec_spl): self.blaze_spline = interp1d(wave_spl, spec_spl, kind='linear', bounds_error=False, fill_value="extrapolate") - def make_sensfunc(self): - """ - Generate the sensitivity function to be used for the flux calibration. - """ - self.fluxcal = True - # The first standard star cube is used as the reference blaze spline - if self.cubepar['grating_corr']: - # Load the blaze information - stdcube = fits.open(self.cubepar['standard_cube']) - # If a reference blaze spline has not been set, do that now. - self.set_blaze_spline(stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data) - # Generate a spline representation of the sensitivity function - self.flux_spline = datacube.make_sensfunc(self.cubepar['standard_cube'], self.senspar, - blaze_wave=self.blaze_wave, blaze_spline=self.blaze_spline, - grating_corr=self.cubepar['grating_corr']) - def set_default_scalecorr(self): """ Set the default mode to use for relative spectral scale correction. @@ -814,8 +838,8 @@ def add_grating_corr(self, flatfile, waveimg, slits, spat_flexure=None): msgs.info("Calculating relative sensitivity for grating correction") # Load the Flat file flatimages = flatfield.FlatImages.from_file(flatfile, chk_version=self.chk_version) - total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', initial=True, spat_flexure=spat_flexure) * \ - flatimages.fit2illumflat(slits, finecorr=True, frametype='illum', initial=True, spat_flexure=spat_flexure) + total_illum = flatimages.fit2illumflat(slits, finecorr=False, frametype='illum', spat_flexure=spat_flexure) * \ + flatimages.fit2illumflat(slits, finecorr=True, frametype='illum', spat_flexure=spat_flexure) flatframe = flatimages.pixelflat_raw / total_illum if flatimages.pixelflat_spec_illum is None: # Calculate the relative scale @@ -877,12 +901,13 @@ class SlicerIFUCoAdd3D(CoAdd3D): - White light images are also produced, if requested. 
""" - def __init__(self, spec2dfiles, par, skysub_frame=None, scale_corr=None, ra_offsets=None, - dec_offsets=None, spectrograph=None, det=1, overwrite=False, show=False, - debug=False): - super().__init__(spec2dfiles, par, skysub_frame=skysub_frame, scale_corr=scale_corr, - ra_offsets=ra_offsets, dec_offsets=dec_offsets, spectrograph=spectrograph, - det=det, overwrite=overwrite, show=show, debug=debug) + def __init__(self, spec2dfiles, par, skysub_frame=None, sensfile=None, scale_corr=None, grating_corr=None, + ra_offsets=None, dec_offsets=None, spectrograph=None, det=1, + overwrite=False, show=False, debug=False): + super().__init__(spec2dfiles, par, skysub_frame=skysub_frame, sensfile=sensfile, + scale_corr=scale_corr, grating_corr=grating_corr, + ra_offsets=ra_offsets, dec_offsets=dec_offsets, spectrograph=spectrograph, det=det, + overwrite=overwrite, show=show, debug=debug) self.mnmx_wv = None # Will be used to store the minimum and maximum wavelengths of every slit and frame. self._spatscale = np.zeros((self.numfiles, 2)) # index 0, 1 = pixel scale, slicer scale self._specscale = np.zeros(self.numfiles) @@ -925,7 +950,7 @@ def get_alignments(self, spec2DObj, slits, spat_flexure=None): msgs.info("Using slit edges for astrometric transform") # If nothing better was provided, use the slit edges if alignments is None: - left, right, _ = slits.select_edges(initial=True, flexure=spat_flexure) + left, right, _ = slits.select_edges(flexure=spat_flexure) locations = [0.0, 1.0] traces = np.append(left[:, None, :], right[:, None, :], axis=1) else: @@ -972,7 +997,7 @@ def load(self): # Load all spec2d files and prepare the data for making a datacube for ff, fil in enumerate(self.spec2d): # Load it up - msgs.info("Loading PypeIt spec2d frame:" + msgs.newline() + fil) + msgs.info(f"Loading PypeIt spec2d frame ({ff+1}/{len(self.spec2d)}):" + msgs.newline() + fil) spec2DObj = spec2dobj.Spec2DObj.from_file(fil, self.detname, chk_version=self.chk_version) detector = spec2DObj.detector @@ -980,6 +1005,7 @@ def load(self): # Load the header hdr0 = spec2DObj.head0 + self.all_header.append(hdr0) self.ifu_ra = np.append(self.ifu_ra, self.spec.compound_meta([hdr0], 'ra')) self.ifu_dec = np.append(self.ifu_dec, self.spec.compound_meta([hdr0], 'dec')) @@ -989,8 +1015,8 @@ def load(self): # Initialise the slit edges msgs.info("Constructing slit image") slits = spec2DObj.slits - slitid_img_init = slits.slit_img(pad=0, initial=True, flexure=spat_flexure) - slits_left, slits_right, _ = slits.select_edges(initial=True, flexure=spat_flexure) + slitid_img = slits.slit_img(pad=0, flexure=spat_flexure) + slits_left, slits_right, _ = slits.select_edges(flexure=spat_flexure) # The order of operations below proceeds as follows: # (1) Get science image @@ -1015,7 +1041,7 @@ def load(self): bpmmask = spec2DObj.bpmmask # Mask the edges of the spectrum where the sky model is bad - sky_is_good = datacube.make_good_skymask(slitid_img_init, spec2DObj.tilts) + sky_is_good = datacube.make_good_skymask(slitid_img, spec2DObj.tilts) # TODO :: Really need to write some detailed information in the docs about all of the various corrections that can optionally be applied @@ -1048,7 +1074,7 @@ def load(self): if self.mnmx_wv is None: self.mnmx_wv = np.zeros((len(self.spec2d), slits.nslits, 2)) for slit_idx, slit_spat in enumerate(slits.spat_id): - onslit_init = (slitid_img_init == slit_spat) + onslit_init = (slitid_img == slit_spat) self.mnmx_wv[ff, slit_idx, 0] = np.min(waveimg[onslit_init]) self.mnmx_wv[ff, slit_idx, 1] = 
np.max(waveimg[onslit_init]) @@ -1071,7 +1097,7 @@ def load(self): # Construct a good pixel mask # TODO: This should use the mask function to figure out which elements are masked. - onslit_gpm = (slitid_img_init > 0) & (bpmmask.mask == 0) & sky_is_good + onslit_gpm = (slitid_img > 0) & (bpmmask.mask == 0) & sky_is_good # Generate the alignment splines, and then retrieve images of the RA and Dec of every pixel, # and the number of spatial pixels in each slit @@ -1083,7 +1109,7 @@ def load(self): crval_wv = self.cubepar['wave_min'] if self.cubepar['wave_min'] is not None else wave0 cd_wv = self.cubepar['wave_delta'] if self.cubepar['wave_delta'] is not None else dwv self.all_wcs.append(self.spec.get_wcs(spec2DObj.head0, slits, detector.platescale, crval_wv, cd_wv)) - ra_img, dec_img, minmax = slits.get_radec_image(self.all_wcs[ff], alignSplines, spec2DObj.tilts, initial=True, flexure=spat_flexure) + ra_img, dec_img, minmax = slits.get_radec_image(self.all_wcs[ff], alignSplines, spec2DObj.tilts, flexure=spat_flexure) # Extract wavelength and delta wavelength arrays from the images wave_ext = waveimg[onslit_gpm] @@ -1105,54 +1131,74 @@ def load(self): humidity = self.spec.get_meta_value([spec2DObj.head0], 'humidity') # Expressed as a percentage (not a fraction!) darcorr = DARcorrection(airmass, parangle, pressure, temperature, humidity, cosdec) - # Compute the extinction correction - msgs.info("Applying extinction correction") - extinct = flux_calib.load_extinction_data(self.spec.telescope['longitude'], - self.spec.telescope['latitude'], - self.senspar['UVIS']['extinct_file']) - # extinction_correction requires the wavelength is sorted - extcorr_sort = flux_calib.extinction_correction(wave_sort * units.AA, airmass, extinct) + # TODO :: Need to make a note somewhere that the extinction correction cannot currently be done + # in the datacube because the sensitivity function algorithms correct the standard star + # for extinction when generating the sensitivity function. Including the extinction correction + # in the datacube would result in a double correction of the standard star for extinction. + # This could be wrong when combining multiple standard star exposures if the airmass of the + # standard star exposures is significantly different. For now, to stay consistent with the + # current pipeline, the extinction correction is done in the sensitivity function algorithms, + # with the caveat that the standard star exposures are assumed to have similar airmasses. + # For now, comment out the extinction correction, and reapply this later when the sensitivity + # function algorithms are unified. + extcorr_sort = 1.0 + if False: + # Compute the extinction correction + msgs.info("Applying extinction correction") + # TODO :: Change the ['UVIS']['extinct_file'] here when the sensitivity function calculation is unified. 
+ extinct = flux_calib.load_extinction_data(self.spec.telescope['longitude'], + self.spec.telescope['latitude'], + self.senspar['UVIS']['extinct_file']) + # extinction_correction requires the wavelength is sorted + extcorr_sort = flux_calib.extinction_correction(wave_sort * units.AA, airmass, extinct) # Correct for sensitivity as a function of grating angle # (this assumes the spectrum of the flatfield lamp has the same shape for all setups) gratcorr_sort = 1.0 - if self.cubepar['grating_corr']: - # Load the flatfield file - key = flatfield.FlatImages.calib_type.upper() - if key not in spec2DObj.calibs: - msgs.error('Processed flat calibration file not recorded by spec2d file!') - flatfile = os.path.join(spec2DObj.calibs['DIR'], spec2DObj.calibs[key]) + if self.grating_corr[ff] is not None: # Setup the grating correction + flatfile = self.grating_corr[ff] self.add_grating_corr(flatfile, waveimg, slits, spat_flexure=spat_flexure) # Calculate the grating correction gratcorr_sort = datacube.correct_grating_shift(wave_sort, self.flat_splines[flatfile + "_wave"], self.flat_splines[flatfile], self.blaze_wave, self.blaze_spline) - # Sensitivity function - sensfunc_sort = 1.0 + # Sensitivity function - note that the sensitivity function factors in the exposure time and the + # wavelength sampling, so if the flux calibration will not be applied, the sens_factor needs to be + # scaled by the exposure time and the wavelength sampling + sens_sort = 1.0/(exptime * dwav_sort) # If no sensitivity function is provided if self.fluxcal: msgs.info("Calculating the sensitivity function") - sensfunc_sort = self.flux_spline(wave_sort) - # Convert the flux_sav to counts/s, correct for the relative sensitivity of different setups - extcorr_sort *= sensfunc_sort / (exptime * gratcorr_sort) + # Load the sensitivity function + sens = sensfunc.SensFunc.from_file(self.sensfile[ff], chk_version=self.par['rdx']['chk_version']) + # Interpolate the sensitivity function onto the wavelength grid of the data + # TODO :: Change the ['UVIS']['extinct_file'] here when the sensitivity function calculation is unified. 
+ sens_sort = flux_calib.get_sensfunc_factor( + wave_sort, sens.wave[:, 0], sens.zeropoint[:, 0], exptime, delta_wave=dwav_sort, + extinct_correct=True, longitude=self.spec.telescope['longitude'], + latitude=self.spec.telescope['latitude'], extinctfilepar=self.senspar['UVIS']['extinct_file'], + airmass=airmass, extrap_sens=self.par['fluxcalib']['extrap_sens']) + # Convert the flux units to counts/s, and correct for the relative sensitivity of different setups + sens_sort *= extcorr_sort/gratcorr_sort # Correct for extinction - sciImg[onslit_gpm] *= extcorr_sort[resrt] - ivar[onslit_gpm] /= extcorr_sort[resrt] ** 2 + sciImg[onslit_gpm] *= sens_sort[resrt] + ivar[onslit_gpm] /= sens_sort[resrt] ** 2 # Convert units to Counts/s/Ang/arcsec2 # Slicer sampling * spatial pixel sampling + unitscale = self.all_wcs[ff].wcs.cunit[0].to(units.arcsec) * self.all_wcs[ff].wcs.cunit[1].to(units.arcsec) sl_deg = np.sqrt(self.all_wcs[ff].wcs.cd[0, 0] ** 2 + self.all_wcs[ff].wcs.cd[1, 0] ** 2) px_deg = np.sqrt(self.all_wcs[ff].wcs.cd[1, 1] ** 2 + self.all_wcs[ff].wcs.cd[0, 1] ** 2) - scl_units = dwav_sort * (3600.0 * sl_deg) * (3600.0 * px_deg) - sciImg[onslit_gpm] /= scl_units[resrt] - ivar[onslit_gpm] *= scl_units[resrt] ** 2 + scl_units = unitscale * sl_deg * px_deg + sciImg[onslit_gpm] /= scl_units + ivar[onslit_gpm] *= scl_units ** 2 # Calculate the weights relative to the zeroth cube self.weights[ff] = 1.0 # exptime #np.median(flux_sav[resrt]*np.sqrt(ivar_sav[resrt]))**2 wghts = self.weights[ff] * np.ones(sciImg.shape) # Get the slit image and then unset pixels in the slit image that are bad - slitid_img_gpm = slitid_img_init * onslit_gpm.astype(int) + slitid_img_gpm = slitid_img * onslit_gpm.astype(int) # If individual frames are to be output without aligning them, # there's no need to store information, just make the cubes now @@ -1163,7 +1209,7 @@ def load(self): else: outfile = datacube.get_output_filename(fil, self.cubepar['output_filename'], self.combine, ff + 1) # Get the coordinate bounds - slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True)))) + slitlength = int(np.round(np.median(slits.get_slitlengths(median=True)))) numwav = int((np.max(waveimg) - wave0) / dwv) bins = self.spec.get_datacube_bins(slitlength, minmax, numwav) # Set the wavelength range of the white light image. 
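Editor's aside: the conversion to counts/s/Ang/arcsec2 in the hunk above reduces to dividing by the solid angle each spaxel subtends, derived from the spatial columns of the WCS CD matrix. Below is a minimal sketch of that arithmetic, assuming the WCS spatial units are degrees (so the `cunit.to(arcsec)` factor in the patch is 3600); it is an illustration of the unit bookkeeping, not code from the patch.

import numpy as np

# Sketch of the spaxel solid-angle term: the slicer scale comes from the first
# CD-matrix column and the spatial pixel scale from the second; their product
# (in arcsec^2) is the area each spaxel subtends on the sky.
def spaxel_area_arcsec2(cd_deg):
    """cd_deg: 2x2 spatial block of the WCS CD matrix, in degrees per pixel."""
    sl_deg = np.hypot(cd_deg[0, 0], cd_deg[1, 0])  # slicer sampling (deg)
    px_deg = np.hypot(cd_deg[0, 1], cd_deg[1, 1])  # spatial pixel sampling (deg)
    return (3600.0 * sl_deg) * (3600.0 * px_deg)

# Example: a 0.2" x 0.05" spaxel with the slicer axis aligned to the first axis
cd = np.array([[0.2 / 3600.0, 0.0],
               [0.0, 0.05 / 3600.0]])
print(spaxel_area_arcsec2(cd))  # -> 0.01 arcsec^2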
@@ -1195,7 +1241,7 @@ def load(self): msgs.info("Saving datacube as: {0:s}".format(outfile)) final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec, sensfunc=None, fluxed=self.fluxcal) - final_cube.to_file(outfile, hdr=hdr, overwrite=self.overwrite) + final_cube.to_file(outfile, primary_hdr=self.all_header[ff], hdr=hdr, overwrite=self.overwrite) # No need to proceed and store arrays - we are writing individual datacubes continue @@ -1404,7 +1450,8 @@ def run(self): msgs.info("Saving datacube as: {0:s}".format(outfile)) final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec, sensfunc=sensfunc, fluxed=self.fluxcal) - final_cube.to_file(outfile, hdr=hdr, overwrite=self.overwrite) + # Note, we only store in the primary header the first spec2d file + final_cube.to_file(outfile, primary_hdr=self.all_header[0], hdr=hdr, overwrite=self.overwrite) else: for ff in range(self.numfiles): outfile = datacube.get_output_filename("", self.cubepar['output_filename'], False, ff) @@ -1431,4 +1478,4 @@ def run(self): msgs.info("Saving datacube as: {0:s}".format(outfile)) final_cube = DataCube(flxcube, sigcube, bpmcube, wave, self.specname, self.blaze_wave, self.blaze_spec, sensfunc=sensfunc, fluxed=self.fluxcal) - final_cube.to_file(outfile, hdr=hdr, overwrite=self.overwrite) + final_cube.to_file(outfile, primary_hdr=self.all_header[ff], hdr=hdr, overwrite=self.overwrite) diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index c87740239e..9288e06695 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -14,9 +14,8 @@ from scipy.interpolate import interp1d import numpy as np -from pypeit import msgs -from pypeit import utils -from pypeit.core import coadd, flux_calib +from pypeit import msgs, utils, specobj, specobjs +from pypeit.core import coadd, extract, flux_calib # Use a fast histogram for speed! 
from fast_histogram import histogramdd @@ -35,13 +34,13 @@ def gaussian2D(tup, intflux, xo, yo, sigma_x, sigma_y, theta, offset): intflux (float): The Integrated flux of the 2D Gaussian xo (float): - The centre of the Gaussian along the x-coordinate when z=0 + The centre of the Gaussian along the x-coordinate when z=0 (units of pixels) yo (float): - The centre of the Gaussian along the y-coordinate when z=0 + The centre of the Gaussian along the y-coordinate when z=0 (units of pixels) sigma_x (float): - The standard deviation in the x-direction + The standard deviation in the x-direction (units of pixels) sigma_y (float): - The standard deviation in the y-direction + The standard deviation in the y-direction (units of pixels) theta (float): The orientation angle of the 2D Gaussian offset (float): @@ -97,16 +96,19 @@ def fitGaussian2D(image, norm=False): x = np.linspace(0, image.shape[0] - 1, image.shape[0]) y = np.linspace(0, image.shape[1] - 1, image.shape[1]) xx, yy = np.meshgrid(x, y, indexing='ij') - # Setup the fitting params - idx_max = [image.shape[0]/2, image.shape[1]/2] # Just use the centre of the image as the best guess - #idx_max = np.unravel_index(np.argmax(image), image.shape) + # Setup the fitting params - Estimate a starting point for the fit using a median filter + med_filt_image = signal.medfilt2d(image, kernel_size=3) + idx_max = np.unravel_index(np.argmax(med_filt_image), image.shape) initial_guess = (1, idx_max[0], idx_max[1], 2, 2, 0, 0) bounds = ([0, 0, 0, 0.5, 0.5, -np.pi, -np.inf], [np.inf, image.shape[0], image.shape[1], image.shape[0], image.shape[1], np.pi, np.inf]) # Perform the fit + # TODO :: May want to generate the image on a finer pixel scale first popt, pcov = opt.curve_fit(gaussian2D, (xx, yy), image.ravel() / wlscl, bounds=bounds, p0=initial_guess) + # Generate a best fit model + model = gaussian2D((xx, yy), *popt).reshape(image.shape) * wlscl # Return the fitting results - return popt, pcov + return popt, pcov, model def dar_fitfunc(radec, coord_ra, coord_dec, datfit, wave, obstime, location, pressure, @@ -187,76 +189,119 @@ def correct_grating_shift(wave_eval, wave_curr, spl_curr, wave_ref, spl_ref, ord return grat_corr -def extract_standard_spec(stdcube, subpixel=20): +def extract_point_source(wave, flxcube, ivarcube, bpmcube, wcscube, exptime, + subpixel=20, boxcar_radius=None, optfwhm=None, whitelight_range=None, + pypeline="SlicerIFU", fluxed=False): """ Extract a spectrum of a standard star from a datacube Parameters ---------- - std_cube : `astropy.io.fits.HDUList`_ - An HDU list of fits files - subpixel : int + wave : `numpy.ndarray`_ + Wavelength array for the datacube + flxcube : `numpy.ndarray`_ + Datacube of the flux + ivarcube : `numpy.ndarray`_ + Datacube of the inverse variance + bpmcube : `numpy.ndarray`_ + Datacube of the bad pixel mask + wcscube : `astropy.wcs.WCS`_ + WCS of the datacube + exptime : float + Exposure time listed in the header of the datacube + subpixel : int, optional Number of pixels to subpixelate spectrum when creating mask + boxcar_radius : float, optional + Radius of the circular boxcar (in arcseconds) to use for the extraction + optfwhm : float, optional + FWHM of the PSF in pixels that is used to generate a Gaussian profile + for the optimal extraction. + pypeline : str, optional + PypeIt pipeline used to reduce the datacube + fluxed : bool, optional + Is the datacube fluxed? Returns ------- - wave : `numpy.ndarray`_ - Wavelength of the star. 
- Nlam_star : `numpy.ndarray`_ - counts/second/Angstrom - Nlam_ivar_star : `numpy.ndarray`_ - inverse variance of Nlam_star - gpm_star : `numpy.ndarray`_ - good pixel mask for Nlam_star + sobjs : `pypeit.specobjs.SpecObjs`_ + SpecObjs object containing the extracted spectrum """ - # Extract some information from the HDU list - flxcube = stdcube['FLUX'].data.T.copy() - varcube = stdcube['SIG'].data.T.copy()**2 - bpmcube = stdcube['BPM'].data.T.copy() - numwave = flxcube.shape[2] + if whitelight_range is None: + whitelight_range = [np.min(wave), np.max(wave)] + + # Generate a spec1d object to hold the extracted spectrum + msgs.info("Initialising a PypeIt SpecObj spec1d file") + sobj = specobj.SpecObj(pypeline, "DET01", SLITID=0) + sobj.RA = wcscube.wcs.crval[0] + sobj.DEC = wcscube.wcs.crval[1] + sobj.SLITID = 0 + + # Convert from counts/s/Ang/arcsec**2 to counts. The sensitivity function expects counts as input + numxx, numyy, numwave = flxcube.shape + arcsecSQ = (wcscube.wcs.cdelt[0] * wcscube.wcs.cunit[0].to(units.arcsec)) * \ + (wcscube.wcs.cdelt[1] * wcscube.wcs.cunit[1].to(units.arcsec)) + if fluxed: + # The datacube is flux calibrated, in units of 10^-17 erg/s/cm**2/Ang/arcsec**2 + # Scale the flux and ivar cubes to be in units of erg/s/cm**2/Ang + unitscale = arcsecSQ + else: + # Scale the flux and ivar cubes to be in units of counts. pypeit_sensfunc expects counts as input + deltawave = wcscube.wcs.cdelt[2]*wcscube.wcs.cunit[2].to(units.Angstrom) + unitscale = exptime * deltawave * arcsecSQ - # Setup the WCS - stdwcs = wcs.WCS(stdcube['FLUX'].header) + # Apply the relevant scaling + _flxcube = flxcube * unitscale + _ivarcube = ivarcube / unitscale**2 - wcs_scale = (1.0 * stdwcs.spectral.wcs.cunit[0]).to(units.Angstrom).value # Ensures the WCS is in Angstroms - wave = wcs_scale * stdwcs.spectral.wcs_pix2world(np.arange(numwave), 0)[0] + # Calculate the variance cube + _varcube = utils.inverse(_ivarcube) # Generate a whitelight image, and fit a 2D Gaussian to estimate centroid and width - wl_img = make_whitelight_fromcube(flxcube) - popt, pcov = fitGaussian2D(wl_img, norm=True) - wid = max(popt[3], popt[4]) + msgs.info("Making white light image") + wl_img = make_whitelight_fromcube(_flxcube, bpmcube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) + popt, pcov, model = fitGaussian2D(wl_img, norm=True) + if boxcar_radius is None: + nsig = 4 # 4 sigma should be far enough... Note: percentage enclosed for 2D Gaussian = 1-np.exp(-0.5 * nsig**2) + wid = nsig * max(popt[3], popt[4]) + else: + # Set the user-defined radius + wid = boxcar_radius / np.sqrt(arcsecSQ) + # Set the radius of the extraction boxcar for the sky determination + msgs.info("Using a boxcar radius of {:0.2f} arcsec".format(wid*np.sqrt(arcsecSQ))) + widsky = 2 * wid # Setup the coordinates of the mask - x = np.linspace(0, flxcube.shape[0] - 1, flxcube.shape[0] * subpixel) - y = np.linspace(0, flxcube.shape[1] - 1, flxcube.shape[1] * subpixel) + x = np.linspace(0, numxx - 1, numxx * subpixel) + y = np.linspace(0, numyy - 1, numyy * subpixel) xx, yy = np.meshgrid(x, y, indexing='ij') # Generate a mask - newshape = (flxcube.shape[0] * subpixel, flxcube.shape[1] * subpixel) + msgs.info("Generating an object mask") + newshape = (numxx * subpixel, numyy * subpixel) mask = np.zeros(newshape) - nsig = 4 # 4 sigma should be far enough... 
Note: percentage enclosed for 2D Gaussian = 1-np.exp(-0.5 * nsig**2) - ww = np.where((np.sqrt((xx - popt[1]) ** 2 + (yy - popt[2]) ** 2) < nsig * wid)) + ww = np.where((np.sqrt((xx - popt[1]) ** 2 + (yy - popt[2]) ** 2) < wid)) mask[ww] = 1 - mask = utils.rebinND(mask, (flxcube.shape[0], flxcube.shape[1])).reshape(flxcube.shape[0], flxcube.shape[1], 1) + mask = utils.rebinND(mask, (numxx, numyy)).reshape(numxx, numyy, 1) # Generate a sky mask - newshape = (flxcube.shape[0] * subpixel, flxcube.shape[1] * subpixel) + msgs.info("Generating a sky mask") + newshape = (numxx * subpixel, numyy * subpixel) smask = np.zeros(newshape) - nsig = 8 # 8 sigma should be far enough - ww = np.where((np.sqrt((xx - popt[1]) ** 2 + (yy - popt[2]) ** 2) < nsig * wid)) + ww = np.where((np.sqrt((xx - popt[1]) ** 2 + (yy - popt[2]) ** 2) < widsky)) smask[ww] = 1 - smask = utils.rebinND(smask, (flxcube.shape[0], flxcube.shape[1])).reshape(flxcube.shape[0], flxcube.shape[1], 1) + smask = utils.rebinND(smask, (numxx, numyy)).reshape(numxx, numyy, 1) + # Subtract off the object mask region, so that we just have an annulus around the object smask -= mask - # Subtract the residual sky + msgs.info("Subtracting the residual sky") + # Subtract the residual sky from the datacube skymask = np.logical_not(bpmcube) * smask - skycube = flxcube * skymask - skyspec = skycube.sum(0).sum(0) - nrmsky = skymask.sum(0).sum(0) + skycube = _flxcube * skymask + skyspec = skycube.sum(axis=(0,1)) + nrmsky = skymask.sum(axis=(0,1)) skyspec *= utils.inverse(nrmsky) - flxcube -= skyspec.reshape((1, 1, numwave)) - - # Subtract the residual sky from the whitelight image + _flxcube -= skyspec.reshape((1, 1, numwave)) + # Now subtract the residual sky from the white light image sky_val = np.sum(wl_img[:, :, np.newaxis] * smask) / np.sum(smask) wl_img -= sky_val @@ -267,88 +312,102 @@ def extract_standard_spec(stdcube, subpixel=20): norm_flux /= np.sum(norm_flux) # Extract boxcar cntmask = np.logical_not(bpmcube) * mask # Good pixels within the masked region around the standard star - flxscl = (norm_flux * cntmask).sum(0).sum(0) # This accounts for the flux that is missing due to masked pixels - scimask = flxcube * cntmask - varmask = varcube * cntmask**2 + flxscl = (norm_flux * cntmask).sum(axis=(0,1)) # This accounts for the flux that is missing due to masked pixels + scimask = _flxcube * cntmask + varmask = _varcube * cntmask**2 nrmcnt = utils.inverse(flxscl) - box_flux = scimask.sum(0).sum(0) * nrmcnt - box_var = varmask.sum(0).sum(0) * nrmcnt**2 + box_flux = scimask.sum(axis=(0,1)) * nrmcnt + box_var = varmask.sum(axis=(0,1)) * nrmcnt**2 box_gpm = flxscl > 1/3 # Good pixels are those where at least one-third of the standard star flux is measured - # Setup the return values - ret_flux, ret_var, ret_gpm = box_flux, box_var, box_gpm - - # Convert from counts/s/Ang/arcsec**2 to counts/s/Ang - arcsecSQ = 3600.0*3600.0*(stdwcs.wcs.cdelt[0]*stdwcs.wcs.cdelt[1]) - ret_flux *= arcsecSQ - ret_var *= arcsecSQ**2 - # Return the box extraction results - return wave, ret_flux, utils.inverse(ret_var), ret_gpm - -def make_sensfunc(ss_file, senspar, blaze_wave=None, blaze_spline=None, grating_corr=False): - """ - Generate the sensitivity function from a standard star DataCube. - - Args: - ss_file (:obj:`str`): - The relative path and filename of the standard star datacube. It - should be fits format, and for full functionality, should ideally of - the form :class:`~pypeit.coadd3d.DataCube`. 
- senspar (:class:`~pypeit.par.pypeitpar.SensFuncPar`): - The parameters required for the sensitivity function computation. - blaze_wave (`numpy.ndarray`_, optional): - Wavelength array used to construct blaze_spline - blaze_spline (`scipy.interpolate.interp1d`_, optional): - Spline representation of the reference blaze function (based on the illumflat). - grating_corr (:obj:`bool`, optional): - If a grating correction should be performed, set this variable to True. - - Returns: - `numpy.ndarray`_: A mask of the good sky pixels (True = good) - """ - # Check if the standard star datacube exists - if not os.path.exists(ss_file): - msgs.error("Standard cube does not exist:" + msgs.newline() + ss_file) - msgs.info(f"Loading standard star cube: {ss_file:s}") - # Load the standard star cube and retrieve its RA + DEC - stdcube = fits.open(ss_file) - star_ra, star_dec = stdcube[1].header['CRVAL1'], stdcube[1].header['CRVAL2'] - - # Extract a spectrum of the standard star - wave, Nlam_star, Nlam_ivar_star, gpm_star = extract_standard_spec(stdcube) - - # Extract the information about the blaze - if grating_corr: - blaze_wave_curr, blaze_spec_curr = stdcube['BLAZE_WAVE'].data, stdcube['BLAZE_SPEC'].data - blaze_spline_curr = interp1d(blaze_wave_curr, blaze_spec_curr, - kind='linear', bounds_error=False, fill_value="extrapolate") - # Perform a grating correction - grat_corr = correct_grating_shift(wave, blaze_wave_curr, blaze_spline_curr, blaze_wave, blaze_spline) - # Apply the grating correction to the standard star spectrum - Nlam_star /= grat_corr - Nlam_ivar_star *= grat_corr ** 2 - - # Read in some information above the standard star - std_dict = flux_calib.get_standard_spectrum(star_type=senspar['star_type'], - star_mag=senspar['star_mag'], - ra=star_ra, dec=star_dec) - # Calculate the sensitivity curve - # TODO :: This needs to be addressed... unify flux calibration into the main PypeIt routines. - msgs.warn("Datacubes are currently flux-calibrated using the UVIS algorithm... this will be deprecated soon") - zeropoint_data, zeropoint_data_gpm, zeropoint_fit, zeropoint_fit_gpm = \ - flux_calib.fit_zeropoint(wave, Nlam_star, Nlam_ivar_star, gpm_star, std_dict, - mask_hydrogen_lines=senspar['mask_hydrogen_lines'], - mask_helium_lines=senspar['mask_helium_lines'], - hydrogen_mask_wid=senspar['hydrogen_mask_wid'], - nresln=senspar['UVIS']['nresln'], - resolution=senspar['UVIS']['resolution'], - trans_thresh=senspar['UVIS']['trans_thresh'], - polyorder=senspar['polyorder'], - polycorrect=senspar['UVIS']['polycorrect'], - polyfunc=senspar['UVIS']['polyfunc']) - wgd = np.where(zeropoint_fit_gpm) - sens = np.power(10.0, -0.4 * (zeropoint_fit[wgd] - flux_calib.ZP_UNIT_CONST)) / np.square(wave[wgd]) - return interp1d(wave[wgd], sens, kind='linear', bounds_error=False, fill_value="extrapolate") + # Store the BOXCAR extraction information + sobj.BOX_RADIUS = wid # Size of boxcar radius in pixels + sobj.BOX_WAVE = wave.astype(float) + if fluxed: + sobj.BOX_FLAM = box_flux + sobj.BOX_FLAM_SIG = np.sqrt(box_var) + sobj.BOX_FLAM_IVAR = utils.inverse(box_var) + else: + sobj.BOX_COUNTS = box_flux + sobj.BOX_COUNTS_SIG = np.sqrt(box_var) + sobj.BOX_COUNTS_IVAR = utils.inverse(box_var) + sobj.BOX_COUNTS_SKY = skyspec # This is not the real sky, it is the residual sky. 
The datacube is presumed to be sky subtracted + sobj.BOX_MASK = box_gpm + sobj.S2N = np.median(box_flux * np.sqrt(utils.inverse(box_var))) + + # Now do the OPTIMAL extraction + msgs.info("Extracting an optimal spectrum of datacube") + # First, we need to rearrange the datacube and inverse variance cube into a 2D array. + # The 3D -> 2D conversion is done so that there is a spectral and spatial dimension, + # and the brightest white light pixel is transformed to be at the centre column of the 2D + # array. Then, the second brightest white light pixel is transformed to be next to the centre + # column of the 2D array, and so on. This is done so that the optimal extraction algorithm + # can be applied. + optkern = wl_img + if optfwhm is not None: + msgs.info("Generating a 2D Gaussian kernel for the optimal extraction, with FWHM = {:.2f} pixels".format(optfwhm)) + x = np.linspace(0, wl_img.shape[0] - 1, wl_img.shape[0]) + y = np.linspace(0, wl_img.shape[1] - 1, wl_img.shape[1]) + xx, yy = np.meshgrid(x, y, indexing='ij') + # Generate a Gaussian kernel + intflux = 1 + xo, yo = popt[1], popt[2] + fwhm2sigma = 1.0 / (2 * np.sqrt(2 * np.log(2))) + sigma_x, sigma_y = optfwhm*fwhm2sigma, optfwhm*fwhm2sigma + theta, offset, = 0.0, 0.0 + optkern = gaussian2D((xx, yy), intflux, xo, yo, sigma_x, sigma_y, theta, offset).reshape(wl_img.shape) + # Normalise the kernel + optkern /= np.sum(optkern) + + optkern_masked = optkern * mask[:,:,0] + # Normalise the white light image + optkern_masked /= np.sum(optkern_masked) + asrt = np.argsort(optkern_masked, axis=None) + # Need to ensure that the number of pixels in the object profile is even + if asrt.size % 2 != 0: + # Remove the pixel with the lowest kernel weight. + # It should be a zero value (given the mask), so it doesn't matter if we remove it + asrt = asrt[1:] + # Now sort the indices of the pixels in the object profile + tmp = asrt.reshape((asrt.size//2, 2)) + objprof_idx = np.append(tmp[:,0], tmp[::-1,1]) + objprof = optkern_masked[np.unravel_index(objprof_idx, optkern.shape)] + + # Now slice the datacube and inverse variance cube into a 2D array + spat, spec = np.meshgrid(objprof_idx, np.arange(numwave), indexing='ij') + spatspl = np.apply_along_axis(np.unravel_index, 1, spat, optkern.shape) + # Now slice the datacube and corresponding cubes/vectors into a series of 2D arrays + numspat = objprof_idx.size + flxslice = (spatspl[:,0,:], spatspl[:,1,:], spec) + flxcube2d = _flxcube[flxslice].T + ivarcube2d = _ivarcube[flxslice].T + gpmcube2d = np.logical_not(bpmcube[flxslice].T) + waveimg = wave.reshape((numwave,1)).repeat(numspat, axis=1) + skyimg = np.zeros((numwave, numspat)) # Note, the residual sky has already been subtracted off _flxcube + oprof = objprof.reshape((1,numspat)).repeat(numwave, axis=0) + thismask = np.ones_like(flxcube2d, dtype=bool) + + # Now do the optimal extraction + extract.extract_optimal(flxcube2d, ivarcube2d, gpmcube2d, waveimg, skyimg, thismask, oprof, + sobj, min_frac_use=0.05, fwhmimg=None, base_var=None, count_scale=None, noise_floor=None) + + # TODO :: The optimal extraction may suffer from residual DAR correction issues. This is because the + # :: object profile assumes that the white light image represents the true spatial profile of the + # :: object. One possibility is to fit a (linear?) model to the ratio of box/optimal extraction + # :: and then apply this model to the optimal extraction. This is a bit of a fudge. 
+ # Note that extract.extract_optimal() stores the optimal extraction in the + # sobj.OPT_COUNTS, sobj.OPT_COUNTS_SIG, and sobj.OPT_COUNTS_IVAR attributes. + # We need to store the fluxed extraction into the FLAM attributes (a slight fudge). + if fluxed: + sobj.OPT_FLAM = sobj.OPT_COUNTS + sobj.OPT_FLAM_SIG = sobj.OPT_COUNTS_SIG + sobj.OPT_FLAM_IVAR = sobj.OPT_COUNTS_IVAR + + # Make a specobjs object + sobjs = specobjs.SpecObjs() + sobjs.add_sobj(sobj) + # Return the specobj object + return sobjs def make_good_skymask(slitimg, tilts): @@ -537,13 +596,16 @@ def get_whitelight_range(wavemin, wavemax, wl_range): return wlrng -def make_whitelight_fromcube(cube, wave=None, wavemin=None, wavemax=None): +def make_whitelight_fromcube(cube, bpmcube, wave=None, wavemin=None, wavemax=None): """ Generate a white light image using an input cube. Args: cube (`numpy.ndarray`_): 3D datacube (the final element contains the wavelength dimension) + bpmcube (`numpy.ndarray`_): + 3D bad pixel mask cube (the final element contains the wavelength dimension). + A value of 1 indicates a bad pixel. wave (`numpy.ndarray`_, optional): 1D wavelength array. Only required if wavemin or wavemax are not None. @@ -560,7 +622,6 @@ def make_whitelight_fromcube(cube, wave=None, wavemin=None, wavemax=None): A whitelight image of the input cube (of type `numpy.ndarray`_). """ # Make a wavelength cut, if requested - cutcube = cube.copy() if wavemin is not None or wavemax is not None: # Make some checks on the input if wave is None: @@ -576,10 +637,15 @@ def make_whitelight_fromcube(cube, wave=None, wavemin=None, wavemax=None): ww = np.where((wave >= wavemin) & (wave <= wavemax))[0] wmin, wmax = ww[0], ww[-1]+1 cutcube = cube[:, :, wmin:wmax] + # Cut the bad pixel mask and convert it to a good pixel mask + cutgpmcube = np.logical_not(bpmcube[:, :, wmin:wmax]) + else: + cutcube = cube.copy() + cutgpmcube = np.logical_not(bpmcube) # Now sum along the wavelength axis - nrmval = np.sum(cutcube != 0.0, axis=2) - nrmval[nrmval == 0.0] = 1.0 - wl_img = np.sum(cutcube, axis=2) / nrmval + nrmval = np.sum(cutgpmcube, axis=2) + nrmval[nrmval == 0] = 1.0 + wl_img = np.sum(cutcube*cutgpmcube, axis=2) / nrmval return wl_img @@ -1117,7 +1183,7 @@ def compute_weights_frompix(raImg, decImg, waveImg, sciImg, ivarImg, slitidImg, # Compute the weights return compute_weights(raImg, decImg, waveImg, sciImg, ivarImg, slitidImg, all_wcs, all_tilts, all_slits, all_align, all_dar, ra_offsets, dec_offsets, - wl_full[:, :, 0], dspat, dwv, + wl_full, dspat, dwv, ra_min=ra_min, ra_max=ra_max, dec_min=dec_min, dec_max=dec_max, wave_min=wave_min, sn_smooth_npix=sn_smooth_npix, weight_method=weight_method, correct_dar=correct_dar) @@ -1367,36 +1433,39 @@ def generate_image_subpixel(image_wcs, bins, sciImg, ivarImg, waveImg, slitid_im correction will not be applied. Returns: - `numpy.ndarray`_: The white light images for all frames + `numpy.ndarray`_: The white light images for all frames. If combine=True, + this will be a single 2D image. Otherwise, it will be a 3D array with + dimensions (numra, numdec, numframes). 
""" # Perform some checks on the input -- note, more complete checks are performed in subpixellate() _sciImg, _ivarImg, _waveImg, _slitid_img_gpm, _wghtImg, _all_wcs, _tilts, _slits, _astrom_trans, _all_dar, _ra_offset, _dec_offset = \ check_inputs([sciImg, ivarImg, waveImg, slitid_img_gpm, wghtImg, all_wcs, tilts, slits, astrom_trans, all_dar, ra_offset, dec_offset]) - numframes = len(_sciImg) - - # Prepare the array of white light images to be stored - numra = bins[0].size-1 - numdec = bins[1].size-1 - all_wl_imgs = np.zeros((numra, numdec, numframes)) - # Loop through all frames and generate white light images - for fr in range(numframes): - msgs.info(f"Creating image {fr+1}/{numframes}") - if combine: - # Subpixellate - img, _, _ = subpixellate(image_wcs, bins, _sciImg, _ivarImg, _waveImg, _slitid_img_gpm, _wghtImg, - _all_wcs, _tilts, _slits, _astrom_trans, _all_dar, _ra_offset, _dec_offset, - spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, slice_subpixel=slice_subpixel, - skip_subpix_weights=True, correct_dar=correct_dar) - else: + # Generate the white light images + if combine: + # Subpixellate + img, _, _ = subpixellate(image_wcs, bins, _sciImg, _ivarImg, _waveImg, _slitid_img_gpm, _wghtImg, + _all_wcs, _tilts, _slits, _astrom_trans, _all_dar, _ra_offset, _dec_offset, + spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, slice_subpixel=slice_subpixel, + skip_subpix_weights=True, correct_dar=correct_dar) + return img[:, :, 0] + else: + # Prepare the array of white light images to be stored + numframes = len(_sciImg) + numra = bins[0].size - 1 + numdec = bins[1].size - 1 + all_wl_imgs = np.zeros((numra, numdec, numframes)) + # Loop through all frames and generate white light images + for fr in range(numframes): + msgs.info(f"Creating image {fr + 1}/{numframes}") # Subpixellate img, _, _ = subpixellate(image_wcs, bins, _sciImg[fr], _ivarImg[fr], _waveImg[fr], _slitid_img_gpm[fr], _wghtImg[fr], _all_wcs[fr], _tilts[fr], _slits[fr], _astrom_trans[fr], _all_dar[fr], _ra_offset[fr], _dec_offset[fr], spec_subpixel=spec_subpixel, spat_subpixel=spat_subpixel, slice_subpixel=slice_subpixel, skip_subpix_weights=True, correct_dar=correct_dar) - all_wl_imgs[:, :, fr] = img[:, :, 0] - # Return the constructed white light images - return all_wl_imgs + all_wl_imgs[:, :, fr] = img[:, :, 0] + # Return the constructed white light images + return all_wl_imgs def generate_cube_subpixel(output_wcs, bins, sciImg, ivarImg, waveImg, slitid_img_gpm, wghtImg, @@ -1532,7 +1601,7 @@ def generate_cube_subpixel(output_wcs, bins, sciImg, ivarImg, waveImg, slitid_im whitelight_range[0], whitelight_range[1])) # Get the output filename for the white light image out_whitelight = get_output_whitelight_filename(outfile) - whitelight_img = make_whitelight_fromcube(flxcube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) + whitelight_img = make_whitelight_fromcube(flxcube, bpmcube, wave=wave, wavemin=whitelight_range[0], wavemax=whitelight_range[1]) msgs.info("Saving white light image as: {0:s}".format(out_whitelight)) img_hdu = fits.PrimaryHDU(whitelight_img.T, header=whitelight_wcs.to_header()) img_hdu.writeto(out_whitelight, overwrite=overwrite) @@ -1636,8 +1705,7 @@ def subpixellate(output_wcs, bins, sciImg, ivarImg, waveImg, slitid_img_gpm, wgh Returns: :obj:`tuple`: Three or four `numpy.ndarray`_ objects containing (1) the datacube generated from the subpixellated inputs, (2) the corresponding - variance cube, (3) the corresponding bad pixel mask cube, and (4) the - residual 
cube. The latter is only returned if debug is True. + variance cube, and (3) the corresponding bad pixel mask cube. """ # Check the inputs for combinations of lists or not _sciImg, _ivarImg, _waveImg, _gpmImg, _wghtImg, _all_wcs, _tilts, _slits, _astrom_trans, _all_dar, _ra_offset, _dec_offset = \ diff --git a/pypeit/core/extract.py b/pypeit/core/extract.py index 4fc2532354..5e48c01567 100644 --- a/pypeit/core/extract.py +++ b/pypeit/core/extract.py @@ -443,7 +443,7 @@ def extract_boxcar(imgminsky, ivar, mask, waveimg, skyimg, spec, fwhmimg=None, b spec.BOX_WAVE = wave_box spec.BOX_COUNTS = flux_box*mask_box spec.BOX_COUNTS_IVAR = ivar_box*mask_box*np.logical_not(bad_box) - spec.BOX_COUNTS_SIG = np.sqrt(utils.inverse( spec.BOX_COUNTS_IVAR)) + spec.BOX_COUNTS_SIG = np.sqrt(utils.inverse(spec.BOX_COUNTS_IVAR)) spec.BOX_COUNTS_NIVAR = None if nivar_box is None else nivar_box*mask_box*np.logical_not(bad_box) spec.BOX_MASK = mask_box*np.logical_not(bad_box) spec.BOX_FWHM = fwhm_box # Spectral FWHM (in Angstroms) for the boxcar extracted spectrum diff --git a/pypeit/core/flat.py b/pypeit/core/flat.py index 4a88d3d5a0..bac66b4c9a 100644 --- a/pypeit/core/flat.py +++ b/pypeit/core/flat.py @@ -16,44 +16,8 @@ from IPython import embed from pypeit import msgs -from pypeit.core import parse -from pypeit.core import pixels -from pypeit.core import tracewave from pypeit.core import coadd from pypeit import utils -from pypeit.core import pydl - -# TODO: Put this in utils -def linear_interpolate(x1, y1, x2, y2, x): - r""" - Interplate or extrapolate between two points. - - Given a line defined two points, :math:`(x_1,y_1)` and - :math:`(x_2,y_2)`, return the :math:`y` value of a new point on - the line at coordinate :math:`x`. - - This function is meant for speed. No type checking is performed and - the only check is that the two provided ordinate coordinates are not - numerically identical. By definition, the function will extrapolate - without any warning. - - Args: - x1 (:obj:`float`): - First abscissa position - y1 (:obj:`float`): - First ordinate position - x2 (:obj:`float`): - Second abscissa position - y3 (:obj:`float`): - Second ordinate position - x (:obj:`float`): - Abcissa for new value - - Returns: - :obj:`float`: Interpolated/extrapolated value of ordinate at - :math:`x`. - """ - return y1 if np.isclose(x1,x2) else y1 + (x-x1)*(y2-y1)/(x2-x1) # TODO: Make this function more general and put it in utils. @@ -505,10 +469,120 @@ def poly_map(rawimg, rawivar, waveimg, slitmask, slitmask_trim, modelimg, deg=3, return modelmap, relscale +def tweak_slit_edges_gradient(left, right, spat_coo, norm_flat, maxfrac=0.1, debug=False): + r""" Adjust slit edges based on the gradient of the normalized + flat-field illumination profile. + + Args: + left (`numpy.ndarray`_): + Array with the left slit edge for a single slit. Shape is + :math:`(N_{\rm spec},)`. + right (`numpy.ndarray`_): + Array with the right slit edge for a single slit. Shape + is :math:`(N_{\rm spec},)`. + spat_coo (`numpy.ndarray`_): + Spatial pixel coordinates in fractions of the slit width + at each spectral row for the provided normalized flat + data. Coordinates are relative to the left edge (with the + left edge at 0.). Shape is :math:`(N_{\rm flat},)`. + Function assumes the coordinate array is sorted. + norm_flat (`numpy.ndarray`_) + Normalized flat data that provide the slit illumination + profile. Shape is :math:`(N_{\rm flat},)`. 
+ maxfrac (:obj:`float`, optional): + The maximum fraction of the slit width that the slit edge + can be adjusted by this algorithm. If ``maxfrac = 0.1``, + this means the maximum change in the slit width (either + narrowing or broadening) is 20% (i.e., 10% for either + edge). + debug (:obj:`bool`, optional): + If True, the function will output plots to test if the + fitting is working correctly. + + Returns: + tuple: Returns six objects: + + - The threshold used to set the left edge + - The fraction of the slit that the left edge is shifted to + the right + - The adjusted left edge + - The threshold used to set the right edge + - The fraction of the slit that the right edge is shifted to + the left + - The adjusted right edge + """ + # Check input + nspec = len(left) + if len(right) != nspec: + msgs.error('Input left and right traces must have the same length!') + + # Median slit width + slitwidth = np.median(right - left) + + # Calculate the gradient of the normalized flat profile + grad_norm_flat = np.gradient(norm_flat) + # Smooth with a Gaussian kernel + # The standard deviation of the kernel is set to be one detector pixel. Since the norm_flat array is oversampled, + # we need to set the kernel width (sig_res) to be the oversampling factor. + sig_res = norm_flat.size / slitwidth + # The scipy.ndimage module is faster than the astropy convolution module + grad_norm_flat_smooth = scipy.ndimage.gaussian_filter1d(grad_norm_flat, sig_res, mode='nearest') + + # Find the location of the minimum/maximum gradient - this is the amount of shift required + left_shift = spat_coo[np.argmax(grad_norm_flat_smooth)] + right_shift = spat_coo[np.argmin(grad_norm_flat_smooth)]-1.0 + + # Check if the shift is within the allowed range + if np.abs(left_shift) > maxfrac: + msgs.warn('Left slit edge shift of {0:.1f}% exceeds the maximum allowed of {1:.1f}%'.format( + 100*left_shift, 100*maxfrac) + msgs.newline() + + 'The left edge will not be tweaked.') + left_shift = 0.0 + else: + msgs.info('Tweaking left slit boundary by {0:.1f}%'.format(100 * left_shift) + + ' ({0:.2f} pixels)'.format(left_shift * slitwidth)) + if np.abs(right_shift) > maxfrac: + msgs.warn('Right slit edge shift of {0:.1f}% exceeds the maximum allowed of {1:.1f}%'.format( + 100*right_shift, 100*maxfrac) + msgs.newline() + + 'The right edge will not be tweaked.') + right_shift = 0.0 + else: + msgs.info('Tweaking right slit boundary by {0:.1f}%'.format(100 * right_shift) + + ' ({0:.2f} pixels)'.format(right_shift * slitwidth)) + + # Calculate the tweak for the left edge + new_left = left + left_shift * slitwidth + new_right = right + right_shift * slitwidth + + # Calculate the value of the threshold at the new slit edges + left_thresh = np.interp(left_shift, spat_coo, norm_flat) + right_thresh = np.interp(1+right_shift, spat_coo, norm_flat) + + if debug: + plt.subplot(211) + plt.plot(spat_coo, norm_flat, 'k-') + plt.axvline(0.0, color='b', linestyle='-', label='initial') + plt.axvline(1.0, color='b', linestyle='-') + plt.axvline(left_shift, color='g', linestyle='-', label='tweak (gradient)') + plt.axvline(1+right_shift, color='g', linestyle='-') + plt.axhline(left_thresh, xmax=0.5, color='lightgreen', linewidth=3.0, zorder=10) + plt.axhline(right_thresh, xmin=0.5, color='lightgreen', linewidth=3.0, zorder=10) + plt.legend() + plt.subplot(212) + plt.plot(spat_coo, grad_norm_flat, 'k-') + plt.plot(spat_coo, grad_norm_flat_smooth, 'm-') + plt.axvline(0.0, color='b', linestyle='-') + plt.axvline(1.0, color='b', linestyle='-') + 
plt.axvline(left_shift, color='g', linestyle='-') + plt.axvline(1+right_shift, color='g', linestyle='-') + plt.show() + return left_thresh, left_shift, new_left, right_thresh, right_shift, new_right + + # TODO: See pypeit/deprecated/flat.py for the previous version. We need # to continue to vet this algorithm to make sure there are no # unforeseen corner cases that cause errors. -def tweak_slit_edges(left, right, spat_coo, norm_flat, thresh=0.93, maxfrac=0.1, debug=False): +def tweak_slit_edges_threshold(left, right, spat_coo, norm_flat, thresh=0.93, maxfrac=0.1, debug=False): r""" Adjust slit edges based on the normalized slit illumination profile. @@ -614,10 +688,10 @@ def tweak_slit_edges(left, right, spat_coo, norm_flat, thresh=0.93, maxfrac=0.1, 100*maxfrac)) left_shift = maxfrac else: - left_shift = linear_interpolate(norm_flat[i], spat_coo[i], norm_flat[i+1], - spat_coo[i+1], left_thresh) + left_shift = utils.linear_interpolate(norm_flat[i], spat_coo[i], norm_flat[i+1], + spat_coo[i+1], left_thresh) msgs.info('Tweaking left slit boundary by {0:.1f}%'.format(100*left_shift) + - ' % ({0:.2f} pixels)'.format(left_shift*slitwidth)) + ' ({0:.2f} pixels)'.format(left_shift*slitwidth)) new_left += left_shift * slitwidth # ------------------------------------------------------------------ @@ -668,15 +742,15 @@ def tweak_slit_edges(left, right, spat_coo, norm_flat, thresh=0.93, maxfrac=0.1, 100*maxfrac)) right_shift = maxfrac else: - right_shift = 1-linear_interpolate(norm_flat[i-1], spat_coo[i-1], norm_flat[i], - spat_coo[i], right_thresh) + right_shift = 1-utils.linear_interpolate(norm_flat[i-1], spat_coo[i-1], norm_flat[i], + spat_coo[i], right_thresh) msgs.info('Tweaking right slit boundary by {0:.1f}%'.format(100*right_shift) + - ' % ({0:.2f} pixels)'.format(right_shift*slitwidth)) + ' ({0:.2f} pixels)'.format(right_shift*slitwidth)) new_right -= right_shift * slitwidth return left_thresh, left_shift, new_left, right_thresh, right_shift, new_right -#def flatfield(sciframe, flatframe, bpm=None, illum_flat=None, snframe=None, varframe=None): + def flatfield(sciframe, flatframe, varframe=None): r""" Field flatten the input image. diff --git a/pypeit/core/flexure.py b/pypeit/core/flexure.py index b1d94a02a2..02265a3426 100644 --- a/pypeit/core/flexure.py +++ b/pypeit/core/flexure.py @@ -1218,7 +1218,8 @@ def calculate_image_phase(imref, imshift, gpm_ref=None, gpm_shift=None, maskval= if gpm_shift is None: gpm_shift = np.ones(imshift.shape, dtype=bool) if maskval is None else imshift != maskval # Get a crude estimate of the shift - shift = phase_cross_correlation(imref, imshift, reference_mask=gpm_ref, moving_mask=gpm_shift).astype(int) + shift, _, _ = phase_cross_correlation(imref, imshift, reference_mask=gpm_ref, moving_mask=gpm_shift) + shift = shift.astype(int) # Extract the overlapping portion of the images exref = imref.copy() exshf = imshift.copy() diff --git a/pypeit/core/flux_calib.py b/pypeit/core/flux_calib.py index 816f06be08..d2e5fbcbca 100644 --- a/pypeit/core/flux_calib.py +++ b/pypeit/core/flux_calib.py @@ -696,15 +696,9 @@ def sensfunc(wave, counts, counts_ivar, counts_mask, exptime, airmass, std_dict, If you have significant telluric absorption you should be using telluric.sensnfunc_telluric. 
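Stepping back to ``tweak_slit_edges_gradient`` (added in ``pypeit/core/flat.py`` above): stripped of the bookkeeping, warnings, and QA plots, the method reduces to smoothing the derivative of the oversampled illumination profile and reading off its extrema. A minimal sketch of that core step, with a hypothetical top-hat profile standing in for real flat-field data (``edge_shifts_from_gradient`` is illustrative, not the PypeIt API):

.. code-block:: python

    import numpy as np
    import scipy.ndimage

    def edge_shifts_from_gradient(spat_coo, norm_flat, slitwidth):
        # Derivative of the oversampled illumination profile
        grad = np.gradient(norm_flat)
        # Smooth by one detector pixel; norm_flat.size/slitwidth is the
        # oversampling factor of the profile
        sig_res = norm_flat.size / slitwidth
        grad_smooth = scipy.ndimage.gaussian_filter1d(grad, sig_res, mode='nearest')
        # Left edge sits at the steepest rise; right edge at the steepest
        # fall, quoted relative to the nominal right edge at 1.0
        left_shift = spat_coo[np.argmax(grad_smooth)]
        right_shift = spat_coo[np.argmin(grad_smooth)] - 1.0
        return left_shift, right_shift

    # Toy profile with soft edges near 0.05 and 0.97 of the slit width
    x = np.linspace(0., 1., 500)
    profile = 0.5*(np.tanh((x - 0.05)/0.01) - np.tanh((x - 0.97)/0.01))
    print(edge_shifts_from_gradient(x, profile, slitwidth=50.))  # ~(0.05, -0.03)

The shifts are then capped at ``maxfrac`` and converted to pixels by multiplying by the median slit width, exactly as in the full function.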
            default = 0.9

    Returns:
-        Tuple: Returns:
-
-        Returns
-        -------
-        meta_table: `astropy.table.Table`_
-              Table containing meta data for the sensitivity function
-        out_table: `astropy.table.Table`_
-              Table containing the sensitivity function
-
+        tuple: Returns the following:
+            - meta_table: `astropy.table.Table`_ Table containing meta data for the sensitivity function
+            - out_table: `astropy.table.Table`_ Table containing the sensitivity function
     """

     wave_arr, counts_arr, ivar_arr, mask_arr, log10_blaze_func, nspec, norders = utils.spec_atleast_2d(wave, counts, counts_ivar, counts_mask)
@@ -755,9 +749,9 @@ def sensfunc(wave, counts, counts_ivar, counts_mask, exptime, airmass, std_dict,

     return meta_table, out_table

-def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, extinct_correct=False,
-                        airmass=None, longitude=None, latitude=None, extinctfilepar=None,
-                        extrap_sens=False):
+
+def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, delta_wave=None, extinct_correct=False,
+                        airmass=None, longitude=None, latitude=None, extinctfilepar=None, extrap_sens=False):
     """
     Get the final sensitivity function factor that will be multiplied into a spectrum in units of counts to
     flux calibrate it. This code interpolates the sensitivity function and can also multiply in extinction and telluric
     corrections.
@@ -766,27 +760,30 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, extin

     Args:
         wave (float `numpy.ndarray`_): shape = (nspec,)
-            Senstivity
+            Wavelength vector for the spectrum to be flux calibrated
         wave_zp (float `numpy.ndarray`_):
-            Zerooint wavelength vector shape = (nsens,)
+            Zeropoint wavelength vector shape = (nsens,)
         zeropoint (float `numpy.ndarray`_): shape = (nsens,)
-           Zeropoint, i.e. sensitivity function
+            Zeropoint, i.e. sensitivity function
         exptime (float):
-        tellmodel (float `numpy.ndarray`_, optional): shape = (nspec,)
-            Apply telluric correction if it is passed it. Note this is deprecated.
+            Exposure time in seconds
+        tellmodel (float, `numpy.ndarray`_, optional):
+            Apply telluric correction if it is passed in (shape = (nspec,)). Note this is deprecated.
+        delta_wave (float, `numpy.ndarray`_, optional):
+            The wavelength sampling of the spectrum to be flux calibrated.
         extinct_correct (bool, optional)
-            If True perform an extinction correction. Deafult = False
+            If True perform an extinction correction. Default = False
         airmass (float, optional):
-           Airmass used if extinct_correct=True. This is required if extinct_correct=True
+            Airmass used if extinct_correct=True. This is required if extinct_correct=True
         longitude (float, optional):
            longitude in degree for observatory
            Required for extinction correction
         latitude:
            latitude in degree for observatory
-           Required  for extinction correction
+            Required for extinction correction
         extinctfilepar (str):
-                [sensfunc][UVIS][extinct_file] parameter
-                Used for extinction correction
+            [sensfunc][UVIS][extinct_file] parameter
+            Used for extinction correction
         extrap_sens (bool, optional):
            Extrapolate the sensitivity function (instead of crashing out)

@@ -796,10 +793,23 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, extin
             This quantity is defined to be sensfunc_interp/exptime/delta_wave.
shape = (nspec,) """ - + # Initialise some variables zeropoint_obs = np.zeros_like(wave) wave_mask = wave > 1.0 # filter out masked regions or bad wavelengths - delta_wave = wvutils.get_delta_wave(wave, wave_mask) + if delta_wave is not None: + # Check that the delta_wave is the same size as the wave vector + if isinstance(delta_wave, float): + _delta_wave = delta_wave + elif isinstance(delta_wave, np.ndarray): + if wave.size != delta_wave.size: + msgs.error('The wavelength vector and delta_wave vector must be the same size') + _delta_wave = delta_wave + else: + msgs.warn('Invalid type for delta_wave - using a default value') + _delta_wave = wvutils.get_delta_wave(wave, wave_mask) + else: + # If delta_wave is not passed in, then we will use the native wavelength sampling of the spectrum + _delta_wave = wvutils.get_delta_wave(wave, wave_mask) # print(f'get_sensfunc_factor: {np.amin(wave_zp):.1f}, {np.amax(wave_zp):.1f}, ' # f'{np.amin(wave[wave_mask]):.1f}, {np.amax(wave[wave_mask]):.1f}') @@ -829,10 +839,10 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, extin # Did the user request a telluric correction? if tellmodel is not None: # This assumes there is a separate telluric key in this dict. + msgs.warn("Telluric corrections via this method are deprecated") msgs.info('Applying telluric correction') sensfunc_obs = sensfunc_obs * (tellmodel > 1e-10) / (tellmodel + (tellmodel < 1e-10)) - if extinct_correct: if longitude is None or latitude is None: msgs.error('You must specify longitude and latitude if we are extinction correcting') @@ -848,7 +858,7 @@ def get_sensfunc_factor(wave, wave_zp, zeropoint, exptime, tellmodel=None, extin # senstot is the conversion from N_lam to F_lam, and the division by exptime and delta_wave are to convert # the spectrum in counts/pixel into units of N_lam = counts/sec/angstrom - return senstot/exptime/delta_wave + return senstot/exptime/_delta_wave def counts2Nlam(wave, counts, counts_ivar, counts_mask, exptime, airmass, longitude, latitude, extinctfilepar): @@ -1302,6 +1312,7 @@ def Nlam_to_Flam(wave, zeropoint, zp_min=5.0, zp_max=30.0): factor[gpm] = np.power(10.0, -0.4*(zeropoint[gpm] - ZP_UNIT_CONST))/np.square(wave[gpm]) return factor + def Flam_to_Nlam(wave, zeropoint, zp_min=5.0, zp_max=30.0): r""" The factor that when multiplied into F_lam converts to N_lam, @@ -1390,7 +1401,7 @@ def zeropoint_to_throughput(wave, zeropoint, eff_aperture): """ eff_aperture_m2 = eff_aperture*units.m**2 - S_lam_units = 1e-17*units.erg/units.cm**2 + S_lam_units = PYPEIT_FLUX_SCALE*units.erg/units.cm**2 # Set the throughput to be -1 in places where it is not defined. 
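A hedged usage sketch of the updated ``get_sensfunc_factor`` call above, showing the new ``delta_wave`` override; the wavelength, counts, and zeropoint arrays below are placeholders, not real sensitivity-function data, and the constant zeropoint is purely illustrative:

.. code-block:: python

    import numpy as np
    from pypeit.core import flux_calib

    # Placeholder spectrum and zeropoint sampling
    wave = np.linspace(4000., 7000., 3000)      # Angstroms
    counts = np.full_like(wave, 1.0e3)          # counts per pixel
    wave_zp = np.linspace(3500., 7500., 100)
    zeropoint = np.full_like(wave_zp, 27.0)     # illustrative zeropoint

    # If delta_wave is omitted, the sampling is derived from `wave` itself;
    # pass a float or a per-pixel array to override it (e.g., for resampled spectra)
    factor = flux_calib.get_sensfunc_factor(wave, wave_zp, zeropoint, 600.,
                                            delta_wave=1.0)
    flam = counts * factor   # F_lam in PYPEIT_FLUX_SCALE (1e-17 erg/s/cm^2/Ang) units

Passing ``delta_wave`` explicitly is the point of this change: a spectrum that has been resampled no longer has to rely on the native sampling inferred from its wavelength vector.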
throughput = np.full_like(zeropoint, -1.0) zeropoint_gpm = (zeropoint > 5.0) & (zeropoint < 30.0) & (wave > 1.0) diff --git a/pypeit/core/procimg.py b/pypeit/core/procimg.py index b2d51ba623..795a198446 100644 --- a/pypeit/core/procimg.py +++ b/pypeit/core/procimg.py @@ -813,6 +813,7 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 frequency = np.mean(frq) # Perform the overscan subtraction for each amplifier + full_model = np.zeros_like(frame_orig) # Store the model pattern for all amplifiers in this array for aa, amp in enumerate(amps): # Get the frequency to use for this amplifier if isinstance(frequency, list): @@ -823,9 +824,9 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 use_fr = frequency # Extract overscan - overscan, os_slice = rect_slice_with_mask(frame_orig, tmp_oscan, amp) + overscan, os_slice = rect_slice_with_mask(frame_orig.copy(), tmp_oscan, amp) # Extract overscan+data - oscandata, osd_slice = rect_slice_with_mask(frame_orig, tmp_oscan+tmp_data, amp) + oscandata, osd_slice = rect_slice_with_mask(frame_orig.copy(), tmp_oscan+tmp_data, amp) # Subtract the DC offset overscan -= np.median(overscan, axis=1)[:, np.newaxis] @@ -854,7 +855,7 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 tmpamp = np.fft.rfft(overscan, axis=1) idx = (np.arange(overscan.shape[0]), np.argmax(np.abs(tmpamp), axis=1)) # Convert result to amplitude and phase - amps = (np.abs(tmpamp))[idx] * (2.0 / overscan.shape[1]) + ampls = (np.abs(tmpamp))[idx] * (2.0 / overscan.shape[1]) # STEP 2 - Using the model frequency, calculate how amplitude depends on pixel row (usually constant) # Use the above to as initial guess parameters for a chi-squared minimisation of the amplitudes @@ -877,7 +878,7 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 try: # Now fit it popt, pcov = scipy.optimize.curve_fit( - cosfunc, cent[wgd], hist[wgd], p0=[amps[ii], 0.0], + cosfunc, cent[wgd], hist[wgd], p0=[ampls[ii], 0.0], bounds=([0, -np.inf],[np.inf, np.inf]) ) except ValueError: @@ -920,21 +921,15 @@ def subtract_pattern(rawframe, datasec_img, oscansec_img, frequency=None, axis=1 model_pattern[ii, :] = cosfunc_full(xdata_all, amp_mod[ii], frq_mod[ii], popt[0]) # Estimate the improvement of the effective read noise - tmp = outframe.copy() - tmp[osd_slice] -= model_pattern - mod_oscan, _ = rect_slice_with_mask(tmp, tmp_oscan, amp) - old_ron = astropy.stats.sigma_clipped_stats(overscan, sigma=5)[-1] - new_ron = astropy.stats.sigma_clipped_stats(overscan-mod_oscan, sigma=5)[-1] + full_model[osd_slice] = model_pattern + old_ron = astropy.stats.sigma_clipped_stats(overscan, sigma=5, stdfunc='mad_std')[-1] + new_ron = astropy.stats.sigma_clipped_stats(overscan-full_model[os_slice], sigma=5, stdfunc='mad_std')[-1] msgs.info(f'Effective read noise of amplifier {amp} reduced by a factor of {old_ron/new_ron:.2f}x') - # Subtract the model pattern from the full datasec - outframe[osd_slice] -= model_pattern - # Transpose if the input frame if applied along a different axis if axis == 0: - outframe = outframe.T - # Return the result - return outframe + return (outframe - full_model).T + return outframe - full_model def pattern_frequency(frame, axis=1): @@ -1402,3 +1397,51 @@ def variance_model(base, counts=None, count_scale=None, noise_floor=None): return var +def nonlinear_counts(counts, ampimage, nonlinearity_coeffs): + r""" + Apply a nonlinearity correction to the provided counts. 
+
+    The nonlinearity correction is applied to the provided ``counts`` using the
+    amplifier-dependent coefficients in the provided ``nonlinearity_coeffs``.
+    The correction uses the following equation:
+
+    .. math::
+
+        C_{\rm corr} = C \left[ 1 + a_i C \right]
+
+    where :math:`C` is the provided counts, :math:`C_{\rm corr}` is the corrected
+    counts, and :math:`a_i` are the provided coefficients (one for each amplifier).
+
+    Parameters
+    ----------
+    counts : `numpy.ndarray`_
+        Array with the counts to correct.
+    ampimage : `numpy.ndarray`_
+        Array with the amplifier image. This is used to determine the
+        amplifier-dependent nonlinearity correction coefficients.
+    nonlinearity_coeffs : `numpy.ndarray`_
+        Array with the nonlinearity correction coefficients. The shape of the
+        array must be :math:`(N_{\rm amp})`, where :math:`N_{\rm amp}` is the
+        number of amplifiers. The coefficients are applied to the counts using
+        the equation above.
+
+    Returns
+    -------
+    corr_counts : `numpy.ndarray`_
+        Array with the corrected counts.
+    """
+    msgs.info('Applying a non-linearity correction to the counts.')
+    # Check the input
+    if counts.shape != ampimage.shape:
+        msgs.error('Counts and amplifier image have different shapes.')
+    _nonlinearity_coeffs = np.asarray(nonlinearity_coeffs)
+    # Setup the output array
+    corr_counts = counts.copy()
+    # Apply the correction, one amplifier at a time
+    unqamp = np.unique(ampimage)
+    for uu in range(unqamp.size):
+        thisamp = unqamp[uu]
+        indx = (ampimage == thisamp)
+        corr_counts[indx] = counts[indx] * (1. + _nonlinearity_coeffs[thisamp]*counts[indx])
+    return corr_counts
diff --git a/pypeit/data/standards/blackbody/blackbody_info.txt b/pypeit/data/standards/blackbody/blackbody_info.txt
index d0cc2e4237..060f9e49b3 100644
--- a/pypeit/data/standards/blackbody/blackbody_info.txt
+++ b/pypeit/data/standards/blackbody/blackbody_info.txt
@@ -7,20 +7,20 @@
 # NOTE: The flux is generated at runtime, so there are no
 # "Files". The first column is just a placeholder.
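Since the correction in ``nonlinear_counts`` above is a simple per-amplifier quadratic in the measured counts, it can be checked by hand. A small self-contained sketch with made-up coefficients; the vectorized indexing below is equivalent to the per-amplifier loop in the function itself:

.. code-block:: python

    import numpy as np

    # Toy 2x4 'detector' with amplifier 0 on the left and amplifier 1 on the right
    counts = np.array([[1000., 2000., 3000., 4000.],
                       [1500., 2500., 3500., 4500.]])
    ampimage = np.array([[0, 0, 1, 1],
                         [0, 0, 1, 1]])
    # One coefficient per amplifier; these values are purely illustrative
    coeffs = np.array([1.0e-6, -2.0e-6])

    # Same relation as above: C_corr = C * (1 + a_i * C)
    corr = counts * (1. + coeffs[ampimage] * counts)
    print(corr - counts)   # per-pixel adjustment

For ``a = 1e-6`` and 1000 counts the change is 0.1%, growing linearly with the count level, which is the expected signature of a mildly non-linear detector response.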
File Name RA_2000 DEC_2000 g_MAG TYPE T_K a_x10m23 -J0027m0017.fits J0027m0017 00:27:39.497 -00:17:41.93 18.90 DB 10662 0.435 -J0048p0017.fits J0048p0017 00:48:30.324 +00:17:52.80 18.14 DB 10639 0.876 -J0146m0051.fits J0146m0051 01:46:18.898 -00:51:50.51 18.17 DB 11770 0.672 -J0229m0041.fits J0229m0041 02:29:36.715 -00:41:13.63 19.07 DB 8901 0.640 -J0832p3709.fits J0832p3709 08:32:26.568 +37:09:55.48 18.98 DB 7952 1.137 -J0837p5427.fits J0837p5427 08:37:36.557 +54:27:58.64 18.73 DB 7533 1.703 -J1004p1215.fits J1004p1215 10:04:49.541 +12:15:59.65 19.18 DB 9773 0.440 -J1045p0157.fits J1045p0157 10:45:23.866 +01:57:21.96 19.09 DB 8956 0.666 -J1117p4059.fits J1117p4059 11:17:20.801 +40:59:54.67 18.08 DB 10992 0.844 -J1147p1713.fits J1147p1713 11:47:22.608 +17:13:25.21 18.65 DB 9962 0.669 -J1245p4238.fits J1245p4238 12:45:35.626 +42:38:24.58 17.14 DB 9912 2.817 -J1255p1924.fits J1255p1924 12:55:07.082 +19:24:59.00 18.53 DB 8882 1.157 -J1343p2706.fits J1343p2706 13:43:05.302 +27:06:23.98 18.93 DB 10678 0.427 -J1417p4941.fits J1417p4941 14:17:24.329 +49:41:27.85 17.25 DB 10461 2.192 -J1518p0028.fits J1518p0028 15:18:59.717 +00:28:39.58 19.44 DB 9072 0.458 -J1617p1813.fits J1617p1813 16:17:04.078 +18:13:11.96 18.78 DB 8568 0.996 -J2302m0030.fits J2302m0030 23:02:40.032 -00:30:21.60 17.80 DB 10478 1.241 +J0027m0017.fits J0027m0017 00:27:39.497 -00:17:41.93 18.90 DC 10662 0.435 +J0048p0017.fits J0048p0017 00:48:30.324 +00:17:52.80 18.14 DC 10639 0.876 +J0146m0051.fits J0146m0051 01:46:18.898 -00:51:50.51 18.17 DC 11770 0.672 +J0229m0041.fits J0229m0041 02:29:36.715 -00:41:13.63 19.07 DC 8901 0.640 +J0832p3709.fits J0832p3709 08:32:26.568 +37:09:55.48 18.98 DC 7952 1.137 +J0837p5427.fits J0837p5427 08:37:36.557 +54:27:58.64 18.73 DC 7533 1.703 +J1004p1215.fits J1004p1215 10:04:49.541 +12:15:59.65 19.18 DC 9773 0.440 +J1045p0157.fits J1045p0157 10:45:23.866 +01:57:21.96 19.09 DC 8956 0.666 +J1117p4059.fits J1117p4059 11:17:20.801 +40:59:54.67 18.08 DC 10992 0.844 +J1147p1713.fits J1147p1713 11:47:22.608 +17:13:25.21 18.65 DC 9962 0.669 +J1245p4238.fits J1245p4238 12:45:35.626 +42:38:24.58 17.14 DC 9912 2.817 +J1255p1924.fits J1255p1924 12:55:07.082 +19:24:59.00 18.53 DC 8882 1.157 +J1343p2706.fits J1343p2706 13:43:05.302 +27:06:23.98 18.93 DC 10678 0.427 +J1417p4941.fits J1417p4941 14:17:24.329 +49:41:27.85 17.25 DC 10461 2.192 +J1518p0028.fits J1518p0028 15:18:59.717 +00:28:39.58 19.44 DC 9072 0.458 +J1617p1813.fits J1617p1813 16:17:04.078 +18:13:11.96 18.78 DC 8568 0.996 +J2302m0030.fits J2302m0030 23:02:40.032 -00:30:21.60 17.80 DC 10478 1.241 diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py index 6ca9dbbeed..a2d11e18f6 100644 --- a/pypeit/find_objects.py +++ b/pypeit/find_objects.py @@ -974,22 +974,6 @@ class SlicerIFUFindObjects(MultiSlitFindObjects): def __init__(self, sciImg, slits, spectrograph, par, objtype, **kwargs): super().__init__(sciImg, slits, spectrograph, par, objtype, **kwargs) - def initialize_slits(self, slits, initial=True): - """ - Gather all the :class:`~pypeit.slittrace.SlitTraceSet` attributes that - we'll use here in :class:`FindObjects`. Identical to the parent but the - slits are not trimmed. - - Args: - slits (:class:`~pypeit.slittrace.SlitTraceSet`): - SlitTraceSet object containing the slit boundaries that will be - initialized. - initial (:obj:`bool`, optional): - Use the initial definition of the slits. If False, - tweaked slits are used. 
-        """
-        super().initialize_slits(slits, initial=True)
-
     def global_skysub(self, skymask=None, bkg_redux_sciimg=None, update_crmask=True,
                       previous_sky=None, show_fit=False, show=False, show_objs=False,
                       objs_not_masked=False, reinit_bpm: bool = True):
@@ -1076,7 +1060,7 @@ def joint_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0),

         # Use the FWHM map determined from the arc lines to convert the science frame
         # to have the same effective spectral resolution.
-        fwhm_map = self.wv_calib.build_fwhmimg(self.tilts, self.slits, initial=True, spat_flexure=self.spat_flexure_shift)
+        fwhm_map = self.wv_calib.build_fwhmimg(self.tilts, self.slits, spat_flexure=self.spat_flexure_shift)
         thismask = thismask & (fwhm_map != 0.0)
         # Need to include S/N for deconvolution
         sciimg = skysub.convolve_skymodel(self.sciImg.image, fwhm_map, thismask)
@@ -1085,8 +1069,8 @@ def joint_skysub(self, skymask=None, update_crmask=True, trim_edg=(0,0),
         model_ivar = self.sciImg.ivar
         sl_ref = self.par['calibrations']['flatfield']['slit_illum_ref_idx']
         # Prepare the slitmasks for the relative spectral illumination
-        slitmask = self.slits.slit_img(pad=0, initial=True, flexure=self.spat_flexure_shift)
-        slitmask_trim = self.slits.slit_img(pad=-3, initial=True, flexure=self.spat_flexure_shift)
+        slitmask = self.slits.slit_img(pad=0, flexure=self.spat_flexure_shift)
+        slitmask_trim = self.slits.slit_img(pad=-3, flexure=self.spat_flexure_shift)
         for nn in range(numiter):
             msgs.info("Performing iterative joint sky subtraction - ITERATION {0:d}/{1:d}".format(nn+1, numiter))
             # TODO trim_edg is in the parset so it should be passed in here via trim_edg=tuple(self.par['reduce']['trim_edge']),
diff --git a/pypeit/flatfield.py b/pypeit/flatfield.py
index 4d729177ca..7be32bc805 100644
--- a/pypeit/flatfield.py
+++ b/pypeit/flatfield.py
@@ -828,6 +828,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False):
         # Set parameters (for convenience;
         spec_samp_fine = self.flatpar['spec_samp_fine']
         spec_samp_coarse = self.flatpar['spec_samp_coarse']
+        tweak_method = self.flatpar['tweak_method']
         tweak_slits = self.flatpar['tweak_slits']
         tweak_slits_thresh = self.flatpar['tweak_slits_thresh']
         tweak_slits_maxfrac = self.flatpar['tweak_slits_maxfrac']
@@ -1111,9 +1112,10 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False):
                 # TODO: Will this break if
                 left_thresh, left_shift, self.slits.left_tweak[:,slit_idx], right_thresh, \
                     right_shift, self.slits.right_tweak[:,slit_idx] \
-                        = flat.tweak_slit_edges(self.slits.left_init[:,slit_idx],
+                        = self.tweak_slit_edges(self.slits.left_init[:,slit_idx],
                                                 self.slits.right_init[:,slit_idx],
                                                 spat_coo_data, spat_flat_data,
+                                                method=tweak_method,
                                                 thresh=tweak_slits_thresh,
                                                 maxfrac=tweak_slits_maxfrac, debug=debug)
                 # TODO: Because the padding doesn't consider adjacent
@@ -1124,14 +1126,15 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False):
                 # Update the onslit mask
                 _slitid_img = self.slits.slit_img(slitidx=slit_idx, initial=False)
                 onslit_tweak = _slitid_img == slit_spat
-                spat_coo_tweak = self.slits.spatial_coordinate_image(slitidx=slit_idx,
-                                                                     slitid_img=_slitid_img)
+                # Note: we need the full coordinate image here (i.e., similar to spat_coo_init);
+                # otherwise, the tweaked locations are biased.
+ spat_coo_tweak = self.slits.spatial_coordinate_image(slitidx=slit_idx, full=True, slitid_img=_slitid_img) # Construct the empirical illumination profile # TODO This is extremely inefficient, because we only need to re-fit the illumflat, but # spatial_fit does both the reconstruction of the illumination function and the bspline fitting. # Only the b-spline fitting needs be reddone with the new tweaked spatial coordinates, so that would - # save a ton of runtime. It is not a trivial change becauase the coords are sorted, etc. + # save a ton of runtime. It is not a trivial change because the coords are sorted, etc. exit_status, spat_coo_data, spat_flat_data, spat_bspl, spat_gpm_fit, \ spat_flat_fit, spat_flat_data_raw = self.spatial_fit( norm_spec, spat_coo_tweak, median_slit_widths[slit_idx], spat_gpm, gpm, debug=False) @@ -1182,7 +1185,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): spat_model[onslit_padded] = spat_bspl.value(spat_coo_final[onslit_padded])[0] specspat_illum = np.fmax(spec_model, 1.0) * spat_model norm_spatspec = rawflat / specspat_illum - self.spatial_fit_finecorr(norm_spatspec, onslit_tweak, slit_idx, slit_spat, gpm, doqa=doqa) + spat_illum_fine = self.spatial_fit_finecorr(norm_spatspec, onslit_tweak, slit_idx, slit_spat, gpm, doqa=doqa)[onslit_tweak] # ---------------------------------------------------------- # Construct the illumination profile with the tweaked edges @@ -1423,6 +1426,11 @@ def spatial_fit_finecorr(self, normed, onslit_tweak, slit_idx, slit_spat, gpm, s A positive number trims the slit edges, a negative number pads the slit edges. doqa : :obj:`bool`, optional: Save the QA? + + Returns + ------- + illumflat_finecorr: `numpy.ndarray`_ + An image (same shape as normed) containing the fine correction to the spatial illumination profile """ # check id self.waveimg is available if self.waveimg is None: @@ -1439,6 +1447,7 @@ def spatial_fit_finecorr(self, normed, onslit_tweak, slit_idx, slit_spat, gpm, s onslit_tweak_trim = self.slits.slit_img(pad=-slit_trim, slitidx=slit_idx, initial=False) == slit_spat # Setup slitimg = (slit_spat + 1) * onslit_tweak.astype(int) - 1 # Need to +1 and -1 so that slitimg=-1 when off the slit + left, right, msk = self.slits.select_edges(flexure=self.wavetilts.spat_flexure if self.wavetilts is not None else 0.0) this_left = left[:, slit_idx] this_right = right[:, slit_idx] @@ -1448,7 +1457,6 @@ def spatial_fit_finecorr(self, normed, onslit_tweak, slit_idx, slit_spat, gpm, s this_slit = np.where(onslit_tweak & self.rawflatimg.select_flag(invert=True) & (self.waveimg!=0.0)) this_wave = self.waveimg[this_slit] xpos_img = self.slits.spatial_coordinate_image(slitidx=slit_idx, - initial=True, slitid_img=slitimg, flexure_shift=self.wavetilts.spat_flexure if self.wavetilts is not None else 0.0) # Generate the trimmed versions for fitting @@ -1491,7 +1499,7 @@ def spatial_fit_finecorr(self, normed, onslit_tweak, slit_idx, slit_spat, gpm, s normed[np.logical_not(onslit_tweak)] = 1 # For the QA, make everything off the slit equal to 1 spatillum_finecorr_qa(normed, illumflat_finecorr, this_left, this_right, ypos_fit, this_slit_trim, outfile=outfile, title=title, half_slen=slitlen//2) - return + return illumflat_finecorr def extract_structure(self, rawflat_orig, slit_trim=3): """ @@ -1606,7 +1614,7 @@ def spectral_illumination(self, gpm=None, debug=False): return illum_profile_spectral(rawflat, self.waveimg, self.slits, slit_illum_ref_idx=self.flatpar['slit_illum_ref_idx'], model=None, gpmask=gpm, skymask=None, trim=trim, 
- flexure=self.wavetilts.spat_flexure if self.wavetilts is not None else 0.0, + flexure=self.wavetilts.spat_flexure if self.wavetilts is not None else 0.0, smooth_npix=self.flatpar['slit_illum_smooth_npix'], debug=debug) @@ -1899,7 +1907,7 @@ def show_flats(image_list, wcs_match=True, slits=None, waveimg=None): # TODO :: This could possibly be moved to core.flat def illum_profile_spectral(rawimg, waveimg, slits, slit_illum_ref_idx=0, smooth_npix=None, polydeg=None, - model=None, gpmask=None, skymask=None, trim=3, flexure=None, maxiter=5): + model=None, gpmask=None, skymask=None, trim=3, flexure=None, maxiter=5, debug=False): """ Determine the relative spectral illumination of all slits. Currently only used for image slicer IFUs. @@ -1932,6 +1940,8 @@ def illum_profile_spectral(rawimg, waveimg, slits, slit_illum_ref_idx=0, smooth_ Spatial flexure maxiter : :obj:`int` Maximum number of iterations to perform + debug : :obj:`bool` + Show the results of the relative spectral illumination correction Returns ------- @@ -1948,14 +1958,14 @@ def illum_profile_spectral(rawimg, waveimg, slits, slit_illum_ref_idx=0, smooth_ gpm = gpmask if (gpmask is not None) else np.ones_like(rawimg, dtype=bool) modelimg = model if (model is not None) else rawimg.copy() # Setup the slits - slitid_img_init = slits.slit_img(pad=0, initial=True, flexure=flexure) - slitid_img_trim = slits.slit_img(pad=-trim, initial=True, flexure=flexure) + slitid_img = slits.slit_img(pad=0, flexure=flexure) + slitid_img_trim = slits.slit_img(pad=-trim, flexure=flexure) scaleImg = np.ones_like(rawimg) modelimg_copy = modelimg.copy() # Obtain the minimum and maximum wavelength of all slits mnmx_wv = np.zeros((slits.nslits, 2)) for slit_idx, slit_spat in enumerate(slits.spat_id): - onslit_init = (slitid_img_init == slit_spat) + onslit_init = (slitid_img == slit_spat) mnmx_wv[slit_idx, 0] = np.min(waveimg[onslit_init]) mnmx_wv[slit_idx, 1] = np.max(waveimg[onslit_init]) wavecen = np.mean(mnmx_wv, axis=1) @@ -1990,7 +2000,7 @@ def illum_profile_spectral(rawimg, waveimg, slits, slit_illum_ref_idx=0, smooth_ for ss in range(1, slits.spat_id.size): # Calculate the region of overlap onslit_b = (slitid_img_trim == slits.spat_id[wvsrt[ss]]) - onslit_b_init = (slitid_img_init == slits.spat_id[wvsrt[ss]]) + onslit_b_init = (slitid_img == slits.spat_id[wvsrt[ss]]) onslit_b_olap = onslit_b & gpm & (waveimg >= mnmx_wv[wvsrt[ss], 0]) & (waveimg <= mnmx_wv[wvsrt[ss], 1]) & skymask_now hist, edge = np.histogram(waveimg[onslit_b_olap], bins=wavebins, weights=modelimg_copy[onslit_b_olap]) cntr, edge = np.histogram(waveimg[onslit_b_olap], bins=wavebins) @@ -2041,7 +2051,6 @@ def illum_profile_spectral(rawimg, waveimg, slits, slit_illum_ref_idx=0, smooth_ modelimg_copy /= relscl_model if max(abs(1/minv), abs(maxv)) < 1.005: # Relative accuracy of 0.5% is sufficient break - debug = False if debug: embed() ricp = rawimg.copy() @@ -2056,12 +2065,24 @@ def illum_profile_spectral(rawimg, waveimg, slits, slit_illum_ref_idx=0, smooth_ scale_ref = histScl * norm plt.subplot(211) plt.plot(wave_ref, this_spec) + plt.xlim([3600, 4500]) plt.subplot(212) plt.plot(wave_ref, scale_ref) + plt.xlim([3600, 4500]) plt.subplot(211) plt.plot(wave_ref, spec_ref, 'k--') + plt.xlim([3600, 4500]) + plt.show() + # Plot the relative scales of each slit + scales_med, scales_avg = np.zeros(slits.spat_id.size), np.zeros(slits.spat_id.size) + for ss in range(slits.spat_id.size): + onslit_ref_trim = (slitid_img_trim == slits.spat_id[ss]) & gpm & skymask_now & (waveimg>3628) & 
(waveimg<4510)
+            scales_med[ss] = np.median(ricp[onslit_ref_trim]/scaleImg[onslit_ref_trim])
+            scales_avg[ss] = np.mean(ricp[onslit_ref_trim]/scaleImg[onslit_ref_trim])
+        plt.plot(slits.spat_id, scales_med, 'bo-', label='Median')
+        plt.plot(slits.spat_id, scales_avg, 'ro-', label='Mean')
+        plt.legend()
         plt.show()
-
     return scaleImg
diff --git a/pypeit/images/buildimage.py b/pypeit/images/buildimage.py
index 16dc24dbe9..14c4f3d1d9 100644
--- a/pypeit/images/buildimage.py
+++ b/pypeit/images/buildimage.py
@@ -10,11 +10,13 @@
 from pypeit import msgs
 from pypeit.par import pypeitpar
+from pypeit.images import rawimage
 from pypeit.images import combineimage
 from pypeit.images import pypeitimage
 from pypeit.core.framematch import valid_frametype

+
 class ArcImage(pypeitimage.PypeItCalibrationImage):
     """
     Simple DataContainer for the Arc Image
@@ -160,7 +162,10 @@ def buildimage_fromlist(spectrograph, det, frame_par, file_list, bias=None, bpm=
                         scattlight=None, flatimages=None, maxiters=5, ignore_saturation=True,
                         slits=None, mosaic=None, calib_dir=None, setup=None, calib_id=None):
     """
-    Perform basic image processing on a list of images and combine the results.
+    Perform basic image processing on a list of images and combine the results. All
+    core processing steps for each image are handled by :class:`~pypeit.images.rawimage.RawImage` and
+    image combination is handled by :class:`~pypeit.images.combineimage.CombineImage`.
+    This function can be used to process single images, lists of images, and detector mosaics.

     .. warning::

@@ -246,6 +251,16 @@ def buildimage_fromlist(spectrograph, det, frame_par, file_list, bias=None, bpm=
     # Should the detectors be reformatted into a single image mosaic?
     if mosaic is None:
         mosaic = isinstance(det, tuple) and frame_par['frametype'] not in ['bias', 'dark']
+
+    rawImage_list = []
+    # Loop on the files
+    for ifile in file_list:
+        # Load raw image
+        rawImage = rawimage.RawImage(ifile, spectrograph, det)
+        # Process
+        rawImage_list.append(rawImage.process(
+            frame_par['process'], scattlight=scattlight, bias=bias,
+            bpm=bpm, dark=dark, flatimages=flatimages, slits=slits, mosaic=mosaic))

     # Do it
     combineImage = combineimage.CombineImage(rawImage_list, frame_par['process'])
diff --git a/pypeit/images/combineimage.py b/pypeit/images/combineimage.py
index 2d91ca86eb..147497d9c4 100644
--- a/pypeit/images/combineimage.py
+++ b/pypeit/images/combineimage.py
@@ -4,8 +4,6 @@

 .. include:: ../include/links.rst
 """
-import os
-
 from IPython import embed

 import numpy as np
@@ -16,50 +14,39 @@
 from pypeit.par import pypeitpar
 from pypeit import utils
 from pypeit.images import pypeitimage
-from pypeit.images import rawimage
 from pypeit.images import imagebitmask

+
 class CombineImage:
     """
-    Process and combine detector images.
-
-    All core processing steps for each image are handled by
-    :class:`~pypeit.images.rawimage.RawImage`. This class can be used to
-    process both single images, lists of images, and detector mosaics.
+    Process and combine detector images.

     Args:
-        spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`):
-            Spectrograph used to take the data.
-        det (:obj:`int`, :obj:`tuple`):
-            The 1-indexed detector number(s) to process. If a tuple, it must
-            include detectors viable as a mosaic for the provided spectrograph;
-            see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`.
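With processing moved out of ``CombineImage``, callers now follow the two-step pattern used by ``buildimage_fromlist`` above. A minimal sketch of the new calling sequence; ``spectrograph``, ``det``, ``frame_par``, ``bias``, ``bpm``, and ``file_list`` are placeholders for objects the caller already has:

.. code-block:: python

    from pypeit.images import rawimage, combineimage

    # Step 1: process each raw frame individually
    processed = [rawimage.RawImage(f, spectrograph, det).process(frame_par['process'],
                                                                 bias=bias, bpm=bpm)
                 for f in file_list]

    # Step 2: combine the already-processed images
    comb = combineimage.CombineImage(processed, frame_par['process']).run()

This keeps image processing (``RawImage``) and image combination (``CombineImage``) cleanly separated, which is the point of the refactor.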
+        rawImages (:obj:`list`, :class:`~pypeit.images.pypeitimage.PypeItImage`):
+            Either a single :class:`~pypeit.images.pypeitimage.PypeItImage` object or a list of one or more
+            of these objects to be combined into an image.
         par (:class:`~pypeit.par.pypeitpar.ProcessImagesPar`):
             Parameters that dictate the processing of the images.
-        files (:obj:`str`, array-like):
-            A set of one or more images to process and combine.

     Attributes:
-        spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`):
-            Spectrograph used to take the data.
         det (:obj:`int`, :obj:`tuple`):
             The 1-indexed detector number(s) to process.
         par (:class:`~pypeit.par.pypeitpar.ProcessImagesPar`):
             Parameters that dictate the processing of the images.
-        files (:obj:`list`):
-            A set of one or more images to process and combine.
+        rawImages (:obj:`list`):
+            A list of one or more :class:`~pypeit.images.pypeitimage.PypeItImage` objects
+            to be combined.

     """
-    def __init__(self, spectrograph, det, par, files):
-        self.spectrograph = spectrograph
-        self.det = det
+    def __init__(self, rawImages, par):
         if not isinstance(par, pypeitpar.ProcessImagesPar):
             msgs.error('Provided ParSet for must be type ProcessImagesPar.')
+        self.rawImages = list(rawImages) if hasattr(rawImages, '__len__') else [rawImages]
         self.par = par
         # This musts be named this way as it is frequently a child
-        self.files = list(files) if hasattr(files, '__len__') else [files]
-        # NOTE: nfiles is a property method. Defining files above must come
+
+        # NOTE: nimgs is a property method. Defining rawImages above must come
         # before this check!
-        if self.nfiles == 0:
+        if self.nimgs == 0:
             msgs.error('CombineImage requires a list of files to instantiate')

@@ -75,7 +62,7 @@ def run(self, ignore_saturation=False, maxiters=5):
         file and returns the result.

         If there are multiple files, all the files are processed and the
-        processed images are combined based on the ``combine_method``, where the
+        processed images are combined based on ``par['combine']``, where the
         options are:

             - 'mean': If ``sigma_clip`` is True, this is a sigma-clipped mean;
@@ -133,34 +120,13 @@ def run(self, ignore_saturation=False, maxiters=5):
             in images of the same shape.

         Args:
-            bias (:class:`~pypeit.images.buildimage.BiasImage`, optional):
-                Bias image for bias subtraction; passed directly to
-                :func:`~pypeit.images.rawimage.RawImage.process` for all images.
-            scattlight (:class:`~pypeit.scattlight.ScatteredLight`, optional):
-                Scattered light model to be used to determine scattered light.
-            flatimages (:class:`~pypeit.flatfield.FlatImages`, optional):
-                Flat-field images for flat fielding; passed directly to
-                :func:`~pypeit.images.rawimage.RawImage.process` for all images.
             ignore_saturation (:obj:`bool`, optional):
                 If True, turn off the saturation flag in the individual images
                 before stacking. This avoids having such values set to 0, which
                 for certain images (e.g. flat calibrations) can have unintended
                 consequences.
-            sigma_clip (:obj:`bool`, optional):
-                When ``combine_method='mean'``, perform a sigma-clip the data;
-                see :func:`~pypeit.core.combine.weighted_combine`.
-            bpm (`numpy.ndarray`_, optional):
-                Bad pixel mask; passed directly to
-                :func:`~pypeit.images.rawimage.RawImage.process` for all images.
-            sigrej (:obj:`float`, optional):
-                When ``combine_method='mean'``, this sets the sigma-rejection
-                thresholds used when sigma-clipping the image combination.
-                Ignored if ``sigma_clip`` is False. If None and ``sigma_clip``
-                is True, the thresholds are determined automatically based on
-                the number of images provided; see
-                :func:`~pypeit.core.combine.weighted_combine``.
             maxiters (:obj:`int`, optional):
-                When ``combine_method='mean'``) and sigma-clipping
+                When ``par['combine']='mean'`` and sigma-clipping
                 (``sigma_clip`` is True), this sets the maximum number of
                 rejection iterations. If None, rejection iterations continue
                 until no more data are rejected; see
@@ -170,79 +136,56 @@ def run(self, ignore_saturation=False, maxiters=5):
             :class:`~pypeit.images.pypeitimage.PypeItImage`: The combination of
             all the processed images.
         """
+
+        # Check the input (i.e., bomb out *before* it does any processing)
-        if self.nfiles == 0:
+        if self.nimgs == 0:
             msgs.error('Object contains no files to process!')
-        if self.nfiles > 1 and combine_method not in ['mean', 'median']:
-            msgs.error(f'Unknown image combination method, {combine_method}. Must be '
+        if self.nimgs > 1 and self.par['combine'] not in ['mean', 'median']:
+            msgs.error(f'Unknown image combination method, {self.par["combine"]}. Must be '
                        '"mean" or "median".')
-
+        file_list = []
         # Loop on the files
-        for kk, ifile in enumerate(self.files):
-            # Load raw image
-            rawImage = rawimage.RawImage(ifile, self.spectrograph, self.det)
-            # Process
-            pypeitImage = rawImage.process(self.par, scattlight=scattlight, bias=bias, bpm=bpm, dark=dark,
-                                           flatimages=flatimages, slits=slits, mosaic=mosaic)
-
-            if self.nfiles == 1:
+        for kk, rawImage in enumerate(self.rawImages):
+            if self.nimgs == 1:
                 # Only 1 file, so we're done
-                pypeitImage.files = self.files
-                return pypeitImage
+                rawImage.files = [rawImage.filename]
+                return rawImage
             elif kk == 0:
                 # Allocate arrays to collect data for each frame
-                shape = (self.nfiles,) + pypeitImage.shape
+                shape = (self.nimgs,) + rawImage.shape
                 img_stack = np.zeros(shape, dtype=float)
                 scl_stack = np.ones(shape, dtype=float)
                 rn2img_stack = np.zeros(shape, dtype=float)
                 basev_stack = np.zeros(shape, dtype=float)
                 gpm_stack = np.zeros(shape, dtype=bool)
-                lampstat = [None]*self.nfiles
-                exptime = np.zeros(self.nfiles, dtype=float)
+                exptime = np.zeros(self.nimgs, dtype=float)

-            # Save the lamp status
-            # TODO: As far as I can tell, this is the *only* place rawheadlist
-            # is used. Is there a way we can get this from fitstbl instead?
-            lampstat[kk] = self.spectrograph.get_lamps_status(pypeitImage.rawheadlist)
             # Save the exposure time to check if it's consistent for all images.
-            exptime[kk] = pypeitImage.exptime
+            exptime[kk] = rawImage.exptime
             # Processed image
-            img_stack[kk] = pypeitImage.image
+            img_stack[kk] = rawImage.image
             # Get the count scaling
-            if pypeitImage.img_scale is not None:
-                scl_stack[kk] = pypeitImage.img_scale
+            if rawImage.img_scale is not None:
+                scl_stack[kk] = rawImage.img_scale
             # Read noise squared image
-            if pypeitImage.rn2img is not None:
-                rn2img_stack[kk] = pypeitImage.rn2img * scl_stack[kk]**2
+            if rawImage.rn2img is not None:
+                rn2img_stack[kk] = rawImage.rn2img * scl_stack[kk]**2
             # Processing variance image
-            if pypeitImage.base_var is not None:
-                basev_stack[kk] = pypeitImage.base_var * scl_stack[kk]**2
+            if rawImage.base_var is not None:
+                basev_stack[kk] = rawImage.base_var * scl_stack[kk]**2
             # Final mask for this image
             # TODO: This seems kludgy to me. Why not just pass ignore_saturation
             # to process_one and ignore the saturation when the mask is actually
             # built, rather than untoggling the bit here?
if ignore_saturation: # Important for calibrations as we don't want replacement by 0 - pypeitImage.update_mask('SATURATION', action='turn_off') + rawImage.update_mask('SATURATION', action='turn_off') # Get a simple boolean good-pixel mask for all the unmasked pixels - gpm_stack[kk] = pypeitImage.select_flag(invert=True) - - # Check that the lamps being combined are all the same: - if not lampstat[1:] == lampstat[:-1]: - msgs.warn("The following files contain different lamp status") - # Get the longest strings - maxlen = max([len("Filename")]+[len(os.path.split(x)[1]) for x in self.files]) - maxlmp = max([len("Lamp status")]+[len(x) for x in lampstat]) - strout = "{0:" + str(maxlen) + "} {1:s}" - # Print the messages - print(msgs.indent() + '-'*maxlen + " " + '-'*maxlmp) - print(msgs.indent() + strout.format("Filename", "Lamp status")) - print(msgs.indent() + '-'*maxlen + " " + '-'*maxlmp) - for ff, file in enumerate(self.files): - print(msgs.indent() - + strout.format(os.path.split(file)[1], " ".join(lampstat[ff].split("_")))) - print(msgs.indent() + '-'*maxlen + " " + '-'*maxlmp) - - # Do a similar check for exptime + gpm_stack[kk] = rawImage.select_flag(invert=True) + file_list.append(rawImage.filename) + + # Check that all exposure times are consistent + # TODO: JFH suggests that we move this to calibrations.check_calibrations if np.any(np.absolute(np.diff(exptime)) > 0): # TODO: This should likely throw an error instead! msgs.warn('Exposure time is not consistent for all images being combined! ' @@ -281,21 +224,21 @@ def run(self, ignore_saturation=False, maxiters=5): #TODO: Update docs # Coadd them - if combine_method == 'mean': - weights = np.ones(self.nfiles, dtype=float)/self.nfiles + if self.par['combine'] == 'mean': + weights = np.ones(self.nimgs, dtype=float)/self.nimgs img_list_out, var_list_out, gpm, nframes \ = combine.weighted_combine(weights, [img_stack, scl_stack], # images to stack [rn2img_stack, basev_stack], # variances to stack - gpm_stack, sigma_clip=sigma_clip, + gpm_stack, sigma_clip=self.par['clip'], sigma_clip_stack=img_stack, # clipping based on img - sigrej=sigrej, maxiters=maxiters) + sigrej=self.par['comb_sigrej'], maxiters=maxiters) comb_img, comb_scl = img_list_out comb_rn2, comb_basev = var_list_out # Divide by the number of images that contributed to each pixel comb_scl[gpm] /= nframes[gpm] - elif combine_method == 'median': + elif self.par['combine'] == 'median': bpm_stack = np.logical_not(gpm_stack) nframes = np.sum(gpm_stack, axis=0) gpm = nframes > 0 @@ -326,26 +269,26 @@ def run(self, ignore_saturation=False, maxiters=5): # Build the combined image comb = pypeitimage.PypeItImage(image=comb_img, ivar=utils.inverse(comb_var), nimg=nframes, - amp_img=pypeitImage.amp_img, det_img=pypeitImage.det_img, + amp_img=rawImage.amp_img, det_img=rawImage.det_img, rn2img=comb_rn2, base_var=comb_basev, img_scale=comb_scl, # NOTE: This *must* be a boolean. bpm=np.logical_not(gpm), # NOTE: The detector is needed here so # that we can get the dark current later. - detector=pypeitImage.detector, - PYP_SPEC=self.spectrograph.name, + detector=rawImage.detector, + PYP_SPEC=rawImage.PYP_SPEC, units='e-' if self.par['apply_gain'] else 'ADU', exptime=comb_texp, noise_floor=self.par['noise_floor'], shot_noise=self.par['shot_noise']) # Internals # TODO: Do we need these? 
- comb.files = self.files - comb.rawheadlist = pypeitImage.rawheadlist - comb.process_steps = pypeitImage.process_steps + comb.files = file_list + comb.rawheadlist = rawImage.rawheadlist + comb.process_steps = rawImage.process_steps # Build the base level mask - comb.build_mask(saturation='default', mincounts='default') + comb.build_mask(saturation='default' if not ignore_saturation else None, mincounts='default') # Flag all pixels with no contributions from any of the stacked images. comb.update_mask('STCKMASK', indx=np.logical_not(gpm)) @@ -354,10 +297,10 @@ def run(self, ignore_saturation=False, maxiters=5): return comb @property - def nfiles(self): + def nimgs(self): """ The number of files in :attr:`files`. """ - return len(self.files) if isinstance(self.files, (np.ndarray, list)) else 0 + return len(self.rawImages) if isinstance(self.rawImages, (np.ndarray, list)) else 0 diff --git a/pypeit/images/pypeitimage.py b/pypeit/images/pypeitimage.py index d8a6edcb8a..dc8e07b591 100644 --- a/pypeit/images/pypeitimage.py +++ b/pypeit/images/pypeitimage.py @@ -91,6 +91,9 @@ class PypeItImage(datamodel.DataContainer): :class:`~pypeit.images.rawimage.RawImage.process`. """ + # TODO These docs are confusing. The __init__ method needs to be documented just as it is for + # every other class that we have written in PypeIt, i.e. the arguments all need to be documented. They are not + # documented here and instead we have the odd Args documentation above. version = '1.3.0' """Datamodel version number""" @@ -127,7 +130,8 @@ class PypeItImage(datamodel.DataContainer): 'shot_noise': dict(otype=bool, descr='Shot-noise included in variance'), 'spat_flexure': dict(otype=float, descr='Shift, in spatial pixels, between this image ' - 'and SlitTrace')} + 'and SlitTrace'), + 'filename': dict(otype=str, descr='Filename for the image'),} """Data model components.""" internals = ['process_steps', 'files', 'rawheadlist'] @@ -160,10 +164,11 @@ def from_pypeitimage(cls, pypeitImage): # Done return self + # TODO This init method needs proper docs, which includes every optional argument. See my comment above. def __init__(self, image, ivar=None, nimg=None, amp_img=None, det_img=None, rn2img=None, base_var=None, img_scale=None, fullmask=None, detector=None, spat_flexure=None, - PYP_SPEC=None, units=None, exptime=None, noise_floor=None, shot_noise=None, - bpm=None, crmask=None, usermask=None, clean_mask=False): + filename=None, PYP_SPEC=None, units=None, exptime=None, noise_floor=None, + shot_noise=None, bpm=None, crmask=None, usermask=None, clean_mask=False): if image is None: msgs.error('Must provide an image when instantiating PypeItImage.') diff --git a/pypeit/images/rawimage.py b/pypeit/images/rawimage.py index e3cb297479..55da1ac505 100644 --- a/pypeit/images/rawimage.py +++ b/pypeit/images/rawimage.py @@ -177,6 +177,7 @@ def __init__(self, ifile, spectrograph, det): self.steps = dict(apply_gain=False, subtract_pattern=False, subtract_overscan=False, + correct_nonlinear=False, subtract_continuum=False, subtract_scattlight=False, trim=False, @@ -304,6 +305,26 @@ def build_ivar(self): noise_floor=self.par['noise_floor']) return utils.inverse(var) + def correct_nonlinear(self): + """ + Apply a non-linear correction to the image. + + This is a simple wrapper for :func:`~pypeit.core.procimg.nonlinear_counts`. 
+        """
+        step = inspect.stack()[0][3]
+        if self.steps[step]:
+            # Already applied
+            msgs.warn('Non-linear correction was already applied.')
+            return
+
+        for ii in range(self.nimg):
+            # Correct each image for non-linearity. Note that the variance image is not changed here.
+            # The amplifier map in datasec_img is 1-indexed, so subtract 1 to obtain the 0-indexed
+            # amplifier identifiers expected by procimg.nonlinear_counts.
+            self.image[ii, ...] = procimg.nonlinear_counts(self.image[ii, ...], self.datasec_img[ii, ...]-1,
+                                                           self.par['correct_nonlinear'])
+
+        self.steps[step] = True
+
     def estimate_readnoise(self):
         r"""
         Estimate the readnoise (in electrons) based on the overscan regions of
@@ -613,7 +634,12 @@ def process(self, par, bpm=None, scattlight=None, flatimages=None, bias=None, sl
             self.subtract_bias(bias)

         # TODO: Checking for count (well-depth) saturation should be done here.
-        # TODO :: Non-linearity correction should be done here.
+
+        # - Perform a non-linearity correction. This is done before the
+        #   flat-field and dark correction because the flat-field modifies
+        #   the counts.
+        if self.par['correct_nonlinear'] is not None:
+            self.correct_nonlinear()

         # - Create the dark current image(s). The dark-current image *always*
         #   includes the tabulated dark current and the call below ensures
@@ -676,7 +702,8 @@ def process(self, par, bpm=None, scattlight=None, flatimages=None, bias=None, sl
                                            exptime=self.exptime,
                                            noise_floor=self.par['noise_floor'],
                                            shot_noise=self.par['shot_noise'],
-                                           bpm=_bpm.astype(bool))
+                                           bpm=_bpm.astype(bool),
+                                           filename=self.filename)

         pypeitImage.rawheadlist = self.headarr
         pypeitImage.process_steps = [key for key in self.steps.keys() if self.steps[key]]
@@ -1192,7 +1219,7 @@ def subtract_scattlight(self, msscattlight, slits, debug=False):
                      f"  {tmp[13]}, {tmp[14]}, {tmp[15]}])  # Polynomial terms (coefficients of spec**index)\n"
             print(strprint)
             pad = msscattlight.pad // spatbin
-            offslitmask = slits.slit_img(pad=pad, initial=True, flexure=None) == -1
+            offslitmask = slits.slit_img(pad=pad, flexure=None) == -1
             from matplotlib import pyplot as plt
             _frame = self.image[ii, ...]
            vmin, vmax = 0, np.max(scatt_img)
@@ -1217,7 +1244,7 @@ def subtract_scattlight(self, msscattlight, slits, debug=False):
         elif self.par["scattlight"]["method"] == "frame":
             # Calculate a model specific for this frame
             pad = msscattlight.pad // spatbin
-            offslitmask = slits.slit_img(pad=pad, initial=True, flexure=None) == -1
+            offslitmask = slits.slit_img(pad=pad, flexure=None) == -1
             # Get starting parameters for the scattered light model
             x0, bounds = self.spectrograph.scattered_light_archive(binning, dispname)
             # Perform a fit to the scattered light
@@ -1241,11 +1268,11 @@ def subtract_scattlight(self, msscattlight, slits, debug=False):
         # Check if a fine correction to the scattered light should be applied
         if do_finecorr:
             pad = self.par['scattlight']['finecorr_pad'] // spatbin
-            offslitmask = slits.slit_img(pad=pad, initial=True, flexure=None) == -1
+            offslitmask = slits.slit_img(pad=pad, flexure=None) == -1
             # Check if the user wishes to mask some inter-slit regions
             if self.par['scattlight']['finecorr_mask'] is not None:
                 # Get the central trace of each slit
-                left, right, _ = slits.select_edges(initial=True, flexure=None)
+                left, right, _ = slits.select_edges(flexure=None)
                 centrace = 0.5*(left+right)
                 # Now mask user-defined inter-slit regions
                 offslitmask = scattlight.mask_slit_regions(offslitmask, centrace,
diff --git a/pypeit/inputfiles.py b/pypeit/inputfiles.py
index cfcefba6f4..952f40142b 100644
--- a/pypeit/inputfiles.py
+++ b/pypeit/inputfiles.py
@@ -735,6 +735,16 @@ class SensFile(InputFile):
     datablock_required = False
     setup_required = False

+
+class ExtractFile(InputFile):
+    """Child class for the Extraction input file
+    """
+    data_block = 'extract'  # Defines naming of data block
+    flavor = 'Extract'  # Defines naming of file
+    setup_required = False
+    datablock_required = False
+
+
 class FluxFile(InputFile):
     """Child class for the Fluxing input file
     """
@@ -895,6 +905,18 @@ def options(self):
         Parse the options associated with a cube block.
         Here is a description of the available options:

+        - ``sensfile``: The name of a sensitivity function file that is used
+          for the flux calibration. The file provided here should be generated by
+          (or of the same format as the output of) the command :ref:`pypeit_sensfunc`.
+          This parameter can also be set for all frames
+          with the default command:
+
+          .. code-block:: ini
+
+            [reduce]
+                [[cube]]
+                    sensfile = sens.fits
+
         - ``scale_corr``: The name of an alternative spec2d file that is used
           for the relative spectral scale correction. This parameter can also be set
           for all frames with the default command:

          .. code-block:: ini

            [reduce]
                [[cube]]
                    scale_corr = spec2d_alternative.fits

+        - ``grating_corr``: The name of a Flat calibration file that is used
+          for the grating tilt correction. This parameter can also be set for all frames
+          with the default command:
+
+          .. code-block:: ini
+
+            [reduce]
+                [[cube]]
+                    grating_corr = Flat_A_0_DET01.fits
+
         - ``skysub_frame``: The name of an alternative spec2d file that is used
           for the sky subtraction. This parameter can also be set for all frames
           with the default command:

          .. code-block:: ini

            [reduce]
                [[cube]]
                    skysub_frame = spec2d_alternative.fits

+        - ``ra_offset``: The RA offset to apply to the WCS of the cube.
+
+        - ``dec_offset``: The DEC offset to apply to the WCS of the cube.
+
+
         Returns
         -------
         opts: dict
             Dictionary containing cube options.
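For per-frame control, the same options can also be given as extra columns in the data block of the coadd3d input file; a hedged sketch, assuming the pipe-separated ``spec2d`` data-block layout used by these input files, with placeholder filenames and offsets:

.. code-block:: text

    spec2d read
                 filename |   sensfile |        grating_corr | ra_offset | dec_offset
      spec2d_frame01.fits | sens1.fits | Flat_A_0_DET01.fits |       0.0 |        0.0
      spec2d_frame02.fits | sens2.fits | Flat_B_0_DET01.fits |       0.5 |       -0.5
    spec2d end

Per-frame values override any defaults set in the ``[reduce][[cube]]`` parameter block; as the parsing code below enforces, a single ``grating_corr`` file cannot be shared across multiple input frames.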
""" # Define the list of allowed parameters - opts = dict(scale_corr=None, skysub_frame=None, ra_offset=None, dec_offset=None) + opts = dict(sensfile=None, scale_corr=None, grating_corr=None, skysub_frame=None, + ra_offset=None, dec_offset=None) + + # Get the sensfunc files + sensfile = self.path_and_files('sensfile', skip_blank=False, check_exists=False) + if sensfile is None: + opts['sensfile'] = None + elif len(sensfile) == 1 and len(self.filenames) > 1: + opts['sensfile'] = sensfile*len(self.filenames) + elif len(sensfile) != 0: + opts['sensfile'] = sensfile # Get the scale correction files scale_corr = self.path_and_files('scale_corr', skip_blank=False, check_exists=False) if scale_corr is None: opts['scale_corr'] = [None]*len(self.filenames) elif len(scale_corr) == 1 and len(self.filenames) > 1: - opts['scale_corr'] = scale_corr.lower()*len(self.filenames) + opts['scale_corr'] = scale_corr*len(self.filenames) elif len(scale_corr) != 0: opts['scale_corr'] = scale_corr + # Get the grating correction files + grating_corr = self.path_and_files('grating_corr', skip_blank=False, check_exists=False) + if grating_corr is None: + opts['grating_corr'] = [None]*len(self.filenames) + elif len(grating_corr) == 1 and len(self.filenames) > 1: + msgs.error("You cannot specify a single grating correction file for multiple input files.") + elif len(grating_corr) != 0: + opts['grating_corr'] = grating_corr + # Get the skysub files skysub_frame = self.path_and_files('skysub_frame', skip_blank=False, check_exists=False) if skysub_frame is None: diff --git a/pypeit/metadata.py b/pypeit/metadata.py index a0f0df18cb..f72a7c20c9 100644 --- a/pypeit/metadata.py +++ b/pypeit/metadata.py @@ -1032,18 +1032,28 @@ def _set_calib_group_bits(self): Set the calibration group bit based on the string values of the 'calib' column. """ - # NOTE: This is a hack to ensure the type of the *elements* of the calib - # column are all strings, but that the type of the column remains as - # "object". I'm calling this a hack because doing this is easier than + # Ensure that the type of the *elements* of the calib column are all + # strings, but that the type of the column remains as "object". + # NOTE: This is effectively a hack because doing this is easier than # trying to track down everywhere calib is changed to values that may or # may not be integers instead of strings. self['calib'] = np.array([str(c) for c in self['calib']], dtype=object) + # Collect and expand any lists # group_names = np.unique(np.concatenate( # [s.split(',') for s in self['calib'] if s not in ['all', 'None']])) - # DP changed to below because np.concatenate does not accept an empty list, - # which is the case when calib is None for all frames. This should avoid the code to crash - group_names = np.unique(sum([s.split(',') for s in self['calib'] if s not in ['all', 'None']], [])) + # NOTE: The above doesn't always work because np.concatenate does not + # accept an empty list, which is the case when calib is None or 'all' + # for all frames. + group_names = np.unique(sum([s.split(',') for s in self['calib'] + if s not in ['all', 'None']], [])) + + # If all the calibration groups are set to None or 'all', group_names + # can be an empty list. But we need to identify at least one + # calibration group, so I insert a mock value. 
+ if group_names.size == 0: + group_names = np.array(['0'], dtype=object) + # Expand any ranges keep_group = np.ones(group_names.size, dtype=bool) added_groups = [] @@ -1052,6 +1062,7 @@ def _set_calib_group_bits(self): # Parse the range keep_group[i] = False added_groups += [str(n) for n in parse.str2list(name)] + # Combine and find the unique *integer* identifiers group_names = np.unique(np.asarray(added_groups + (group_names[keep_group]).tolist()).astype(int)) diff --git a/pypeit/par/pypeitpar.py b/pypeit/par/pypeitpar.py index 2bb9404f06..52c94f7b26 100644 --- a/pypeit/par/pypeitpar.py +++ b/pypeit/par/pypeitpar.py @@ -219,7 +219,7 @@ def __init__(self, trim=None, apply_gain=None, orient=None, # calib_setup_and_bit=None, rmcompact=None, sigclip=None, sigfrac=None, objlim=None, use_biasimage=None, use_overscan=None, use_darkimage=None, - dark_expscale=None, + dark_expscale=None, correct_nonlinear=None, empirical_rn=None, shot_noise=None, noise_floor=None, use_pixelflat=None, use_illumflat=None, use_specillum=None, use_pattern=None, subtract_scattlight=None, scattlight=None, subtract_continuum=None, @@ -276,6 +276,15 @@ def __init__(self, trim=None, apply_gain=None, orient=None, 'for \'savgol\', set overscan_par = order, window size ; ' \ 'for \'median\', set overscan_par = None or omit the keyword.' + defaults['correct_nonlinear'] = None + dtypes['correct_nonlinear'] = list + descr['correct_nonlinear'] = 'Correct for non-linear response of the detector. If None, ' \ + 'no correction is performed. If a list, then the list should be ' \ + 'the non-linear correction parameter (alpha), where the functional ' \ + 'form is given by Ct = Cm (1 + alpha x Cm), with Ct and Cm the true ' \ + 'and measured counts. This parameter is usually ' \ + 'hard-coded for a given spectrograph, and should otherwise be left as None.' \ + defaults['use_darkimage'] = False dtypes['use_darkimage'] = bool descr['use_darkimage'] = 'Subtract off a dark image. If True, one or more darks must ' \ @@ -584,7 +593,7 @@ class FlatFieldPar(ParSet): see :ref:`parameters`. """ def __init__(self, method=None, pixelflat_file=None, spec_samp_fine=None, - spec_samp_coarse=None, spat_samp=None, tweak_slits=None, tweak_slits_thresh=None, + spec_samp_coarse=None, spat_samp=None, tweak_slits=None, tweak_method=None, tweak_slits_thresh=None, tweak_slits_maxfrac=None, rej_sticky=None, slit_trim=None, slit_illum_pad=None, illum_iter=None, illum_rej=None, twod_fit_npoly=None, saturated_slits=None, slit_illum_relative=None, slit_illum_ref_idx=None, slit_illum_smooth_npix=None, @@ -650,6 +659,17 @@ def __init__(self, method=None, pixelflat_file=None, spec_samp_fine=None, descr['tweak_slits'] = 'Use the illumination flat field to tweak the slit edges. ' \ 'This will work even if illumflatten is set to False ' + defaults['tweak_method'] = 'threshold' + options['tweak_method'] = FlatFieldPar.valid_tweak_methods() + dtypes['tweak_method'] = str + descr['tweak_method'] = 'Method used to tweak the slit edges (when "tweak_slits" is set to True). ' \ + 'Options include: {0:s}. '.format(', '.join(options['tweak_method'])) + \ + 'The "threshold" method determines when the left and right slit edges ' \ + 'fall below a threshold relative to the peak illumination. ' \ + 'The "gradient" method determines where the gradient is the highest at ' \ + 'the left and right slit edges. This method performs better when there is ' \ + 'systematic vignetting in the spatial direction. 
' + defaults['tweak_slits_thresh'] = 0.93 dtypes['tweak_slits_thresh'] = float descr['tweak_slits_thresh'] = 'If tweak_slits is True, this sets the illumination function threshold used to ' \ @@ -767,7 +787,7 @@ def from_dict(cls, cfg): k = np.array([*cfg.keys()]) parkeys = ['method', 'pixelflat_file', 'spec_samp_fine', 'spec_samp_coarse', 'spat_samp', 'pixelflat_min_wave', 'pixelflat_max_wave', - 'tweak_slits', 'tweak_slits_thresh', 'tweak_slits_maxfrac', + 'tweak_slits', 'tweak_method', 'tweak_slits_thresh', 'tweak_slits_maxfrac', 'rej_sticky', 'slit_trim', 'slit_illum_pad', 'slit_illum_relative', 'illum_iter', 'illum_rej', 'twod_fit_npoly', 'saturated_slits', 'slit_illum_ref_idx', 'slit_illum_smooth_npix', 'slit_illum_finecorr', 'fit_2d_det_response'] @@ -788,6 +808,13 @@ def valid_methods(): """ return ['bspline', 'skip'] # [ 'PolyScan', 'bspline' ]. Same here. Not sure what PolyScan is + @staticmethod + def valid_tweak_methods(): + """ + Return the valid options for the method used to tweak the slit edges. + """ + return ['threshold', 'gradient'] + @staticmethod def valid_saturated_slits_methods(): """ @@ -1578,9 +1605,9 @@ class CubePar(ParSet): """ def __init__(self, slit_spec=None, weight_method=None, align=None, combine=None, output_filename=None, - standard_cube=None, reference_image=None, save_whitelight=None, whitelight_range=None, method=None, + sensfile=None, reference_image=None, save_whitelight=None, whitelight_range=None, method=None, ra_min=None, ra_max=None, dec_min=None, dec_max=None, wave_min=None, wave_max=None, - spatial_delta=None, wave_delta=None, astrometric=None, grating_corr=None, scale_corr=None, + spatial_delta=None, wave_delta=None, astrometric=None, scale_corr=None, skysub_frame=None, spec_subpixel=None, spat_subpixel=None, slice_subpixel=None, correct_dar=None): @@ -1648,11 +1675,11 @@ def __init__(self, slit_spec=None, weight_method=None, align=None, combine=None, 'the combined datacube. If combine=False, the output filenames will be ' \ 'prefixed with ``spec3d_*``' - defaults['standard_cube'] = None - dtypes['standard_cube'] = str - descr['standard_cube'] = 'Filename of a standard star datacube. This cube will be used to correct ' \ - 'the relative scales of the slits, and to flux calibrate the science ' \ - 'datacube.' + defaults['sensfile'] = None + dtypes['sensfile'] = str + descr['sensfile'] = 'Filename of a sensitivity function to use to flux calibrate your datacube. ' \ + 'The sensitivity function file will also be used to correct the relative scales ' \ + 'of the slits.' defaults['reference_image'] = None dtypes['reference_image'] = str @@ -1766,13 +1793,6 @@ def __init__(self, slit_spec=None, weight_method=None, align=None, combine=None, dtypes['astrometric'] = bool descr['astrometric'] = 'If true, an astrometric correction will be applied using the alignment frames.' - defaults['grating_corr'] = True - dtypes['grating_corr'] = bool - descr['grating_corr'] = 'This option performs a small correction for the relative blaze function of all ' \ - 'input frames that have (even slightly) different grating angles, or if you are ' \ - 'flux calibrating your science data with a standard star that was observed with ' \ - 'a slightly different setup.'
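To show how the renamed ``sensfile`` and the new ``tweak_method`` parameters would be set by a user, here is a hedged sketch of the relevant PypeIt-file blocks (the filename is a placeholder, not a recommendation)::

    [calibrations]
        [[flatfield]]
            tweak_slits = True
            tweak_method = gradient
    [reduce]
        [[cube]]
            sensfile = sens_standard_star.fits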
- defaults['scale_corr'] = None dtypes['scale_corr'] = str descr['scale_corr'] = 'This option performs a small correction for the relative spectral illumination ' \ @@ -1811,10 +1831,10 @@ def from_dict(cls, cfg): k = np.array([*cfg.keys()]) # Basic keywords - parkeys = ['slit_spec', 'output_filename', 'standard_cube', 'reference_image', 'save_whitelight', + parkeys = ['slit_spec', 'output_filename', 'sensfile', 'reference_image', 'save_whitelight', 'method', 'spec_subpixel', 'spat_subpixel', 'slice_subpixel', 'ra_min', 'ra_max', 'dec_min', 'dec_max', 'wave_min', 'wave_max', 'spatial_delta', 'wave_delta', 'weight_method', 'align', 'combine', - 'astrometric', 'grating_corr', 'scale_corr', 'skysub_frame', 'whitelight_range', 'correct_dar'] + 'astrometric', 'scale_corr', 'skysub_frame', 'whitelight_range', 'correct_dar'] badkeys = np.array([pk not in parkeys for pk in k]) if np.any(badkeys): @@ -1841,7 +1861,6 @@ def validate(self): raise ValueError("'weight_method' must be one of:\n" + ", ".join(allowed_weight_methods)) - class FluxCalibratePar(ParSet): """ A parameter set holding the arguments for how to perform the flux diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py index 68bcd193ec..6505a0ef9d 100644 --- a/pypeit/pypeit.py +++ b/pypeit/pypeit.py @@ -790,6 +790,9 @@ def objfind_one(self, frames, det, bg_frames=None, std_outfile=None): bkg_redux_sciimg = sciImg # Build the background image bg_file_list = self.fitstbl.frame_paths(bg_frames) + # TODO I think we should create a separate self.par['bkgframe'] parameter set to hold the image + # processing parameters for the background frames. This would allow the user to specify different + # parameters for the background frames than for the science frames. bgimg = buildimage.buildimage_fromlist(self.spectrograph, det, frame_par, bg_file_list, bpm=self.caliBrate.msbpm, bias=self.caliBrate.msbias, diff --git a/pypeit/scattlight.py b/pypeit/scattlight.py index a6f149a6d4..675da0d07b 100644 --- a/pypeit/scattlight.py +++ b/pypeit/scattlight.py @@ -122,7 +122,7 @@ def show(self, image=None, slits=None, mask=False, wcs_match=True): wcs_match : :obj:`bool`, optional Use a reference image for the WCS and match all image in other channels to it. """ - offslitmask = slits.slit_img(pad=0, initial=True, flexure=None) == -1 if mask else 1 + offslitmask = slits.slit_img(pad=0, flexure=None) == -1 if mask else 1 # Prepare the frames _data = self.scattlight_raw if image is None else image diff --git a/pypeit/scripts/__init__.py b/pypeit/scripts/__init__.py index 8dc34fd239..4c8c160a91 100644 --- a/pypeit/scripts/__init__.py +++ b/pypeit/scripts/__init__.py @@ -21,6 +21,7 @@ from pypeit.scripts import collate_1d from pypeit.scripts import compare_sky from pypeit.scripts import edge_inspector +from pypeit.scripts import extract_datacube from pypeit.scripts import flux_calib from pypeit.scripts import flux_setup from pypeit.scripts import identify @@ -65,5 +66,3 @@ def script_classes(): return dict([ (n,c) for n,c in zip(scr_n[srt],scr_c[srt])]) pypeit_scripts = list(script_classes().keys()) - - diff --git a/pypeit/scripts/coadd_datacube.py b/pypeit/scripts/coadd_datacube.py index 2d4fbe4d71..d3c9c0aaa7 100644 --- a/pypeit/scripts/coadd_datacube.py +++ b/pypeit/scripts/coadd_datacube.py @@ -5,13 +5,6 @@ .. include common links, assuming primary doc root is up one directory .. 
include:: ../include/links.rst """ -import time -from pypeit import msgs -from pypeit import par -from pypeit import inputfiles -from pypeit import utils -from pypeit.coadd3d import CoAdd3D -from pypeit.spectrographs.util import load_spectrograph from pypeit.scripts import scriptbase from IPython import embed @@ -32,6 +25,15 @@ def get_parser(cls, width=None): @staticmethod def main(args): + import time + + from pypeit import msgs + from pypeit import par + from pypeit import inputfiles + from pypeit import utils + from pypeit.coadd3d import CoAdd3D + from pypeit.spectrographs.util import load_spectrograph + # Set the verbosity, and create a logfile if verbosity == 2 msgs.set_logfile_and_verbosity('coadd_datacube', args.verbosity) @@ -58,13 +60,15 @@ def main(args): dec_offsets = coadd3dfile.options['dec_offset'] skysub_frame = coadd3dfile.options['skysub_frame'] scale_corr = coadd3dfile.options['scale_corr'] + sensfile = coadd3dfile.options['sensfile'] + grating_corr = coadd3dfile.options['grating_corr'] # Instantiate CoAdd3d tstart = time.time() - coadd = CoAdd3D.get_instance(coadd3dfile.filenames, parset, skysub_frame=skysub_frame, - scale_corr=scale_corr, ra_offsets=ra_offsets, - dec_offsets=dec_offsets, spectrograph=spectrograph, - det=args.det, overwrite=args.overwrite) + coadd = CoAdd3D.get_instance(coadd3dfile.filenames, parset, skysub_frame=skysub_frame, sensfile=sensfile, + scale_corr=scale_corr, grating_corr=grating_corr, + ra_offsets=ra_offsets, dec_offsets=dec_offsets, + spectrograph=spectrograph, det=args.det, overwrite=args.overwrite) # Coadd the files coadd.run() diff --git a/pypeit/scripts/extract_datacube.py b/pypeit/scripts/extract_datacube.py new file mode 100644 index 0000000000..f819c4aad6 --- /dev/null +++ b/pypeit/scripts/extract_datacube.py @@ -0,0 +1,79 @@ +""" +This script allows the user to read a spec3D FITS file (DataCube) +from IFU instruments, and extract a 1D spectrum of the brightest +object. This script is primarily used to extract a spectrum of a +point source from a DataCube, and save it as a spec1d file. A +common usage is to extract a spectrum of a standard star from a +DataCube, and use it to flux calibrate the science DataCubes. + +.. include common links, assuming primary doc root is up one directory +.. include:: ../include/links.rst +""" +from pypeit.scripts import scriptbase + + +class ExtractDataCube(scriptbase.ScriptBase): + + @classmethod + def get_parser(cls, width=None): + parser = super().get_parser(description='Read in a datacube, extract a spectrum of a point source, ' + 'and save it as a spec1d file.', width=width) + parser.add_argument('file', type=str, default=None, help='spec3d.fits DataCube file') + parser.add_argument("-e", "--ext_file", type=str, + help='Configuration file with extraction parameters') + parser.add_argument("-s", "--save", type=str, + help='Output spec1d filename') + parser.add_argument('-o', '--overwrite', default=False, action='store_true', + help='Overwrite any existing files/directories') + parser.add_argument('-b', '--boxcar_radius', type=float, default=None, + help='Radius of the circular boxcar (in arcseconds) to use for the extraction.') + parser.add_argument('-v', '--verbosity', type=int, default=1, + help='Verbosity level between 0 [none] and 2 [all]. Default: 1. 
' 'Level 2 writes a log with filename extract_datacube_YYYYMMDD-HHMM.log') + return parser + + @staticmethod + def main(args): + import time + + from pypeit import msgs + from pypeit import par + from pypeit import inputfiles + from pypeit import utils + from pypeit.spectrographs.util import load_spectrograph + from pypeit.coadd3d import DataCube + + # Set the verbosity, and create a logfile if verbosity == 2 + msgs.set_logfile_and_verbosity('extract_datacube', args.verbosity) + + # Check that a file has been provided + if args.file is None: + msgs.error('You must input a spec3d (i.e. PypeIt DataCube) fits file') + extcube = DataCube.from_file(args.file) + spectrograph = load_spectrograph(extcube.PYP_SPEC) + + if args.ext_file is None: + parset = spectrograph.default_pypeit_par() + else: + # Read in the relevant information from the .extract file + ext3dfile = inputfiles.ExtractFile.from_file(args.ext_file) + + # Parameters + spectrograph_def_par = spectrograph.default_pypeit_par() + parset = par.PypeItPar.from_cfg_lines(cfg_lines=spectrograph_def_par.to_config(), + merge_with=(ext3dfile.cfg_lines,)) + + # Set the boxcar radius + boxcar_radius = args.boxcar_radius + + # Set the output name + outname = None if args.save is None else args.save + + # Time the extraction + tstart = time.time() + + # Extract the spectrum + extcube.extract_spec(parset['reduce'], outname=outname, boxcar_radius=boxcar_radius, overwrite=args.overwrite) + + # Report the extraction time + msgs.info(utils.get_time_string(time.time()-tstart)) diff --git a/pypeit/scripts/sensfunc.py b/pypeit/scripts/sensfunc.py index b687dab2ca..45e4b9a61b 100644 --- a/pypeit/scripts/sensfunc.py +++ b/pypeit/scripts/sensfunc.py @@ -59,8 +59,9 @@ def get_parser(cls, width=None): parser.add_argument("-s", "--sens_file", type=str, help='Configuration file with sensitivity function parameters') parser.add_argument("-f", "--flatfile", type=str, - help="R|Use the flat file for computing the sensitivity " - "function. Note that it is not possible to set --flatfile and " + help="R|Use a flat calibration file to compute the blaze function when generating " + "the sensitivity function. This is helpful to account for small-scale undulations " + "in the sensitivity function. Note that it is not possible to set --flatfile and " "simultaneously use a .sens file with the --sens_file option. 
If " "you are using a .sens file, set the flatfile there via e.g.:\n\n" "F| [sensfunc]\n" diff --git a/pypeit/sensfunc.py b/pypeit/sensfunc.py index 9494888054..5112145238 100644 --- a/pypeit/sensfunc.py +++ b/pypeit/sensfunc.py @@ -337,7 +337,6 @@ def compute_blaze(self, wave, trace_spec, trace_spat, flatfile, box_radius=10.0, # to get rid of this .squeeze() return log10_blaze_function.squeeze() - def _bundle(self): """ Bundle the object for writing using @@ -481,7 +480,6 @@ def flux_std(self): self.sens['SENS_FLUXED_STD_FLAM_IVAR'] = flam_ivar.T self.sens['SENS_FLUXED_STD_MASK'] = flam_mask.T - def eval_zeropoint(self, wave, iorddet): """ Dummy method, overloaded by subclasses diff --git a/pypeit/slittrace.py b/pypeit/slittrace.py index 93b4b7df32..9405b51ad2 100644 --- a/pypeit/slittrace.py +++ b/pypeit/slittrace.py @@ -479,7 +479,7 @@ def get_slitlengths(self, initial=False, median=False): slitlen = right - left return np.median(slitlen, axis=1) if median else slitlen - def get_radec_image(self, wcs, alignSplines, tilts, slit_compute=None, slice_offset=None, initial=True, flexure=None): + def get_radec_image(self, wcs, alignSplines, tilts, slit_compute=None, slice_offset=None, initial=False, flexure=None): """Generate an RA and DEC image for every pixel in the frame NOTE: This function is currently only used for SlicerIFU reductions. diff --git a/pypeit/specobj.py b/pypeit/specobj.py index 97ce35aa99..d69bd74f0a 100644 --- a/pypeit/specobj.py +++ b/pypeit/specobj.py @@ -485,7 +485,6 @@ def update_flex_shift(self, shift, flex_type='local'): # Now update the total flexure self.FLEX_SHIFT_TOTAL += shift - # TODO This should be a wrapper calling a core algorithm. def apply_flux_calib(self, wave_zp, zeropoint, exptime, tellmodel=None, extinct_correct=False, airmass=None, longitude=None, latitude=None, extinctfilepar=None, extrap_sens=False): @@ -502,8 +501,8 @@ def apply_flux_calib(self, wave_zp, zeropoint, exptime, tellmodel=None, extinct_ exptime (float): Exposure time tellmodel (?): - Telluric correction - extinct_correct (?): + Telluric correction. Note: This is deprecated and will be removed in a future version. + extinct_correct (bool, optional): If True, extinction correct airmass (float, optional): Airmass diff --git a/pypeit/specobjs.py b/pypeit/specobjs.py index 62cea7d46c..0ee732c774 100644 --- a/pypeit/specobjs.py +++ b/pypeit/specobjs.py @@ -236,7 +236,6 @@ def unpack_object(self, ret_flam=False, extract_type='OPT'): detector = [None]*norddet ech_orders = np.zeros(norddet, dtype=int) - # TODO make the extraction that is desired OPT vs BOX an optional input variable. for iorddet in range(norddet): wave[:, iorddet] = getattr(self, wave_key)[iorddet] flux_gpm[:, iorddet] = getattr(self, '{}_MASK'.format(extract_type))[iorddet] @@ -244,7 +243,7 @@ def unpack_object(self, ret_flam=False, extract_type='OPT'): if self[0].PYPELINE == 'Echelle': ech_orders[iorddet] = self[iorddet].ECH_ORDER flux[:, iorddet] = getattr(self, flux_key)[iorddet] - flux_ivar[:, iorddet] = getattr(self, flux_key+'_IVAR')[iorddet] #OPT_FLAM_IVAR + flux_ivar[:, iorddet] = getattr(self, flux_key+'_IVAR')[iorddet] trace_spat[:, iorddet] = self[iorddet].TRACE_SPAT trace_spec[:, iorddet] = self[iorddet].trace_spec @@ -719,16 +718,23 @@ def write_to_fits(self, subheader, outfile, overwrite=True, update_det=None, Args: subheader (:obj:`dict`): + Dictionary with header keywords and values to be added to the + primary header of the output file. 
outfile (str): + Name of the output file overwrite (bool, optional): + Overwrite the output file if it exists? + update_det (int or list, optional): + If provided, do not clobber the existing file but only update + the indicated detectors. Useful for re-running on a subset of detectors slitspatnum (:obj:`str` or :obj:`list`, optional): Restricted set of slits for reduction. If provided, do not clobber the existing file but only update the indicated slits. Useful for re-running on a subset of slits - update_det (int or list, optional): - If provided, do not clobber the existing file but only update - the indicated detectors. Useful for re-running on a subset of detectors - + history (:obj:`str`, optional): + String to be added to the header HISTORY keyword. + debug (:obj:`bool`, optional): + If True, run in debug mode. """ if os.path.isfile(outfile) and not overwrite: msgs.warn(f'{outfile} exits. Set overwrite=True to overwrite it.') diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index 148464d7d1..5a88298847 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -612,11 +612,13 @@ def default_pypeit_par(cls): # Don't do 1D extraction for 3D data - it's meaningless because the DAR correction must be performed on the 3D data. par['reduce']['extraction']['skip_extraction'] = True # Because extraction occurs before the DAR correction, don't extract - #par['calibrations']['flatfield']['tweak_slits'] = False # Do not tweak the slit edges (we want to use the full slit) + # Tweak the slit edges using the gradient method for SlicerIFU + par['calibrations']['flatfield']['tweak_slits'] = True # Tweak the slit edges + par['calibrations']['flatfield']['tweak_method'] = 'gradient' # The gradient method is better for SlicerIFU. + par['calibrations']['flatfield']['tweak_slits_thresh'] = 0.0 # Make sure the full slit is used (i.e. when the illumination fraction is > 0.5) + par['calibrations']['flatfield']['tweak_slits_maxfrac'] = 0.0 # Make sure the full slit is used (i.e. no padding) + par['calibrations']['flatfield']['slit_trim'] = 2 # Trim the slit edges - par['calibrations']['slitedges']['pad'] = 2 # Need to pad out the tilts for the astrometric transform when creating a datacube. + par['calibrations']['slitedges']['pad'] = 0 # Do not pad the slits - with tweak_method=gradient, this ensures that the edges are defined at the maximum gradient. 
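The idea behind the new ``gradient`` tweak can be illustrated with a toy NumPy sketch (an illustration of the concept only, not the PypeIt implementation): take the flat-field illumination profile across a slit and adopt the positions of steepest rise and fall as the left and right edges::

    import numpy as np

    profile = np.zeros(100)
    profile[20:80] = 1.0                                         # idealized top-hat slit
    profile = np.convolve(profile, np.ones(5) / 5, mode='same')  # soften the edges
    grad = np.gradient(profile)
    left_edge = np.argmax(grad)    # steepest rise of the illumination
    right_edge = np.argmin(grad)   # steepest fall of the illumination
    print(left_edge, right_edge)   # positions close to the true edges at 20 and 79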
# Decrease the wave tilts order, given the shorter slits of the IFU par['calibrations']['tilts']['spat_order'] = 1 @@ -624,7 +626,6 @@ def default_pypeit_par(cls): # Make sure that this is reduced as a slit (as opposed to fiber) spectrograph par['reduce']['cube']['slit_spec'] = True - par['reduce']['cube']['grating_corr'] = False par['reduce']['cube']['combine'] = False # Make separate spec3d files from the input spec2d files # Sky subtraction parameters @@ -717,7 +718,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): pxscl = spatial_scale / 3600.0 # 3600 is to convert arcsec to degrees # Get the typical slit length (this changes by ~0.3% over all slits, so a constant is fine for now) - slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True)))) + slitlength = int(np.round(np.median(slits.get_slitlengths(median=True)))) # Get RA/DEC raval = self.get_meta_value([hdr], 'ra') diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index f622ad99de..78bc81fa21 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -471,6 +471,13 @@ def default_pypeit_par(cls): par['calibrations']['tilts']['spat_order'] = 1 par['calibrations']['tilts']['spec_order'] = 1 + # Tweak the slit edges using the gradient method for SlicerIFU + par['calibrations']['slitedges']['pad'] = 0 # Do not pad the slits - with tweak_method=gradient, this ensures that the edges are defined at the maximum gradient. + par['calibrations']['flatfield']['tweak_slits'] = True # Tweak the slit edges + par['calibrations']['flatfield']['tweak_method'] = 'gradient' # The gradient method is better for SlicerIFU. + par['calibrations']['flatfield']['tweak_slits_thresh'] = 0.0 # Make sure the full slit is used (i.e. when the illumination fraction is > 0.5) + par['calibrations']['flatfield']['tweak_slits_maxfrac'] = 0.0 # Make sure the full slit is used (i.e. no padding) + # Make sure that this is reduced as a slit (as opposed to fiber) spectrograph par['reduce']['cube']['slit_spec'] = True par['reduce']['cube']['combine'] = False # Make separate spec3d files from the input spec2d files @@ -527,7 +534,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): pxscl = spatial_scale / 3600.0 # 3600 is to convert arcsec to degrees # Get the typical slit length (this changes by ~0.3% over all slits, so a constant is fine for now) - slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True)))) + slitlength = int(np.round(np.median(slits.get_slitlengths(median=True)))) # Get RA/DEC raval = self.get_meta_value([hdr], 'ra') diff --git a/pypeit/spectrographs/jwst_nirspec.py b/pypeit/spectrographs/jwst_nirspec.py index 1a5fb586e8..80a8abb2f3 100644 --- a/pypeit/spectrographs/jwst_nirspec.py +++ b/pypeit/spectrographs/jwst_nirspec.py @@ -254,7 +254,7 @@ def allowed_mosaics(self): ``PypeIt``. 
""" return [(1,2)] - + def get_rawimage(self, raw_file, det): """ Read raw images and generate a few other bits and pieces diff --git a/pypeit/spectrographs/keck_deimos.py b/pypeit/spectrographs/keck_deimos.py index 5f9360c87f..9657fe4d4d 100644 --- a/pypeit/spectrographs/keck_deimos.py +++ b/pypeit/spectrographs/keck_deimos.py @@ -766,7 +766,6 @@ def get_rawimage(self, raw_file, det): mosaic = None if nimg == 1 else self.get_mosaic_par(det, hdu=hdu) detectors = [self.get_detector_par(det, hdu=hdu)] if nimg == 1 else mosaic.detectors - # TODO check that that read noise and gain are the same for this amplifier mode?? if hdu[0].header['AMPMODE'] not in ['SINGLE:B', 'SINGLE:A']: msgs.error('PypeIt can only reduce images with AMPMODE == SINGLE:B or AMPMODE == SINGLE:A.') if hdu[0].header['MOSMODE'] != 'Spectral': diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 5743eaa6eb..cdf4a20b10 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -283,6 +283,34 @@ def default_pypeit_par(cls): # Set the number of alignments in the align frames par['calibrations']['alignment']['locations'] = [0.1, 0.3, 0.5, 0.7, 0.9] # TODO:: Check this - is this accurate enough? + # Correct the illumflat for pixel-to-pixel sensitivity variations + par['calibrations']['illumflatframe']['process']['use_pixelflat'] = True + + # Make sure the overscan is subtracted from the dark + par['calibrations']['darkframe']['process']['use_overscan'] = True + + # Set the slit edge parameters + par['calibrations']['slitedges']['fit_order'] = 4 + par['calibrations']['slitedges']['pad'] = 0 # Do not pad the slits - this ensures that the tweak_edges method=gradient guarantees that the edges are defined at the maximum gradient. + par['calibrations']['slitedges']['edge_thresh'] = 5 # 5 works well with a range of setups tested by RJC (mostly 1x1 binning) + + # KCWI has non-uniform spectral resolution across the field-of-view + par['calibrations']['wavelengths']['fwhm_spec_order'] = 1 + par['calibrations']['wavelengths']['fwhm_spat_order'] = 2 + + # Alter the method used to combine pixel flats + par['calibrations']['pixelflatframe']['process']['combine'] = 'median' + par['calibrations']['flatfield']['spec_samp_coarse'] = 20.0 + par['calibrations']['flatfield']['tweak_slits'] = True # Tweak the slit edges + par['calibrations']['flatfield']['tweak_method'] = 'gradient' # The gradient method is better for SlicerIFU. + par['calibrations']['flatfield']['tweak_slits_thresh'] = 0.0 # Make sure the full slit is used (i.e. when the illumination fraction is > 0.5) + par['calibrations']['flatfield']['tweak_slits_maxfrac'] = 0.0 # Make sure the full slit is used (i.e. 
no padding) + par['calibrations']['flatfield']['slit_trim'] = 3 # Trim the slit edges + # Relative illumination correction + par['calibrations']['flatfield']['slit_illum_relative'] = True # Calculate the relative slit illumination + par['calibrations']['flatfield']['slit_illum_ref_idx'] = 14 # The reference index - this should probably be the same for the science frame + par['calibrations']['flatfield']['slit_illum_smooth_npix'] = 10 # Sufficiently small value so less structure in relative weights + # LACosmics parameters par['scienceframe']['process']['sigclip'] = 4.0 par['scienceframe']['process']['objlim'] = 1.5 @@ -607,7 +635,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): pxscl = spatial_scale / 3600.0 # 3600 is to convert arcsec to degrees # Get the typical slit length (this changes by ~0.3% over all slits, so a constant is fine for now) - slitlength = int(np.round(np.median(slits.get_slitlengths(initial=True, median=True)))) + slitlength = int(np.round(np.median(slits.get_slitlengths(median=True)))) # Get RA/DEC ra = self.compound_meta([hdr], 'ra') @@ -920,39 +948,26 @@ def default_pypeit_par(cls): par['calibrations']['scattlight_pad'] = 6 # This is the unbinned number of pixels to pad par['calibrations']['pixelflatframe']['process']['subtract_scattlight'] = True par['calibrations']['illumflatframe']['process']['subtract_scattlight'] = True - par['scienceframe']['process']['subtract_scattlight'] = True + par['scienceframe']['process']['subtract_scattlight'] = False par['scienceframe']['process']['scattlight']['finecorr_method'] = 'median' par['scienceframe']['process']['scattlight']['finecorr_pad'] = 4 # This is the unbinned number of pixels to pad par['scienceframe']['process']['scattlight']['finecorr_order'] = 2 # par['scienceframe']['process']['scattlight']['finecorr_mask'] = 12 # Mask the middle inter-slit region. It contains a strange scattered light feature that doesn't appear to affect any other inter-slit regions - # Correct the illumflat for pixel-to-pixel sensitivity variations - par['calibrations']['illumflatframe']['process']['use_pixelflat'] = True - - # Make sure the overscan is subtracted from the dark - par['calibrations']['darkframe']['process']['use_overscan'] = True - - # Set the slit edge parameters - par['calibrations']['slitedges']['fit_order'] = 4 - par['calibrations']['slitedges']['pad'] = 2 # Need to pad out the tilts for the astrometric transform when creating a datacube. 
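Because the KCWI default now leaves science frames uncorrected for scattered light, a user who wants the previous behaviour back could restore it in their PypeIt file; a sketch of the override::

    [scienceframe]
        [[process]]
            subtract_scattlight = True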
- par['calibrations']['slitedges']['edge_thresh'] = 5 # 5 works well with a range of setups tested by RJC (mostly 1x1 binning) - - # KCWI has non-uniform spectral resolution across the field-of-view - par['calibrations']['wavelengths']['fwhm_spec_order'] = 1 - par['calibrations']['wavelengths']['fwhm_spat_order'] = 2 + # Correct for non-linear behaviour in the detector response + nonlin_array = [-1.4E-7, -1.4E-7, -1.2E-7, -1.8E-7] # AMPID=0,1,2,3 respectively + par['calibrations']['arcframe']['process']['correct_nonlinear'] = nonlin_array + par['calibrations']['tiltframe']['process']['correct_nonlinear'] = nonlin_array + par['calibrations']['pixelflatframe']['process']['correct_nonlinear'] = nonlin_array + par['calibrations']['illumflatframe']['process']['correct_nonlinear'] = nonlin_array + par['calibrations']['standardframe']['process']['correct_nonlinear'] = nonlin_array + par['scienceframe']['process']['correct_nonlinear'] = nonlin_array # Alter the method used to combine pixel flats - par['calibrations']['pixelflatframe']['process']['combine'] = 'median' - par['calibrations']['flatfield']['spec_samp_coarse'] = 20.0 + par['calibrations']['pixelflatframe']['process']['combine'] = 'mean' par['calibrations']['flatfield']['spat_samp'] = 1.0 # This should give 1% accuracy in the spatial illumination correction for 2x2 binning, and <0.5% accuracy for 1x1 binning - #par['calibrations']['flatfield']['tweak_slits'] = False # Do not tweak the slit edges (we want to use the full slit) - par['calibrations']['flatfield']['tweak_slits_thresh'] = 0.0 # Make sure the full slit is used (i.e. when the illumination fraction is > 0.5) - par['calibrations']['flatfield']['tweak_slits_maxfrac'] = 0.0 # Make sure the full slit is used (i.e. no padding) - par['calibrations']['flatfield']['slit_trim'] = 3 # Trim the slit edges - # Relative illumination correction - par['calibrations']['flatfield']['slit_illum_relative'] = True # Calculate the relative slit illumination - par['calibrations']['flatfield']['slit_illum_ref_idx'] = 14 # The reference index - this should probably be the same for the science frame - par['calibrations']['flatfield']['slit_illum_smooth_npix'] = 5 # Sufficiently small value so less structure in relative weights + + # Need to fit sinusoidal sensitivity pattern, and include in the relative pixel response par['calibrations']['flatfield']['fit_2d_det_response'] = True # Include the 2D detector response in the pixelflat. return par @@ -1342,33 +1357,8 @@ def default_pypeit_par(cls): par['calibrations']['standardframe']['process']['use_pattern'] = False par['scienceframe']['process']['use_pattern'] = False - # Correct the illumflat for pixel-to-pixel sensitivity variations - par['calibrations']['illumflatframe']['process']['use_pixelflat'] = True - - # Make sure the overscan is subtracted from the dark - par['calibrations']['darkframe']['process']['use_overscan'] = True - - # Set the slit edge parameters - par['calibrations']['slitedges']['fit_order'] = 4 - par['calibrations']['slitedges']['pad'] = 2 # Need to pad out the tilts for the astrometric transform when creating a datacube. 
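A hedged sketch of what the per-amplifier correction amounts to, using the functional form quoted for ``correct_nonlinear`` (Ct = Cm (1 + alpha x Cm)) and the coefficients above; the helper function is illustrative, not the PypeIt implementation::

    import numpy as np

    nonlin_array = [-1.4E-7, -1.4E-7, -1.2E-7, -1.8E-7]   # alpha for AMPID = 0, 1, 2, 3

    def true_counts(measured, ampid):
        """Ct = Cm * (1 + alpha * Cm), applied to pixels read by one amplifier."""
        alpha = nonlin_array[ampid]
        return measured * (1.0 + alpha * measured)

    cm = np.array([1.0E3, 1.0E4, 5.0E4])   # measured counts
    print(true_counts(cm, 0))              # -> approximately [999.86, 9986.0, 49650.0]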
- par['calibrations']['slitedges']['edge_thresh'] = 5 # 5 works well with a range of setups tested by RJC (mostly 1x1 binning) - - # KCWI has non-uniform spectral resolution across the field-of-view - par['calibrations']['wavelengths']['fwhm_spec_order'] = 1 - par['calibrations']['wavelengths']['fwhm_spat_order'] = 2 - - # Alter the method used to combine pixel flats - par['calibrations']['pixelflatframe']['process']['combine'] = 'median' - par['calibrations']['flatfield']['spec_samp_coarse'] = 20.0 - #par['calibrations']['flatfield']['tweak_slits'] = False # Do not tweak the slit edges (we want to use the full slit) - par['calibrations']['flatfield']['tweak_slits_thresh'] = 0.0 # Make sure the full slit is used (i.e. when the illumination fraction is > 0.5) - par['calibrations']['flatfield']['tweak_slits_maxfrac'] = 0.0 # Make sure the full slit is used (i.e. no padding) - par['calibrations']['flatfield']['slit_trim'] = 3 # Trim the slit edges - # Relative illumination correction - par['calibrations']['flatfield']['slit_illum_relative'] = True # Calculate the relative slit illumination - par['calibrations']['flatfield']['slit_illum_ref_idx'] = 14 # The reference index - this should probably be the same for the science frame - par['calibrations']['flatfield']['slit_illum_smooth_npix'] = 5 # Sufficiently small value so less structure in relative weights - par['calibrations']['flatfield']['fit_2d_det_response'] = True # Include the 2D detector response in the pixelflat. + # This is probably not necessary for KCRM + par['calibrations']['flatfield']['fit_2d_det_response'] = False # Do not include the 2D detector response in the pixelflat. # Sky subtraction parameters par['reduce']['skysub']['bspline_spacing'] = 0.4 diff --git a/pypeit/spectrographs/spectrograph.py b/pypeit/spectrographs/spectrograph.py index e7d711b2f1..f44fa9e013 100644 --- a/pypeit/spectrographs/spectrograph.py +++ b/pypeit/spectrographs/spectrograph.py @@ -1870,13 +1870,12 @@ def spec1d_match_spectra(self, sobjs): """ msgs.error(f'Method to match slits across detectors not defined for {self.name}') - def tweak_standard(self, wave_in, counts_in, counts_ivar_in, gpm_in, meta_table, log10_blaze_function=None): """ This routine is for performing instrument/disperser specific tweaks to standard stars so that sensitivity function fits will be well behaved. For example, masking second order light. For instruments that don't - require such tweaks it will just return the inputs, but for isntruments that do this function is overloaded + require such tweaks it will just return the inputs, but for instruments that do this function is overloaded with a method that performs the tweaks. Parameters diff --git a/pypeit/utils.py b/pypeit/utils.py index 4ac8f1fe02..a7b0f6ace1 100644 --- a/pypeit/utils.py +++ b/pypeit/utils.py @@ -1380,6 +1380,38 @@ def find_nearest(array, values): return idxs +def linear_interpolate(x1, y1, x2, y2, x): + r""" + Interpolate or extrapolate between two points. + + Given a line defined by two points, :math:`(x_1,y_1)` and + :math:`(x_2,y_2)`, return the :math:`y` value of a new point on + the line at coordinate :math:`x`. + + This function is meant for speed. No type checking is performed and + the only check is that the two provided abscissa coordinates are not + numerically identical. By definition, the function will extrapolate + without any warning. 
+ + Args: + x1 (:obj:`float`): + First abscissa position + y1 (:obj:`float`): + First ordinate position + x2 (:obj:`float`): + Second abscissa position + y2 (:obj:`float`): + Second ordinate position + x (:obj:`float`): + Abscissa for new value + + Returns: + :obj:`float`: Interpolated/extrapolated value of ordinate at + :math:`x`. + """ + return y1 if np.isclose(x1,x2) else y1 + (x-x1)*(y2-y1)/(x2-x1) + + def replace_bad(frame, bpm): """ Find all bad pixels, and replace the bad pixels with the nearest good pixel diff --git a/pypeit/wavecalib.py b/pypeit/wavecalib.py index 758d2031c4..ca223a39e2 100644 --- a/pypeit/wavecalib.py +++ b/pypeit/wavecalib.py @@ -217,7 +217,8 @@ def from_hdu(cls, hdu, chk_version=True, **kwargs): # Parse all the WAVE2DFIT extensions # TODO: It would be good to have the WAVE2DFIT extensions follow the # same naming convention as the WAVEFIT extensions... - wave2d_fits = [fitting.PypeItFit.from_hdu(hdu[e], chk_version=chk_version) + wave2d_fits = [fitting.PypeItFit() if len(hdu[e].data) == 0 + else fitting.PypeItFit.from_hdu(hdu[e], chk_version=chk_version) for e in ext if 'WAVE2DFIT' in e] if len(wave2d_fits) > 0: d['wv_fit2d'] = np.asarray(wave2d_fits) diff --git a/setup.cfg b/setup.cfg index 4fcbdee861..65dedb90ca 100644 --- a/setup.cfg +++ b/setup.cfg @@ -64,7 +64,7 @@ scripts = [options.extras_require] scikit-image = - scikit-image + scikit-image>=0.23 specutils = specutils>=1.13 test = @@ -119,6 +119,7 @@ console_scripts = pypeit_coadd_datacube = pypeit.scripts.coadd_datacube:CoAddDataCube.entry_point pypeit_collate_1d = pypeit.scripts.collate_1d:Collate1D.entry_point pypeit_edge_inspector = pypeit.scripts.edge_inspector:EdgeInspector.entry_point + pypeit_extract_datacube = pypeit.scripts.extract_datacube:ExtractDataCube.entry_point pypeit_flux_calib = pypeit.scripts.flux_calib:FluxCalib.entry_point pypeit_flux_setup = pypeit.scripts.flux_setup:FluxSetup.entry_point pypeit_install_extinctfile = pypeit.scripts.install_extinctfile:InstallExtinctfile.entry_point
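Finally, a quick check of the new ``linear_interpolate`` utility (assuming this branch is installed), exercising interpolation, the silent extrapolation noted in its docstring, and the degenerate-abscissa guard::

    from pypeit.utils import linear_interpolate

    print(linear_interpolate(0.0, 0.0, 2.0, 4.0, 1.0))   # 2.0 (interpolation)
    print(linear_interpolate(0.0, 0.0, 2.0, 4.0, 3.0))   # 6.0 (extrapolation, no warning)
    print(linear_interpolate(1.0, 5.0, 1.0, 9.0, 2.0))   # 5.0 (x1 == x2, returns y1)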