From 201b3aa9a2206474423222b6cbb2a58f7649370e Mon Sep 17 00:00:00 2001 From: Kyle Westfall Date: Mon, 31 Jul 2023 18:27:56 -0700 Subject: [PATCH] doc fixes --- doc/api/pypeit.scripts.ql_multislit.rst | 8 -- doc/api/pypeit.scripts.rst | 1 - doc/calibrations/image_proc.rst | 2 +- doc/calibrations/tilt.rst | 2 +- doc/conf.py | 9 +- doc/fluxing.rst | 3 +- doc/help/pypeit_ql_multislit.rst | 78 ------------ doc/include/bitmaskarray_usage.rst | 2 +- doc/include/class_datamodel_wavetilts.rst | 32 ++--- doc/include/links.rst | 10 +- doc/pypeit_par.rst | 64 +++++----- doc/scripts/base_par.rst | 2 +- doc/setup.rst | 2 +- pypeit/archive.py | 2 +- pypeit/bitmask.py | 3 +- pypeit/calibframe.py | 4 +- pypeit/calibrations.py | 4 +- pypeit/coadd1d.py | 45 ++++--- pypeit/coadd2d.py | 23 ++-- pypeit/core/coadd.py | 117 ++++++++++-------- pypeit/core/collate.py | 17 ++- pypeit/core/datacube.py | 29 +++-- pypeit/core/extract.py | 8 +- pypeit/core/findobj_skymask.py | 3 +- pypeit/core/fitting.py | 7 +- pypeit/core/flat.py | 2 +- pypeit/core/flexure.py | 75 +++++------ pypeit/core/flux_calib.py | 54 ++++---- pypeit/core/gui/identify.py | 65 +++++----- pypeit/core/gui/object_find.py | 45 ++++--- pypeit/core/gui/skysub_regions.py | 23 ++-- pypeit/core/meta.py | 4 +- pypeit/core/parse.py | 2 +- pypeit/core/pca.py | 19 ++- pypeit/core/pixels.py | 2 +- pypeit/core/procimg.py | 2 +- pypeit/core/pydl.py | 2 +- pypeit/core/qa.py | 23 ++-- pypeit/core/skysub.py | 14 ++- pypeit/core/telluric.py | 16 +-- pypeit/core/trace.py | 15 ++- pypeit/core/tracewave.py | 101 ++++++++------- pypeit/core/transform.py | 2 +- pypeit/core/wave.py | 4 +- pypeit/core/wavecal/autoid.py | 112 +++++++++-------- pypeit/core/wavecal/echelle.py | 8 +- pypeit/core/wavecal/templates.py | 10 +- pypeit/core/wavecal/waveio.py | 33 +++-- pypeit/core/wavecal/wv_fitting.py | 86 ++++++------- pypeit/core/wavecal/wvutils.py | 34 +++-- pypeit/data/utils.py | 12 +- pypeit/datamodel.py | 28 ++--- pypeit/deprecated/datacube.py | 4 +- pypeit/deprecated/orig_specobjs.py | 2 +- .../{scripts => deprecated}/ql_multislit.py | 0 pypeit/display/display.py | 36 +++--- pypeit/display/ginga_plugins.py | 4 +- pypeit/edgetrace.py | 47 +++---- pypeit/extraction.py | 24 ++-- pypeit/find_objects.py | 47 +++---- pypeit/flatfield.py | 21 ++-- pypeit/fluxcalibrate.py | 2 +- pypeit/images/buildimage.py | 6 +- pypeit/images/pypeitimage.py | 2 +- pypeit/images/rawimage.py | 2 +- pypeit/inputfiles.py | 14 +-- pypeit/metadata.py | 74 ++++++----- pypeit/par/parset.py | 32 +++-- pypeit/par/pypeitpar.py | 46 +++---- pypeit/par/util.py | 13 +- pypeit/pypeit.py | 10 +- pypeit/pypeitsetup.py | 34 ++--- pypeit/pypmsgs.py | 60 +++++---- pypeit/sampling.py | 59 +++++---- pypeit/scripts/__init__.py | 2 +- pypeit/scripts/chk_for_calibs.py | 2 +- pypeit/scripts/chk_noise_1dspec.py | 1 + pypeit/scripts/chk_noise_2dspec.py | 5 +- pypeit/scripts/collate_1d.py | 63 ++++++---- pypeit/scripts/ql.py | 6 +- pypeit/scripts/scriptbase.py | 1 + pypeit/sensfunc.py | 9 +- pypeit/slittrace.py | 40 +++--- pypeit/spec2dobj.py | 12 +- pypeit/specobj.py | 4 +- pypeit/specobjs.py | 21 ++-- pypeit/spectrographs/bok_bc.py | 5 - pypeit/spectrographs/gemini_gnirs.py | 2 +- pypeit/spectrographs/gtc_osiris.py | 2 +- pypeit/spectrographs/keck_kcwi.py | 2 +- pypeit/spectrographs/spectrograph.py | 7 +- pypeit/specutils/pypeit_loaders.py | 4 +- pypeit/tracepca.py | 38 +++--- pypeit/utils.py | 30 ++--- pypeit/wavecalib.py | 59 ++++----- pypeit/wavemodel.py | 114 +++++++++-------- pypeit/wavetilts.py | 88 +++++++------ 97 
files changed, 1201 insertions(+), 1125 deletions(-) delete mode 100644 doc/api/pypeit.scripts.ql_multislit.rst delete mode 100644 doc/help/pypeit_ql_multislit.rst rename pypeit/{scripts => deprecated}/ql_multislit.py (100%) diff --git a/doc/api/pypeit.scripts.ql_multislit.rst b/doc/api/pypeit.scripts.ql_multislit.rst deleted file mode 100644 index 88a8b4b553..0000000000 --- a/doc/api/pypeit.scripts.ql_multislit.rst +++ /dev/null @@ -1,8 +0,0 @@ -pypeit.scripts.ql\_multislit module -=================================== - -.. automodule:: pypeit.scripts.ql_multislit - :members: - :private-members: - :undoc-members: - :show-inheritance: diff --git a/doc/api/pypeit.scripts.rst b/doc/api/pypeit.scripts.rst index 39c559ed1b..5d312d0e49 100644 --- a/doc/api/pypeit.scripts.rst +++ b/doc/api/pypeit.scripts.rst @@ -35,7 +35,6 @@ Submodules pypeit.scripts.parse_slits pypeit.scripts.qa_html pypeit.scripts.ql - pypeit.scripts.ql_multislit pypeit.scripts.run_pypeit pypeit.scripts.scriptbase pypeit.scripts.sensfunc diff --git a/doc/calibrations/image_proc.rst b/doc/calibrations/image_proc.rst index e034cb5231..efd4159d68 100644 --- a/doc/calibrations/image_proc.rst +++ b/doc/calibrations/image_proc.rst @@ -198,7 +198,7 @@ in our documentation as ``(nspec,nspat)``. The operations required to flip/transpose the image arrays to match the PypeIt convention are dictated by instrument-specific :class:`~pypeit.images.detector_container.DetectorContainer` parameters and performed by -:func:`~pypeit.spectrograph.spectrographs.Spectrograph.orient_image`. Image +:func:`~pypeit.spectrographs.spectrograph.Spectrograph.orient_image`. Image orientation will be performed if the ``orient`` parameter is true. .. warning:: diff --git a/doc/calibrations/tilt.rst b/doc/calibrations/tilt.rst index 12724b924b..614d4be345 100644 --- a/doc/calibrations/tilt.rst +++ b/doc/calibrations/tilt.rst @@ -58,7 +58,7 @@ Current TiltImage Data Model ============================ Internally, the image is held in -:class:`pypeit.tiltimage.TiltImage` +:class:`pypeit.images.buildimage.tiltimage.TiltImage` which subclasses from :class:`pypeit.images.pypeitimage.PypeItImage` and :class:`pypeit.datamodel.DataContainer`. diff --git a/doc/conf.py b/doc/conf.py index 4da58a95b8..9ecfc8023f 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -120,9 +120,16 @@ # When nit-picking, ignore these warnings: nitpick_ignore = [ ('py:class', 'optional'), + ('py:class', 'iterable'), + ('py:class', 'ndarray'), + ('py:class', 'dict-like'), ('py:class', 'array-like'), + ('py:class', 'table-like'), + ('py:class', 'float-like'), ('py:class', 'scalar-like'), - ('py:class', 'default') ] + ('py:class', 'default'), + ('py:class', 'callable'), + ] # The reST default role (used for this markup: `text`) to use for all # documents. diff --git a/doc/fluxing.rst b/doc/fluxing.rst index ea4ea90ed0..06ef1e9aa3 100644 --- a/doc/fluxing.rst +++ b/doc/fluxing.rst @@ -589,6 +589,5 @@ FluxSpec Class ============== The guts of the flux algorithms are guided by the -:class:`~pypeit.fluxcalibrate.FluxCalibrate` -class. +:func:`~pypeit.fluxcalibrate.apply_flux_calib` class. diff --git a/doc/help/pypeit_ql_multislit.rst b/doc/help/pypeit_ql_multislit.rst deleted file mode 100644 index 72718fa74a..0000000000 --- a/doc/help/pypeit_ql_multislit.rst +++ /dev/null @@ -1,78 +0,0 @@ -.. 
code-block:: console - - $ pypeit_ql_multislit -h - usage: pypeit_ql_multislit [-h] [--spec_samp_fact SPEC_SAMP_FACT] - [--spat_samp_fact SPAT_SAMP_FACT] [--bkg_redux] - [--flux] [--mask_cr] [--writefits] [--no_gui] - [--box_radius BOX_RADIUS] [--offset OFFSET] - [--redux_path REDUX_PATH] [--master_dir MASTER_DIR] - [--embed] [--show] [--det [DET ...]] - spectrograph full_rawpath files [files ...] - - Script to produce quick-look multislit PypeIt reductions - - positional arguments: - spectrograph A valid spectrograph identifier: bok_bc, - gemini_flamingos1, gemini_flamingos2, - gemini_gmos_north_e2v, gemini_gmos_north_ham, - gemini_gmos_north_ham_ns, gemini_gmos_south_ham, - gemini_gnirs_echelle, gemini_gnirs_ifu, gtc_maat, - gtc_osiris, gtc_osiris_plus, jwst_nircam, jwst_nirspec, - keck_deimos, keck_esi, keck_hires, keck_kcwi, - keck_lris_blue, keck_lris_blue_orig, keck_lris_red, - keck_lris_red_mark4, keck_lris_red_orig, keck_mosfire, - keck_nires, keck_nirspec_low, lbt_luci1, lbt_luci2, - lbt_mods1b, lbt_mods1r, lbt_mods2b, lbt_mods2r, - ldt_deveny, magellan_fire, magellan_fire_long, - magellan_mage, mdm_modspec, mdm_osmos_mdm4k, - mmt_binospec, mmt_bluechannel, mmt_mmirs, not_alfosc, - not_alfosc_vert, ntt_efosc2, p200_dbsp_blue, - p200_dbsp_red, p200_tspec, shane_kast_blue, - shane_kast_red, shane_kast_red_ret, soar_goodman_blue, - soar_goodman_red, tng_dolores, vlt_fors2, vlt_sinfoni, - vlt_xshooter_nir, vlt_xshooter_uvb, vlt_xshooter_vis, - wht_isis_blue, wht_isis_red - full_rawpath Full path to the raw files - files list of frames i.e. img1.fits img2.fits - - options: - -h, --help show this help message and exit - --spec_samp_fact SPEC_SAMP_FACT - Make the wavelength grid finer (spec_samp_fact < 1.0) or - coarser (spec_samp_fact > 1.0) by this sampling factor, - i.e. units of spec_samp_fact are pixels. (default: 1.0) - --spat_samp_fact SPAT_SAMP_FACT - Make the spatial grid finer (spat_samp_fact < 1.0) or - coarser (spat_samp_fact > 1.0) by this sampling factor, - i.e. units of spat_samp_fact are pixels. (default: 1.0) - --bkg_redux If set the script will perform difference imaging - quicklook. Namely it will identify sequences of AB pairs - based on the dither pattern and perform difference - imaging sky subtraction and fit for residuals (default: - False) - --flux This option will multiply in sensitivity function to - obtain a flux calibrated 2d spectrum (default: False) - --mask_cr This option turns on cosmic ray rejection. This improves - the reduction but doubles runtime. (default: False) - --writefits Write the ouputs to a fits file (default: False) - --no_gui Do not display the results in a GUI (default: False) - --box_radius BOX_RADIUS - Set the radius for the boxcar extraction (default: None) - --offset OFFSET Override the automatic offsets determined from the - headers. Offset is in pixels. This option is useful if a - standard dither pattern was not executed. The offset - convention is such that a negative offset will move the - (negative) B image to the left. (default: None) - --redux_path REDUX_PATH - Location where reduction outputs should be stored. - (default: current working directory) - --master_dir MASTER_DIR - Location of PypeIt Master files used for the reduction. - (default: None) - --embed Upon completion embed in ipython shell (default: False) - --show Show the reduction steps. Equivalent to the -s option - when running pypeit. (default: False) - --det [DET ...] Detector(s) to show. 
If more than one, the list of - detectors must be one of the allowed mosaics hard-coded - for the selected spectrograph. (default: 1) - \ No newline at end of file diff --git a/doc/include/bitmaskarray_usage.rst b/doc/include/bitmaskarray_usage.rst index 374f66b86e..c61b119730 100644 --- a/doc/include/bitmaskarray_usage.rst +++ b/doc/include/bitmaskarray_usage.rst @@ -38,7 +38,7 @@ You can access the bit flag names using: Bit access ++++++++++ -You can flag bits using :func:`~pypeit.bitmaskarray.BitMaskArray.turn_on`. For +You can flag bits using :func:`~pypeit.images.bitmaskarray.BitMaskArray.turn_on`. For example, the following code flags the center column of the image as being part of the detector bad-pixel mask: diff --git a/doc/include/class_datamodel_wavetilts.rst b/doc/include/class_datamodel_wavetilts.rst index aa5f30354d..f071e3f17d 100644 --- a/doc/include/class_datamodel_wavetilts.rst +++ b/doc/include/class_datamodel_wavetilts.rst @@ -1,19 +1,19 @@ **Version**: 1.2.0 -==================== ============================ ================= ============================================================================================================================== -Attribute Type Array Type Description -==================== ============================ ================= ============================================================================================================================== -``PYP_SPEC`` str PypeIt spectrograph name -``bpmtilts`` `numpy.ndarray`_ `numpy.integer`_ Bad pixel mask for tilt solutions. Keys are taken from SlitTraceSetBitmask -``coeffs`` `numpy.ndarray`_ `numpy.floating`_ 2D coefficents for the fit on the initial slits. One set per slit/order (3D array). -``func2d`` str Function used for the 2D fit -``nslit`` int Total number of slits. This can include masked slits -``slits_filename`` str Path to SlitTraceSet file. This helps to find the Slits calibration file when running pypeit_chk_tilts() -``spat_flexure`` float Flexure shift from the input TiltImage -``spat_id`` `numpy.ndarray`_ `numpy.integer`_ Slit spat_id -``spat_order`` `numpy.ndarray`_ `numpy.integer`_ Order for spatial fit (nslit) -``spec_order`` `numpy.ndarray`_ `numpy.integer`_ Order for spectral fit (nslit) -``tilt_traces`` `astropy.table.table.Table`_ Table with the positions of the traced and fitted tilts for all the slits. see :func:`make_tbl_tilt_traces` for more details. -``tiltimg_filename`` str Path to Tiltimg file. This helps to find Tiltimg file when running pypeit_chk_tilts() -==================== ============================ ================= ============================================================================================================================== +==================== ============================ ================= =============================================================================================================================================================== +Attribute Type Array Type Description +==================== ============================ ================= =============================================================================================================================================================== +``PYP_SPEC`` str PypeIt spectrograph name +``bpmtilts`` `numpy.ndarray`_ `numpy.integer`_ Bad pixel mask for tilt solutions. Keys are taken from SlitTraceSetBitmask +``coeffs`` `numpy.ndarray`_ `numpy.floating`_ 2D coefficents for the fit on the initial slits. One set per slit/order (3D array). 
+``func2d`` str Function used for the 2D fit +``nslit`` int Total number of slits. This can include masked slits +``slits_filename`` str Path to SlitTraceSet file. This helps to find the Slits calibration file when running pypeit_chk_tilts() +``spat_flexure`` float Flexure shift from the input TiltImage +``spat_id`` `numpy.ndarray`_ `numpy.integer`_ Slit spat_id +``spat_order`` `numpy.ndarray`_ `numpy.integer`_ Order for spatial fit (nslit) +``spec_order`` `numpy.ndarray`_ `numpy.integer`_ Order for spectral fit (nslit) +``tilt_traces`` `astropy.table.table.Table`_ Table with the positions of the traced and fitted tilts for all the slits. see :func:`~pypeit.wavetilts.BuildWaveTilts.make_tbl_tilt_traces` for more details. +``tiltimg_filename`` str Path to Tiltimg file. This helps to find Tiltimg file when running pypeit_chk_tilts() +==================== ============================ ================= =============================================================================================================================================================== diff --git a/doc/include/links.rst b/doc/include/links.rst index ef5b046d3f..3524abb769 100644 --- a/doc/include/links.rst +++ b/doc/include/links.rst @@ -13,6 +13,7 @@ .. _str.splitlines: https://docs.python.org/3/library/stdtypes.html#str.splitlines .. _textwrap.wrap: https://docs.python.org/3/library/textwrap.html#textwrap.wrap .. _Path: https://docs.python.org/3/library/pathlib.html +.. _io.TextIOWrapper: https://docs.python.org/3/library/io.html#io.TextIOWrapper .. numpy .. _numpy: https://numpy.org/ @@ -54,12 +55,14 @@ .. _matplotlib: https://matplotlib.org/ .. _matplotlib.pyplot.imshow: http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.imshow .. _matplotlib.axes.Axes: https://matplotlib.org/api/axes_api.html#matplotlib.axes.Axes +.. _matplotlib.backend_bases.Event: https://matplotlib.org/stable/api/backend_bases_api.html#matplotlib.backend_bases.Event .. configobj .. _configobj: http://configobj.readthedocs.io/en/latest/ .. astropy .. _astropy.io.fits: http://docs.astropy.org/en/stable/io/fits/index.html +.. _astropy.io.fits.getheader: https://docs.astropy.org/en/stable/io/fits/api/files.html#astropy.io.fits.getheader .. _astropy.io.fits.open: http://docs.astropy.org/en/stable/io/fits/api/files.html#astropy.io.fits.open .. _astropy.io.fits.HDUList: http://docs.astropy.org/en/stable/io/fits/api/hdulists.html .. _astropy.io.fits.HDUList.writeto: http://docs.astropy.org/en/stable/io/fits/api/hdulists.html#astropy.io.fits.HDUList.writeto @@ -73,7 +76,7 @@ .. _astropy.table.Table.read: https://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.read .. _astropy.table.Table.remove_rows: https://docs.astropy.org/en/stable/api/astropy.table.Table.html#astropy.table.Table.remove_rows .. _astropy.table.Row: https://docs.astropy.org/en/stable/api/astropy.table.Row.html -.. _astropy.wcs.wcs.WCS: http://docs.astropy.org/en/stable/api/astropy.wcs.WCS.html +.. _astropy.wcs.WCS: http://docs.astropy.org/en/stable/api/astropy.wcs.WCS.html .. _astropy.modeling: http://docs.astropy.org/en/stable/modeling/index.html .. _astropy.modeling.polynomial.Legendre1D: http://docs.astropy.org/en/stable/api/astropy.modeling.polynomial.Legendre1D.html .. _astropy.modeling.models.CompoundModel: http://docs.astropy.org/en/stable/modeling/compound-models.html @@ -81,11 +84,13 @@ .. _astropy.cosmology.FlatLambdaCDM: http://docs.astropy.org/en/stable/api/astropy.cosmology.FlatLambdaCDM.html#flatlambdacdm .. 
_astropy.constants: http://docs.astropy.org/en/stable/constants/index.html .. _astropy.time.Time: https://docs.astropy.org/en/stable/time/ +.. _astropy.coordinates.Angle: https://docs.astropy.org/en/stable/api/astropy.coordinates.Angle.html .. _astropy.coordinates.SkyCoord: https://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html .. _astropy.coordinates.EarthLocation: https://docs.astropy.org/en/stable/api/astropy.coordinates.EarthLocation.html .. _astropy.coordinates.SkyCoord.radial_velocity_correction: https://docs.astropy.org/en/stable/api/astropy.coordinates.SkyCoord.html#astropy.coordinates.SkyCoord.radial_velocity_correction .. _astropy.stats.SigmaClip: https://docs.astropy.org/en/stable/api/astropy.stats.SigmaClip.html .. _astropy.stats.sigma_clipped_stats: https://docs.astropy.org/en/stable/api/astropy.stats.sigma_clipped_stats.html +.. _astropy.units.Unit: https://docs.astropy.org/en/stable/api/astropy.units.Unit.html .. _astropy.units.Quantity: https://docs.astropy.org/en/stable/api/astropy.units.Quantity.html .. scikit @@ -122,11 +127,14 @@ .. _bottleneck: https://bottleneck.readthedocs.io/en/latest/ .. _specutils: https://specutils.readthedocs.io/en/stable/ .. _specutils.Spectrum1D: https://specutils.readthedocs.io/en/stable/api/specutils.Spectrum1D.html +.. _specutils.SpectrumList: https://specutils.readthedocs.io/en/stable/api/specutils.SpectrumList.html .. _jdaviz: https://jdaviz.readthedocs.io/en/latest/ .. _jupyter notebook: https://jupyter.org/ +.. _ppxf: https://pypi.org/project/ppxf/ .. ginga .. _ginga: https://ginga.readthedocs.io/en/stable/ +.. _ginga.RemoteClient: https://ginga.readthedocs.io/en/stable/manual/plugins_global/rc.html .. _AstroImage: https://ginga.readthedocs.io/en/stable/dev_manual/image_wrappers.html#astroimage .. _ginga GlobalPlugin: https://ginga.readthedocs.io/en/stable/dev_manual/developers.html#writing-a-global-plugin diff --git a/doc/pypeit_par.rst b/doc/pypeit_par.rst index f6b95caf05..62105a340a 100644 --- a/doc/pypeit_par.rst +++ b/doc/pypeit_par.rst @@ -54,7 +54,7 @@ the precedence order is as follows: configurations via its ``config_specific_par`` method. This allows the code to automatically define, e.g., the archived arc spectrum used for wavelength calibration given the grating used. For example, see - :func:`~pypeit.spectrographs.shane_kast.ShaneKastSpectrograph.config_specific_par` + :func:`~pypeit.spectrographs.shane_kast.ShaneKastBlueSpectrograph.config_specific_par` for Shane/Kast. These configuration-specific parameters are currently not documented here; however, they can be viewed by looking at the source code display in the API documentation. 
@@ -236,30 +236,30 @@ CalibrationsPar Keywords Class Instantiation: :class:`~pypeit.par.pypeitpar.CalibrationsPar` -===================== ==================================================== ======= ================================= =========================================================================================================================================================================================================================== -Key Type Options Default Description -===================== ==================================================== ======= ================================= =========================================================================================================================================================================================================================== -``alignframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the align frames -``alignment`` :class:`~pypeit.par.pypeitpar.AlignPar` .. `AlignPar Keywords`_ Define the procedure for the alignment of traces -``arcframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the wavelength calibration -``biasframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the bias correction -``bpm_usebias`` bool .. False Make a bad pixel mask from bias frames? Bias frames must be provided. -``calib_dir`` str .. ``Calibrations`` The name of the directory for the processed calibration frames. The host path for the directory is set by the redux_path (see :class:`ReduxPar`). Beware that success when changing the default value is not well tested! -``darkframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the dark-current correction -``flatfield`` :class:`~pypeit.par.pypeitpar.FlatFieldPar` .. `FlatFieldPar Keywords`_ Parameters used to set the flat-field procedure -``illumflatframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the illumination flat -``lampoffflatsframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the lamp off flats -``pinholeframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the pinholes -``pixelflatframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the pixel flat -``raise_chk_error`` bool .. True Raise an error if the calibration check fails -``skyframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the sky background observations -``slitedges`` :class:`~pypeit.par.pypeitpar.EdgeTracePar` .. `EdgeTracePar Keywords`_ Slit-edge tracing parameters -``standardframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the spectrophotometric standard observations -``tiltframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the wavelength tilts -``tilts`` :class:`~pypeit.par.pypeitpar.WaveTiltsPar` .. `WaveTiltsPar Keywords`_ Define how to trace the slit tilts using the trace frames -``traceframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. 
`FrameGroupPar Keywords`_ The frames and combination rules for images used for slit tracing -``wavelengths`` :class:`~pypeit.par.pypeitpar.WavelengthSolutionPar` .. `WavelengthSolutionPar Keywords`_ Parameters used to derive the wavelength solution -===================== ==================================================== ======= ================================= =========================================================================================================================================================================================================================== +===================== ==================================================== ======= ================================= ================================================================================================================================================================================================================================================= +Key Type Options Default Description +===================== ==================================================== ======= ================================= ================================================================================================================================================================================================================================================= +``alignframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the align frames +``alignment`` :class:`~pypeit.par.pypeitpar.AlignPar` .. `AlignPar Keywords`_ Define the procedure for the alignment of traces +``arcframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the wavelength calibration +``biasframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the bias correction +``bpm_usebias`` bool .. False Make a bad pixel mask from bias frames? Bias frames must be provided. +``calib_dir`` str .. ``Calibrations`` The name of the directory for the processed calibration frames. The host path for the directory is set by the redux_path (see :class:`~pypeit.par.pypeitpar.ReduxPar`). Beware that success when changing the default value is not well tested! +``darkframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the dark-current correction +``flatfield`` :class:`~pypeit.par.pypeitpar.FlatFieldPar` .. `FlatFieldPar Keywords`_ Parameters used to set the flat-field procedure +``illumflatframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the illumination flat +``lampoffflatsframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the lamp off flats +``pinholeframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the pinholes +``pixelflatframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the pixel flat +``raise_chk_error`` bool .. True Raise an error if the calibration check fails +``skyframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the sky background observations +``slitedges`` :class:`~pypeit.par.pypeitpar.EdgeTracePar` .. 
`EdgeTracePar Keywords`_ Slit-edge tracing parameters +``standardframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the spectrophotometric standard observations +``tiltframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for the wavelength tilts +``tilts`` :class:`~pypeit.par.pypeitpar.WaveTiltsPar` .. `WaveTiltsPar Keywords`_ Define how to trace the slit tilts using the trace frames +``traceframe`` :class:`~pypeit.par.pypeitpar.FrameGroupPar` .. `FrameGroupPar Keywords`_ The frames and combination rules for images used for slit tracing +``wavelengths`` :class:`~pypeit.par.pypeitpar.WavelengthSolutionPar` .. `WavelengthSolutionPar Keywords`_ Parameters used to derive the wavelength solution +===================== ==================================================== ======= ================================= ================================================================================================================================================================================================================================================= ---- @@ -347,11 +347,11 @@ Key Type Options ``fit_maxdev`` int, float .. 5.0 Maximum deviation between the fitted and measured edge position for rejection in spatial pixels. ``fit_maxiter`` int .. 25 Maximum number of rejection iterations during edge fitting. ``fit_min_spec_length`` float .. 0.6 Minimum unmasked spectral length of a traced slit edge to use in any modeling procedure (polynomial fitting or PCA decomposition). -``fit_niter`` int .. 1 Number of iterations of re-measuring and re-fitting the edge data; see :func:`pypeit.core.trace.fit_trace`. +``fit_niter`` int .. 1 Number of iterations of re-measuring and re-fitting the edge data; see :func:`~pypeit.core.trace.fit_trace`. ``fit_order`` int .. 5 Order of the function fit to edge measurements. ``follow_span`` int .. 20 In the initial connection of spectrally adjacent edge detections, this sets the number of previous spectral rows to consider when following slits forward. -``fwhm_gaussian`` int, float .. 3.0 The `fwhm` parameter to use when using Gaussian weighting in :func:`pypeit.core.trace.fit_trace` when refining the PCA predictions of edges. See description :func:`pypeit.core.trace.peak_trace`. -``fwhm_uniform`` int, float .. 3.0 The `fwhm` parameter to use when using uniform weighting in :func:`pypeit.core.trace.fit_trace` when refining the PCA predictions of edges. See description of :func:`pypeit.core.trace.peak_trace`. +``fwhm_gaussian`` int, float .. 3.0 The `fwhm` parameter to use when using Gaussian weighting in :func:`~pypeit.core.trace.fit_trace` when refining the PCA predictions of edges. See description :func:`~pypeit.core.trace.peak_trace`. +``fwhm_uniform`` int, float .. 3.0 The `fwhm` parameter to use when using uniform weighting in :func:`~pypeit.core.trace.fit_trace` when refining the PCA predictions of edges. See description of :func:`~pypeit.core.trace.peak_trace`. ``gap_offset`` int, float .. 5.0 Offset (pixels) used for the slit edge gap width when inserting slit edges (see `sync_center`) or when nudging predicted slit edges to avoid slit overlaps. This should be larger than `minimum_slit_gap` when converted to arcseconds. ``left_right_pca`` bool .. False Construct a PCA decomposition for the left and right traces separately. This can be important for cross-dispersed echelle spectrographs (e.g., Keck-NIRES) ``length_range`` int, float .. 
.. Allowed range in slit length compared to the median slit length. For example, a value of 0.3 means that slit lengths should not vary more than 30%. Relatively shorter or longer slits are masked or clipped. Most useful for echelle or multi-slit data where the slits should have similar or identical lengths. @@ -368,8 +368,8 @@ Key Type Options ``minimum_slit_gap`` int, float .. .. Minimum slit gap in arcsec. Gaps between slits are determined by the median difference between the right and left edge locations of adjacent slits. Slits with small gaps are merged by removing the intervening traces.If None, no minimum slit gap is applied. This should be smaller than `gap_offset` when converted to pixels. ``minimum_slit_length`` int, float .. .. Minimum slit length in arcsec. Slit lengths are determined by the median difference between the left and right edge locations for the unmasked trace locations. This is used to identify traces that are *erroneously* matched together to form slits. Short slits are expected to be ignored or removed (see ``clip``). If None, no minimum slit length applied. ``minimum_slit_length_sci`` int, float .. .. Minimum slit length in arcsec for a science slit. Slit lengths are determined by the median difference between the left and right edge locations for the unmasked trace locations. Used in combination with ``minimum_slit_length``, this parameter is used to identify box or alignment slits; i.e., those slits that are shorter than ``minimum_slit_length_sci`` but larger than ``minimum_slit_length`` are box/alignment slits. Box slits are *never* removed (see ``clip``), but no spectra are extracted from them. If None, no minimum science slit length is applied. -``niter_gaussian`` int .. 6 The number of iterations of :func:`pypeit.core.trace.fit_trace` to use when using Gaussian weighting. -``niter_uniform`` int .. 9 The number of iterations of :func:`pypeit.core.trace.fit_trace` to use when using uniform weighting. +``niter_gaussian`` int .. 6 The number of iterations of :func:`~pypeit.core.trace.fit_trace` to use when using Gaussian weighting. +``niter_uniform`` int .. 9 The number of iterations of :func:`~pypeit.core.trace.fit_trace` to use when using uniform weighting. ``order_match`` int, float .. .. For echelle spectrographs, this is the tolerance allowed for matching identified "slits" to echelle orders. Must be in the fraction of the detector spatial scale (i.e., a value of 0.05 means that the order locations must be within 5% of the expected value). If None, no limit is used. ``order_offset`` int, float .. .. Offset to introduce to the expected order positions to improve the match for this specific data. This is an additive offset to the measured slit positions; i.e., this should minimize the difference between the expected order positions and ``self.slit_spatial_center() + offset``. Must be in the fraction of the detector spatial scale. If None, no offset is applied. ``overlap`` bool .. False Assume slits identified as abnormally short are actually due to overlaps between adjacent slits/orders. If set to True, you *must* have also used ``length_range`` to identify left-right edge pairs that have an abnormally short separation. For those short slits, the code attempts to convert the short slits into slit gaps. This is particularly useful for blue orders in Keck-HIRES data. @@ -535,7 +535,7 @@ Key Type Options Default Description ``spat_toler`` int .. 
5 This parameter provides the desired tolerance in spatial pixel used to identify slits in different exposures ``use_slits4wvgrid`` bool .. False If True, use the slits to set the trace down the center ``user_obj`` int, list .. .. Object that the user wants to use to compute the weights and/or the offsets for coadding images. For longslit/multislit spectroscopy, provide the ``SLITID`` and the ``OBJID``, separated by comma, of the selected object. For echelle spectroscopy, provide the ``ECH_OBJID`` of the selected object. See :doc:`out_spec1D` for more info about ``SLITID``, ``OBJID`` and ``ECH_OBJID``. If this parameter is not ``None``, it will be used to compute the offsets only if ``offsets = auto``, and it will used to compute the weights only if ``weights = auto``. -``wave_method`` str .. .. Argument to `pypeit.core.wavecal.wvutils.get_wave_grid` method, which determines how the 2d coadd wavelength grid is constructed. The default is None, which will use a linear gridfor longslit/multislit coadds and a log10 grid for echelle coadds. Currently supported options with 2d coadding are:* 'iref' -- Use one of the exposures (the first) as the reference for the wavelength grid * 'velocity' -- Grid is uniform in velocity* 'log10' -- Grid is uniform in log10(wave). This is the same as velocity.* 'linear' -- Grid is uniform in wavelength +``wave_method`` str .. .. Argument to :func:`~pypeit.core.wavecal.wvutils.get_wave_grid` method, which determines how the 2d coadd wavelength grid is constructed. The default is None, which will use a linear gridfor longslit/multislit coadds and a log10 grid for echelle coadds. Currently supported options with 2d coadding are:* 'iref' -- Use one of the exposures (the first) as the reference for the wavelength grid * 'velocity' -- Grid is uniform in velocity* 'log10' -- Grid is uniform in log10(wave). This is the same as velocity.* 'linear' -- Grid is uniform in wavelength ``weights`` str, list .. ``auto`` Mode for the weights used to coadd images. Options are: ``auto``, ``uniform``, or a list of weights. If ``auto`` is used, PypeIt will try to compute the weights using a reference object with the highest S/N, or an object selected by the user (see ``user_obj``), if ``uniform`` is used, uniform weights will be applied. If a list of weights is provided, PypeIt will use it. ==================== ========= ======= ======== ======================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================================== @@ -941,7 +941,7 @@ Key Type Options Default ``lower`` int, float .. 3.0 Lower rejection threshold in units of sigma_corr*sigma, where sigma is the formal noise of the spectrum, and sigma_corr is an empirically determined correction to the formal error. The distribution of input chi (defined by chi = (data - model)/sigma) values is analyzed, and a correction factor to the formal error sigma_corr is returned which is multiplied into the formal errors. In this way, a rejection threshold of i.e. 3-sigma, will always correspond to roughly the same percentile. 
This renormalization is performed with coadd1d.renormalize_errors function, and guarantees that rejection is not too agressive in cases where the empirical errors determined from the chi-distribution differ significantly from the formal noise which is used to determine chi. ``mask_lyman_a`` bool .. True Mask the blueward of Lyman-alpha line during the fitting? ``maxiter`` int .. 2 Maximum number of iterations for the telluric + object model fitting. The code performs multiple iterations rejecting outliers at each step. The fit is then performed anew to the remaining good pixels. For this reason if you run with the disp=True option, you will see that the f(x) loss function gets progressively better during the iterations. -``minmax_coeff_bounds`` tuple .. (-5.0, 5.0) Parameters setting the polynomial coefficient bounds for sensfunc optimization. Bounds are currently determined as follows. We compute an initial fit to the sensfunc in the pypeit.core.telluric.init_sensfunc_model function. That deterines a set of coefficients. The bounds are then determined according to: [(np.fmin(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][0], obj_params['minmax_coeff_bounds'][0]), np.fmax(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][1], obj_params['minmax_coeff_bounds'][1]))] +``minmax_coeff_bounds`` tuple .. (-5.0, 5.0) Parameters setting the polynomial coefficient bounds for sensfunc optimization. Bounds are currently determined as follows. We compute an initial fit to the sensfunc in the :func:`~pypeit.core.telluric.init_sensfunc_model` function. That deterines a set of coefficients. The bounds are then determined according to: [(np.fmin(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][0], obj_params['minmax_coeff_bounds'][0]), np.fmax(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][1], obj_params['minmax_coeff_bounds'][1]))] ``model`` str .. ``exp`` Types of polynomial model. Options are poly, square, exp corresponding to normal polynomial, squared polynomial, or exponentiated polynomial ``npca`` int .. 8 Number of pca for the objmodel=qso qso PCA fit ``objmodel`` str .. .. The object model to be used for telluric fitting. Currently the options are: qso, star, and poly diff --git a/doc/scripts/base_par.rst b/doc/scripts/base_par.rst index e5fb84b0fe..810ba81ed6 100644 --- a/doc/scripts/base_par.rst +++ b/doc/scripts/base_par.rst @@ -54,7 +54,7 @@ the precedence order is as follows: configurations via its ``config_specific_par`` method. This allows the code to automatically define, e.g., the archived arc spectrum used for wavelength calibration given the grating used. For example, see - :func:`~pypeit.spectrographs.shane_kast.ShaneKastSpectrograph.config_specific_par` + :func:`~pypeit.spectrographs.shane_kast.ShaneKastBlueSpectrograph.config_specific_par` for Shane/Kast. These configuration-specific parameters are currently not documented here; however, they can be viewed by looking at the source code display in the API documentation. diff --git a/doc/setup.rst b/doc/setup.rst index 904e700e4c..2a046b17e5 100644 --- a/doc/setup.rst +++ b/doc/setup.rst @@ -216,7 +216,7 @@ this setup as determined using the relevant metadata. Each "setup block" is followed by a table listing the files and relevant metadata for all files matched to that instrument configuration. The data provided is specific to each instrument, as defined by, e.g., -:func:`~pypeit.spectrographs.keck_deimos.pypeit_file_keys`. +:func:`~pypeit.spectrographs.keck_deimos.KeckDEIMOSSpectrograph.pypeit_file_keys`. 
The ``sorted`` file is only provided as a means of assessing the automated setup identification and file sorting, and we encourage you diff --git a/pypeit/archive.py b/pypeit/archive.py index 0eaa69c132..888b7f49bf 100755 --- a/pypeit/archive.py +++ b/pypeit/archive.py @@ -77,7 +77,7 @@ class can be used on its own for saving metadata or passed to an col_names (list of str): The column names of the metadata - get_metadata_func (func): + get_metadata_func (callable): Function that reads metadata and file information from the objects being archived. diff --git a/pypeit/bitmask.py b/pypeit/bitmask.py index 40cb4c95ba..499ec57bb2 100644 --- a/pypeit/bitmask.py +++ b/pypeit/bitmask.py @@ -400,8 +400,9 @@ def unpack(self, value, flag=None): flag (:obj:`str`, :obj:`list`, optional): The specific bits to unpack. If None, all values are unpacked. + Returns: - tuple: A tuple of boolean numpy.ndarrays flagged according + tuple: A tuple of boolean `numpy.ndarray`_ objects flagged according to each bit. """ _flag = self._prep_flags(flag) diff --git a/pypeit/calibframe.py b/pypeit/calibframe.py index 7b648a8bdc..4bd7e7aa32 100644 --- a/pypeit/calibframe.py +++ b/pypeit/calibframe.py @@ -107,7 +107,7 @@ def set_paths(self, odir, setup, calib_id, detname): detname (:obj:`str`): The identifier used for the detector or detector mosaic for the relevant instrument; see - :func:`~pypeit.spectrograph.spectrograph.Spectrograph.get_det_name`. + :func:`~pypeit.spectrographs.spectrograph.Spectrograph.get_det_name`. """ self.calib_dir = Path(odir).resolve() # TODO: Keep this, or throw an error if the directory doesn't exist instead? @@ -325,7 +325,7 @@ def construct_calib_key(setup, calib_id, detname): detname (:obj:`str`): The identifier used for the detector or detector mosaic for the relevant instrument; see - :func:`~pypeit.spectrograph.spectrograph.Spectrograph.get_det_name`. + :func:`~pypeit.spectrographs.spectrograph.Spectrograph.get_det_name`. Returns: :obj:`str`: Calibration identifier. diff --git a/pypeit/calibrations.py b/pypeit/calibrations.py index 1e74681edb..40c14396ef 100644 --- a/pypeit/calibrations.py +++ b/pypeit/calibrations.py @@ -92,9 +92,9 @@ class Calibrations: Tilt calibration frame alignments (:class:`~pypeit.alignframe.Alignments`): Alignment calibration frame - msbias (:class:`~pypeit.buildimage.BiasImage`): + msbias (:class:`~pypeit.images.buildimage.BiasImage`): Bias calibration frame - msdark (:class:`~pypeit.buildimage.DarkImage`): + msdark (:class:`~pypeit.images.buildimage.DarkImage`): Dark calibration frame msbpm (`numpy.ndarray`_): Boolean array with the bad-pixel mask (pixels that should masked are diff --git a/pypeit/coadd1d.py b/pypeit/coadd1d.py index 3b0388c8f5..f6e8f94fc8 100644 --- a/pypeit/coadd1d.py +++ b/pypeit/coadd1d.py @@ -108,7 +108,7 @@ def load(self): def save(self, coaddfile, telluric=None, obj_model=None, overwrite=True): """ - Generate a :class:`OneSpec` object and write it to disk. + Generate a :class:`~pypeit.onespec.OneSpec` object and write it to disk. Args: coaddfile (str): @@ -373,24 +373,31 @@ def load(self): """ Load the arrays we need for performing echelle coadds. - Returns: - waves (list): - List of arrays with the wavelength arrays for each setup. The length of the list - equals the number of unique setups and each arrays in the list has shape = (nspec, norders, nexp) - fluxes (list): - List of arrays with the flux arrays for each setup. 
The length of the list - equals the number of unique setups and each arrays in the list has shape = (nspec, norders, nexp) - ivars (list): - List of arrays with the ivar arrays for each setup. The length of the list - equals the number of unique setups and each arrays in the list has shape = (nspec, norders, nexp) - gpms (list): - List of arrays with the gpm arrays for each setup. The length of the list - equals the number of unique setups and each arrays in the list has shape = (nspec, norders, nexp) - weights_sens (list): - List of arrays with the sensfunc weights for each setup. The length of the list - equals the number of unique setups and each arrays in the list has shape = (nspec, norders, nexp) - headers (list): - List of headers for each setup. The length of the list is the number of unique setups. + Returns + ------- + waves : list + List of arrays with the wavelength arrays for each setup. The length + of the list equals the number of unique setups and each arrays in + the list has shape = (nspec, norders, nexp) + fluxes : list + List of arrays with the flux arrays for each setup. The length of + the list equals the number of unique setups and each arrays in the + list has shape = (nspec, norders, nexp) + ivars : list + List of arrays with the ivar arrays for each setup. The length of + the list equals the number of unique setups and each arrays in the + list has shape = (nspec, norders, nexp) + gpms : list + List of arrays with the gpm arrays for each setup. The length of the + list equals the number of unique setups and each arrays in the list + has shape = (nspec, norders, nexp) + weights_sens : list + List of arrays with the sensfunc weights for each setup. The length + of the list equals the number of unique setups and each arrays in + the list has shape = (nspec, norders, nexp) + headers : list + List of headers for each setup. The length of the list is the number + of unique setups. """ diff --git a/pypeit/coadd2d.py b/pypeit/coadd2d.py index 34c48842c6..5ea748f9ee 100644 --- a/pypeit/coadd2d.py +++ b/pypeit/coadd2d.py @@ -1215,11 +1215,11 @@ def get_maskdef_dict(self, slit_idx, ref_trace_stack): Dummy method to get maskdef info. Overloaded by child methods. Args: - slit_idx: - ref_trace_stack: + slit_idx (?): + ref_trace_stack (?): Returns: - + dict: ? """ return dict(maskdef_id=None, maskdef_objpos=None, maskdef_designtab=None) @@ -1509,15 +1509,18 @@ def get_maskdef_dict(self, slit_idx, ref_trace_stack): """ Args: - slit_idx (:obj:`int`): index of a slit in the uncoadded frames - ref_trace_stack(`numpy.ndarray`_): Stack of reference traces about - which the images are rectified and coadded. It is the slitcen appropriately - shifted according the frames offsets. Shape is (nspec, nimgs). + slit_idx (:obj:`int`): + index of a slit in the uncoadded frames + ref_trace_stack (`numpy.ndarray`_): + Stack of reference traces about which the images are rectified + and coadded. It is the slitcen appropriately shifted according + the frames offsets. Shape is (nspec, nimgs). Returns: - :obj:`dict`: Dictionary containing all the maskdef info. The quantities saved - are: maskdef_id, maskdef_objpos, maskdef_slitcen, maskdef_designtab. To learn what - they are see :class:`~pypeit.slittrace.SlitTraceSet` datamodel. + :obj:`dict`: Dictionary containing all the maskdef info. The + quantities saved are: maskdef_id, maskdef_objpos, maskdef_slitcen, + maskdef_designtab. To learn what they are see + :class:`~pypeit.slittrace.SlitTraceSet` datamodel. 
""" # maskdef info diff --git a/pypeit/core/coadd.py b/pypeit/core/coadd.py index 5a9bd43c6a..b4b38d9c9d 100644 --- a/pypeit/core/coadd.py +++ b/pypeit/core/coadd.py @@ -941,42 +941,48 @@ def robust_median_ratio(flux, ivar, flux_ref, ivar_ref, mask=None, mask_ref=None Parameters ---------- - flux: `numpy.ndarray`_ - spectrum that will be rescaled. shape=(nspec,) - ivar: `numpy.ndarray`_ - inverse variance for the spectrum that will be rescaled. - Same shape as flux - flux_ref: `numpy.ndarray`_ - reference spectrum. Same shape as flux - mask: `numpy.ndarray`_, optional - boolean mask for the spectrum that will be rescaled. True=Good. - If not input, computed from inverse variance - ivar_ref: `numpy.ndarray`_, optional - inverse variance of reference spectrum. - mask_ref: `numpy.ndarray`_, optional - Boolean mask for reference spectrum. True=Good. If not input, computed from inverse variance. - ref_percentile: float, optional, default=70.0 - Percentile fraction used for selecting the minimum SNR cut from the reference spectrum. Pixels above this - percentile cut are deemed the "good" pixels and are used to compute the ratio. This must be a number - between 0 and 100. - min_good: float, optional, default = 0.05 - Minimum fraction of good pixels determined as a fraction of the total pixels for estimating the median ratio - maxiters: int, optional, default = 5 - Maximum number of iterations for astropy.stats.SigmaClip - sigrej: float, optional, default = 3.0 - Rejection threshold for astropy.stats.SigmaClip - max_factor: float, optional, default = 10.0, - Maximum allowed value of the returned ratio - snr_do_not_rescale: float, optional default = 1.0 - If the S/N ratio of the set of pixels (defined by upper ref_percentile in the reference spectrum) in the - input spectrum have a median value below snr_do_not_rescale, median rescaling will not be attempted - and the code returns ratio = 1.0. We also use this parameter to define the set of pixels (determined from - the reference spectrum) to compare for the rescaling. + flux : `numpy.ndarray`_ + spectrum that will be rescaled. shape=(nspec,) + ivar : `numpy.ndarray`_ + inverse variance for the spectrum that will be rescaled. Same shape as + flux + flux_ref : `numpy.ndarray`_ + reference spectrum. Same shape as flux + mask : `numpy.ndarray`_, optional + boolean mask for the spectrum that will be rescaled. True=Good. If not + input, computed from inverse variance + ivar_ref : `numpy.ndarray`_, optional + inverse variance of reference spectrum. + mask_ref : `numpy.ndarray`_, optional + Boolean mask for reference spectrum. True=Good. If not input, computed + from inverse variance. + ref_percentile : float, optional, default=70.0 + Percentile fraction used for selecting the minimum SNR cut from the + reference spectrum. Pixels above this percentile cut are deemed the + "good" pixels and are used to compute the ratio. This must be a number + between 0 and 100. 
+ min_good : float, optional, default = 0.05 + Minimum fraction of good pixels determined as a fraction of the total + pixels for estimating the median ratio + maxiters : int, optional, default = 5 + Maximum number of iterations for astropy.stats.SigmaClip + sigrej : float, optional, default = 3.0 + Rejection threshold for astropy.stats.SigmaClip + max_factor : float, optional, default = 10.0, + Maximum allowed value of the returned ratio + snr_do_not_rescale : float, optional, default = 1.0 + If the S/N ratio of the set of pixels (defined by upper ref_percentile + in the reference spectrum) in the input spectrum have a median value + below snr_do_not_rescale, median rescaling will not be attempted and the + code returns ratio = 1.0. We also use this parameter to define the set + of pixels (determined from the reference spectrum) to compare for the + rescaling. Returns ------- - ratio: float - the number that must be multiplied into flux in order to get it to match up with flux_ref + ratio : float + the number that must be multiplied into flux in order to get it to match + up with flux_ref """ ## Mask for reference spectrum and your spectrum @@ -1970,7 +1976,7 @@ def scale_spec_stack(wave_grid, wave_grid_mid, waves, fluxes, ivars, gpms, sns, array of hand scale factors, not well tested sn_min_polyscale : float, optional, default=2.0 maximum SNR for perforing median scaling - sn_min_medscale : float, optional default=0.5 + sn_min_medscale : float, optional, default=0.5 minimum SNR for perforing median scaling debug : bool, optional, default=False show interactive QA plot @@ -2103,7 +2109,7 @@ def combspec(waves, fluxes, ivars, gpms, sn_smooth_npix, S/N due to systematics. lower : float, optional, default=3.0 lower rejection threshold for djs_reject - upper : float: optional, default=3.0 + upper : float, optional, default=3.0 upper rejection threshold for djs_reject maxrej : int, optional maximum number of pixels to reject in each iteration for djs_reject. @@ -2380,7 +2386,7 @@ def ech_combspec(waves_arr_setup, fluxes_arr_setup, ivars_arr_setup, gpms_arr_se the orders. The length of the list is nsetups. Each element of the list corresponds to a dinstinct setup and each numpy array has shape=(nspec, norder, nexp) - setup_ids : list of strings, optional, default=None + setup_ids : list, optional, default=None List of strings indicating the name of each setup. If None uppercase letters A, B, C, etc. will be used nbests : int or list or `numpy.ndarray`_, optional @@ -2443,7 +2449,7 @@ def ech_combspec(waves_arr_setup, fluxes_arr_setup, ivars_arr_setup, gpms_arr_se S/N due to lower : float, optional, default=3.0 lower rejection threshold for djs_reject - upper : float: optional, default=3.0 + upper : float, optional, default=3.0 upper rejection threshold for djs_reject maxrej : int, optional maximum number of pixels to reject in each iteration for djs_reject. @@ -2865,24 +2871,27 @@ def get_wave_bins(thismask_stack, waveimg_stack, wave_grid): Parameters ---------- - thismask_stack: list - List of boolean arrays containing the masks indicating which pixels are on - the slit in question. `True` values are on the slit; - `False` values are off the slit. Length of the list is nimgs. Shapes of the individual elements in the list - are (nspec, nspat), but each image can have a different shape. - waveimg_stack: list - List of the wavelength images, each of which is a float `numpy.ndarray`_. Length of the list is nimgs. 
- Shapes of the individual elements in the list are (nspec, nspat), but each image can have a different shape. - wave_grid: array shape (ngrid) + thismask_stack : list + List of boolean arrays containing the masks indicating which pixels are + on the slit in question. `True` values are on the slit; `False` values + are off the slit. Length of the list is nimgs. Shapes of the + individual elements in the list are (nspec, nspat), but each image can + have a different shape. + + waveimg_stack : list + List of the wavelength images, each of which is a float + `numpy.ndarray`_. Length of the list is nimgs. Shapes of the individual + elements in the list are (nspec, nspat), but each image can have a + different shape. + wave_grid : `numpy.ndarray`_, shape=(ngrid,) The wavelength grid created for the 2d coadd Returns ------- - wave_bins : `numpy.ndarray`_ - shape (ind_upper-ind_lower + 1, ) - Wavelength bins that are relevant given the illuminated pixels (thismask_stack) and - wavelength coverage (waveimg_stack) of the image stack - + wave_bins : `numpy.ndarray`_, shape = (ind_upper-ind_lower + 1, ) + Wavelength bins that are relevant given the illuminated pixels + (thismask_stack) and wavelength coverage (waveimg_stack) of the image + stack """ # Determine the wavelength grid that we will use for the current slit/order @@ -3023,12 +3032,12 @@ def compute_coadd2d(ref_trace_stack, sciimg_stack, sciivar_stack, skymodel_stack are (nspec, nspat), but each image can have a different shape. weights (list, optional): The weights used when combining the rectified images (see - :func:`weighted_combine`). If weights is None a + :func:`~pypeit.core.combine.weighted_combine`). If weights is None a uniform weighting is used. If weights is not None then it must be a list of length nimgs. The individual elements of the list can either be floats, indiciating the weight to be used for each image, or arrays with shape = (nspec,) or shape = (nspec, nspat), indicating pixel weights for the individual images. Weights are broadast to the correct size - of the image stacks (see :func:`broadcast_weights`), as necessary. + of the image stacks (see :func:`~pypeit.core.combine.broadcast_weights`), as necessary. spat_samp_fact (float, optional): Spatial sampling for 2d coadd spatial bins in pixels. A value > 1.0 (i.e. bigger pixels) will downsample the images spatially, whereas < @@ -3037,7 +3046,7 @@ def compute_coadd2d(ref_trace_stack, sciimg_stack, sciivar_stack, skymodel_stack Wavelength grid in log10(wave) onto which the image stacks will be rectified. The code will automatically choose the subset of this grid encompassing the wavelength coverage of - the image stacks provided (see :func:`waveimg_stack`). + the image stacks provided (see ``waveimg_stack``). Either `loglam_grid` or `wave_grid` must be provided. wave_grid (`numpy.ndarray`_, optional): Same as `loglam_grid` but in angstroms instead of diff --git a/pypeit/core/collate.py b/pypeit/core/collate.py index 0c4bb2a949..97f9ba2018 100755 --- a/pypeit/core/collate.py +++ b/pypeit/core/collate.py @@ -4,6 +4,8 @@ # -*- coding: utf-8 -*- """ This module contains code for collating multiple 1d spectra source object. + +.. include:: ../include/links.rst """ #TODO -- Consider moving this into the top-level, i.e. out of core @@ -32,7 +34,7 @@ class SourceObject: Args: spec1d_obj (:obj:`pypeit.specobj.SpecObj`): The initial spectra of the group as a SpecObj. 
- spec1d_header (:obj:`astropy.io.fits.Header`): + spec1d_header (`astropy.io.fits.Header`_): The header for the first spec1d file in the group. spec1d_file (str): Filename of the first spec1d file in the group. spectrograph (:obj:`pypeit.spectrographs.spectrograph.Spectrograph`): @@ -47,7 +49,7 @@ class SourceObject: The list of spectra in the group as SpecObj objects. spec1d_file_list (list of str): The pathnames of the spec1d files in the group. - spec1d_header_list: (list of :obj:`astropy.io.fits.Header`): + spec1d_header_list: (list of `astropy.io.fits.Header`_): The headers of the spec1d files in the group """ def __init__(self, spec1d_obj, spec1d_header, spec1d_file, spectrograph, match_type): @@ -92,7 +94,7 @@ def _config_key_match(self, header): ones for this SourceObject. Args: - header (:obj:`astropy.io.fits.Header`): + header (`astropy.io.fits.Header`_): Header from a spec1d file. Returns: @@ -129,16 +131,13 @@ def match(self, spec_obj, spec1d_header, tolerance, unit = u.arcsec): Args: spec_obj (:obj:`pypeit.specobj.SpecObj`): The SpecObj to compare with this SourceObject. - - spec1d_header (:obj:`astropy.io.fits.Header`): + spec1d_header (`astropy.io.fits.Header`_): The header from the spec1d that dontains the SpecObj. - tolerance (float): Maximum distance that two spectra can be from each other to be considered to be from the same source. Measured in floating point pixels or as an angular distance (see ``unit1`` argument). - - unit (:obj:`astropy.units.Unit`): + unit (`astropy.units.Unit`_): Units of ``tolerance`` argument if match_type is 'ra/dec'. Defaults to arcseconds. Igored if match_type is 'pixel'. @@ -189,7 +188,7 @@ def collate_spectra_by_source(source_list, tolerance, unit=u.arcsec): Maximum distance that two spectra can be from each other to be considered to be from the same source. Measured in floating point pixels or as an angular distance (see ``unit`` argument). - unit (:obj:`astropy.units.Unit`): + unit (`astropy.units.Unit`_): Units of ``tolerance`` argument if match_type is 'ra/dec'. Defaults to arcseconds. Ignored if match_type is 'pixel'. diff --git a/pypeit/core/datacube.py b/pypeit/core/datacube.py index 0061fe4425..8333a8709d 100644 --- a/pypeit/core/datacube.py +++ b/pypeit/core/datacube.py @@ -182,7 +182,9 @@ def wcs(self): def dar_fitfunc(radec, coord_ra, coord_dec, datfit, wave, obstime, location, pressure, temperature, rel_humidity): - """ Generates a fitting function to calculate the offset due to differential atmospheric refraction + """ + Generates a fitting function to calculate the offset due to differential + atmospheric refraction Args: radec (tuple): @@ -205,8 +207,7 @@ def dar_fitfunc(radec, coord_ra, coord_dec, datfit, wave, obstime, location, pre Outside relative humidity at `location`. This should be between 0 to 1. Returns: - chisq (float): - chi-squared difference between datfit and model + float: chi-squared difference between datfit and model """ (diff_ra, diff_dec) = radec # Generate the coordinate with atmospheric conditions @@ -784,7 +785,7 @@ def load_imageWCS(filename, ext=0): Returns: image (`numpy.ndarray`_): 2D image data - imgwcs (`astropy.wcs.wcs.WCS`_): The WCS of the image + imgwcs (`astropy.wcs.WCS`_): The WCS of the image """ imghdu = fits.open(filename) image = imghdu[ext].data.T @@ -818,7 +819,7 @@ def make_whitelight_frompixels(all_ra, all_dec, all_wave, all_sci, all_wghts, al all_ivar (`numpy.ndarray`_, optional): 1D flattened array containing of the inverse variance of each pixel from all spec2d files. 
If provided, inverse variance images will be calculated and returned for each white light image. - whitelightWCS (`astropy.wcs.wcs.WCS`_, optional): + whitelightWCS (`astropy.wcs.WCS`_, optional): The WCS of a reference white light image. If supplied, you must also supply numra and numdec. numra (int, optional): @@ -922,7 +923,7 @@ def create_wcs(cubepar, all_ra, all_dec, all_wave, dspat, dwv, collapse=False, e Returns ------- - cubewcs : `astropy.wcs.wcs.WCS`_ + cubewcs : `astropy.wcs.WCS`_ astropy WCS to be used for the combined cube voxedges : tuple A three element tuple containing the bin edges in the x, y (spatial) and @@ -1001,7 +1002,7 @@ def generate_WCS(crval, cdelt, equinox=2000.0, name="PYP_SPEC"): Equinox of the WCS Returns: - `astropy.wcs.wcs.WCS`_ : astropy WCS to be used for the combined cube + `astropy.wcs.WCS`_ : astropy WCS to be used for the combined cube """ # Create a new WCS object. msgs.info("Generating WCS") @@ -1123,7 +1124,7 @@ def generate_image_subpixel(image_wcs, all_ra, all_dec, all_wave, all_sci, all_i Generate a white light image from the input pixels Args: - image_wcs (`astropy.wcs.wcs.WCS`_): + image_wcs (`astropy.wcs.WCS`_): World coordinate system to use for the white light images. all_ra (`numpy.ndarray`_): 1D flattened array containing the right ascension of each pixel (units = degrees) @@ -1217,7 +1218,7 @@ def generate_cube_subpixel(outfile, output_wcs, all_ra, all_dec, all_wave, all_s Args: outfile (str): Filename to be used to save the datacube - output_wcs (`astropy.wcs.wcs.WCS`_): + output_wcs (`astropy.wcs.WCS`_): Output world coordinate system. all_ra (`numpy.ndarray`_): 1D flattened array containing the right ascension of each pixel (units = degrees) @@ -1348,7 +1349,7 @@ def subpixellate(output_wcs, all_ra, all_dec, all_wave, all_sci, all_ivar, all_w and better looking cubes, versus no sampling and better behaved errors. Args: - output_wcs (`astropy.wcs.wcs.WCS`_): + output_wcs (`astropy.wcs.WCS`_): Output world coordinate system. all_ra (`numpy.ndarray`_) 1D flattened array containing the right ascension of each pixel (units = degrees) @@ -1517,7 +1518,7 @@ def get_output_filename(fil, par_outfile, combine, idx=1): Index of filename to be saved. Required if combine=False. Returns: - outfile (str): The output filename to use. + str: The output filename to use. """ if combine: if par_outfile == "": @@ -1535,14 +1536,16 @@ def get_output_filename(fil, par_outfile, combine, idx=1): def get_output_whitelight_filename(outfile): - """ Given the output filename of a datacube, create an appropriate whitelight fits file name + """ + Given the output filename of a datacube, create an appropriate whitelight + fits file name Args: outfile (str): The output filename used for the datacube. Returns: - out_wl_filename (str): The output filename to use for the whitelight image. + str: The output filename to use for the whitelight image. """ out_wl_filename = os.path.splitext(outfile)[0] + "_whitelight.fits" return out_wl_filename diff --git a/pypeit/core/extract.py b/pypeit/core/extract.py index 53785e1fe2..634f04bffc 100644 --- a/pypeit/core/extract.py +++ b/pypeit/core/extract.py @@ -28,9 +28,9 @@ def extract_optimal(sciimg, ivar, mask, waveimg, skyimg, thismask, oprof, r""" Perform optimal extraction `(Horne 1986) `_ - for a single :class:`~pypeit.specobjs.SpecObj`. + for a single :class:`~pypeit.specobj.SpecObj`. 
- The :class:`~pypeit.specobjs.SpecObj` object is changed in place with optimal attributes + The :class:`~pypeit.specobj.SpecObj` object is changed in place with optimal attributes being filled with the extraction parameters, and additional sky and noise estimates being added. The following are the attributes that are filled here: @@ -308,11 +308,11 @@ def extract_asym_boxcar(sciimg, left_trace, righ_trace, gpm=None, ivar=None): def extract_boxcar(sciimg, ivar, mask, waveimg, skyimg, spec, fwhmimg=None, base_var=None, count_scale=None, noise_floor=None): r""" - Perform boxcar extraction for a single :class:`~pypeit.specobjs.SpecObj`. + Perform boxcar extraction for a single :class:`~pypeit.specobj.SpecObj`. The size of the boxcar must be available as an attribute of the :class:`~pypeit.specobj.SpecObj` object. - The :class:`~pypeit.specobjs.SpecObj` object is changed in place with boxcar attributes + The :class:`~pypeit.specobj.SpecObj` object is changed in place with boxcar attributes being filled with the extraction parameters, and additional sky and noise estimates being added. The following are the attributes that are filled here: diff --git a/pypeit/core/findobj_skymask.py b/pypeit/core/findobj_skymask.py index 1f9d69c8fd..b9f8bca86e 100644 --- a/pypeit/core/findobj_skymask.py +++ b/pypeit/core/findobj_skymask.py @@ -640,7 +640,8 @@ def ech_cutobj_on_snr( nperorder:int=2, min_snr:float=1.0, nabove_min_snr:int=2, box_radius:float=2.0, inmask:np.ndarray=None): - """Cut down objects based on S/N + """ + Cut down objects based on S/N This routine: diff --git a/pypeit/core/fitting.py b/pypeit/core/fitting.py index 8995cca87b..d72473eca5 100644 --- a/pypeit/core/fitting.py +++ b/pypeit/core/fitting.py @@ -476,7 +476,7 @@ def robust_optimize(ydata, fitfunc, arg_dict, maxiter=10, inmask=None, invvar=No one to fit a more general model using the optimizer of the users choice. If you are fitting simple functions like Chebyshev or Legednre polynomials using a linear least-squares algorithm, you - should use :func:robust_polyfit_djs` instead of this function. + should use :func:`robust_fit` instead of this function. Args: ydata (`numpy.ndarray`_): @@ -838,7 +838,8 @@ def polyfit2d_general(x, y, z, deg, w=None, function='polynomial', def twoD_Gaussian(tup, amplitude, xo, yo, sigma_x, sigma_y, theta, offset): - """ A 2D Gaussian to be used to fit the cross-correlation + """ + A 2D Gaussian to be used to fit the cross-correlation Args: tup (tuple): @@ -847,7 +848,7 @@ def twoD_Gaussian(tup, amplitude, xo, yo, sigma_x, sigma_y, theta, offset): The amplitude of the 2D Gaussian xo (float): The centre of the Gaussian in the x direction - yo (float: + yo (float): The centre of the Gaussian in the y direction sigma_x (float): The dispersion of the Gaussian in the x direction diff --git a/pypeit/core/flat.py b/pypeit/core/flat.py index 908dd810ee..f79b3b4ac5 100644 --- a/pypeit/core/flat.py +++ b/pypeit/core/flat.py @@ -151,7 +151,7 @@ def construct_illum_profile(norm_spec, spat_coo, slitwidth, spat_gpm=None, spat_ good-pixel mask should be for a single slit. The iterations involve constructing the illumination profile - using :func:`illum_profile` and then rejecting deviant residuals. + using :func:`illum_filter` and then rejecting deviant residuals. Each rejection iteration recomputes the standard deviation and pixels to reject from the full input set (i.e., rejected pixels are not kept between iterations). 
Rejection iterations are only diff --git a/pypeit/core/flexure.py b/pypeit/core/flexure.py index 5fee2e3f94..9fbf36a82a 100644 --- a/pypeit/core/flexure.py +++ b/pypeit/core/flexure.py @@ -121,9 +121,9 @@ def spec_flex_shift(obj_skyspec, arx_skyspec, arx_fwhm_pix, spec_fwhm_pix=None, """ Calculate shift between object sky spectrum and archive sky spectrum Args: - obj_skyspec (:class:`linetools.spectra.xspectrum1d.XSpectrum1d`): + obj_skyspec (`linetools.spectra.xspectrum1d.XSpectrum1d`_): Spectrum of the sky related to our object - arx_skyspec (:class:`linetools.spectra.xspectrum1d.XSpectrum1d`): + arx_skyspec (`linetools.spectra.xspectrum1d.XSpectrum1d`_): Archived sky spectrum arx_fwhm_pix (:obj:`float`): Spectral FWHM (in pixels) of the archived sky spectrum. @@ -320,9 +320,9 @@ def get_fwhm_gauss_smooth(arx_skyspec, obj_skyspec, arx_fwhm_pix, spec_fwhm_pix= """ Args: - arx_skyspec (:class:`linetools.spectra.xspectrum1d.XSpectrum1d`): + arx_skyspec (`linetools.spectra.xspectrum1d.XSpectrum1d`_): Archived sky spectrum. - obj_skyspec (:class:`linetools.spectra.xspectrum1d.XSpectrum1d`): + obj_skyspec (`linetools.spectra.xspectrum1d.XSpectrum1d`_): Sky spectrum associated with the science target. arx_fwhm_pix (:obj:`float`): Spectral FWHM (in pixels) of the archived sky spectrum. @@ -408,7 +408,7 @@ def spec_flex_shift_global(slit_specs, islit, sky_spectrum, arx_fwhm_pix, empty_ this list are sky spectra, extracted from the center of each slit. islit (:obj:`int`): Index of the slit where the sky spectrum related to our object is. - sky_spectrum (:class:`linetools.spectra.xspectrum1d.XSpectrum1d`): + sky_spectrum (`linetools.spectra.xspectrum1d.XSpectrum1d`_): Archived sky spectrum. arx_fwhm_pix (:obj:`float`): Spectral FWHM (in pixels) of the archived sky spectrum. @@ -477,11 +477,11 @@ def spec_flex_shift_local(slits, slitord, specobjs, islit, sky_spectrum, arx_fwh Slit trace set. slitord (`numpy.ndarray`_): Array of slit/order numbers. - specobjs (:class:`~pypeit.specobjs.Specobjs`, optional): + specobjs (:class:`~pypeit.specobjs.SpecObjs`, optional): Spectral extractions. islit (:obj:`int`): Index of the slit where the sky spectrum related to our object is. - sky_spectrum (:class:`linetools.spectra.xspectrum1d.XSpectrum1d`): + sky_spectrum (`linetools.spectra.xspectrum1d.XSpectrum1d`_): Archived sky spectrum. arx_fwhm_pix (:obj:`float`): Spectral FWHM (in pixels) of the archived sky spectrum. @@ -610,7 +610,7 @@ def spec_flexure_slit(slits, slitord, slit_bpm, sky_file, method="boxcar", speco extracted. This method uses a spectrum (stored in slitspecs) that is extracted from the center of each slit. - specobjs (:class:`~pypeit.specobjs.Specobjs`, optional): + specobjs (:class:`~pypeit.specobjs.SpecObjs`, optional): Spectral extractions slit_specs (:obj:`list`, optional): A list of linetools.xspectrum1d, one for each slit. The spectra stored in @@ -815,8 +815,8 @@ def get_archive_spectrum(sky_file): Sky file Returns: - (:obj:`XSpectrum1D`): Sky spectrum - (float): FWHM of the sky lines in pixels. + tuple: The sky spectrum (`linetools.spectra.xspectrum1d.XSpectrum1D`) + and the FWHM (float) of the sky lines in pixels. """ # Load Archive. Save the fwhm to avoid the performance hit from calling it on the archive sky spectrum # multiple times @@ -858,7 +858,7 @@ def get_sky_spectrum(sciimg, ivar, waveimg, thismask, global_sky, box_radius, sl extracted. For example, DET01. 
Returns: - (:obj:`XSpectrum1D`): Sky spectrum + (`linetools.spectra.xspectrum1d.XSpectrum1D`): Sky spectrum """ spec = specobj.SpecObj(PYPELINE=pypeline, SLITID=-1, DET=str(det)) spec.trace_spec = np.arange(slits.nspec) @@ -917,9 +917,9 @@ def spec_flexure_qa(slitords, bpm, basename, flex_list, Used to generate the output file name flex_list (list): list of :obj:`dict` objects containing the flexure information - specobjs (:class:`~pypeit.specobjs.Specobjs`, optional): + specobjs (:class:`~pypeit.specobjs.SpecObjs`, optional): Spectrally extracted objects - out_dir (str, optonal): + out_dir (str, optional): Path to the output directory for the QA plots. If None, the current is used. """ @@ -1064,29 +1064,30 @@ def calculate_image_phase(imref, imshift, gpm_ref=None, gpm_shift=None, maskval= skimage is not installed, a standard (unmasked) cross-correlation is used. - Args: - im_ref (`numpy.ndarray`_): - Reference image - imshift (`numpy.ndarray`_): - Image that we want to measure the shift of (relative to im_ref) - gpm_ref (`numpy.ndarray`_): - Mask of good pixels (True = good) in the reference image - gpm_shift (`numpy.ndarray`_): - Mask of good pixels (True = good) in the shifted image - maskval (float, optional): - If gpm_ref and gpm_shift are both None, a single value can be specified - and this value will be masked in both images. - - Returns: - ra_diff (float): - Relative shift (in pixels) of image relative to im_ref (x direction). - In order to align image with im_ref, ra_diff should be added to the - x-coordinates of image - dec_diff (float): - Relative shift (in pixels) of image relative to im_ref (y direction). - In order to align image with im_ref, dec_diff should be added to the - y-coordinates of image - + Parameters + ---------- + im_ref : `numpy.ndarray`_ + Reference image + imshift : `numpy.ndarray`_ + Image that we want to measure the shift of (relative to im_ref) + gpm_ref : `numpy.ndarray`_ + Mask of good pixels (True = good) in the reference image + gpm_shift : `numpy.ndarray`_ + Mask of good pixels (True = good) in the shifted image + maskval : float, optional + If gpm_ref and gpm_shift are both None, a single value can be specified + and this value will be masked in both images. + + Returns + ------- + ra_diff : float + Relative shift (in pixels) of image relative to im_ref (x direction). + In order to align image with im_ref, ra_diff should be added to the + x-coordinates of image + dec_diff : float + Relative shift (in pixels) of image relative to im_ref (y direction). + In order to align image with im_ref, dec_diff should be added to the + y-coordinates of image """ # Do some checks first try: @@ -1327,7 +1328,7 @@ class MultiSlitFlexure(DataContainer): internals = ['flex_par', # Parameters (FlexurePar) 'spectrograph', # spectrograph - 'specobjs', # Specobjs object + 'specobjs', # SpecObjs object 'sobj_idx', # (ndet, nslits); Index to specobjs (tuple of arrays) 'sky_table', # Sky line table # 2D models diff --git a/pypeit/core/flux_calib.py b/pypeit/core/flux_calib.py index a9ea2d9804..bd17c4b666 100644 --- a/pypeit/core/flux_calib.py +++ b/pypeit/core/flux_calib.py @@ -120,7 +120,7 @@ def find_standard_file(ra, dec, toler=20.*units.arcmin, check=False): Object right-ascension in decimal deg dec : float Object declination in decimal deg - toler : :class:`astropy.units.quantity.Quantity`, optional + toler : `astropy.units.Quantity`_, optional Tolerance on matching archived standards to input. Expected to be in arcmin. 
check : bool, optional @@ -439,11 +439,14 @@ def load_extinction_data(longitude, latitude, extinctfilepar, Parameters ---------- - longitude, latitude: Geocentric coordinates in degrees (floats). - extinctfilepar : (str) + longitude : float + Geocentric longitude in degrees. + latitude : float + Geocentric latitude in degrees. + extinctfilepar : str The sensfunc['extinct_file'] parameter, used to determine which extinction file to load. - toler : Angle, optional + toler : `astropy.coordinates.Angle`_, optional Tolerance for matching detector to site (5 deg) Returns ------- @@ -496,17 +499,18 @@ def load_extinction_data(longitude, latitude, extinctfilepar, def extinction_correction(wave, airmass, extinct): """ - Derive extinction correction + Derive extinction correction. + Based on algorithm in LowRedux (long_extinct) Parameters ---------- - wave: `numpy.ndarray`_ + wave : `numpy.ndarray`_ Wavelengths for interpolation. Should be sorted. Assumes angstroms. airmass : float Airmass - extinct : Table + extinct : `astropy.table.Table`_ Table of extinction values Returns ------- @@ -615,7 +619,7 @@ def sensfunc(wave, counts, counts_ivar, counts_mask, exptime, airmass, std_dict, Args: wave (`numpy.ndarray`_): Wavelength of the star. Shape (nspec,) or (nspec, norders) - counts (ndarray): + counts (`numpy.ndarray`_): Flux (in counts) of the star. Shape (nspec,) or (nspec, norders) counts_ivar (`numpy.ndarray`_): Inverse variance of the star counts. Shape (nspec,) or (nspec, norders) @@ -1368,21 +1372,21 @@ def zeropoint_qa_plot(wave, zeropoint_data, zeropoint_data_gpm, zeropoint_fit, z Parameters ---------- - wave: `numpy.ndarray`_ + wave : `numpy.ndarray`_ Wavelength array - zeropoint_data: `numpy.ndarray`_ + zeropoint_data : `numpy.ndarray`_ Zeropoint data array - zeropoint_data_gpm: bool `numpy.ndarray`_ + zeropoint_data_gpm : boolean `numpy.ndarray`_ Good pixel mask array for zeropoint_data - zeropoint_fit: `numpy.ndarray`_ + zeropoint_fit : `numpy.ndarray`_ Zeropoint fitting array - zeropoint_fit_gpm: bool zeropoint_fit + zeropoint_fit_gpm : boolean `numpy.ndarray`_ Good pixel mask array for zeropoint_fit - title: str, optional + title : str, optional Title for the QA plot - axis: None or matplotlib.pyplot axis, optional - axis used for ploting. - show: bool, optional + axis : `matplotlib.axes.Axes`_, optional + axis used for plotting. If None, a new plot is created + show : bool, optional Whether to show the QA plot """ @@ -1426,26 +1430,26 @@ def standard_zeropoint(wave, Nlam, Nlam_ivar, Nlam_gpm, flam_true, mask_recomb=N inverse variance of counts/s/Angstrom Nlam_gpm : `numpy.ndarray`_ mask for bad pixels. True is good. - flam_true : Quantity array - standard star true flux (erg/s/cm^2/A) + flam_true : `astropy.units.Quantity`_ + array with true standard star flux (erg/s/cm^2/A) mask_recomb: `numpy.ndarray`_ mask for hydrogen (and/or helium II) recombination lines. True is good. mask_tell: `numpy.ndarray`_ mask for telluric regions. True is good. - maxiter : integer + maxiter : int maximum number of iterations for polynomial fit - upper : integer + upper : int number of sigma for rejection in polynomial - lower : integer + lower : int number of sigma for rejection in polynomial - polyorder : integer + polyorder : int order of polynomial fit balm_mask_wid: float Mask parameter for Balmer absorption. A region equal to balm_mask_wid in units of angstrom is masked. - nresln: integer/float + nresln: int, float number of resolution elements between breakpoints - resolution: integer/float.
+ resolution: int, float The spectral resolution. This paramters should be removed in the future. The resolution should be estimated from spectra directly. debug : bool diff --git a/pypeit/core/gui/identify.py b/pypeit/core/gui/identify.py index 46c3b26b0e..b54e455d01 100644 --- a/pypeit/core/gui/identify.py +++ b/pypeit/core/gui/identify.py @@ -1,3 +1,6 @@ +""" +.. include:: ../include/links.rst +""" from datetime import datetime import os import copy @@ -199,12 +202,12 @@ def initialise(cls, arccen, lamps, slits, slit=0, par=None, wv_calib_all=None, Parameters ---------- - arccen : ndarray + arccen : `numpy.ndarray`_ Arc spectrum lamps : :obj:`list` List of arc lamps to be used for wavelength calibration. E.g., ['ArI','NeI','KrI','XeI'] - slits : :class:`SlitTraceSet` + slits : :class:`~pypeit.slittrace.SlitTraceSet` Data container with slit trace information slit : int, optional The slit to be used for wavelength calibration @@ -408,7 +411,7 @@ def linelist_select(self, event): Note, only the LMB works. Args: - event (Event): A matplotlib event instance + event (`matplotlib.backend_bases.Event`_): A matplotlib event instance """ if event.button == 1: self.update_line_id() @@ -594,7 +597,7 @@ def draw_callback(self, event): """Draw the lines and annotate with their IDs Args: - event (Event): A matplotlib event instance + event (`matplotlib.backend_bases.Event`_): A matplotlib event instance """ # Get the background self.background = self.canvas.copy_from_bbox(self.axes['main'].bbox) @@ -624,10 +627,11 @@ def get_ann_ypos(self, scale=1.02): """Calculate the y locations of the annotated IDs Args: - scale (float): Scale the location relative to the maximum value of the spectrum + scale (float): + Scale the location relative to the maximum value of the spectrum Returns: - ypos (ndarray): y locations of the annotations + `numpy.ndarray`_: y locations of the annotations """ ypos = np.zeros(self._detns.size) for xx in range(self._detns.size): @@ -644,10 +648,11 @@ def get_ind_under_point(self, event): """Get the index of the line closest to the cursor Args: - event (Event): Matplotlib event instance containing information about the event + event (`matplotlib.backend_bases.Event`_): + Matplotlib event instance containing information about the event Returns: - ind (int): Index of the spectrum where the event occurred + int: Index of the spectrum where the event occurred """ ind = np.argmin(np.abs(self.plotx - event.xdata)) return ind @@ -656,10 +661,11 @@ def get_axisID(self, event): """Get the ID of the axis where an event has occurred Args: - event (Event): Matplotlib event instance containing information about the event + event (`matplotlib.backend_bases.Event`_): + Matplotlib event instance containing information about the event Returns: - axisID (int, None): Axis where the event has occurred + int: Axis where the event has occurred """ if event.inaxes == self.axes['main']: return 0 @@ -680,7 +686,7 @@ def get_results(self): manually identified all lines. 
Returns: - wvcalib (dict): Dict of wavelength calibration solutions + dict: Dict of wavelength calibration solutions """ wvcalib = {} # Check that a result exists: @@ -800,7 +806,8 @@ def button_press_callback(self, event): """What to do when the mouse button is pressed Args: - event (Event): Matplotlib event instance containing information about the event + event (`matplotlib.backend_bases.Event`_): + Matplotlib event instance containing information about the event """ if event.inaxes is None: return @@ -834,10 +841,8 @@ def button_release_callback(self, event): """What to do when the mouse button is released Args: - event (Event): Matplotlib event instance containing information about the event - - Returns: - None + event (`matplotlib.backend_bases.Event`_): + Matplotlib event instance containing information about the event """ self._msedown = False if event.inaxes is None: @@ -885,10 +890,8 @@ def key_press_callback(self, event): """What to do when a key is pressed Args: - event (Event): Matplotlib event instance containing information about the event - - Returns: - None + event (`matplotlib.backend_bases.Event`_): + Matplotlib event instance containing information about the event """ # Check that the event is in an axis... if not event.inaxes: @@ -1069,20 +1072,20 @@ def delete_line_id(self): self._lineflg[rmid] = 0 def fitsol_value(self, xfit=None, idx=None): - """Calculate the wavelength at a pixel + """ + Calculate the wavelength at a pixel Parameters ---------- - - xfit : ndarray, float + xfit : `numpy.ndarray`_, float Pixel values that the user wishes to evaluate the wavelength - idx : ndarray, int + idx : `numpy.ndarray`_, int Index of the arc line detections that the user wishes to evaluate the wavelength Returns ------- - - disp : The wavelength (Angstroms) of the requested pixels + disp : `numpy.ndarray`_, float + The wavelength (Angstroms) of the requested pixels """ if xfit is None: xfit = self._detns @@ -1099,11 +1102,15 @@ def fitsol_deriv(self, xfit=None, idx=None): """Calculate the dispersion as a function of wavelength Args: - xfit (ndarray, float): Pixel values that the user wishes to evaluate the wavelength - idx (int): Index of the arc line detections that the user wishes to evaluate the wavelength + xfit (`numpy.ndarray`_, float): + Pixel values that the user wishes to evaluate the wavelength + idx (int): + Index of the arc line detections that the user wishes to + evaluate the wavelength Returns: - disp (ndarray, float, None): The dispersion (Angstroms/pixel) as a function of wavelength + ndarray, float: The dispersion (Angstroms/pixel) as a function of + wavelength """ if xfit is None: xfit = self._detns diff --git a/pypeit/core/gui/object_find.py b/pypeit/core/gui/object_find.py index b06d0370aa..5541341445 100644 --- a/pypeit/core/gui/object_find.py +++ b/pypeit/core/gui/object_find.py @@ -5,6 +5,8 @@ Implement color scaling with RMB click+drag +.. 
include:: ../include/links.rst + + """ import os @@ -89,16 +91,25 @@ def from_dict(self, obj_dict, det): self.add_object(det, pos_spat, pos_spec, obj_dict['traces'][ii], spec_trace, obj_dict['fwhm'][ii], addrm=0) def add_object(self, det, pos_spat, pos_spec, trc_spat, trc_spec, fwhm, addrm=1): - """Add an object trace + """ + Add an object trace Args: - det (int): Detector to add a slit on - pos_spat (float): Spatial pixel position - pos_spec (float): Spectral pixel position - trc_spat (ndarray): Spatial trace of object - trc_spec (ndarray): Spectral trace of object - fwhm (float): FWHM of the object - addrm (int): Flag to say if an object was been added (1), removed (-1), or was an auto found slit (0) + det (int): + Detector to add a slit on + pos_spat (float): + Spatial pixel position + pos_spec (float): + Spectral pixel position + trc_spat (`numpy.ndarray`_): + Spatial trace of object + trc_spec (`numpy.ndarray`_): + Spectral trace of object + fwhm (float): + FWHM of the object + addrm (int): + Flag to say if an object has been added (1), removed (-1), or + was an auto found slit (0) """ self._det.append(det) self._add_rm.append(addrm) @@ -402,7 +413,7 @@ def draw_callback(self, event): """Draw callback (i.e. everytime the canvas is being drawn/updated) Args: - event (Event): A matplotlib event instance + event (`matplotlib.backend_bases.Event`_): A matplotlib event instance """ # Get the background self.background = self.canvas.copy_from_bbox(self.axes['main'].bbox) @@ -412,7 +423,7 @@ def get_ind_under_point(self, event): """Get the index of the object trace closest to the cursor Args: - event (Event): Matplotlib event instance containing information about the event + event (`matplotlib.backend_bases.Event`_): Matplotlib event instance containing information about the event """ mindist = self._spatpos.shape[0]**2 self._obj_idx = -1 @@ -430,7 +441,7 @@ def get_axisID(self, event): """Get the ID of the axis where an event has occurred Args: - event (Event): Matplotlib event instance containing information about the event + event (`matplotlib.backend_bases.Event`_): Matplotlib event instance containing information about the event Returns: int, None: Axis where the event has occurred @@ -456,7 +467,7 @@ def button_press_callback(self, event): """What to do when the mouse button is pressed Args: - event (Event): Matplotlib event instance containing information about the event + event (`matplotlib.backend_bases.Event`_): Matplotlib event instance containing information about the event """ if event.inaxes is None: return @@ -475,7 +486,7 @@ def button_release_callback(self, event): """What to do when the mouse button is released Args: - event (Event): Matplotlib event instance containing information about the event + event (`matplotlib.backend_bases.Event`_): Matplotlib event instance containing information about the event """ if event.inaxes is None: return @@ -527,7 +538,7 @@ def key_press_callback(self, event): """What to do when a key is pressed Args: - event (Event): Matplotlib event instance containing information about the event + event (`matplotlib.backend_bases.Event`_): Matplotlib event instance containing information about the event """ # Check that the event is in an axis...
if not event.inaxes: @@ -859,11 +870,11 @@ def initialise(det, frame, left, right, obj_trace, trace_models, sobjs, slit_ids Args: det (int): 1-indexed detector number - frame (numpy.ndarray): + frame (`numpy.ndarray`_): Sky subtracted science image - left (numpy.ndarray): + left (`numpy.ndarray`_): Slit left edges - right (numpy.ndarray): + right (`numpy.ndarray`_): Slit right edges obj_trace (dict): Result of diff --git a/pypeit/core/gui/skysub_regions.py b/pypeit/core/gui/skysub_regions.py index 6835cfa557..26417eaa94 100644 --- a/pypeit/core/gui/skysub_regions.py +++ b/pypeit/core/gui/skysub_regions.py @@ -1,5 +1,8 @@ """ This script allows the user to manually select the sky background regions + +.. include:: ../include/links.rst + """ import os @@ -154,7 +157,7 @@ def initialize(cls, det, frame, slits, pypeline, spectrograph, outname="skyregio ---------- det : int Detector index - frame : ndarray + frame : `numpy.ndarray`_ Sky subtracted science image slits : :class:`~pypeit.slittrace.SlitTraceSet` Object with the image coordinates of the slit edges @@ -284,7 +287,7 @@ def button_regb(self, event): would select the first 10%, the inner 30%, and the final 20% of all slits Args: - event : Event + event : `matplotlib.backend_bases.Event`_ A matplotlib event instance """ self.update_infobox(message='Enter regions in the terminal (see terminal for help)', yesno=False) @@ -315,7 +318,7 @@ def button_cont(self, event): """What to do when the 'exit and save' button is clicked Args: - event : Event + event : `matplotlib.backend_bases.Event`_ A matplotlib event instance """ self._respreq = [True, "exit_update"] @@ -326,7 +329,7 @@ def button_exit(self, event): """What to do when the 'exit and do not save changes' button is clicked Args: - event : Event + event : `matplotlib.backend_bases.Event`_ A matplotlib event instance """ self._respreq = [True, "exit_restore"] @@ -373,7 +376,7 @@ def draw_callback(self, event): """Draw callback (i.e. everytime the canvas is being drawn/updated) Args: - event : Event + event : `matplotlib.backend_bases.Event`_ A matplotlib event instance """ # Get the background @@ -384,7 +387,7 @@ def get_current_slit(self, event): """Get the index of the slit closest to the cursor Args: - event : Event + event : `matplotlib.backend_bases.Event`_ Matplotlib event instance containing information about the event """ # Find the current slit @@ -401,7 +404,7 @@ def get_axisID(self, event): """Get the ID of the axis where an event has occurred Args: - event : Event + event : `matplotlib.backend_bases.Event`_ Matplotlib event instance containing information about the event Returns: @@ -427,7 +430,7 @@ def button_press_callback(self, event): """What to do when the mouse button is pressed Args: - event : Event + event : `matplotlib.backend_bases.Event`_ Matplotlib event instance containing information about the event """ if event.inaxes is None: @@ -449,7 +452,7 @@ def button_release_callback(self, event): """What to do when the mouse button is released Args: - event : Event + event : `matplotlib.backend_bases.Event`_ Matplotlib event instance containing information about the event """ if event.inaxes is None: @@ -494,7 +497,7 @@ def key_press_callback(self, event): """What to do when a key is pressed Args: - event : Event + event : `matplotlib.backend_bases.Event`_ Matplotlib event instance containing information about the event """ # Check that the event is in an axis... 
diff --git a/pypeit/core/meta.py b/pypeit/core/meta.py index 2c0b5cd814..a4eacf5f9b 100644 --- a/pypeit/core/meta.py +++ b/pypeit/core/meta.py @@ -1,6 +1,6 @@ """ -Provides methods common to :class:`pypeit.metadata.PypeItMetaData` and -:class:`pypeit.spectographs.spectrograph.Spectrograph` that define the +Provides methods common to :class:`~pypeit.metadata.PypeItMetaData` and +:class:`~pypeit.spectrographs.spectrograph.Spectrograph` that define the common metadata used for all spectrographs. .. include common links, assuming primary doc root is up one directory diff --git a/pypeit/core/parse.py b/pypeit/core/parse.py index 3d516ca555..89ef9f01fe 100644 --- a/pypeit/core/parse.py +++ b/pypeit/core/parse.py @@ -123,7 +123,7 @@ def parse_binning(binning:str): parsed directly from the Header. The developer needs to react accordingly.. Args: - binning (str, ndarray or tuple): + binning (str, `numpy.ndarray`_, tuple): Returns: tuple: binspectral, binspatial as integers diff --git a/pypeit/core/pca.py b/pypeit/core/pca.py index 4f94241a1d..9ee6dd4520 100644 --- a/pypeit/core/pca.py +++ b/pypeit/core/pca.py @@ -33,8 +33,7 @@ def pca_decomposition(vectors, npca=None, pca_explained_var=99.0, mean=None): This is a fully generalized convenience function for a specific use of `sklearn.decomposition.PCA`_. When used - within PypeIt, the vectors to decompose (see, e.g., - :class:`pypeit.edgetrace.EdgeTracePCA`) typically have the + within PypeIt, the vectors to decompose typically have the length of the spectral axis. This means that, within PypeIt, arrays are typically transposed when passed to this function. @@ -133,12 +132,12 @@ def fit_pca_coefficients(coeff, order, ivar=None, weights=None, function='legend The coefficients of each PCA component are fit by a low-order polynomial, where the abscissa is set by the `coo` argument (see - :func:`pypeit.fitting.robust_fit`). + :func:`~pypeit.core.fitting.robust_fit`). .. note:: This is a general function, not really specific to the PCA; and is really just a wrapper for - :func:`pypeit.fitting.robust_fit`. + :func:`~pypeit.core.fitting.robust_fit`. Args: coeff (`numpy.ndarray`_): @@ -157,7 +156,7 @@ def fit_pca_coefficients(coeff, order, ivar=None, weights=None, function='legend ivar (`numpy.ndarray`_, optional): Inverse variance in the PCA coefficients to use during the fit; see the `invvar` parameter of - :func:`pypeit.fitting.robust_fit`. If None, fit is + :func:`~pypeit.core.fitting.robust_fit`. If None, fit is not error weighted. If a vector with shape :math:`(N_{\rm vec},)`, the same error will be assumed for all PCA components (i.e., `ivar` will be expanded to match the @@ -166,7 +165,7 @@ def fit_pca_coefficients(coeff, order, ivar=None, weights=None, function='legend weights (`numpy.ndarray`_, optional): Weights to apply to the PCA coefficients during the fit; see the `weights` parameter of - :func:`pypeit.fitting.robust_fit`. If None, the + :func:`~pypeit.core.fitting.robust_fit`. If None, the weights are uniform. If a vector with shape :math:`(N_{\rm vec},)`, the same weights will be assumed for all PCA components (i.e., `weights` will be expanded @@ -177,14 +176,14 @@ def fit_pca_coefficients(coeff, order, ivar=None, weights=None, function='legend lower (:obj:`float`, optional): Number of standard deviations used for rejecting data **below** the mean residual. If None, no rejection is - performed. See :func:`fitting.robust_fit`. + performed. See :func:`~pypeit.core.fitting.robust_fit`. 
upper (:obj:`float`, optional): Number of standard deviations used for rejecting data **above** the mean residual. If None, no rejection is - performed. See :func:`fitting.robust_fit`. + performed. See :func:`~pypeit.core.fitting.robust_fit`. maxrej (:obj:`int`, optional): Maximum number of points to reject during fit iterations. - See :func:`fitting.robust_fit`. + See :func:`~pypeit.core.fitting.robust_fit`. maxiter (:obj:`int`, optional): Maximum number of rejection iterations allows. To force no rejection iterations, set to 0. @@ -197,7 +196,7 @@ def fit_pca_coefficients(coeff, order, ivar=None, weights=None, function='legend Minimum and maximum values used to rescale the independent axis data. If None, the minimum and maximum values of `coo` are used. See - :func:`fitting.robust_fit`. + :func:`~pypeit.core.fitting.robust_fit`. debug (:obj:`bool`, optional): Show plots useful for debugging. diff --git a/pypeit/core/pixels.py b/pypeit/core/pixels.py index c1d423021e..65b51db151 100644 --- a/pypeit/core/pixels.py +++ b/pypeit/core/pixels.py @@ -26,7 +26,7 @@ def phys_to_pix(array, pixlocn, axis): Returns ------- - pixarr : ndarray + pixarr : `numpy.ndarray`_ The pixel locations of the input array (as seen on a computer screen) """ if len(array.shape) > 2: diff --git a/pypeit/core/procimg.py b/pypeit/core/procimg.py index 98f8b7299f..6c356c1506 100644 --- a/pypeit/core/procimg.py +++ b/pypeit/core/procimg.py @@ -1084,7 +1084,7 @@ def trim_frame(frame, mask): `numpy.ndarray`_: Trimmed image Raises: - PypitError: + :class:`~pypeit.pypmsgs.PypeItError`: Error raised if the trimmed image includes masked values because the shape of the valid region is odd. """ diff --git a/pypeit/core/pydl.py b/pypeit/core/pydl.py index 9c402c019c..b7916dfbd4 100644 --- a/pypeit/core/pydl.py +++ b/pypeit/core/pydl.py @@ -325,7 +325,7 @@ class TraceSet(object): When initialized with x,y positions, this contains the fitted y values. pypeitFits : list - Holds a list of :class:`pypeit.fitting.PypeItFit` fits + Holds a list of :class:`~pypeit.core.fitting.PypeItFit` fits """ # ToDO Remove the kwargs and put in all the djs_reject parameters here def __init__(self, *args, **kwargs): diff --git a/pypeit/core/qa.py b/pypeit/core/qa.py index d50d818f4b..1929f9c5c2 100644 --- a/pypeit/core/qa.py +++ b/pypeit/core/qa.py @@ -1,4 +1,7 @@ """ Module for QA in PypeIt + +.. 
include:: ../include/links.rst + """ import os import datetime @@ -188,11 +191,12 @@ def html_header(title): return head def html_end(f, body, links=None): - """ Fill in the HTML file with a proper ending + """ + Fill in the HTML file with a proper ending Parameters ---------- - f : file + f : `io.TextIOWrapper`_ body : str links : str, optional @@ -222,8 +226,10 @@ def html_init(f, title): Initialize the HTML file Args: - f (fileobj): file object to write to - title (str): title + f (`io.TextIOWrapper`_): + file object to write to + title (str): + title Returns: str: Initial HTML text incluing the header and links @@ -473,11 +479,14 @@ def gen_exp_html(): def close_qa(pypeit_file, qa_path): - """Tie off QA under a crash + """ + Tie off QA under a crash Args: - pypeit_file (_type_): _description_ - qa_path (_type_): _description_ + pypeit_file (str): + PypeIt file name + qa_path (str): + Path to QA directory """ if pypeit_file is None: return diff --git a/pypeit/core/skysub.py b/pypeit/core/skysub.py index c7abc336e4..d33f2f5c84 100644 --- a/pypeit/core/skysub.py +++ b/pypeit/core/skysub.py @@ -573,7 +573,7 @@ def local_skysub_extract(sciimg, sciivar, tilts, waveimg, global_sky, thismask, slit_righ : `numpy.ndarray`_ Right slit boundary in floating point pixels. shape (nspec, 1) or (nspec) - sobjs : :class:`~pypeit.specobjs.SpecoObjs` object + sobjs : :class:`~pypeit.specobjs.SpecObjs` object Object containing the information about the objects found on the slit/order from objfind or ech_objfind ingpm : `numpy.ndarray`_, optional @@ -1080,7 +1080,7 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, Image identifying the 0-indexed order associated with each pixel. Pixels with -1 are not associatead with any order. - sobjs : :class:`~pypeit.specobjs.SpecoObjs` object + sobjs : :class:`~pypeit.specobjs.SpecObjs` object Object containing the information about the objects found on the slit/order from objfind or ech_objfind spat_pix: `numpy.ndarray`_, optional @@ -1167,14 +1167,16 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, (does not have the right count levels). In principle this could be improved if the user could pass in a model of what the sky is for near-IR difference imaging + residual subtraction - debug_bkpts: + debug_bkpts : bool, optional + debug show_profile : bool, default=False Show QA for the object profile fitting to the screen. Note that this will show interactive matplotlib plots which will block the execution of the code until the window is closed. show_resids : bool, optional Show the model fits and residuals. - show_fwhm: + show_fwhm : bool, optional + show fwhm adderr : float, default = 0.01 Error floor. The quantity adderr**2*sciframe**2 is added to the variance to ensure that the S/N is never > 1/adderr, effectively setting a floor @@ -1208,7 +1210,7 @@ def ech_local_skysub_extract(sciimg, sciivar, fullmask, tilts, waveimg, Model inverse variance where ``thismask`` is true. outmask : `numpy.ndarray`_ Model mask where ``thismask`` is true. - sobjs : :class:`~pypeit.specobjs.SpecoObjs` object + sobjs : :class:`~pypeit.specobjs.SpecObjs` object Same object as passed in """ @@ -1473,7 +1475,7 @@ def generate_mask(pypeline, skyreg, slits, slits_left, slits_right, spat_flexure skyreg : list A list of size nslits. Each element contains a numpy array (dtype=bool) where a True value indicates a value that is part of the sky region. 
- slits : :class:`SlitTraceSet` + slits : :class:`~pypeit.slittrace.SlitTraceSet` Data container with slit trace information slits_left : `numpy.ndarray`_ A 2D array containing the pixel coordinates of the left slit edges diff --git a/pypeit/core/telluric.py b/pypeit/core/telluric.py index ba702df74d..378e57ce81 100644 --- a/pypeit/core/telluric.py +++ b/pypeit/core/telluric.py @@ -578,7 +578,7 @@ def unpack_orders(sobjs, ret_flam=False): arrays necessary for telluric fitting. Args: - sobjs (obj): + sobjs (:class:`~pypeit.specobjs.SpecObjs`): SpecObjs object ret_flam (bool): If true return the FLAM, otherwise return COUNTS @@ -1239,15 +1239,15 @@ def sensfunc_telluric(wave, counts, counts_ivar, counts_mask, exptime, airmass, std_dict : :obj:`dict` Dictionary containing the information for the true flux of the standard star. - log10_blaze_function (`numpy.ndarray`_ , optional): + log10_blaze_function : `numpy.ndarray`_ , optional The log10 blaze function determined from a flat field image. If this is passed in the sensitivity function model will be a (parametric) polynomial fit multiplied into the (non-parametric) log10_blaze_function. Shape must match ``wave``, i.e. (nspec,) or (nspec, norddet). telgridfile : :obj:`str` - File containing grid of HITRAN atmosphere models. This file is given by - :func:`~pypeit.spectrographs.spectrograph.Spectrograph.telluric_grid_file`. + File containing grid of HITRAN atmosphere models; see + :class:`~pypeit.par.pypeitpar.TelluricPar`. ech_orders : `numpy.ndarray`_, shape is (norders,), optional If passed, provides the true order numbers for the spectra provided. polyorder : :obj:`int`, optional, default = 8 @@ -1856,8 +1856,8 @@ class Telluric(datamodel.DataContainer): Good pixel gpm for the object in question. Same shape as ``wave``. telgridfile (:obj:`str`): - File containing grid of HITRAN atmosphere models. This file is - given by :func:`~pypeit.spectrographs.spectrograph.Spectrograph.telluric_grid_file`. + File containing grid of HITRAN atmosphere models; see + :class:`~pypeit.par.pypeitpar.TelluricPar`. obj_params (:obj:`dict`): Dictionary of parameters for initializing the object model. init_obj_model (callable): @@ -1936,7 +1936,7 @@ class Telluric(datamodel.DataContainer): option, you will see that the f(x) loss function gets progressively better during the iterations. sticky (:obj:`bool`, optional): - Sticky parameter for the :func:`~pypeit.utils.djs_reject` + Sticky parameter for the :func:`~pypeit.core.pydl.djs_reject` algorithm for iterative model fit rejection. If True then points rejected from a previous iteration are kept rejected, in other words the bad pixel mask is the OR of all previous iterations and @@ -1961,7 +1961,7 @@ class Telluric(datamodel.DataContainer): into the formal errors. In this way, a rejection threshold of i.e. 3-sigma, will always correspond to roughly the same percentile. This renormalization is performed with - :func:`~pypeit.coadd1d.renormalize_errors`, and guarantees that + :func:`~pypeit.core.coadd.renormalize_errors`, and guarantees that rejection is not too agressive in cases where the empirical errors determined from the chi-distribution differ significantly from the formal noise, which is used to determine ``chi``. diff --git a/pypeit/core/trace.py b/pypeit/core/trace.py index e0bf0a67d6..fd7d03cbac 100644 --- a/pypeit/core/trace.py +++ b/pypeit/core/trace.py @@ -603,8 +603,7 @@ def follow_centroid(flux, start_row, start_cen, ivar=None, bpm=None, fwgt=None, Object used to flag the feature traces. 
If None, assessments use a boolean array to flag traces. If not None, errors will be raised if the object cannot - interpret the correct flag names defined. In addition to - flags used by :func:`_recenter_trace_row`, this function + interpret the correct flag names defined. This function uses the DISCONTINUOUS flag. Returns: @@ -911,10 +910,10 @@ def fit_trace(flux, trace_cen, order, ivar=None, bpm=None, trace_bpm=None, weigh maxdev (:obj:`float`, optional): If provided, reject points with `abs(data-model) > maxdev` during the fitting. If None, no points are - rejected. See :func:`pypeit.utils.robust_polyfit_djs`. + rejected. See :func:`~pypeit.core.fitting.robust_fit`. maxiter (:obj:`int`, optional): Maximum number of rejection iterations allowed during the - fitting. See :func:`pypeit.utils.robust_polyfit_djs`. + fitting. See :func:`~pypeit.core.fitting.robust_fit`. niter (:obj:`int`, optional): The number of iterations for this method; i.e., the number of times the two-step fitting algorithm described @@ -929,10 +928,10 @@ def fit_trace(flux, trace_cen, order, ivar=None, bpm=None, trace_bpm=None, weigh if `debug` is true for the plotting. Default is just a running number. xmin (:obj:`float`, optional): - Lower reference for robust_polyfit polynomial fitting. + Lower reference for robust_fit polynomial fitting. Default is to use zero xmax (:obj:`float`, optional): - Upper reference for robust_polyfit polynomial fitting. + Upper reference for robust_fit polynomial fitting. Default is to use the image size in nspec direction flavor (:obj:`str`, optional): Defines the type of fit performed. Only used by QA @@ -1300,11 +1299,11 @@ def peak_trace(flux, ivar=None, bpm=None, trace_map=None, extract_width=None, sm length of the detector. The tuple gives the minimum and maximum in the fraction of the full spectral length (nspec). If None, the full image is collapsed. - peak_thresh (:obj:`float, optional): + peak_thresh (:obj:`float`, optional): The threshold for detecting peaks in the image. See the ``input_thresh`` parameter for :func:`~pypeit.core.arc.detect_lines`. - peak_clip (:obj:`float, optional): + peak_clip (:obj:`float`, optional): Sigma-clipping threshold used to clip peaks with small values; no large values are clipped. If None, no clipping is performed. Generally, if the peak detection algorithm diff --git a/pypeit/core/tracewave.py b/pypeit/core/tracewave.py index 53e5ae6611..c30102afca 100644 --- a/pypeit/core/tracewave.py +++ b/pypeit/core/tracewave.py @@ -526,60 +526,72 @@ def trace_tilts(arcimg, lines_spec, lines_spat, thismask, slit_cen, inmask=None, max_badpix_frac=0.30, tcrude_nave=5, npca=2, coeff_npoly_pca=2, sigrej_pca=2.0, debug_pca=False, show_tracefits=False): """ - Use a PCA model to determine the best object (or slit edge) traces for echelle spectrographs. + Use a PCA model to determine the best object (or slit edge) traces for + echelle spectrographs. Parameters ---------- - arcimg: ndarray, float (nspec, nspat) + arcimg : `numpy.ndarray`_, float (nspec, nspat) Image of arc or sky that will be used for tracing tilts. - lines_spec: ndarray, float (nlines,) - Array containing arc line centroids along the center of the slit for each arc line that will be traced. This is - in pixels in image coordinates. - lines_spat: ndarray, float (nlines,) - Array contianing the spatial position of the center of the slit along which the arc was extracted. This is is in - pixels in image coordinates. 
- thismask: ndarray, boolean (nspec, nsapt) - Boolean mask image specifying the pixels which lie on the slit/order to search for objects on. - The convention is: True = on the slit/order, False = off the slit/order. This must be the same size as the arcimg. - inmask: float ndarray, default = None, optional + lines_spec : `numpy.ndarray`_, float (nlines,) + Array containing arc line centroids along the center of the slit for + each arc line that will be traced. This is in pixels in image + coordinates. + lines_spat : `numpy.ndarray`_, float (nlines,) + Array containing the spatial position of the center of the slit along + which the arc was extracted. This is in pixels in image coordinates. + thismask : `numpy.ndarray`_, boolean (nspec, nspat) + Boolean mask image specifying the pixels which lie on the slit/order to + search for objects on. The convention is: True = on the slit/order, + False = off the slit/order. This must be the same size as the arcimg. + inmask : float ndarray, default = None, optional Input mask image. - gauss: bool, default = False, optional - If true the code will trace the arc lines usign Gaussian weighted centroiding (trace_gweight) instead of the default, - which is flux weighted centroiding (trace_fweight) - fwhm: float, optional - Expected FWHM of the arc lines. - spat_order: int, default = None, optional - Order of the legendre polynomial that will be fit to the tilts. + gauss : bool, default = False, optional + If true the code will trace the arc lines using Gaussian weighted + centroiding (trace_gweight) instead of the default, which is flux + weighted centroiding (trace_fweight) + fwhm : float, optional + Expected FWHM of the arc lines. + spat_order : int, default = None, optional + Order of the legendre polynomial that will be fit to the tilts. maxdev_tracefit: float, default = 1.0, optional - Maximum absolute deviation for the arc tilt fits during iterative trace fitting expressed in units of the fwhm. - sigrej_trace: float, default = 3.0, optional - From each line we compute a median absolute deviation of the trace from the polynomial fit. We then - analyze the distribution of maximxum absolute deviations (MADs) for all the lines, and reject sigrej_trace outliers - from that distribution. + Maximum absolute deviation for the arc tilt fits during iterative trace + fitting expressed in units of the fwhm. + sigrej_trace : float, default = 3.0, optional + From each line we compute a median absolute deviation of the trace from + the polynomial fit. We then analyze the distribution of maximum + absolute deviations (MADs) for all the lines, and reject sigrej_trace + outliers from that distribution. max_badpix_frac: float, default = 0.30, optional - Maximum fraction of total pixels that can be masked by the trace_gweight algorithm - (because the residuals are too large) to still be usable for tilt fitting. - tcrude_nave: int, default = 5, optional - Trace crude is used to determine the initial arc line tilts, which are then iteratively fit. Trace crude - can optionally boxcar smooth the image (along the spatial direction of the image, i.e. roughly along the arc line tilts) - to improve the tracing. + Maximum fraction of total pixels that can be masked by the trace_gweight + algorithm (because the residuals are too large) to still be usable for + tilt fitting. + tcrude_nave : int, default = 5, optional + Trace crude is used to determine the initial arc line tilts, which are + then iteratively fit.
Trace crude can optionally boxcar smooth the image + (along the spatial direction of the image, i.e. roughly along the arc + line tilts) to improve the tracing. npca: int, default = 1, optional - Tilts are initially traced and then a PCA is performed. The PCA is used to determine better crutches for a second - round of improved tilt tracing. This parameter is the order of that PCA and determined how much the tilts behavior - is being compressed. npca = 0 would be just using the mean tilt. This PCA is only an intermediate step to - improve the crutches and is an attempt to make the tilt tracing that goes into the final fit more robust. + Tilts are initially traced and then a PCA is performed. The PCA is used + to determine better crutches for a second round of improved tilt + tracing. This parameter is the order of that PCA and determined how much + the tilts behavior is being compressed. npca = 0 would be just using the + mean tilt. This PCA is only an intermediate step to improve the crutches + and is an attempt to make the tilt tracing that goes into the final fit + more robust. coeff_npoly_pca: int, default = 1, optional - Order of polynomial fits used for PCA coefficients fitting for the PCA described above. + Order of polynomial fits used for PCA coefficients fitting for the PCA + described above. sigrej_pca: float, default = 2.0, optional - Significance threhsold for rejection of outliers from fits to PCA coefficients for the PCA described above. + Significance threhsold for rejection of outliers from fits to PCA + coefficients for the PCA described above. show_tracefits: bool, default = False, optional - If true the fits will be shown to each arc line trace by iter_fitting.py + If true the fits will be shown to each arc line trace by iter_fitting.py Returns ------- trace_tilts_dict : dict - See trace_tilts_work for a complete description - + See :func:`trace_tilts_work` for a complete description """ #show_tracefits = True #debug_pca = True @@ -646,7 +658,8 @@ def fit_tilts(trc_tilt_dict, thismask, slit_cen, spat_order=3, spec_order=4, max ---------- trc_tilt_dict: dict Diciontary containing tilt info - slitord_id (int): Slit ID, spatial; only used for QA + slitord_id : int + Slit ID, spatial; only used for QA all_tilts: order: yorder: @@ -844,11 +857,11 @@ def fit2tilts(shape, coeff2, func2d, spat_shift=None): Parameters ---------- - shape: tuple of ints, + shape : tuple of ints, shape of image - coeff2: ndarray, float + coeff2 : `numpy.ndarray`_, float result of griddata tilt fit - func2d: str + func2d : str the 2d function used to fit the tilts spat_shift : float, optional Spatial shift to be added to image pixels before evaluation @@ -857,7 +870,7 @@ def fit2tilts(shape, coeff2, func2d, spat_shift=None): Returns ------- - tilts: ndarray, float + tilts : `numpy.ndarray`_, float Image indicating how spectral pixel locations move across the image. This output is used in the pipeline. diff --git a/pypeit/core/transform.py b/pypeit/core/transform.py index b7771f0771..d1b916b697 100644 --- a/pypeit/core/transform.py +++ b/pypeit/core/transform.py @@ -158,7 +158,7 @@ def coordinate_transform_2d(coo, matrix, inverse=False): second column (``coo[:,1]``). matrix (`numpy.ndarray`_): The :math:3\times 3` affine-transformation matrix. See - :func:`~pypeit.core.mosaic.affine_transform_matrix`. + :func:`affine_transform_matrix`. 
inverse (:obj:`bool`, optional): By default, the function performs the *active* transformation; i.e., applying the transformation to the coordinates, moving them within diff --git a/pypeit/core/wave.py b/pypeit/core/wave.py index a8122bba21..bd623d5711 100644 --- a/pypeit/core/wave.py +++ b/pypeit/core/wave.py @@ -29,7 +29,7 @@ def geomotion_calculate(radec, time, longitude, latitude, elevation, refframe): Args: radec (`astropy.coordinates.SkyCoord`_): RA, DEC of source - time (:obj:`astropy.time.Time`): + time (`astropy.time.Time`_): Time of observation longitude (float): Telescope longitude in deg @@ -56,7 +56,7 @@ def geomotion_correct(radec, time, longitude, latitude, elevation, refframe): Args: radec (`astropy.coordinates.SkyCoord`_): RA, DEC of source - time (:obj:`astropy.time.Time`): + time (`astropy.time.Time`_): Time of observation gd_slitord (`numpy.ndarray`_): Array of good slit/order IDs diff --git a/pypeit/core/wavecal/autoid.py b/pypeit/core/wavecal/autoid.py index cf2c3c1d9c..887315cc30 100644 --- a/pypeit/core/wavecal/autoid.py +++ b/pypeit/core/wavecal/autoid.py @@ -814,7 +814,7 @@ def set_fwhm(par, measured_fwhm=None): and the measured_fwhm Args: - par (:class:`~pypeit.par.pypeitpar.WaveSolutionPar`): + par (:class:`~pypeit.par.pypeitpar.WavelengthSolutionPar`): Key parameters that drive the behavior of the wavelength-solution algorithms. measured_fwhm (:obj:`float`): @@ -1065,7 +1065,7 @@ def echelle_wvcalib(spec, orders, spec_arxiv, wave_arxiv, lamps, par, lamps : :obj:`list` List of arc lamps to be used for wavelength calibration. E.g., ['ArI','NeI','KrI','XeI'] - par : :class:`~pypeit.par.pypeitpar.WaveSolutionPar` + par : :class:`~pypeit.par.pypeitpar.WavelengthSolutionPar` Key parameters that drive the behavior of the wavelength-solution algorithms. ok_mask : `numpy.ndarray`, optional @@ -1289,7 +1289,7 @@ class ArchiveReid: lamps : :obj:`list` List of arc lamps to be used for wavelength calibration. E.g., ['ArI','NeI','KrI','XeI'] - par : :class:`~pypeit.par.pypeitpar.WaveSolutionPar` + par : :class:`~pypeit.par.pypeitpar.WavelengthSolutionPar` Key parameters that drive the behavior of the wavelength-solution algorithms. ech_fixed_format: bool @@ -2396,22 +2396,26 @@ def get_use_tcent_old(self, corr, cut=True, arr_err=None, weak=False): """ Grab the lines to use - Args: - corr: int - Set if pixels correlate with wavelength (corr==1) or - anticorrelate (corr=-1) - arr_err: - A list [tcent, ecent] indicating which detection list - should be used. Note that if arr_err is set then the - weak keyword is ignored. - weak: bool, optional - If True, return the weak lines - cut: bool, optional - Cut on the lines according to significance - - Returns: - tuple: arr, err - + Parameters + ---------- + corr : int + Set if pixels correlate with wavelength (corr==1) or + anticorrelate (corr=-1) + cut: bool, optional + Cut on the lines according to significance + arr_err : list, optional + A list [tcent, ecent] indicating which detection list + should be used. Note that if arr_err is set then the + weak keyword is ignored. + weak: bool, optional + If True, return the weak lines + + Returns + ------- + arr : `numpy.ndarray` + ??? + err : `numpy.ndarray` + ??? 
""" # Decide which array to use if arr_err is None: @@ -2439,17 +2443,22 @@ def get_use_tcent(self, corr, tcent_ecent): """ Grab the lines to use - Args: - corr: int - Set if pixels correlate with wavelength (corr==1) or - anticorrelate (corr=-1) - tcent_ecent: - A list [tcent, ecent] indicating which detection list - should be used. Note that if arr_err is set then the weak - keyword is ignored. - - Returns: - tuple: arr, err + Parameters + ---------- + corr : int + Set if pixels correlate with wavelength (corr==1) or + anticorrelate (corr=-1) + tcent_ecent : list + A list [tcent, ecent] indicating which detection list + should be used. Note that if arr_err is set then the weak + keyword is ignored. + + Returns + ------- + arr : `numpy.ndarray` + ??? + err : `numpy.ndarray` + ??? """ # Return the appropriate tcent tcent, ecent = tcent_ecent[0], tcent_ecent[1] @@ -2466,7 +2475,8 @@ def results_brute(self, tcent_ecent, poly=3, pix_tol=0.5, detsrch=5, lstsrch=5, Parameters ---------- - tcent_ecent: list of ndarrays, [tcent, ecent] + tcent_ecent: list + List of `numpy.ndarray` objects, [tcent, ecent] poly, optional: algorithms to use for pattern matching. Only triangles (3) and quadrangles (4) are supported @@ -2478,7 +2488,9 @@ def results_brute(self, tcent_ecent, poly=3, pix_tol=0.5, detsrch=5, lstsrch=5, lstsrch, optional: Number of lines to search over for the detected lines wavedata, optional: + ??? arrerr, optional: + ??? """ # Import the pattern matching algorithms @@ -2548,22 +2560,25 @@ def solve_slit(self, slit, psols, msols, tcent_ecent, nstore=1, nselw=3, nseld=3 and log10(disp). Then it attempts to fit each value determined (default of 1) to try to figure out if it is a reasonable fit. - Args: - slit: - psols: - msols: - tcent_ecent: list, [tcent, ecent] - nstore: - Number of pattern matches to store and fit - nselw: - All solutions around the best central wavelength - solution within +- nselw are selected to be fit - nseld: - All solutions around the best log10(dispersion) solution - within +- nseld are selected to be fit - - Returns: - tuple: patt_dict, final_dict + Parameters + ---------- + slit: + psols: + msols: + tcent_ecent: list, [tcent, ecent] + nstore: + Number of pattern matches to store and fit + nselw: + All solutions around the best central wavelength + solution within +- nselw are selected to be fit + nseld: + All solutions around the best log10(dispersion) solution + within +- nseld are selected to be fit + + Returns + ------- + patt_dict: + final_dict: """ # Extract the solutions @@ -2713,8 +2728,7 @@ def solve_patterns(self, slit, bestlist, tcent_ecent): A list [tcent, ecent] indicating which detection list should be used. Note that if arr_err is set then the weak keyword is ignored. Returns: - patt_dict (dict): - Dictionary containing information about the best patterns. + dict: Dictionary containing information about the best patterns. """ diff --git a/pypeit/core/wavecal/echelle.py b/pypeit/core/wavecal/echelle.py index d67c364da5..8c08c1a4b9 100644 --- a/pypeit/core/wavecal/echelle.py +++ b/pypeit/core/wavecal/echelle.py @@ -24,7 +24,7 @@ def predict_ech_order_coverage(angle_fits_params, xd_angle_coeffs, fits of the reddest order as a function of xdangle. 
Args: - angle_fits_params (astropy.table.Table): + angle_fits_params (`astropy.table.Table`_): Table holding the arxiv parameters xd_angle_coeffs Table holding the arxiv data @@ -57,14 +57,14 @@ def predict_ech_wave_soln(angle_fits_params, ech_angle_coeffs, ech_angle, order_ wavelength solution coefficients vs echelle angle at the given echelle angle. Args: - angle_fits_params (astropy.table.Table): + angle_fits_params (`astropy.table.Table`_): Table holding the parameters governing the echelle angle fits - ech_angle_coeffs (numpy.ndarray): + ech_angle_coeffs (`numpy.ndarray`_): Array holding the polynomial coefficients for the fits of the wavelength solution polynomial coefficients vs echelle angle. ech_angle (float): Echelle angle - order_vec (numpy.ndarray): + order_vec (`numpy.ndarray`_): Array of order numbers for the deisred predicted spectrum. Shape = (norders,) nspec (int): Number of spectral pixels in the echelle spectrum diff --git a/pypeit/core/wavecal/templates.py b/pypeit/core/wavecal/templates.py index a7aa50e20e..bc856b73c8 100644 --- a/pypeit/core/wavecal/templates.py +++ b/pypeit/core/wavecal/templates.py @@ -242,7 +242,7 @@ def pypeit_arcspec(in_file, slit, binspec, binning=None): slit index Returns: - tuple: np.ndarray, np.ndarray, PypeItFit: wave, flux, pypeitFitting + tuple: `numpy.ndarray`_, `numpy.ndarray`_, PypeItFit: wave, flux, pypeitFitting """ if '.json' in in_file: @@ -351,12 +351,12 @@ def poly_val(coeff, x, nrm): IDL style function for polynomial Args: - coeff (np.ndarray): Polynomial coefficients - x (np.ndarray): x array - nrm (np.ndarray): Normalization terms + coeff (`numpy.ndarray`_): Polynomial coefficients + x (`numpy.ndarray`_): x array + nrm (`numpy.ndarray`_): Normalization terms Returns: - np.ndarray: Same shape as x + `numpy.ndarray`_: Same shape as x """ # diff --git a/pypeit/core/wavecal/waveio.py b/pypeit/core/wavecal/waveio.py index 2e524543cb..671c0590be 100644 --- a/pypeit/core/wavecal/waveio.py +++ b/pypeit/core/wavecal/waveio.py @@ -1,4 +1,7 @@ """ Module for I/O in arclines + +.. include:: ../include/links.rst + """ import pathlib @@ -51,17 +54,21 @@ def load_template(arxiv_file, det, wvrng=None): """ Load a full template file from disk - Args: - arxiv_file: str - det: int - wvrng (list, optional): - min, max wavelength range for the arxiv - + Parameters + ---------- + arxiv_file : str + ??? + det : int + ??? + wvrng : list, optional + min, max wavelength range for the arxiv - Returns: - wave: ndarray - flux: ndarray - binning: int, Of the template arc spectrum + Returns + ------- + wave : ndarray + flux : ndarray + binning : int + binning of the template arc spectrum """ # Path already included? @@ -148,7 +155,7 @@ def load_line_list(line_file, use_ion=False): Returns ------- - line_list : Table + line_list : `astropy.table.Table`_ """ line_file = data.get_linelist_filepath(f'{line_file}_lines.dat') if use_ion else \ @@ -172,7 +179,7 @@ def load_line_lists(lamps, unknown=False, all=False, restrict_on_instr=None): Returns ------- - line_list : Table + line_list : `astropy.table.Table`_ """ # All? 
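The line-list loaders above all return `astropy.table.Table`_ objects. A minimal usage sketch, using the same example lamp list quoted elsewhere in these docstrings:

.. code-block:: python

    from pypeit.core.wavecal import waveio

    # Combined line list for a set of arc lamps; one row per catalogued line.
    line_list = waveio.load_line_lists(['ArI', 'NeI', 'KrI', 'XeI'])
    print(len(line_list), line_list.colnames)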
@@ -279,7 +286,7 @@ def load_unknown_list(lines, unknwn_file=None, all=False): Returns ------- - unknwn_lines : Table + unknwn_lines : `astropy.table.Table`_ """ line_dict = defs.lines() diff --git a/pypeit/core/wavecal/wv_fitting.py b/pypeit/core/wavecal/wv_fitting.py index b2d8f88f0d..3d25b06900 100644 --- a/pypeit/core/wavecal/wv_fitting.py +++ b/pypeit/core/wavecal/wv_fitting.py @@ -178,45 +178,45 @@ def fit_slit(spec, patt_dict, tcent, line_lists, vel_tol = 1.0, outroot=None, sl Parameters ---------- spec : ndarray - arc spectrum + arc spectrum patt_dict : dict - dictionary of patterns + dictionary of patterns tcent: ndarray - List of the detections in this slit to be fit using the patt_dict - line_lists: astropy Table - Table containing the line list - Optional Parameters - ------------------- + List of the detections in this slit to be fit using the patt_dict + line_lists: `astropy.table.Table`_ + Table containing the line list vel_tol: float, default = 1.0 - Tolerance in km/s for matching lines in the IDs to lines in the NIST database. The default is 1.0 km/s + Tolerance in km/s for matching lines in the IDs to lines in the NIST + database. The default is 1.0 km/s outroot: str - Path for QA file. + Path for QA file. slittxt : str - Label used for QA + Label used for QA thar: bool, default = False - True if this is a ThAr fit + True if this is a ThAr fit match_toler: float, default = 3.0 - Matching tolerance when searching for new lines. This is the difference in pixels between the wavlength assigned to - an arc line by an iteration of the wavelength solution to the wavelength in the line list. + Matching tolerance when searching for new lines. This is the difference + in pixels between the wavlength assigned to an arc line by an iteration + of the wavelength solution to the wavelength in the line list. func: str, default = 'legendre' - Name of function used for the wavelength solution + Name of function used for the wavelength solution n_first: int, default = 2 - Order of first guess to the wavelength solution. + Order of first guess to the wavelength solution. sigrej_first: float, default = 2.0 - Number of sigma for rejection for the first guess to the wavelength solution. + Number of sigma for rejection for the first guess to the wavelength solution. n_final: int, default = 4 - Order of the final wavelength solution fit + Order of the final wavelength solution fit sigrej_final: float, default = 3.0 - Number of sigma for rejection for the final fit to the wavelength solution. + Number of sigma for rejection for the final fit to the wavelength solution. verbose : bool - If True, print out more information. + If True, print out more information. plot_fil: - Filename for plotting some QA? + Filename for plotting some QA? Returns ------- final_fit : dict - A dictionary containing all of the information about the fit + A dictionary containing all of the information about the fit """ # Check that patt_dict and tcent refer to each other @@ -264,47 +264,47 @@ def iterative_fitting(spec, tcent, ifit, IDs, llist, disp, Parameters ---------- spec : ndarray, shape = (nspec,) - arcline spectrum + arcline spectrum tcent : ndarray - Centroids in pixels of lines identified in spec + Centroids in pixels of lines identified in spec ifit : ndarray - Indices of the lines that will be fit + Indices of the lines that will be fit IDs: ndarray - wavelength IDs of the lines that will be fit (I think?) + wavelength IDs of the lines that will be fit (I think?) 
llist: dict - Linelist dictionary + Linelist dictionary disp: float - dispersion - - Optional Parameters - ------------------- + dispersion match_toler: float, default = 3.0 - Matching tolerance when searching for new lines. This is the difference in pixels between the wavlength assigned to - an arc line by an iteration of the wavelength solution to the wavelength in the line list. + Matching tolerance when searching for new lines. This is the difference + in pixels between the wavlength assigned to an arc line by an iteration + of the wavelength solution to the wavelength in the line list. func: str, default = 'legendre' - Name of function used for the wavelength solution + Name of function used for the wavelength solution n_first: int, default = 2 - Order of first guess to the wavelength solution. + Order of first guess to the wavelength solution. sigrej_first: float, default = 2.0 - Number of sigma for rejection for the first guess to the wavelength solution. + Number of sigma for rejection for the first guess to the wavelength solution. n_final: int, default = 4 - Order of the final wavelength solution fit + Order of the final wavelength solution fit sigrej_final: float, default = 3.0 - Number of sigma for rejection for the final fit to the wavelength solution. + Number of sigma for rejection for the final fit to the wavelength + solution. input_only: bool - If True, the routine will only perform a robust polyfit to the input IDs. - If False, the routine will fit the input IDs, and then include additional - lines in the linelist that are a satisfactory fit. + If True, the routine will only perform a robust polyfit to the input + IDs. If False, the routine will fit the input IDs, and then include + additional lines in the linelist that are a satisfactory fit. weights: ndarray - Weights to be used? + Weights to be used? verbose : bool - If True, print out more information. + If True, print out more information. plot_fil: - Filename for plotting some QA? + Filename for plotting some QA? Returns ------- final_fit: :class:`pypeit.core.wavecal.wv_fitting.WaveFit` + Fit result """ #TODO JFH add error checking here to ensure that IDs and ifit have the same size! diff --git a/pypeit/core/wavecal/wvutils.py b/pypeit/core/wavecal/wvutils.py index 21cf826e19..365e2215e8 100644 --- a/pypeit/core/wavecal/wvutils.py +++ b/pypeit/core/wavecal/wvutils.py @@ -426,9 +426,9 @@ def zerolag_shift_stretch(theta, y1, y2): def get_xcorr_arc(inspec1, sigdetect=5.0, sig_ceil=10.0, percent_ceil=50.0, use_raw_arc=False, fwhm = 4.0, debug=False): - - """ Utility routine to create a synthetic arc spectrum for cross-correlation using the location of the peaks in - the input spectrum. + """ + Utility routine to create a synthetic arc spectrum for cross-correlation + using the location of the peaks in the input spectrum. Args: inspec1 (`numpy.ndarray`_): @@ -577,12 +577,18 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use shift_mnmx=(-0.2,0.2), stretch_mnmx=(0.95,1.05), sigdetect = 5.0, sig_ceil=10.0, fwhm = 4.0, debug=False, toler=1e-5, seed = None): - """ Determine the shift and stretch of inspec2 relative to inspec1. This routine computes an initial - guess for the shift via maximimizing the cross-correlation. It then performs a two parameter search for the shift and stretch - by optimizing the zero lag cross-correlation between the inspec1 and the transformed inspec2 (shifted and stretched via - wvutils.shift_and_stretch()) in a narrow window about the initial estimated shift. 
The convention for the shift is that - positive shift means inspec2 is shifted to the right (higher pixel values) relative to inspec1. The convention for the stretch is - that it is float near unity that increases the size of the inspec2 relative to the original size (which is the size of inspec1) + """ + Determine the shift and stretch of inspec2 relative to inspec1. This + routine computes an initial guess for the shift via maximimizing the + cross-correlation. It then performs a two parameter search for the shift and + stretch by optimizing the zero lag cross-correlation between the inspec1 and + the transformed inspec2 (shifted and stretched via + wvutils.shift_and_stretch()) in a narrow window about the initial estimated + shift. The convention for the shift is that positive shift means inspec2 is + shifted to the right (higher pixel values) relative to inspec1. The + convention for the stretch is that it is float near unity that increases the + size of the inspec2 relative to the original size (which is the size of + inspec1) Parameters ---------- @@ -630,10 +636,10 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use seed: int or np.random.RandomState, optional, default = None Seed for scipy.optimize.differential_evolution optimizer. If not specified, the calculation will not be repeatable - toler (float): + toler : float Tolerance for differential evolution optimizaiton. debug = False - Show plots to the screen useful for debugging. + Show plots to the screen useful for debugging. Returns ------- @@ -642,7 +648,9 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use - success = 1, shift and stretch performed via sucessful optimization + - success = 0, shift and stretch optimization failed + - success = -1, initial x-correlation is below cc_thresh (see above), so shift/stretch optimization was not attempted @@ -660,12 +668,12 @@ def xcorr_shift_stretch(inspec1, inspec2, cc_thresh=-1.0, percent_ceil=50.0, use which unity indicating a perfect match between the two spectra. If cc_thresh is set, and the initial cross-correlation is < cc_thresh, this will be just the initial cross-correlation - shift_init: + shift_init: float The initial shift determined by maximizing the cross-correlation coefficient without allowing for a stretch. If cc_thresh is set, and the initial cross-correlation is < cc_thresh, this will be just the shift from the initial cross-correlation - cross_corr_init: + cross_corr_init: float The maximum of the initial cross-correlation coefficient determined without allowing for a stretch. If cc_thresh is set, and the initial cross-correlation is < cc_thresh, this will be diff --git a/pypeit/data/utils.py b/pypeit/data/utils.py index 7b20b34da8..8f4ee77b86 100644 --- a/pypeit/data/utils.py +++ b/pypeit/data/utils.py @@ -26,7 +26,7 @@ rather than pure strings, with all of the functionality therein contained. Most (by number) of the package data files here are distributed with the -``PypeIt`` package and are accessed via the :class:`~pypeit.data.Paths` +``PypeIt`` package and are accessed via the :class:`~pypeit.data.utils.Paths` class. For instance, the NIR spectrophotometry for Vega is accessed via: .. code-block:: python @@ -65,7 +65,7 @@ If new package-included data are added that are not very large (total directory size < a few MB), it is not necessary to use the AstroPy cache/download system. 
In this case, simply add the directory path to the -:class:`~pypeit.data.Paths` class and access the enclosed files similarly +:class:`~pypeit.data.utils.Paths` class and access the enclosed files similarly to the Vega example above. .. include:: ../include/links.rst @@ -338,7 +338,7 @@ def get_sensfunc_filepath(sensfunc_file: str, symlink_in_pkgdir=False) -> pathli pointing to the cached downloaded file. Defaults to False. Returns: - :obj:`pthlib.Path`: The full path to the ``sensfunc`` file + :obj:`pathlib.Path`: The full path to the ``sensfunc`` file """ # Full path within the package data structure: sensfunc_path = Paths.sensfuncs / sensfunc_file @@ -743,7 +743,7 @@ def load_telluric_grid(filename: str): The filename (NO PATH) of the telluric atmospheric grid to use. Returns: - (:obj:`astropy.io.fits.HDUList`): Telluric Grid FITS HDU list + (`astropy.io.fits.HDUList`_): Telluric Grid FITS HDU list """ # Check for existance of file parameter if not filename: @@ -774,7 +774,7 @@ def load_thar_spec(): The filename (NO PATH) of the telluric atmospheric grid to use. Returns: - (:obj:`astropy.io.fits.HDUList`): ThAr Spectrum FITS HDU list + (`astropy.io.fits.HDUList`_): ThAr Spectrum FITS HDU list """ return io.fits_open(Paths.arclines / 'thar_spec_MM201006.fits') @@ -793,6 +793,6 @@ def load_sky_spectrum(sky_file): The filename (NO PATH) of the sky file to use. Returns: - (:obj:`XSpectrum1D`): Sky spectrum + (`linetools.spectra.xspectrum1d.XSpectrum1D`_): Sky spectrum """ return xspectrum1d.XSpectrum1D.from_file(str(Paths.sky_spec / sky_file)) diff --git a/pypeit/datamodel.py b/pypeit/datamodel.py index 550ac3feda..c922478d2c 100644 --- a/pypeit/datamodel.py +++ b/pypeit/datamodel.py @@ -17,13 +17,13 @@ - Define a class attribute called ``datamodel``. See the examples below for their format. - - Provide an :func:`__init__` method that defines the + - Provide an ``__init__`` method that defines the instantiation calling sequence and passes the relevant dictionary to this base-class instantiation. - - Provide a :func:`_validate` method, if necessary, that - processes the data provided in the `__init__` into a complete + - Provide a ``_validate`` method, if necessary, that + processes the data provided in the ``__init__`` into a complete instantiation of the object. This method and the - :func:`__init__` method are the *only* places where attributes + ``__init__`` method are the *only* places where attributes can be added to the class. - Provide a :func:`_bundle` method that reorganizes the datamodel into partitions that can be written to one or more fits @@ -39,7 +39,7 @@ The attributes of the class are *not required* to be a part of the ``datamodel``; however, it makes the object simpler when they are. Any attributes that are not part of the ``datamodel`` must - be defined in either the :func:`__init__` or :func:`_validate` + be defined in either the ``__init__`` or ``_validate`` methods; otherwise, the class with throw an ``AttributeError``. Here are some examples of how to and how not to use them. 
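As a compact illustration of the pattern just described, and as a lead-in to the examples that follow, here is a hedged sketch of a minimal subclass. The ``otype``/``atype``/``descr`` datamodel keys follow the usual PypeIt convention; the class itself is purely illustrative and not part of the package.

.. code-block:: python

    import numpy as np
    from pypeit.datamodel import DataContainer

    class MinimalContainer(DataContainer):
        """Illustrative DataContainer subclass with a one-element datamodel."""
        version = '1.0.0'
        datamodel = {'arr': dict(otype=np.ndarray, atype=float, descr='Example data')}

        def __init__(self, arr=None):
            # Pass the datamodel elements to the base class as a dictionary.
            super().__init__(d=dict(arr=arr))

    data = MinimalContainer(arr=np.arange(5, dtype=float))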
@@ -424,7 +424,7 @@ def _validate(self): - The following instantiation is fine because ``DubiousInitContainer`` handles the fact that some of the - arguments to :func:`__init__` are not part of the datamodel:: + arguments to ``__init__`` are not part of the datamodel:: data = DubiousInitContainer(x,y) print(np.array_equal(data.out, data.inp1+data.inp2)) @@ -493,13 +493,13 @@ class DataContainer: Derived classes must do the following: - Define a datamodel - - Provide an :func:`__init__` method that defines the + - Provide an ``__init__`` method that defines the instantiation calling sequence and passes the relevant dictionary to this base-class instantiation. - - Provide a :func:`_validate` method, if necessary, that - processes the data provided in the `__init__` into a + - Provide a ``_validate`` method, if necessary, that + processes the data provided in the ``__init__`` into a complete instantiation of the object. This method and the - :func:`__init__` method are the *only* places where + ``__init__`` method are the *only* places where attributes can be added to the class. - Provide a :func:`_bundle` method that reorganizes the datamodel into partitions that can be written to one or @@ -515,8 +515,8 @@ class DataContainer: The attributes of the class are *not required* to be a part of the ``datamodel``; however, it makes the object simpler when they are. Any attributes that are not part of the - ``datamodel`` must be defined in either the :func:`__init__` - or :func:`_validate` methods; otherwise, the class with throw + ``datamodel`` must be defined in either the ``__init__`` + or ``_validate`` methods; otherwise, the class with throw an ``AttributeError``. .. todo:: @@ -528,8 +528,8 @@ class DataContainer: Dictionary to copy to the internal attribute dictionary. All of the keys in the dictionary *must* be elements of the ``datamodel``. Any attributes that are not part of - the ``datamodel`` can be set in the :func:`__init__` or - :func:`_validate` methods of a derived class. If None, + the ``datamodel`` can be set in the ``__init__`` or + ``_validate`` methods of a derived class. If None, the object is instantiated with all of the relevant data model attributes but with all of those attributes set to None. diff --git a/pypeit/deprecated/datacube.py b/pypeit/deprecated/datacube.py index 3cd6bdc123..56b3c1cc27 100644 --- a/pypeit/deprecated/datacube.py +++ b/pypeit/deprecated/datacube.py @@ -20,7 +20,7 @@ def generate_cube_resample(outfile, frame_wcs, slits, fluximg, ivarimg, raimg, d Args: outfile (`str`): Filename to be used to save the datacube - frame_wcs (`astropy.wcs.wcs.WCS`_): + frame_wcs (`astropy.wcs.WCS`_): World coordinate system for this frame. slits (:class:`pypeit.slittrace.SlitTraceSet`_) Information stored about the slits @@ -47,7 +47,7 @@ def generate_cube_resample(outfile, frame_wcs, slits, fluximg, ivarimg, raimg, d when evaluating the voxel geometry in detector coordinates overwrite (bool, optional): If the output file exists, it will be overwritten if this parameter is True. - output_wcs (`astropy.wcs.wcs.WCS`_, optional): + output_wcs (`astropy.wcs.WCS`_, optional): World coordinate system for the output datacube. If None, frame_wcs will be used. 
blaze_wave (`numpy.ndarray`_, optional): Wavelength array of the spectral blaze function diff --git a/pypeit/deprecated/orig_specobjs.py b/pypeit/deprecated/orig_specobjs.py index f14f9ab5b0..cf394c4969 100644 --- a/pypeit/deprecated/orig_specobjs.py +++ b/pypeit/deprecated/orig_specobjs.py @@ -380,7 +380,7 @@ def nobj(self): def get_std(self): """ - Return the standard star from this Specobjs. For MultiSlit this + Return the standard star from this SpecObjs. For MultiSlit this will be a single specobj in SpecObjs container, for Echelle it will be the standard for all the orders. diff --git a/pypeit/scripts/ql_multislit.py b/pypeit/deprecated/ql_multislit.py similarity index 100% rename from pypeit/scripts/ql_multislit.py rename to pypeit/deprecated/ql_multislit.py diff --git a/pypeit/display/display.py b/pypeit/display/display.py index 03da4a7401..6d3b574199 100644 --- a/pypeit/display/display.py +++ b/pypeit/display/display.py @@ -43,7 +43,7 @@ def connect_to_ginga(host='localhost', port=9000, raise_err=False, allow_new=Fal viewer if one is not already running. Returns: - RemoteClient: connection to ginga viewer. + `ginga.RemoteClient`_: connection to ginga viewer. """ # Start viewer = grc.RemoteClient(host, port) @@ -120,8 +120,8 @@ def show_image(inp, chname='Image', waveimg=None, mask=None, exten=0, cuts=None, image in other channels to it. Returns: - ginga.util.grc.RemoteClient, ginga.util.grc._channel_proxy: The - ginga remote client and the channel with the displayed image. + :obj:`tuple`: The ginga remote client object and the channel object with + the displayed image. Raises: ValueError: @@ -253,21 +253,21 @@ def show_points(viewer, ch, spec, spat, color='cyan', legend=None, legend_spec=N Parameters ---------- - viewer (ginga.util.grc.RemoteClient): + viewer : `ginga.RemoteClient`_ Ginga RC viewer - ch (ginga.util.grc._channel_proxy): + ch : ``ginga.util.grc._channel_proxy`` Ginga channel - spec (list): + spec : list List of spectral positions on image to plot - spat (list): + spat : list List of spatial positions on image to plot - color (str): + color : str Color for points - legend (str): - Label for a legeng - legend_spec (float): + legend : str + Label for a legend + legend_spec : float Spectral pixel loation for legend - legend_spat (float): + legend_spat : float Pixel loation for legend """ @@ -291,9 +291,9 @@ def show_slits(viewer, ch, left, right, slit_ids=None, left_ids=None, right_ids= Overplot slits on the image in Ginga in the given channel Args: - viewer (ginga.util.grc.RemoteClient): + viewer (`ginga.RemoteClient`_): Ginga RC viewer - ch (ginga.util.grc._channel_proxy): + ch (``ginga.util.grc._channel_proxy``): Ginga channel left (`numpy.ndarray`_): Array with spatial position of left slit edges. Shape must be :math:`(N_{\rm @@ -447,9 +447,9 @@ def show_trace(viewer, ch, trace, trc_name=None, maskdef_extr=None, manual_extr= r""" Args: - viewer (ginga.util.grc.RemoteClient): + viewer (`ginga.RemoteClient`_): Ginga RC viewer. - ch (ginga.util.grc._channel_proxy): + ch (``ginga.util.grc._channel_proxy``): Ginga channel. trace (`numpy.ndarray`_): Array with spatial position of the object traces on the detector. 
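A short, hedged sketch of how these display helpers chain together, assuming a ginga viewer can be reached or started locally (``allow_new=True``); the image and the single slit trace are synthetic placeholders.

.. code-block:: python

    import numpy as np
    from pypeit.display import display

    # Connect to (or start) a ginga viewer, then display an image.
    display.connect_to_ginga(allow_new=True)
    img = np.random.rand(2048, 1024)
    viewer, ch = display.show_image(img, chname='Image')

    # Overplot one synthetic slit; left/right have shape (nspec, nslits).
    left = np.full((2048, 1), 100.0)
    right = np.full((2048, 1), 400.0)
    display.show_slits(viewer, ch, left, right)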
@@ -549,9 +549,9 @@ def show_tilts(viewer, ch, tilt_traces, yoff=0., xoff=0., points=True, nspec=Non Show the arc tilts on the input channel Args: - viewer (ginga.util.grc.RemoteClient): + viewer (`ginga.RemoteClient`_): Ginga RC viewer - ch (ginga.util.grc._channel_proxy): + ch (``ginga.util.grc._channel_proxy``): Ginga channel tilt_traces (`astropy.table.Table`_): Table containing the traced and fitted tilts diff --git a/pypeit/display/ginga_plugins.py b/pypeit/display/ginga_plugins.py index 42f3c93260..7560dda0a4 100644 --- a/pypeit/display/ginga_plugins.py +++ b/pypeit/display/ginga_plugins.py @@ -43,12 +43,12 @@ def info_xy(self, data_x, data_y, settings): image x coordinate data_y: float image y coordinate - settings: :class:`ginga.misc.Settings.SettingGroup` + settings: ``ginga.misc.Settings.SettingGroup`` ginga settings group Returns ------- - info : :class:`ginga.misc.Bunch.Bunch` + info : ``ginga.misc.Bunch.Bunch`` Metadata for this coordinate """ info = super(SlitImage, self).info_xy(data_x, data_y, settings) diff --git a/pypeit/edgetrace.py b/pypeit/edgetrace.py index 1f5c9a13a2..39f46ebc0b 100644 --- a/pypeit/edgetrace.py +++ b/pypeit/edgetrace.py @@ -241,7 +241,7 @@ class EdgeTraceSet(calibframe.CalibFrame): used. The defaults are tuned for each spectrograph based on testing using data in the PypeIt development suite. See :ref:`pypeitpar` for the full documentation of the - :class:`pypeit.par.pypeitpar.EdgeTracePar` parameters. + :class:`~pypeit.par.pypeitpar.EdgeTracePar` parameters. Finally, note that the :attr:`design` and :attr:`object` data are currently empty, as part of a development path for matching slits @@ -285,7 +285,7 @@ class EdgeTraceSet(calibframe.CalibFrame): Attributes: traceimg - (:class:`~pypeit.images.buildcalibration.TraceImage`): + (:class:`~pypeit.images.buildimage.TraceImage`): See argument list. spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): @@ -296,7 +296,7 @@ class EdgeTraceSet(calibframe.CalibFrame): The list of raw files used to construct the trace image (:attr:`img`). Only defined if argument `img` in :func:`initial_trace` or :func:`auto_trace` is a - :class:`~pypeit.images.buildcalibration.TraceImage` object. + :class:`~pypeit.images.buildimage.TraceImage` object. img (`numpy.ndarray`_): Convenience for now. det (:obj:`int`): @@ -675,7 +675,7 @@ def rectify(self, flux, bpm=None, extract_width=None, mask_threshold=0.5, side=' for more detail. Used parameters from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) are + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) are `left_right_pca`. Args: @@ -770,7 +770,7 @@ def auto_trace(self, bpm=None, debug=False, show_stages=False): user-provided lists in the :attr:`par`. Used parameters from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) are + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) are `use_maskdesign`. Args: @@ -1108,7 +1108,7 @@ def _base_header(self, hdr=None): """ Construct the baseline header for all HDU extensions. - This appends the :class:`EdgeTracePar` and + This appends the :class:`~pypeit.par.pypeitpar.EdgeTracePar` and :class:`EdgeTraceBitMask` data to the headers of all HDU extensions. This is overkill, but avoids overriding :func:`pypeit.datamodel.DataContainer.to_hdu` by just @@ -2005,7 +2005,7 @@ def check_traces(self, cen=None, msk=None, subset=None, min_spatial=None, max_sp :func:`trace_pixels_off_detector`. The only used parameter from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) is ``match_tol``. 
+ (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) is ``match_tol``. .. warning:: @@ -2394,7 +2394,7 @@ def check_synced(self, rebuild_pca=False): synchronized partner. Used parameters from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) are + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) are `minimum_slit_gap`, `minimum_slit_length`, `minimum_slit_length_sci`, and `length_range`. @@ -2972,7 +2972,7 @@ def fit_refine(self, weighting='uniform', debug=False, idx=None): only traces that are *not* fully flagged are fit. Used parameters from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) are + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) are ``max_shift_abs``, ``max_spat_error``, ``fit_function``, ``fit_order``, ``fwhm_uniform``, ``fwhm_gaussian``, ``fit_maxdev``, ``fit_maxiter``, ``fit_niter``, and @@ -3089,11 +3089,12 @@ def can_pca(self): there are fewer than the minimum left *or* right edge traces. Used parameters from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) are + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) are ``fit_min_spec_length``, ``left_right_pca``, and ``pca_min_edges``. .. warning:: - This function calls :func:`check_trace` using + + This function calls :func:`check_traces` using `fit_min_spec_length` to flag short traces, meaning that :attr:`edge_msk` will can be altered by this call. @@ -3208,7 +3209,7 @@ def build_pca(self, use_center=False, debug=False): decompositions. Used parameters from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) are + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) are `fit_min_spec_length`, `left_right_pca`, `pca_n`, `pca_var_percent`, `pca_function`, `pca_order`, `pca_sigrej`, `pca_maxrej`, and `pca_maxiter`. @@ -3415,7 +3416,7 @@ def peak_refine(self, rebuild_pca=False, debug=False): :attr:`edge_fit`, and :attr:`fittype`. Used parameters from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) are + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) are ``left_right_pca``, ``edge_thresh``, ``smash_range``, ``edge_detect_clip``, ``trace_median_frac``, ``trace_thresh``, ``fit_function``, ``fit_order``, ``fwhm_uniform``, ``fwhm_uniform``, @@ -3634,7 +3635,7 @@ def _get_reference_locations(self, trace_cen, add_edge): by :func:`sync`. Used parameters from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) are + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) are ``sync_center``, ``sync_to_edge``, and ``gap_offset``. Args: @@ -3755,7 +3756,7 @@ def nudge_traces(self, trace_cen): (`max_nudge`), to be no closer than a minimum number (`det_buffer`) pixels from the detector edges. Both parameters are pulled from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`). No limit is + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`). No limit is imposed on the size of the shift if `max_nudge` is None. .. warning:: @@ -3849,7 +3850,7 @@ def sync(self, rebuild_pca=True, debug=False): exception is raised. Used parameters from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) are + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) are `det_buffer`, `left_right_pca`, and `sync_predict`. .. warning:: @@ -4090,7 +4091,7 @@ def insert_traces(self, side, trace_cen, loc=None, mode='user', resort=True, nud New traces to add are first nudged away from the detector edge (see :func:`nudge_traces`) according to parameters `max_nudge` and `det_buffer` from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`). They are then + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`). 
They are then inserted or appended to the existing traces and masked according to the provided `mode`. The traces are added to *both* the measured centroid list and the fitted model data. @@ -4233,7 +4234,7 @@ def bound_detector(self): masked as user-inserted. Only used parameter from :attr:`par` - (:class:`pypeit.par.pypeitpar.EdgeTracePar`) is + (:class:`~pypeit.par.pypeitpar.EdgeTracePar`) is `det_buffer`. """ # find where sobelsig is not masked @@ -4255,7 +4256,7 @@ def fully_masked_traces(self, flag=None, exclude=None): Args: flag (:obj:`str`, :obj:`list`, optional): The bit mask flags to select. If None, any flags are - used. See :func:`pypeit.bitmask.Bitmask.flagged`. + used. See :func:`pypeit.bitmask.BitMask.flagged`. exclude (:obj:`str`, :obj:`list`, optional): A set of flags to explicitly exclude from consideration as a masked trace. I.e., if any @@ -4610,14 +4611,14 @@ def _fill_design_table(self, maskdef_id, cc_params_b, cc_params_t, omodel_bspat, and the one traced on the image. One value per each edge side Args: - maskdef_id (:obj:`numpy.array`): + maskdef_id (`numpy.ndarray`_): Slit ID number from slit-mask design matched to traced slits. cc_params_b, cc_params_t (:obj:`tuple`): Three parameters of the cross-correlation (2 coefficients and RMS) between slit-mask design and traced edges for the left and right edges. - omodel_bspat, omodel_tspat (:obj:`numpy.array`): + omodel_bspat, omodel_tspat (`numpy.ndarray`_): Left and right spatial position of the slit edges from optical model - spat_id (:obj:`numpy.array`): + spat_id (`numpy.ndarray`_): ID assigned by PypeIt to each slit. same as in `SlitTraceSet`. @@ -4688,7 +4689,7 @@ def _fill_objects_table(self, maskdef_id): Args: - maskdef_id (:obj:`numpy.array`): + maskdef_id (`numpy.ndarray`_): Slit ID number from slit-mask design matched to traced slits. """ # Check that slitmask is initiated diff --git a/pypeit/extraction.py b/pypeit/extraction.py index 792110a9c3..8acfa6f3a9 100644 --- a/pypeit/extraction.py +++ b/pypeit/extraction.py @@ -39,10 +39,10 @@ class Extract: Final output mask extractmask (`numpy.ndarray`_): Extraction mask - slits (:class:`pypeit.slittrace.SlitTraceSet`): - sobjs_obj (:class:`pypeit.specobjs.SpecObjs`): + slits (:class:`~pypeit.slittrace.SlitTraceSet`): + sobjs_obj (:class:`~pypeit.specobjs.SpecObjs`): Only object finding but no extraction - sobjs (:class:`pypeit.specobjs.SpecObjs`): + sobjs (:class:`~pypeit.specobjs.SpecObjs`): Final extracted object list with trace corrections applied spat_flexure_shift (float): tilts (`numpy.ndarray`_): @@ -66,14 +66,14 @@ def get_instance(cls, sciImg, slits, sobjs_obj, spectrograph, par, objtype, glob waveTilts=None, tilts=None, wv_calib=None, waveimg=None, bkg_redux=False, return_negative=False, std_redux=False, show=False, basename=None): """ - Instantiate the Reduce subclass appropriate for the provided + Instantiate the Extract subclass appropriate for the provided spectrograph. - The class must be subclassed from Reduce. See :class:`Reduce` for + The class must be subclassed from Extract. See :class:`Extract` for the description of the valid keyword arguments. Args: - sciImg (:class:`~pypeit.images.scienceimage.ScienceImage`): + sciImg (:class:`~pypeit.images.pypeitimage.PypeItImage`): Image to reduce. slits (:class:`~pypeit.slittrace.SlitTraceSet`): Slit trace set object @@ -94,7 +94,7 @@ def get_instance(cls, sciImg, slits, sobjs_obj, spectrograph, par, objtype, glob be provided. tilts (`numpy.ndarray`_, optional): Tilts image. 
Either a tilts image or waveTilts object (above) must be provided. - wv_calib (:class:`~pypeit.wavetilts.WaveCalib`, optional): + wv_calib (:class:`~pypeit.wavecalib.WaveCalib`, optional): This is the waveCalib object which is optional, but either wv_calib or waveimg must be provided. waveimg (`numpy.ndarray`_, optional): Wave image. Either a wave image or wv_calib object (above) must be provided @@ -274,7 +274,7 @@ def nsobj_to_extract(self): # TODO Make this a method possibly in slittrace.py. Almost identical code is in find_objects.py def initialize_slits(self, slits, initial=False): """ - Gather all the :class:`SlitTraceSet` attributes + Gather all the :class:`~pypeit.slittrace.SlitTraceSet` attributes that we'll use here in :class:`Extract` Args @@ -317,7 +317,7 @@ def extract(self, global_sky, model_noise=None, spat_pix=None): Args: global_sky (`numpy.ndarray`_): Sky estimate - sobjs_obj (:class:`pypeit.specobjs.SpecObjs`): + sobjs_obj (:class:`~pypeit.specobjs.SpecObjs`): List of SpecObj that have been found and traced model_noise (bool): If True, construct and iteratively update a model inverse variance image @@ -474,7 +474,7 @@ def spec_flexure_correct(self, mode="local", sobjs=None): mode (str): "local" - Use sobjs to determine flexure correction "global" - Use waveimg and global_sky to determine flexure correction at the centre of the slit - sobjs (:class:`pypeit.specobjs.SpecObjs`, None): + sobjs (:class:`~pypeit.specobjs.SpecObjs`, None): Spectrally extracted objects """ if self.par['flexure']['spec_method'] == 'skip': @@ -643,7 +643,7 @@ def __repr__(self): class MultiSlitExtract(Extract): """ - Child of Reduce for Multislit and Longslit reductions + Child of Extract for Multislit and Longslit reductions See parent doc string for Args and Attributes @@ -780,7 +780,7 @@ def local_skysub_extract(self, global_sky, sobjs, spat_pix=None, model_noise=Tru class EchelleExtract(Extract): """ - Child of Reduce for Echelle reductions + Child of Extract for Echelle reductions See parent doc string for Args and Attributes diff --git a/pypeit/find_objects.py b/pypeit/find_objects.py index 73858d57b8..b45168d195 100644 --- a/pypeit/find_objects.py +++ b/pypeit/find_objects.py @@ -32,18 +32,18 @@ class FindObjects: science or standard-star exposures. Args: - sciImg (:class:`~pypeit.images.scienceimage.ScienceImage`): + sciImg (:class:`~pypeit.images.pypeitimage.PypeItImage`): Image to reduce. - slits (:class:`~pypeit.slittrace.SlitTracSet`): + slits (:class:`~pypeit.slittrace.SlitTraceSet`): Object providing slit traces for the image to reduce. spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): PypeIt Spectrograph class - par (:class:`~pypeit.par.pyepeitpar.PypeItPar`): + par (:class:`~pypeit.par.pypeitpar.PypeItPar`): Reduction parameters class objtype (:obj:`str`): Specifies object being reduced. Should be 'science', 'standard', or 'science_coadd2d'. - wv_calib (:class:`~pypeit.wavetilts.WaveCalib`, optional): + wv_calib (:class:`~pypeit.wavecalib.WaveCalib`, optional): This is only used for the IFU child when a joint sky subtraction is requested. waveTilts (:class:`~pypeit.wavetilts.WaveTilts`, optional): @@ -71,7 +71,7 @@ class FindObjects: Clear the ginga window before showing the object finding results. basename (:obj:`str`, optional): Base name for output files - manual (:class:`~pypeit.manual_extract.ManualExtractObj`, optional): + manual (:class:`~pypeit.manual_extract.ManualExtractionObj`, optional): Object containing manual extraction instructions/parameters. 
Attributes: @@ -91,7 +91,7 @@ class FindObjects: Final output mask extractmask (`numpy.ndarray`_): Extraction mask - slits (:class:`pypeit.slittrace.SlitTraceSet`): + slits (:class:`~pypeit.slittrace.SlitTraceSet`): sobjs_obj (:class:`pypeit.specobjs.SpecObjs`): Objects found spat_flexure_shift (:obj:`float`): @@ -272,7 +272,7 @@ def create_skymask(self, sobjs_obj): # TODO Make this a method possibly in slittrace.py. Almost identical code is in extraction.py def initialize_slits(self, slits, initial=False): """ - Gather all the :class:`SlitTraceSet` attributes + Gather all the :class:`~pypeit.slittrace.SlitTraceSet` attributes that we'll use here in :class:`FindObjects` Args: @@ -493,11 +493,15 @@ def global_skysub(self, skymask=None, update_crmask=True, trim_edg = (0, 0), skymask (`numpy.ndarray`_, None): A 2D image indicating sky regions (1=sky) update_crmask (bool, optional): + ?? trim_edg (tuple, optional): A two tuple of ints that specify the number of pixels to trim from the slit edges show_fit (bool, optional): + ?? show (bool, optional): + ?? show_objs (bool, optional): + ?? previous_sky (`numpy.ndarray`_, optional): Sky model estimate from a previous run of global_sky Used to generate an improved estimated of the variance @@ -601,25 +605,22 @@ def show(self, attr, image=None, global_sky=None, showmask=False, sobjs=None, Show one of the internal images .. todo:: - Should probably put some of these in ProcessImages + + - This docstring is incomplete! Parameters ---------- attr : str - global -- Sky model (global) - sci -- Processed science image - rawvar -- Raw variance image - modelvar -- Model variance image - crmasked -- Science image with CRs set to 0 - skysub -- Science image with global sky subtracted - image -- Input image - display : str, optional + String specifying the image to show. Options are: + - global -- Sky model (global) + - sci -- Processed science image + - rawvar -- Raw variance image + - modelvar -- Model variance image + - crmasked -- Science image with CRs set to 0 + - skysub -- Science image with global sky subtracted + - image -- Input image image : ndarray, optional - User supplied image to display - - Returns - ------- - + User supplied image to display """ mask_in = self.sciImg.fullmask if showmask else None @@ -727,7 +728,7 @@ def find_objects_pypeline(self, image, ivar, std_trace=None, Returns ------- - specobjs : :class:`~pypeot.specobjs.Specobjs` + specobjs : :class:`~pypeit.specobjs.SpecObjs` Container holding Specobj objects nobj : :obj:`int` Number of objects identified @@ -875,7 +876,7 @@ def find_objects_pypeline(self, image, ivar, std_trace=None, Returns ------- - specobjs : :class:`~pypeit.specobjs.Specobjs` + specobjs : :class:`~pypeit.specobjs.SpecObjs` Container holding Specobj objects nobj : :obj:`int` Number of objects identified diff --git a/pypeit/flatfield.py b/pypeit/flatfield.py index 9dc2958f71..b25617ec10 100644 --- a/pypeit/flatfield.py +++ b/pypeit/flatfield.py @@ -481,8 +481,8 @@ class FlatField: The current slit traces. wavetilts (:class:`~pypeit.wavetilts.WaveTilts`): The current wavelength tilt traces; see - wv_calib (??): - ?? + wv_calib (:class:`~pypeit.wavecalib.WaveCalib`): + Wavelength calibration object spat_illum_only (bool, optional): Only perform the spatial illumination calculation, and ignore the 2D bspline fit. 
This should only be set to true if you @@ -490,8 +490,8 @@ class FlatField: simultaneously generate a pixel flat and a spatial illumination profile from the same input, this should be False (which is the default). - qa_path (??, optional): - ?? + qa_path (str, optional): + Path to QA directory Attributes: rawflatimg (:class:`~pypeit.images.pypeitimage.PypeItImage`): @@ -555,15 +555,12 @@ def run(self, doqa=False, debug=False, show=False): This is a simple wrapper for the main flat-field methods: - - Flat-field images are processed using :func:`build_pixflat`. - - Full 2D model, illumination flat, and pixel flat images are constructed by :func:`fit`. - The results can be shown in a ginga window using :func:`show`. - The method is a simple wrapper for :func:`build_pixflat`, :func:`fit`, - and :func:`show`. + The method is a simple wrapper for :func:`fit` and :func:`show`. Args: doqa (:obj:`bool`, optional): @@ -656,7 +653,7 @@ def build_mask(self): Generate bad pixel mask. Returns: - :obj:`numpy.ndarray` : bad pixel mask + `numpy.ndarray`_ : bad pixel mask """ bpmflats = np.zeros_like(self.slits.mask, dtype=self.slits.bitmask.minimum_dtype()) for flag in ['SKIPFLATCALIB', 'BADFLATCALIB']: @@ -695,7 +692,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): Construct a model of the flat-field image. For this method to work, :attr:`rawflatimg` must have been - previously constructed; see :func:`build_pixflat`. + previously constructed. The method loops through all slits provided by the :attr:`slits` object, except those that have been masked (i.e., slits with @@ -734,7 +731,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): (see ``tweak_slits_thresh`` in :attr:`flatpar`), up to a maximum allowed shift from the existing slit edge (see ``tweak_slits_maxfrac`` in :attr:`flatpar`). See - :func:`pypeit.core.tweak_slit_edges`. If tweaked, the + :func:`~pypeit.core.flat.tweak_slit_edges`. If tweaked, the :func:`spatial_fit` is repeated to place it on the tweaked slits reference frame. - Use the bspline fit to construct the 2D illumination image @@ -759,7 +756,7 @@ def fit(self, spat_illum_only=False, doqa=True, debug=False): attributes are altered internally. If the slit edges are to be tweaked using the 1D illumination profile (``tweak_slits`` in :attr:`flatpar`), the tweaked slit edge arrays in the internal - :class:`~pypeit.edgetrace.SlitTraceSet` object, :attr:`slits`, + :class:`~pypeit.slittrace.SlitTraceSet` object, :attr:`slits`, are also altered. Used parameters from :attr:`flatpar` diff --git a/pypeit/fluxcalibrate.py b/pypeit/fluxcalibrate.py index a536176fde..672c9a4f74 100644 --- a/pypeit/fluxcalibrate.py +++ b/pypeit/fluxcalibrate.py @@ -23,7 +23,7 @@ def flux_calibrate(spec1dfiles, sensfiles, par=None, outfiles=None): sensfiles (list): List of sensitivity function files to use to flux calibrate the spec1d files. This list and the sensfiles list need to have the same length and be aligned - par (pypeit.par.pypeitpar.FluxCalibrate, optional): + par (:class:`~pypeit.par.pypeitpar.FluxCalibratePar`, optional): Parset object containing parameters governing the flux calibration. outfiles (list, optional): Names of the output files. 
If None, this is set to spec1dfiles and those are overwritten diff --git a/pypeit/images/buildimage.py b/pypeit/images/buildimage.py index 2ce086f116..1d58581f55 100644 --- a/pypeit/images/buildimage.py +++ b/pypeit/images/buildimage.py @@ -111,7 +111,7 @@ def construct_file_name(cls, calib_key, calib_dir=None, basename=None): Args: calib_key (:obj:`str`): String identifier of the calibration group. See - :func:`construct_calib_key`. + :func:`~pypeit.calibframe.CalibFrame.construct_calib_key`. calib_dir (:obj:`str`, `Path`_, optional): If provided, return the full path to the file given this directory. @@ -164,7 +164,7 @@ def buildimage_fromlist(spectrograph, det, frame_par, file_list, bias=None, bpm= The 1-indexed detector number(s) to process. If a tuple, it must include detectors viable as a mosaic for the provided spectrograph; see :func:`~pypeit.spectrographs.spectrograph.Spectrograph.allowed_mosaics`. - frame_par (:class:`~pypeit.par.pypeitpar.FramePar`): + frame_par (:class:`~pypeit.par.pypeitpar.FrameGroupPar`): Parameters that dictate the processing of the images. See :class:`~pypeit.par.pypeitpar.ProcessImagesPar` for the defaults. @@ -187,7 +187,7 @@ def buildimage_fromlist(spectrograph, det, frame_par, file_list, bias=None, bpm= (``sigma_clip`` is True), this sets the maximum number of rejection iterations. If None, rejection iterations continue until no more data are rejected; see - :func:`~pypeit.core.combine.weighted_combine``. + :func:`~pypeit.core.combine.weighted_combine`. ignore_saturation (:obj:`bool`, optional): If True, turn off the saturation flag in the individual images before stacking. This avoids having such values set to 0, which diff --git a/pypeit/images/pypeitimage.py b/pypeit/images/pypeitimage.py index 50cd06642f..35bc7d9862 100644 --- a/pypeit/images/pypeitimage.py +++ b/pypeit/images/pypeitimage.py @@ -965,7 +965,7 @@ def from_pypeitimage(cls, pypeitImage, calib_dir=None, setup=None, calib_id=None detname (:obj:`str`): The identifier used for the detector or detector mosaic for the relevant instrument; see - :func:`~pypeit.spectrograph.spectrograph.Spectrograph.get_det_name`. + :func:`~pypeit.spectrographs.spectrograph.Spectrograph.get_det_name`. Returns: :class:`PypeItImage`: Image with the appropriate type and diff --git a/pypeit/images/rawimage.py b/pypeit/images/rawimage.py index 7b3f1d8c07..aca870163c 100644 --- a/pypeit/images/rawimage.py +++ b/pypeit/images/rawimage.py @@ -60,7 +60,7 @@ class RawImage: Attributes: filename (:obj:`str`): Original file name with the data. - spectrograph (:class:`~pypeit.spectrograph.spectrographs.Spectrograph`): + spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): Spectrograph instance with the instrument-specific properties and methods. det (:obj:`int`, :obj:`tuple`): diff --git a/pypeit/inputfiles.py b/pypeit/inputfiles.py index 68b4a28516..67c68294e0 100644 --- a/pypeit/inputfiles.py +++ b/pypeit/inputfiles.py @@ -83,8 +83,7 @@ def readlines(ifile:str): ifile (str): Name of the file to parse. Returns: - :obj:`numpy.ndarray`: Returns a list of the valid lines in the - files. + `numpy.ndarray`_: Returns a list of the valid lines in the files. """ # Check the files if not os.path.isfile(ifile): @@ -258,10 +257,9 @@ def _read_data_file_table(lines): List of lines *within the data* block read from the input file. Returns: - tuple: list, Table. A list of the paths provided (can be empty) - and a Table with the data provided in the input file. 
+ tuple: A :obj:`list` with the paths provided (can be empty) and an + `astropy.table.Table`_ with the data provided in the input file. """ - # Allow for multiple paths paths = [] for l in lines: @@ -361,7 +359,7 @@ def find_block(lines, block): def path_and_files(self, key:str, skip_blank=False, check_exists=True): """Generate a list of the filenames with - the full path from the column of the data Table + the full path from the column of the data `astropy.table.Table`_ specified by `key`. The files must exist and be within one of the paths for this to succeed. @@ -487,8 +485,8 @@ def get_spectrograph(self): parameter. Raises: - :class:`~pypeit.pypmsgs.PypeItError`: Raised if the relevant - configuration parameter is not available. + :class:`~pypeit.pypmsgs.PypeItError`: + Raised if the relevant configuration parameter is not available. """ if 'rdx' not in self.config.keys() or 'spectrograph' not in self.config['rdx'].keys(): msgs.error('Cannot define spectrograph. Configuration file missing \n' diff --git a/pypeit/metadata.py b/pypeit/metadata.py index fa551b28ae..60acb1d3c3 100644 --- a/pypeit/metadata.py +++ b/pypeit/metadata.py @@ -59,9 +59,8 @@ class PypeItMetaData: The list of files to include in the table. data (table-like, optional): The data to include in the table. The type can be anything - allowed by the instantiation of - :class:`astropy.table.Table`. - usrdata (:obj:`astropy.table.Table`, optional): + allowed by the instantiation of `astropy.table.Table`_. + usrdata (`astropy.table.Table`_, optional): A user provided set of data used to supplement or overwrite metadata read from the file headers. The table must have a `filename` column that is used to match to the metadata @@ -69,28 +68,28 @@ class PypeItMetaData: `data` is also provided. This functionality is only used when building the metadata from the fits files. strict (:obj:`bool`, optional): - Function will fault if there is a problem with the reading - the header for any of the provided files; see - :func:`~pypeit.spectrographs.spectrograph.get_headarr`. Set - to False to instead report a warning and continue. + Function will fault if there is a problem with the reading the + header for any of the provided files; see + :func:`~pypeit.spectrographs.spectrograph.Spectrograph.get_headarr`. + Set to False to instead report a warning and continue. Attributes: spectrograph - (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): + (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): The spectrograph used to collect the data save to each file. The class is used to provide the header keyword data to include in the table and specify any validation checks. - par (:class:`pypeit.par.pypeitpar.PypeItPar`): + par (:class:`~pypeit.par.pypeitpar.PypeItPar`): PypeIt parameters used to set the code behavior. If not provided, the default parameters specific to the provided spectrograph are used. configs (:obj:`dict`): A dictionary of the unique configurations identified. - type_bitmask (:class:`pypeit.core.framematch.FrameTypeBitMask`): + type_bitmask (:class:`~pypeit.core.framematch.FrameTypeBitMask`): The bitmask used to set the frame type of each fits file. - calib_bitmask (:class:`BitMask`): + calib_bitmask (:class:`~pypeit.bitmask.BitMask`): The bitmask used to keep track of the calibration group bits. - table (:class:`astropy.table.Table`): + table (`astropy.table.Table`_): The table with the relevant metadata for each fits file to use in the data reduction. 
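To make the calling pattern concrete, a hedged sketch of building the metadata table and typing the frames; the spectrograph name is a real PypeIt instrument, but the raw file names are hypothetical placeholders.

.. code-block:: python

    from pypeit.spectrographs.util import load_spectrograph
    from pypeit.metadata import PypeItMetaData

    spec = load_spectrograph('shane_kast_blue')
    par = spec.default_pypeit_par()

    # Hypothetical raw frames; any files from this instrument would do.
    fitstbl = PypeItMetaData(spec, par, files=['b1.fits.gz', 'b2.fits.gz'])
    fitstbl.get_frame_types(flag_unknown=True)

    # Boolean mask (or row indices with index=True) of a given frame type.
    is_sci = fitstbl.find_frames('science')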
""" @@ -155,10 +154,10 @@ def _build(self, files, strict=True, usrdata=None): files (:obj:`str`, :obj:`list`): One or more files to use to build the table. strict (:obj:`bool`, optional): - Function will fault if :func:`fits.getheader` fails to + Function will fault if `astropy.io.fits.getheader`_ fails to read any of the headers. Set to False to report a warning and continue. - usrdata (astropy.table.Table, optional): + usrdata (`astropy.table.Table`_, optional): Parsed for frametype for a few instruments (e.g. VLT) where meta data may not be required @@ -278,7 +277,7 @@ def merge(self, usrdata, match_type=True): this step by setting `match_type=False`. Args: - usrdata (:obj:`astropy.table.Table`): + usrdata (`astropy.table.Table`_): A user provided set of data used to supplement or overwrite metadata read from the file headers. The table must have a `filename` column that is used to @@ -289,14 +288,14 @@ def merge(self, usrdata, match_type=True): Raises: TypeError: - Raised if `usrdata` is not an `astropy.io.table.Table` + Raised if `usrdata` is not an `astropy.table.Table`_ KeyError: Raised if `filename` is not a key in the provided table. """ meta_data_model = meta.get_meta_data_model() # Check the input if not isinstance(usrdata, table.Table): - raise TypeError('Must provide an astropy.io.table.Table instance.') + raise TypeError('Must provide an astropy.table.Table instance.') if 'filename' not in usrdata.keys(): raise KeyError('The user-provided table must have \'filename\' column!') @@ -437,15 +436,12 @@ def construct_obstime(self, row): """ Construct the MJD of when the frame was observed. - .. todo:: - - Consolidate with :func:`convert_time` ? - Args: row (:obj:`int`): The 0-indexed row of the frame. Returns: - astropy.time.Time: The MJD of the observation. + `astropy.time.Time`_: The MJD of the observation. """ return time.Time(self['mjd'][row], format='mjd') @@ -456,7 +452,7 @@ def construct_basename(self, row, obstime=None): Args: row (:obj:`int`): The 0-indexed row of the frame. - obstime (:class:`astropy.time.Time`, optional): + obstime (`astropy.time.Time`_, optional): The MJD of the observation. If None, constructed using :func:`construct_obstime`. @@ -970,7 +966,7 @@ def find_configuration(self, setup, index=False): boolean array. Returns: - numpy.ndarray: A boolean array, or an integer array if + `numpy.ndarray`_: A boolean array, or an integer array if ``index=True``, with the table rows associated with the requested setup/configuration. """ @@ -1157,10 +1153,12 @@ def ignore_frames(self): Construct a list of frame types to ignore, and the corresponding indices of these frametypes in the table. Returns: - :obj:`dict`: Dictionary where the keys are the frame types that are - configuration-independent and the values are the metadata keywords - that can be used to assign the frames to a configuration group. - numpy.ndarray: An integer array with the table rows that should be ignored when defining the configuration. + :obj:`tuple`: Two objects are returned, (1) A dictionary where the + keys are the frame types that are configuration-independent and the + values are the metadata keywords that can be used to assign the + frames to a configuration group, and (2) an integer `numpy.ndarray` + with the table rows that should be ignored when defining the + configuration. 
""" ignore_indx = np.arange(len(self.table)) ignore_frames = self.spectrograph.config_independent_frames() @@ -1186,7 +1184,7 @@ def find_frames(self, ftype, calib_ID=None, index=False): Args: ftype (str): The frame type identifier. See the keys for - :class:`pypeit.core.framematch.FrameTypeBitMask`. If + :class:`~pypeit.core.framematch.FrameTypeBitMask`. If set to the string 'None', this returns all frames without a known type. calib_ID (:obj:`int`, optional): @@ -1197,7 +1195,7 @@ def find_frames(self, ftype, calib_ID=None, index=False): boolean array. Returns: - numpy.ndarray: A boolean array, or an integer array if + `numpy.ndarray`_: A boolean array, or an integer array if index=True, with the rows that contain the frames of the requested type. @@ -1229,7 +1227,7 @@ def find_frame_files(self, ftype, calib_ID=None): Args: ftype (str): The frame type identifier. See the keys for - :class:`pypeit.core.framematch.FrameTypeBitMask`. + :class:`~pypeit.core.framematch.FrameTypeBitMask`. calib_ID (:obj:`int`, optional): Index of the calibration group that it must match. If None, any row of the specified frame type is included. @@ -1261,7 +1259,7 @@ def set_frame_types(self, type_bits, merge=True): Set and return a Table with the frame types and bits. Args: - type_bits (numpy.ndarray): + type_bits (`numpy.ndarray`_): Integer bitmask with the frame types. The length must match the existing number of table rows. @@ -1270,7 +1268,7 @@ def set_frame_types(self, type_bits, merge=True): will *overwrite* any existing columns. Returns: - `astropy.table.Table`: Table with two columns, the frame + `astropy.table.Table`_: Table with two columns, the frame type name and bits. """ # Making Columns to pad string array @@ -1343,10 +1341,10 @@ def get_frame_types(self, flag_unknown=False, user=None, merge=True): Merge the frame typing into the exiting table. Returns: - :obj:`astropy.table.Table`: A Table with two columns, the - type names and the type bits. See - :class:`pypeit.core.framematch.FrameTypeBitMask` for the - allowed frame types. + `astropy.table.Table`_: A Table with two columns, the type names and + the type bits. See + :class:`~pypeit.core.framematch.FrameTypeBitMask` for the allowed + frame types. """ # Checks if 'frametype' in self.keys() or 'framebit' in self.keys(): @@ -1783,7 +1781,7 @@ def write(self, output=None, rows=None, columns=None, sort_col=None, overwrite=F file. Returns: - `astropy.table.Table`: The table object that would have been + `astropy.table.Table`_: The table object that would have been written/printed if ``output == 'table'``. Otherwise, the method always returns None. @@ -1893,7 +1891,7 @@ def find_calib_group(self, grp): The calibration group integer. Returns: - numpy.ndarray: Boolean array selecting those frames in the + `numpy.ndarray`_: Boolean array selecting those frames in the table included in the selected calibration group. Raises: diff --git a/pypeit/par/parset.py b/pypeit/par/parset.py index 5544ca2682..284917c799 100644 --- a/pypeit/par/parset.py +++ b/pypeit/par/parset.py @@ -317,7 +317,7 @@ def _data_table_string(data_table, delimeter='print'): columns and add a header (first row) and contents delimeter. Args: - data_table (:obj:`numpy.ndarray`): + data_table (`numpy.ndarray`_): Array of string representations of the data to print. 
Returns: @@ -415,7 +415,7 @@ def config_lines(par, section_name=None, section_comment=None, section_level=0, exclude_defaults=False, include_descr=True): """ Recursively generate the lines of a configuration file based on - the provided ParSet or dict (par). + the provided :class:`ParSet` or :obj:`dict` (see ``par``). Args: section_name (:obj:`str`, optional): @@ -620,23 +620,22 @@ def to_config(self, cfg_file=None, section_name=None, section_comment=None, sect Args: cfg_file (:obj:`str`, optional): - The name of the file to write/append to. If None - (default), the function will just return the list of - strings that would have been written to the file. These - lines can be used to construct a :class:`ConfigObj` - instance. + The name of the file to write/append to. If None (default), the + function will just return the list of strings that would have + been written to the file. These lines can be used to construct + a `configobj`_ instance. section_name (:obj:`str`, optional): The top-level name for the config section. This must be provided if :attr:`cfg_section` is None or any of the - parameters are not also ParSet instances themselves. + parameters are not also :class:`ParSet` instances themselves. section_comment (:obj:`str`, optional): The top-level comment for the config section based on - this ParSet. + this :class:`ParSet`. section_level (:obj:`int`, optional): - The top level of this ParSet. Used for recursive output - of nested ParSets. + The top level of this :class:`ParSet`. Used for recursive output + of nested :class:`ParSet` instances. append (:obj:`bool`, optional): - Append this configuration output of this ParSet to the + Append this configuration output of this :class:`ParSet` to the file. False by default. If not appending and the file exists, the file is automatically overwritten. quiet (:obj:`bool`, optional): @@ -649,7 +648,7 @@ def to_config(self, cfg_file=None, section_name=None, section_comment=None, sect Raises: ValueError: - Raised if there are types other than ParSet in the + Raised if there are types other than :class:`ParSet` in the parameter list, :attr:`cfg_section` is None, and no section_name argument was provided. """ @@ -771,7 +770,7 @@ def to_header(self, hdr=None, prefix=None, quiet=False): """ Write the parameters to a fits header. - Any element that has a value of None or is a ParSet itself is + Any element that has a value of None or is a :class:`ParSet` itself is *not* written to the header. Args: @@ -821,10 +820,9 @@ def to_header(self, hdr=None, prefix=None, quiet=False): @classmethod def from_header(cls, hdr, prefix=None): """ - Instantiate the ParSet using data parsed from a fits header. + Instantiate the :class:`ParSet` using data parsed from a fits header. - This is a simple wrapper for - :func:`ParSet.parse_par_from_hdr` and + This is a simple wrapper for :func:`ParSet.parse_par_from_hdr` and :func:`ParSet.from_dict`. .. warning:: diff --git a/pypeit/par/pypeitpar.py b/pypeit/par/pypeitpar.py index b0cd2fc128..1f0338381e 100644 --- a/pypeit/par/pypeitpar.py +++ b/pypeit/par/pypeitpar.py @@ -40,9 +40,9 @@ def __init__(self, existing_par=None, foo=None): for pk in parkeys: kwargs[pk] = cfg[pk] if pk in k else None - - If the parameter is another ParSet or requires instantiation, - provide the instantiation. For example, see how the - :class:`ProcessImagesPar` parameter set is defined in the + - If the parameter is another :class:`~pypeit.par.parset.ParSet` or + requires instantiation, provide the instantiation. 
For example, see + how the :class:`ProcessImagesPar` parameter set is defined in the :class:`FrameGroupPar` class. E.g.:: pk = 'foo' @@ -201,8 +201,8 @@ class ProcessImagesPar(ParSet): The parameters needed to perform basic image processing. These parameters are primarily used by - :class:`pypeit.processimages.ProcessImages`, the base class of many - of the pypeit objects. + :func:`~pypeit.images.buildimage.buildimage_fromlist`, the main function + used to process and combine images. For a table with the current keywords, defaults, and descriptions, see :ref:`parameters`. @@ -1304,7 +1304,7 @@ def __init__(self, only_slits=None, exclude_slits=None, offsets=None, spat_toler # wave method defaults['wave_method'] = None dtypes['wave_method'] = str - descr['wave_method'] = "Argument to `pypeit.core.wavecal.wvutils.get_wave_grid` method, which determines how " \ + descr['wave_method'] = "Argument to :func:`~pypeit.core.wavecal.wvutils.get_wave_grid` method, which determines how " \ "the 2d coadd wavelength grid is constructed. The default is None, which will use a linear grid" \ "for longslit/multislit coadds and a log10 grid for echelle coadds. " \ "Currently supported options with 2d coadding are:" \ @@ -2147,7 +2147,7 @@ def __init__(self, telgridfile=None, sn_clip=None, resln_guess=None, resln_frac_ dtypes['minmax_coeff_bounds'] = tuple descr['minmax_coeff_bounds'] = "Parameters setting the polynomial coefficient bounds for sensfunc optimization. " \ "Bounds are currently determined as follows. We compute an initial fit to the " \ - "sensfunc in the pypeit.core.telluric.init_sensfunc_model function. That deterines " \ + "sensfunc in the :func:`~pypeit.core.telluric.init_sensfunc_model` function. That deterines " \ "a set of coefficients. The bounds are then determined according to: " \ "[(np.fmin(np.abs(this_coeff)*obj_params['delta_coeff_bounds'][0], " \ "obj_params['minmax_coeff_bounds'][0]), " \ @@ -2960,7 +2960,7 @@ def __init__(self, filt_iter=None, sobel_mode=None, edge_thresh=None, sobel_enha defaults['fit_niter'] = 1 dtypes['fit_niter'] = int descr['fit_niter'] = 'Number of iterations of re-measuring and re-fitting the edge ' \ - 'data; see :func:`pypeit.core.trace.fit_trace`.' + 'data; see :func:`~pypeit.core.trace.fit_trace`.' # TODO: Allow this to be a list so that it can be detector specific? defaults['fit_min_spec_length'] = 0.6 @@ -3059,27 +3059,27 @@ def __init__(self, filt_iter=None, sobel_mode=None, edge_thresh=None, sobel_enha defaults['fwhm_uniform'] = 3.0 dtypes['fwhm_uniform'] = [int, float] descr['fwhm_uniform'] = 'The `fwhm` parameter to use when using uniform weighting in ' \ - ':func:`pypeit.core.trace.fit_trace` when refining the PCA ' \ + ':func:`~pypeit.core.trace.fit_trace` when refining the PCA ' \ 'predictions of edges. See description of ' \ - ':func:`pypeit.core.trace.peak_trace`.' + ':func:`~pypeit.core.trace.peak_trace`.' defaults['niter_uniform'] = 9 dtypes['niter_uniform'] = int descr['niter_uniform'] = 'The number of iterations of ' \ - ':func:`pypeit.core.trace.fit_trace` to use when using ' \ + ':func:`~pypeit.core.trace.fit_trace` to use when using ' \ 'uniform weighting.' defaults['fwhm_gaussian'] = 3.0 dtypes['fwhm_gaussian'] = [int, float] descr['fwhm_gaussian'] = 'The `fwhm` parameter to use when using Gaussian weighting in ' \ - ':func:`pypeit.core.trace.fit_trace` when refining the PCA ' \ + ':func:`~pypeit.core.trace.fit_trace` when refining the PCA ' \ 'predictions of edges. See description ' \ - ':func:`pypeit.core.trace.peak_trace`.' 
+ ':func:`~pypeit.core.trace.peak_trace`.' defaults['niter_gaussian'] = 6 dtypes['niter_gaussian'] = int descr['niter_gaussian'] = 'The number of iterations of ' \ - ':func:`pypeit.core.trace.fit_trace` to use when using ' \ + ':func:`~pypeit.core.trace.fit_trace` to use when using ' \ 'Gaussian weighting.' defaults['det_buffer'] = 5 @@ -4073,8 +4073,8 @@ def __init__(self, calib_dir=None, bpm_usebias=None, biasframe=None, darkframe=N dtypes['calib_dir'] = str descr['calib_dir'] = 'The name of the directory for the processed calibration frames. ' \ 'The host path for the directory is set by the redux_path (see ' \ - ':class:`ReduxPar`). Beware that success when changing the ' \ - 'default value is not well tested!' + ':class:`~pypeit.par.pypeitpar.ReduxPar`). Beware that success ' \ + 'when changing the default value is not well tested!' defaults['raise_chk_error'] = True dtypes['raise_chk_error'] = bool @@ -4299,7 +4299,7 @@ class PypeItPar(ParSet): merge_with=user_cfg_lines) To write the configuration of a given instance of :class:`PypeItPar`, - use the :func:`to_config` function:: + use the :func:`~pypeit.par.parset.ParSet.to_config` function:: par.to_config('mypypeitpar.cfg') @@ -4432,7 +4432,7 @@ def from_cfg_file(cls, cfg_file=None, merge_with=None, evaluate=True): When `evaluate` is true, the function runs `eval()` on all the entries in the `ConfigObj` dictionary, done using - :func:`_recursive_dict_evaluate`. This has the potential to + :func:`~pypeit.par.util.recursive_dict_evaluate`. This has the potential to go haywire if the name of a parameter unintentionally happens to be identical to an imported or system-level function. Of course, this can be useful by allowing one to @@ -4440,13 +4440,13 @@ def from_cfg_file(cls, cfg_file=None, merge_with=None, evaluate=True): one has to be careful with the values that the parameters should be allowed to have. The current way around this is to provide a list of strings that should be ignored during - the evaluation, done using :func:`_eval_ignore`. + the evaluation, done using :func:`~pypeit.par.util._eval_ignore`. .. todo:: Allow the user to add to the ignored strings. Returns: - :class:`pypeit.par.core.PypeItPar`: The instance of the + :class:`~pypeit.par.pypeitpar.PypeItPar`: The instance of the parameter set. """ # Get the base parameters in a ConfigObj instance @@ -4514,7 +4514,7 @@ def from_cfg_lines(cls, cfg_lines=None, merge_with=None, evaluate=True): When `evaluate` is true, the function runs `eval()` on all the entries in the `ConfigObj` dictionary, done using - :func:`_recursive_dict_evaluate`. This has the potential to + :func:`~pypeit.par.util.recursive_dict_evaluate`. This has the potential to go haywire if the name of a parameter unintentionally happens to be identical to an imported or system-level function. Of course, this can be useful by allowing one to @@ -4522,13 +4522,13 @@ def from_cfg_lines(cls, cfg_lines=None, merge_with=None, evaluate=True): one has to be careful with the values that the parameters should be allowed to have. The current way around this is to provide a list of strings that should be ignored during - the evaluation, done using :func:`_eval_ignore`. + the evaluation, done using :func:`~pypeit.par.util._eval_ignore`. .. todo:: Allow the user to add to the ignored strings. Returns: - :class:`pypeit.par.core.PypeItPar`: The instance of the + :class:`~pypeit.par.pypeitpar.PypeItPar`: The instance of the parameter set. 
""" # Get the base parameters in a ConfigObj instance diff --git a/pypeit/par/util.py b/pypeit/par/util.py index a985279854..cba7b6eb45 100644 --- a/pypeit/par/util.py +++ b/pypeit/par/util.py @@ -61,11 +61,11 @@ def recursive_dict_evaluate(d): Recursively run :func:`eval` on each element of the provided dictionary. - A raw read of a configuration file with `ConfigObj` results in a + A raw read of a configuration file with `configobj`_ results in a dictionary that contains strings or lists of strings. However, when assigning the values for the various ParSets, the `from_dict` methods expect the dictionary values to have the appropriate type. - E.g., the ConfigObj will have something like d['foo'] = '1', when + E.g., the `configobj`_ will have something like d['foo'] = '1', when the `from_dict` method expects the value to be an integer (d['foo'] = 1). @@ -77,8 +77,9 @@ def recursive_dict_evaluate(d): raises an exception is returned as the original string. - This is currently only used in :func:`PypitPar.from_cfg_file`; see - further comments there. + This is currently only used in + :func:`~pypeit.par.pypeitpar.PypitPar.from_cfg_file`; see further comments + there. Args: d (dict): @@ -122,7 +123,7 @@ def recursive_dict_evaluate(d): return d - +# TODO: I don't think this is used. We should deprecate it. def get_parset_list(cfg, pk, parsetclass): """ Create a list of ParSets based on a root keyword for a set of @@ -143,7 +144,7 @@ def get_parset_list(cfg, pk, parsetclass): kwargs['detector'] = get_parset_list(cfg, 'detector', DetectorPar) Args: - cfg (:class:`ConfigObj`, :obj:`dict`): + cfg (`configobj`_, :obj:`dict`): The top-level configuration that defines a list of sub-ParSets. pk (str): diff --git a/pypeit/pypeit.py b/pypeit/pypeit.py index 974e872768..e1d0448d91 100644 --- a/pypeit/pypeit.py +++ b/pypeit/pypeit.py @@ -645,7 +645,7 @@ def get_sci_metadata(self, frame, det): 5 objects are returned:: - str: Object type; science or standard - str: Setup/configuration string - - astropy.time.Time: Time of observation + - `astropy.time.Time`_: Time of observation - str: Basename of the frame - str: Binning of the detector @@ -715,7 +715,7 @@ def objfind_one(self, frames, det, bg_frames=None, std_outfile=None): List of frames to use as the background. Can be empty. std_outfile : :obj:`str`, optional Filename for the standard star spec1d file. Passed directly to - :func:`get_std_trace`. + :func:`~pypeit.specobjs.get_std_trace`. Returns ------- @@ -906,7 +906,7 @@ def extract_one(self, frames, det, sciImg, objFind, initial_sky, sobjs_obj): is provided det (:obj:`int`): Detector number (1-indexed) - sciImg (:class:`PypeItImage`): + sciImg (:class:`~pypeit.images.pypeitimage.PypeItImage`): Data container that holds a single image from a single detector its related images (e.g. ivar, mask) objFind : :class:`~pypeit.find_objects.FindObjects` @@ -1052,13 +1052,13 @@ def refframe_correct(self, slits, ra, dec, obstime, slitgpm=None, waveimg=None, Right Ascension dec (float, str): Declination - obstime (:obj:`astropy.time.Time`): + obstime (`astropy.time.Time`_): Observation time slitgpm (`numpy.ndarray`_, None, optional): 1D boolean array indicating the good slits (True). 
If None, the gpm will be taken from slits waveimg (`numpy.ndarray`_, optional) Two-dimensional image specifying the wavelength of each pixel - sobjs (:class:`pypeit.specobjs.Specobjs`, None, optional): + sobjs (:class:`~pypeit.specobjs.SpecObjs`, None, optional): Spectrally extracted objects """ diff --git a/pypeit/pypeitsetup.py b/pypeit/pypeitsetup.py index 5aa7d2004a..89ff8c0207 100644 --- a/pypeit/pypeitsetup.py +++ b/pypeit/pypeitsetup.py @@ -37,8 +37,9 @@ class PypeItSetup: file name without the full path) to a specific frame type (e.g., arc, bias, etc.). The file name and type are expected to be the key and value of the dictionary, respectively. If None, this is - determined by the :func:`get_frame_types` method. - usrdata (:obj:`astropy.table.Table`, optional): + determined by the + :func:`~pypeit.metadata.PypeItMetaData.get_frame_types` method. + usrdata (`astropy.table.Table`_, optional): A user provided set of data used to supplement or overwrite metadata read from the file headers. The table must have a `filename` column that is used to match to the metadata @@ -167,8 +168,8 @@ def from_pypeit_file(cls, filename): @classmethod def from_file_root(cls, root, spectrograph, extension='.fits'): """ - Instantiate the :class:`PypeItSetup` object by providing a file - root. + Instantiate the :class:`~pypeit.pypeitsetup.PypeItSetup` object by + providing a file root. Args: root (:obj:`str`): @@ -176,9 +177,8 @@ def from_file_root(cls, root, spectrograph, extension='.fits'): :func:`~pypeit.io.files_from_extension`. spectrograph (:obj:`str`): The PypeIt name of the spectrograph used to take the - observations. This should be one of the available - options in - :func:`~pypeit.spectrographs.available_spectrographs`. + observations. This should be one of the available options in + :attr:`~pypeit.spectrographs.available_spectrographs`. extension (:obj:`str`, optional): The extension common to all the fits files to reduce; see :func:`~pypeit.io.files_from_extension`. @@ -190,7 +190,9 @@ def from_file_root(cls, root, spectrograph, extension='.fits'): @classmethod def from_rawfiles(cls, data_files:list, spectrograph:str, frametype=None): - """ Instantiate the :class:`PypeItSetup` object by providing a list of raw files. + """ + Instantiate the :class:`~pypeit.pypeitsetup.PypeItSetup` object by + providing a list of raw files. Args: data_files (list): @@ -202,10 +204,11 @@ def from_rawfiles(cls, data_files:list, spectrograph:str, frametype=None): file name without the full path) to a specific frame type (e.g., arc, bias, etc.). The file name and type are expected to be the key and value of the dictionary, respectively. If None, this is - determined by the :func:`get_frame_types` method. + determined by the + :func:`~pypeit.metadata.PypeItMetaData.get_frame_types` method. Returns: - :class:`PypeItSetup`: The instance of the class. + :class:`~pypeit.pypeitsetup.PypeItSetup`: The instance of the class. """ # Configure me @@ -229,14 +232,13 @@ def build_fitstbl(self, strict=True): """ Construct the table with metadata for the frames to reduce. - Largely a wrapper for :func:`pypeit.core.load.create_fitstbl`. + Largely a wrapper for :class:`~pypeit.metadata.PypeItMetaData`. Args: strict (:obj:`bool`, optional): - Function will fault if :func:`fits.getheader` fails to - read the headers of any of the files in - :attr:`file_list`. Set to False to only report a - warning and continue. 
+ Function will fault if `astropy.io.fits.getheader`_ fails to + read the headers of any of the files in :attr:`file_list`. Set + to False to only report a warning and continue. Returns: `astropy.table.Table`_: Table with the metadata for each fits file @@ -259,7 +261,7 @@ def get_frame_types(self, flag_unknown=False): Include the frame types in the metadata table. This is mainly a wrapper for - :func:`PypeItMetaData.get_frame_types`. + :func:`~pypeit.metadata.PypeItMetaData.get_frame_types`. .. warning:: diff --git a/pypeit/pypmsgs.py b/pypeit/pypmsgs.py index d703ba953b..d927a427e5 100644 --- a/pypeit/pypmsgs.py +++ b/pypeit/pypmsgs.py @@ -43,16 +43,17 @@ class Messages: Parameters ---------- - log : str or None - Name of saved log file (no log will be saved if log=="") - verbosity : int (0,1,2) - Level of verbosity: - 0 = No output - 1 = Minimal output - 2 = All output (default) + log : str, optional + Name of saved log file (no log will be saved if log==""). If None, no + log is saved. + verbosity : int + Level of verbosity. Options are + - 0 = No output + - 1 = Minimal output + - 2 = All output (default) colors : bool - If true, the screen output will have colors, otherwise - normal screen output will be displayed + If true, the screen output will have colors, otherwise normal screen + output will be displayed """ def __init__(self, log=None, verbosity=None, colors=True): @@ -243,15 +244,23 @@ def pypeitpar_text(self, msglist): Parameters ---------- - msglist: list of str - A list containing the pypeit parameters. The last element of the list - must be the argument and the variable. For example, to print: + msglist: list + A list containing the pypeit parameter strings. The last element of + the list must be the argument and the variable. For example, to + print: - [sensfunc] - [[UVIS]] - polycorrect = False + .. code-block:: ini - you should set msglist = ['sensfunc', 'UVIS', 'polycorrect = False'] + [sensfunc] + [[UVIS]] + polycorrect = False + + you should set ``msglist = ['sensfunc', 'UVIS', 'polycorrect = False']``. + + Returns + ------- + parstring : str + The parameter string """ parstring = '\n' premsg = ' ' @@ -270,15 +279,19 @@ def pypeitpar(self, msglist): Parameters ---------- - msglist: list of str - A list containing the pypeit parameters. The last element of the list - must be the argument and the variable. For example, to print: + msglist: list + A list containing the pypeit parameter strings. The last element of + the list must be the argument and the variable. For example, to + print: + + .. code-block:: ini - [sensfunc] - [[UVIS]] - polycorrect = False + [sensfunc] + [[UVIS]] + polycorrect = False + + you should set ``msglist = ['sensfunc', 'UVIS', 'polycorrect = False']``. - you should set msglist = ['sensfunc', 'UVIS', 'polycorrect = False'] """ premsg = ' ' for ll, lin in enumerate(msglist): @@ -392,3 +405,4 @@ def set_logfile_and_verbosity(self, scriptname, verbosity): logname = f"{scriptname}_{timestamp}.log" if verbosity == 2 else None # Set the verbosity in msgs self.reset(log=logname, verbosity=verbosity) + diff --git a/pypeit/sampling.py b/pypeit/sampling.py index 3f9d306a7f..7225eced59 100644 --- a/pypeit/sampling.py +++ b/pypeit/sampling.py @@ -23,7 +23,7 @@ def spectral_coordinate_step(wave, log=False, base=10.0): of the wavelength; otherwise, return the linear step in angstroms. Args: - wave (numpy.ndarray): Wavelength coordinates of each spectral + wave (`numpy.ndarray`_): Wavelength coordinates of each spectral channel in angstroms. 
log (bool): (**Optional**) Input spectrum has been sampled geometrically. @@ -51,7 +51,7 @@ def spectrum_velocity_scale(wave): log(angstrom). Args: - wave (numpy.ndarray): Wavelength coordinates of each spectral + wave (`numpy.ndarray`_): Wavelength coordinates of each spectral channel in angstroms. It is expected that the spectrum has been sampled geometrically @@ -74,9 +74,9 @@ def angstroms_per_pixel(wave, log=False, base=10.0, regular=True): as determined by assuming the coordinate is at its center. Args: - wave (`numpy.ndarray`): + wave (`numpy.ndarray`_): (Geometric) centers of the spectrum pixels in angstroms. - log (`numpy.ndarray`, optional): + log (`numpy.ndarray`_, optional): The vector is geometrically sampled. base (:obj:`float`, optional): Base of the logarithm used in the geometric sampling. @@ -84,7 +84,7 @@ def angstroms_per_pixel(wave, log=False, base=10.0, regular=True): The vector is regularly sampled. Returns: - numpy.ndarray: The angstroms per pixel. + `numpy.ndarray`_: The angstroms per pixel. """ if regular: ang_per_pix = spectral_coordinate_step(wave, log=log, base=base) @@ -103,7 +103,7 @@ def _pixel_centers(xlim, npix, log=False, base=10.0): sampled vector given first, last and number of pixels Args: - xlim (numpy.ndarray) : (Geometric) Centers of the first and last + xlim (`numpy.ndarray`_) : (Geometric) Centers of the first and last pixel in the vector. npix (int) : Number of pixels in the vector. log (bool) : (**Optional**) The input range is (to be) @@ -113,7 +113,7 @@ def _pixel_centers(xlim, npix, log=False, base=10.0): natural logarithm. Returns: - numpy.ndarray, float: A vector with the npix centres of the + `numpy.ndarray`_, float: A vector with the npix centres of the pixels and the sampling rate. If logarithmically binned, the sampling is the step in :math`\log x`. """ @@ -133,7 +133,7 @@ def _pixel_borders(xlim, npix, log=False, base=10.0): number of pixels Args: - xlim (numpy.ndarray) : (Geometric) Centers of the first and last + xlim (`numpy.ndarray`_) : (Geometric) Centers of the first and last pixel in the vector. npix (int) : Number of pixels in the vector. log (bool) : (**Optional**) The input range is (to be) @@ -143,7 +143,7 @@ def _pixel_borders(xlim, npix, log=False, base=10.0): natural logarithm. Returns: - numpy.ndarray, float: A vector with the (npix+1) borders of the + `numpy.ndarray`_, float: A vector with the (npix+1) borders of the pixels and the sampling rate. If logarithmically binned, the sampling is the step in :math`\log x`. """ @@ -163,7 +163,7 @@ def resample_vector_npix(outRange=None, dx=None, log=False, base=10.0, default=N Determine the number of pixels needed to resample a vector given first, last pixel and dx Args: - outRange (list or numpy.ndarray) : Two-element array with the + outRange (list, `numpy.ndarray`_) : Two-element array with the starting and ending x coordinate of the pixel centers to divide into pixels of a given width. If *log* is True, this must still be the linear value of the x coordinate, not @@ -176,7 +176,7 @@ def resample_vector_npix(outRange=None, dx=None, log=False, base=10.0, default=N returned if either *outRange* or *dx* are not provided. Returns: - int, numpy.ndarray: Returns two objects: The number of pixels to + :obj:`tuple`: Returns two objects: The number of pixels to cover *outRange* with pixels of width *dx* and the adjusted range such that number of pixels of size dx is the exact integer. 
@@ -203,9 +203,8 @@ class Resample: Resample regularly or irregularly sampled data to a new grid using integration. - This is a generalization of the routine - :func:`ppxf.ppxf_util.log_rebin` provided by Michele Cappellari in - the pPXF package. + This is a generalization of the routine ``ppxf_util.log_rebin`` from + the `ppxf`_ package by Michele Cappellari. The abscissa coordinates (`x`) or the pixel borders (`xBorders`) for the data (`y`) should be provided for irregularly sampled data. If @@ -263,24 +262,24 @@ class Resample: - Allow for a covariance matrix calculation. Args: - y (numpy.ndarray): + y (`numpy.ndarray`_): Data values to resample. Can be a numpy.ma.MaskedArray, and the shape can be 1 or 2D. If 1D, the shape must be :math:`(N_{\rm pix},)`; otherwise, it must be :math:`(N_y,N_{\rm pix})`. I.e., the length of the last axis must match the input coordinates. - e (numpy.ndarray, optional): + e (`numpy.ndarray`_, optional): Errors in the data that should be resampled. Can be a numpy.ma.MaskedArray, and the shape must match the input `y` array. These data are used to perform a nominal calculation of the error in the resampled array. - mask (numpy.ndarray, optional): + mask (`numpy.ndarray`_, optional): A boolean array (masked values are True) indicating values in `y` that should be ignored during the resampling. The mask used during the resampling is the union of this object and the masks of `y` and `e`, if they are provided as numpy.ma.MaskedArrays. - x (numpy.ndarray, optional): + x (`numpy.ndarray`_, optional): Abcissa coordinates for the data, which do not need to be regularly sampled. If the pixel borders are not provided, they are assumed to be half-way between adjacent pixels, and @@ -293,7 +292,7 @@ class Resample: A two-element array with the starting and ending value for the coordinates of the centers of the first and last pixels in y. Default is :math:`[0,N_{\rm pix}-1]`. - xBorders (numpy.ndarray, optional): + xBorders (`numpy.ndarray`_, optional): An array with the borders of each pixel that must have a length of :math:`N_{\rm pix}+1`. inLog (:obj:`bool`, optional): @@ -337,30 +336,30 @@ class Resample: interpolation between pixel samples. Attributes: - x (numpy.ndarray): + x (`numpy.ndarray`_): The coordinates of the function on input. - xborders (numpy.ndarray): + xborders (`numpy.ndarray`_): The borders of the input pixel samples. - y (numpy.ndarray): + y (`numpy.ndarray`_): The function to resample. - e (numpy.ndarray): + e (`numpy.ndarray`_): The 1-sigma errors in the function to resample. - m (numpy.ndarray): + m (`numpy.ndarray`_): The boolean mask for the input function. - outx (numpy.ndarray): + outx (`numpy.ndarray`_): The coordinates of the function on output. - outborders (numpy.ndarray): + outborders (`numpy.ndarray`_): The borders of the output pixel samples. - outy (numpy.ndarray): + outy (`numpy.ndarray`_): The resampled function. - oute (numpy.ndarray): + oute (`numpy.ndarray`_): The resampled 1-sigma errors. - outf (numpy.ndarray): + outf (`numpy.ndarray`_): The fraction of each output pixel that includes valid data from the input function. Raises: - ValueError: Raised if *y* is not of type numpy.ndarray, if *y* + ValueError: Raised if *y* is not of type `numpy.ndarray`_, if *y* is not one-dimensional, or if *xRange* is not provided and the input vector is logarithmically binned (see *inLog* above).
diff --git a/pypeit/scripts/__init__.py index e5acdffcd1..410cc4f6d7 100644 --- a/pypeit/scripts/__init__.py +++ b/pypeit/scripts/__init__.py @@ -31,7 +31,7 @@ from pypeit.scripts import parse_slits from pypeit.scripts import qa_html from pypeit.scripts import ql -from pypeit.scripts import ql_multislit +#from pypeit.scripts import ql_multislit from pypeit.scripts import run_pypeit from pypeit.scripts import sensfunc from pypeit.scripts import setup diff --git a/pypeit/scripts/chk_for_calibs.py index 9d7317dc7c..b7b16994e0 100644 --- a/pypeit/scripts/chk_for_calibs.py +++ b/pypeit/scripts/chk_for_calibs.py @@ -35,7 +35,7 @@ def main(args): args: Returns: - astropy.table.Table: + `astropy.table.Table`_: """ diff --git a/pypeit/scripts/chk_noise_1dspec.py index 7501af7d21..33123e5533 100644 --- a/pypeit/scripts/chk_noise_1dspec.py +++ b/pypeit/scripts/chk_noise_1dspec.py @@ -4,6 +4,7 @@ .. include common links, assuming primary doc root is up one directory .. include:: ../include/links.rst + """ import numpy as np diff --git a/pypeit/scripts/chk_noise_2dspec.py index 5f95b8ecdf..bb37af3452 100644 --- a/pypeit/scripts/chk_noise_2dspec.py +++ b/pypeit/scripts/chk_noise_2dspec.py @@ -4,6 +4,7 @@ .. include common links, assuming primary doc root is up one directory .. include:: ../include/links.rst + """ import numpy as np @@ -31,7 +32,7 @@ def plot(image:np.ndarray, chi_select:np.ndarray, flux_select:np.ndarray, """ Generate the plot Args: - image (np.ndarray): + image (`numpy.ndarray`_): Image of the slit to plot. chi_select (`numpy.ndarray`_): 2D array of the chi value for a selected part of the slit. @@ -39,7 +40,7 @@ def plot(image:np.ndarray, chi_select:np.ndarray, flux_select:np.ndarray, Array of wavelength of spectral features to be plotted. line_names (`numpy.ndarray`_): Array of names of spectral features to be plotted. - lbda_1d (np.ndarray): + lbda_1d (`numpy.ndarray`_): 1D array of the wavelength lbda_min (float): Minimum wavelength of the select pat of the slit diff --git a/pypeit/scripts/collate_1d.py index d4dad862f9..c7a5196f32 100644 --- a/pypeit/scripts/collate_1d.py +++ b/pypeit/scripts/collate_1d.py @@ -38,35 +38,40 @@ def get_report_metadata(object_header_keys, spec_obj_keys, file_info): """ - Gets the metadata from a SourceObject instance used building a report - on the results of collation. It is intended to be wrapped in by functools - partial object that passes in object_header_keys and spec_obj_keys. file_info - is then passed as in by the :obj:`pypeit.archive.ArchiveMetadata` object. - Unlike the other get_*_metadata functions, this is not used for archiving; it is - used for reporting on the results of collating. - - If another type of file is added to the ArchiveMetadata object, the file_info - argument will not be a SourceObject, In this case, a list of ``None`` values are - returned. + Gets the metadata from a :class:`~pypeit.core.collate.SourceObject` instance + used when building a report on the results of collation. It is intended to be + wrapped by a functools partial object that passes in object_header_keys and + spec_obj_keys. file_info is then passed in by the + :class:`~pypeit.archive.ArchiveMetadata` object. Unlike the other + get_*_metadata functions, this is not used for archiving; it is used for + reporting on the results of collating. 
+ + If another type of file is added to the ArchiveMetadata object, the + file_info argument will not be a :class:`~pypeit.core.collate.SourceObject`. + In this case, a list of ``None`` values is returned. Args: object_header_keys (list of str): The keys to read fom the spec1d headers from the SourceObject. spec_obj_keys (list of str): - The keys to read from the (:obj:`pypeit.specobj.SpecObj`) objects in the SourceObject. + The keys to read from the (:class:`~pypeit.specobj.SpecObj`) objects in + the SourceObject. - file_info (:obj:`pypeit.scripts.collate_1d.SourceObject`)): - The source object containing the headers, filenames and SpecObj information for a coadd output file. + file_info (:class:`~pypeit.core.collate.SourceObject`): + The source object containing the headers, filenames and SpecObj + information for a coadd output file. Returns: tuple: A tuple of two lists:. - **data_rows** (:obj:`list` of :obj:`list`): The metadata rows built from the source object. + - **data_rows** (:obj:`list` of :obj:`list`): The metadata rows + built from the source object. + + - **files_to_copy** (iterable): A list of tuples of files to copy. + Because this function is not used for archiving data, this is + always ``None``. - **files_to_copy** (iterable): - An list of tuples of files to copy. Because this function is not used for - archving data, this is always ``None``. """ if not isinstance(file_info, SourceObject): @@ -151,11 +156,13 @@ def exclude_source_objects(source_objects, exclude_map, par): Returns: tuple: Tuple containing two lists: - **filtered_objects** (:obj:`list`): A list of :class:`~pypeit.core.collate.SourceObject` - with any excluded ones removed. + - **filtered_objects** (:obj:`list`): A list of + :class:`~pypeit.core.collate.SourceObject` with any excluded ones + removed. + + - **missing_archive_msgs** (:obj:`list`): A list of messages + explaining why some source objects were excluded. - **missing_archive_msgs** (:obj:`list`): A list of messages explaining why some source - objects were excluded. """ filtered_objects = [] excluded_messages= [] @@ -208,9 +215,9 @@ def flux(par, spectrograph, spec1d_files, failed_fluxing_msgs): Flux calibrate spec1d files using archived sens func files. Args: - par (`obj`:pypeit.par.pypeitpar.PypeItPar): + par (:class:`~pypeit.par.pypeitpar.PypeItPar`): Parameters for collating, fluxing, and coadding. - spectrograph (`obj`:pypeit.spectrographs.spectrograph): + spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): Spectrograph for the files to flux. spec1d_files (list of str): List of spec1d files to flux calibrate. @@ -358,9 +365,11 @@ def coadd(par, coaddfile, source): """coadd the spectra for a given source. Args: - par (`obj`:Collate1DPar): Paramters for the coadding - source (`obj`:SourceObject): The SourceObject with information on - which files and spectra to coadd. + par (:class:`~pypeit.par.pypeitpar.Collate1DPar`): + Parameters for the coadding + source (:class:`~pypeit.core.collate.SourceObject`): + The SourceObject with information on which files and spectra to + coadd. """ # Set destination file for coadding par['coadd1d']['coaddfile'] = coaddfile @@ -433,7 +442,7 @@ def write_warnings(par, excluded_obj_msgs, failed_source_msgs, spec1d_failure_ms failed_source_msgs (:obj:`list` of :obj:`str`): Messages about which objects failed coadding and why. - spec1d_failure_msgs (:obj:)`list` of :obj:`str`): + spec1d_failure_msgs (:obj:`list` of :obj:`str`): Messages about failures with spec1d files and why.
""" diff --git a/pypeit/scripts/ql.py b/pypeit/scripts/ql.py index fc077a5b3b..f1a2224d3b 100644 --- a/pypeit/scripts/ql.py +++ b/pypeit/scripts/ql.py @@ -32,6 +32,7 @@ #. Consider not writing out but return instead .. include:: ../include/links.rst + """ from pathlib import Path import time @@ -143,7 +144,7 @@ def quicklook_regroup(fitstbl): **This function directly alters the input object!** Args: - fitstbl (:class:~pypeit.metadata.PypeItMetaData`): + fitstbl (:class:`~pypeit.metadata.PypeItMetaData`): Metadata table for frames to be processed. """ comb_strt = 0 @@ -228,7 +229,8 @@ def generate_sci_pypeitfile(redux_path:str, ref_calib_dir (`Path`_): Path with the pre-processed calibration frames. A symlink will be created to this directory from within ``redux_path`` to mimic the - location of the calibrations expected by :class:`~pypeit.PypeIt`. + location of the calibrations expected by + :class:`~pypeit.pypeit.PypeIt`. ps_sci (:class:`~pypeit.pypeitsetup.PypeItSetup`): Setup object for the science frame(s) only. det (:obj:`str`, optional): diff --git a/pypeit/scripts/scriptbase.py b/pypeit/scripts/scriptbase.py index 4c935d75a6..0e40eef31d 100644 --- a/pypeit/scripts/scriptbase.py +++ b/pypeit/scripts/scriptbase.py @@ -3,6 +3,7 @@ .. include common links, assuming primary doc root is up one directory .. include:: ../include/links.rst + """ from IPython import embed diff --git a/pypeit/sensfunc.py b/pypeit/sensfunc.py index 9c2f3971e2..33937efba5 100644 --- a/pypeit/sensfunc.py +++ b/pypeit/sensfunc.py @@ -59,7 +59,7 @@ class SensFunc(datamodel.DataContainer): File name for the sensitivity function data. par (:class:`~pypeit.par.pypeitpar.SensFuncPar`): The parameters required for the sensitivity function computation. - par_fluxcalib (:class:`~pypeit.par.pypeitpar.FluxCalibPar`, optional): + par_fluxcalib (:class:`~pypeit.par.pypeitpar.FluxCalibratePar`, optional): The parameters required for flux calibration. These are only used for flux calibration of the standard star spectrum for the QA plot. If None, defaults will be used. @@ -799,13 +799,14 @@ def sensfunc_weights(cls, sensfile, waves, debug=False, extrap_sens=True): Args: sensfile (str): the name of your fits format sensfile - waves (ndarray): (nspec, norders, nexp) or (nspec, norders) - wavelength grid for your output weights + waves (`numpy.ndarray`_): + wavelength grid for your output weights. Shape is (nspec, + norders, nexp) or (nspec, norders). debug (bool): default=False show the weights QA Returns: - ndarray: sensfunc weights evaluated on the input waves + `numpy.ndarray`_: sensfunc weights evaluated on the input waves wavelength grid """ sens = cls.from_file(sensfile) diff --git a/pypeit/slittrace.py b/pypeit/slittrace.py index 941fa530e5..7ff941178c 100644 --- a/pypeit/slittrace.py +++ b/pypeit/slittrace.py @@ -72,8 +72,10 @@ class SlitTraceSet(calibframe.CalibFrame): .. 
include:: ../include/class_datamodel_slittraceset.rst Attributes: - left_flexure (`numpy.ndarray`_): Convenient spot to hold flexure corrected left - right_flexure (`numpy.ndarray`_): Convenient spot to hold flexure corrected right + left_flexure (`numpy.ndarray`_): + Convenient spot to hold flexure corrected left + right_flexure (`numpy.ndarray`_): + Convenient spot to hold flexure corrected right """ calib_type = 'Slits' """Name for type of calibration frame.""" @@ -420,12 +422,12 @@ def get_radec_image(self, wcs, alignSplines, tilts, initial=True, flexure=None): Parameters ---------- - wcs : astropy.wcs + wcs : `astropy.wcs.WCS`_ The World Coordinate system of a science frame alignSplines : :class:`pypeit.alignframe.AlignmentSplines` An instance of the AlignmentSplines class that allows one to build and transform between detector pixel coordinates and WCS pixel coordinates. - tilts : `numpy.ndarray` + tilts : `numpy.ndarray`_ Spectral tilts. initial : bool Select the initial slit edges? @@ -434,12 +436,16 @@ def get_radec_image(self, wcs, alignSplines, tilts, initial=True, flexure=None): Returns ------- - tuple : There are three elements in the tuple. The first two are 2D numpy arrays - of shape (nspec, nspat), where the first ndarray is the RA image, and the - second ndarray is the DEC image. RA and DEC are in units degrees. The third - element of the tuple stores the minimum and maximum difference (in pixels) - between the WCS reference (usually the centre of the slit) and the edges of - the slits. The third array has a shape of (nslits, 2). + raimg : `numpy.ndarray`_ + Image with the RA coordinates of each pixel in degrees. Shape is + (nspec, nspat). + decimg : `numpy.ndarray`_ + Image with the DEC coordinates of each pixel in degrees. Shape is + (nspec, nspat). + minmax : `numpy.ndarray`_ + The minimum and maximum difference (in pixels) between the WCS + reference (usually the centre of the slit) and the edges of the + slits. Shape is (nslits, 2). """ # Initialise the output raimg = np.zeros((self.nspec, self.nspat)) @@ -1300,16 +1306,18 @@ def get_maskdef_extract_fwhm(self, sobjs, platescale, fwhm_parset, find_fwhm): will be computed using the average fwhm of the detected objects. Args: sobjs (:class:`pypeit.specobjs.SpecObjs`): List of SpecObj that have been found and traced. platescale (:obj:`float`): Platescale. fwhm_parset (:obj:`float`, optional): - Parset that guides the determination of the fwhm of the maskdef_extract objects. - If None (default) the fwhm are computed as the averaged from the detected objects, + :class:`~pypeit.par.parset.ParSet` that guides the determination + of the fwhm of the maskdef_extract objects. If None (default) + the fwhm is computed as the average fwhm of the detected objects, if it is a number it will be adopted as the fwhm. find_fwhm (:obj:`float`): - Initial guess of the objects fwhm in pixels (used in object finding) + Initial guess of the objects fwhm in pixels (used in object + finding) Returns: :obj:`float`: FWHM in pixels to be used in the optimal extraction @@ -1381,10 +1389,10 @@ def user_mask(self, det, user_slits): def mask_flats(self, flatImages): """ - Mask from a :class:`pypeit.flatfield.Flats` object + Mask based on a :class:`~pypeit.flatfield.FlatImages` object.
Args: - flatImages (:class:`pypeit.flatfield.FlatImages`): + flatImages (:class:`~pypeit.flatfield.FlatImages`): """ # Loop on all the FLATFIELD BPM keys diff --git a/pypeit/spec2dobj.py b/pypeit/spec2dobj.py index 41c3500b35..fd6906288d 100644 --- a/pypeit/spec2dobj.py +++ b/pypeit/spec2dobj.py @@ -44,7 +44,7 @@ class Spec2DObj(datamodel.DataContainer): Args: Attributes: - head0 (`astropy.fits.Header`): + head0 (`astropy.io.fits.Header`_): Primary header if instantiated from a FITS file """ @@ -259,7 +259,7 @@ def _base_header(self, hdr=None): the header. Args: - hdr (`astropy.io.fits.Header`, optional): + hdr (`astropy.io.fits.Header`_, optional): Header object to update. The object is modified in-place and also returned. If None, an empty header is instantiated, edited, and returned. @@ -605,9 +605,9 @@ def write_to_fits(self, outfile, pri_hdr=None, update_det=None, Args: outfile (:obj:`str`, `Path`_): Output filename - pri_hdr (:class:`astropy.io.fits.Header`, optional): - Header to be used in lieu of default - Usually generated by :func:`pypeit,spec2dobj.AllSpec2DObj.build_primary_hdr` + pri_hdr (`astropy.io.fits.Header`_, optional): + Header to be used in lieu of default Usually generated by + :func:`~pypeit.spec2dobj.AllSpec2DObj.build_primary_hdr` slitspatnum (:obj:`str` or :obj:`list`, optional): Restricted set of slits for reduction If provided, do not clobber the existing file but only update @@ -615,7 +615,7 @@ def write_to_fits(self, outfile, pri_hdr=None, update_det=None, pri_hdr(): Baseline primary header. If None, initial primary header is empty. Usually generated by - :func:`pypeit,spec2dobj.AllSpec2DObj.build_primary_hdr` + :func:`~pypeit.spec2dobj.AllSpec2DObj.build_primary_hdr` update_det (:obj:`list`, optional): If the output file already exists, this sets the list of detectors/mosaics to update with the data in this object. If diff --git a/pypeit/specobj.py b/pypeit/specobj.py index 0836a50635..d3c840c6b3 100644 --- a/pypeit/specobj.py +++ b/pypeit/specobj.py @@ -481,9 +481,9 @@ def apply_flux_calib(self, wave_zp, zeropoint, exptime, tellmodel=None, extinct_ FLAM, FLAM_SIG, and FLAM_IVAR are generated Args: - wave_zp (float array): + wave_zp (`numpy.ndarray`_): Zeropoint wavelength array - zeropoint (float array): + zeropoint (`numpy.ndarray`_): zeropoint array exptime (float): Exposure time diff --git a/pypeit/specobjs.py b/pypeit/specobjs.py index 0acb0c0abe..37534b218d 100644 --- a/pypeit/specobjs.py +++ b/pypeit/specobjs.py @@ -46,7 +46,7 @@ class SpecObjs: Baseline header to use Attributes: - summary (astropy.table.Table): + summary (`astropy.table.Table`_): Summary table (?) """ version = '1.0.0' @@ -179,7 +179,7 @@ def unpack_object(self, ret_flam=False, extract_type='OPT'): """ Utility function to unpack the sobjs for one object and return various numpy arrays describing the spectrum and meta - data. The user needs to already have trimmed the Specobjs to + data. The user needs to already have trimmed the :class:`SpecObjs` to the relevant indices for the object. Args: @@ -257,7 +257,7 @@ def unpack_object(self, ret_flam=False, extract_type='OPT'): def get_std(self, multi_spec_det=None): """ - Return the standard star from this Specobjs. For MultiSlit this + Return the standard star from this :class:`SpecObjs`. For MultiSlit this will be a single specobj in SpecObjs container, for Echelle it will be the standard for all the orders. @@ -997,10 +997,11 @@ def get_std_trace(detname, std_outfile, chk_version=True): 1-indexed detector(s) to process. 
std_outfile (:obj:`str`): Filename with the standard star spec1d file. Can be None. + Returns: - `numpy.ndarray`_: Trace of the standard star on input detector. - Will be None if ``std_outfile`` is None, or if the selected detector/mosaic is not available - in the provided spec1d file. + `numpy.ndarray`_: Trace of the standard star on input detector. Will + be None if ``std_outfile`` is None, or if the selected detector/mosaic + is not available in the provided spec1d file. """ sobjs = SpecObjs.from_fitsfile(std_outfile, chk_version=chk_version) @@ -1038,12 +1039,12 @@ def lst_to_array(lst, mask=None): Args: lst : list - Should be number or Quantities - mask (ndarray of bool, optional): Limit to a subset of the list. True=good + Should be number or Quantities + mask (`numpy.ndarray`_, optional): + Boolean array used to limit to a subset of the list. True=good Returns: - ndarray or Quantity array: Converted list - + `numpy.ndarray`_, `astropy.units.Quantity`_: Converted list """ if mask is None: mask = np.array([True]*len(lst)) diff --git a/pypeit/spectrographs/bok_bc.py b/pypeit/spectrographs/bok_bc.py index 7884d8d956..bbd206da8e 100644 --- a/pypeit/spectrographs/bok_bc.py +++ b/pypeit/spectrographs/bok_bc.py @@ -278,11 +278,6 @@ def bpm(self, filename, det, shape=None, msbias=None): """ Generate a default bad-pixel mask. - Even though they are both optional, either the precise shape for - the image (``shape``) or an example file that can be read to get - the shape (``filename`` using :func:`get_image_shape`) *must* be - provided. - Args: filename (:obj:`str` or None): An example file to use to get the image shape. diff --git a/pypeit/spectrographs/gemini_gnirs.py b/pypeit/spectrographs/gemini_gnirs.py index 74692cab57..6c6fae1779 100644 --- a/pypeit/spectrographs/gemini_gnirs.py +++ b/pypeit/spectrographs/gemini_gnirs.py @@ -690,7 +690,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): Change in wavelength per spectral pixel. Returns: - `astropy.wcs.wcs.WCS`_: The world-coordinate system. + `astropy.wcs.WCS`_: The world-coordinate system. """ msgs.info("Calculating the WCS") # Get the x and y binning factors, and the typical slit length diff --git a/pypeit/spectrographs/gtc_osiris.py b/pypeit/spectrographs/gtc_osiris.py index 6efe17ad99..926eeb4953 100644 --- a/pypeit/spectrographs/gtc_osiris.py +++ b/pypeit/spectrographs/gtc_osiris.py @@ -500,7 +500,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): Change in wavelength per spectral pixel. Returns: - `astropy.wcs.wcs.WCS`_: The world-coordinate system. + `astropy.wcs.WCS`_: The world-coordinate system. """ msgs.info("Calculating the WCS") # Get the x and y binning factors, and the typical slit length diff --git a/pypeit/spectrographs/keck_kcwi.py b/pypeit/spectrographs/keck_kcwi.py index 1e80e16a75..895d17d71b 100644 --- a/pypeit/spectrographs/keck_kcwi.py +++ b/pypeit/spectrographs/keck_kcwi.py @@ -851,7 +851,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): the platescale will be used. Returns: - `astropy.wcs.wcs.WCS`_: The world-coordinate system. + `astropy.wcs.WCS`_: The world-coordinate system. 
""" msgs.info("Generating KCWI WCS") # Get the x and y binning factors, and the typical slit length diff --git a/pypeit/spectrographs/spectrograph.py b/pypeit/spectrographs/spectrograph.py index 637943fb02..8d2207553c 100644 --- a/pypeit/spectrographs/spectrograph.py +++ b/pypeit/spectrographs/spectrograph.py @@ -543,11 +543,6 @@ def bpm(self, filename, det, shape=None, msbias=None): """ Generate a default bad-pixel mask. - Even though they are both optional, either the precise shape for - the image (``shape``) or an example file that can be read to get - the shape (``filename`` using :func:`get_image_shape`) *must* be - provided. - Args: filename (:obj:`str`): An example file to use to get the image shape. Can be None. @@ -1473,7 +1468,7 @@ def get_wcs(self, hdr, slits, platescale, wave0, dwv, spatial_scale=None): the platescale will be used. Returns: - `astropy.wcs.wcs.WCS`_: The world-coordinate system. + `astropy.wcs.WCS`_: The world-coordinate system. """ msgs.warn("No WCS setup for spectrograph: {0:s}".format(self.name)) return None diff --git a/pypeit/specutils/pypeit_loaders.py b/pypeit/specutils/pypeit_loaders.py index b7cd629c99..3cd8cee4c8 100644 --- a/pypeit/specutils/pypeit_loaders.py +++ b/pypeit/specutils/pypeit_loaders.py @@ -101,7 +101,7 @@ def pypeit_spec1d_loader(filename, extract=None, fluxed=True, **kwargs): Returns ------- - spec : SpectrumList + spec : `specutils.SpectrumList`_ Contains all spectra in the PypeIt spec1d file """ # Try to load the file and ignoring any version mismatch @@ -152,7 +152,7 @@ def pypeit_onespec_loader(filename, grid=False, **kwargs): Returns ------- - spec : Spectrum1D + spec : `specutils.Spectrum1D`_ Spectrum in the PypeIt OneSpec file """ # Try to load the file and ignoring any version mismatch diff --git a/pypeit/tracepca.py b/pypeit/tracepca.py index a77b6fe3a2..2f59ff6e69 100644 --- a/pypeit/tracepca.py +++ b/pypeit/tracepca.py @@ -29,9 +29,9 @@ class TracePCA(DataContainer): Class to build and interact with PCA model of traces. This is primarily a container class for the results of - :func:`pypeit.core.pca.pca_decomposition`, - :func:`pypeit.core.pca.fit_pca_coefficients`, and - :func:`pypeit.core.pca.pca_predict`. + :func:`~pypeit.core.pca.pca_decomposition`, + :func:`~pypeit.core.pca.fit_pca_coefficients`, and + :func:`~pypeit.core.pca.pca_predict`. The datamodel attributes are: @@ -45,13 +45,13 @@ class TracePCA(DataContainer): other keyword arguments are ignored. npca (:obj:`bool`, optional): The number of PCA components to keep. See - :func:`pypeit.core.pca.pca_decomposition`. + :func:`~pypeit.core.pca.pca_decomposition`. pca_explained_var (:obj:`float`, optional): The percentage (i.e., not the fraction) of the variance in the data accounted for by the PCA used to truncate the number of PCA coefficients to keep (see `npca`). Ignored if `npca` is provided directly. See - :func:`pypeit.core.pca.pca_decomposition`. + :func:`~pypeit.core.pca.pca_decomposition`. reference_row (:obj:`int`, optional): The row (spectral position) in `trace_cen` to use as the reference coordinate system for the PCA. If None, set to @@ -62,7 +62,7 @@ class TracePCA(DataContainer): trace},)`. If None, the reference coordinate system is defined by the value of `trace_cen` at the spectral position defined by `reference_row`. See the `mean` - argument of :func:`pypeit.core.pca.pca_decomposition`. + argument of :func:`~pypeit.core.pca.pca_decomposition`. 
""" version = '1.1.0' @@ -132,13 +132,13 @@ def decompose(self, trace_cen, npca=None, pca_explained_var=99.0, reference_row= N_{\rm trace})`. Cannot be None. npca (:obj:`bool`, optional): The number of PCA components to keep. See - :func:`pypeit.core.pca.pca_decomposition`. + :func:`~pypeit.core.pca.pca_decomposition`. pca_explained_var (:obj:`float`, optional): The percentage (i.e., not the fraction) of the variance in the data accounted for by the PCA used to truncate the number of PCA coefficients to keep (see `npca`). Ignored if `npca` is provided directly. See - :func:`pypeit.core.pca.pca_decomposition`. + :func:`~pypeit.core.pca.pca_decomposition`. reference_row (:obj:`int`, optional): The row (spectral position) in `trace_cen` to use as the reference coordinate system for the PCA. If None, @@ -150,7 +150,7 @@ def decompose(self, trace_cen, npca=None, pca_explained_var=99.0, reference_row= coordinate system is defined by the value of `trace_cen` at the spectral position defined by `reference_row`. See the `mean` argument of - :func:`pypeit.core.pca.pca_decomposition`. + :func:`~pypeit.core.pca.pca_decomposition`. """ if trace_cen is None: raise ValueError('Must provide traces to construct the PCA.') @@ -197,7 +197,7 @@ def _reinit(self): def build_interpolator(self, order, ivar=None, weights=None, function='polynomial', lower=3.0, upper=3.0, maxrej=1, maxiter=25, minx=None, maxx=None, debug=False): """ - Wrapper for :func:`fit_pca_coefficients` that uses class + Wrapper for :func:`~pypeit.core.pca.fit_pca_coefficients` that uses class attributes and saves the input parameters. """ if self.is_empty: @@ -260,7 +260,7 @@ def _parse(cls, hdu, hdu_prefix=None, **kwargs): """ Parse the data from the provided HDU. - See :func:`pypeit.datamodel.DataContainer._parse` for the + See :func:`~pypeit.datamodel.DataContainer._parse` for the argument descriptions. """ # Run the default parser to get most of the data @@ -288,7 +288,7 @@ def from_dict(cls, d=None): Instantiate from a dictionary. This is a basic wrapper for - :class:`pypeit.datamodel.DataContainer.from_dict` that + :class:`~pypeit.datamodel.DataContainer.from_dict` that appropriately toggles :attr:`is_empty`. """ self = super(TracePCA, cls).from_dict(d=d) @@ -334,13 +334,13 @@ def pca_trace_object(trace_cen, order=None, trace_bpm=None, min_length=0.6, npca decomposition. npca (:obj:`bool`, optional): The number of PCA components to keep. See - :func:`pypeit.core.pca.pca_decomposition`. + :func:`~pypeit.core.pca.pca_decomposition`. pca_explained_var (:obj:`float`, optional): The percentage (i.e., not the fraction) of the variance in the data accounted for by the PCA used to truncate the number of PCA coefficients to keep (see `npca`). Ignored if `npca` is provided directly. See - :func:`pypeit.core.pca.pca_decomposition`. + :func:`~pypeit.core.pca.pca_decomposition`. reference_row (:obj:`int`, optional): The row (spectral position) in `trace_cen` to use as the reference coordinate system for the PCA. If None, set to @@ -356,26 +356,26 @@ def pca_trace_object(trace_cen, order=None, trace_bpm=None, min_length=0.6, npca Minimum and maximum values used to rescale the independent axis data. If None, the minimum and maximum values of `coo` are used. See - :func:`utils.robust_polyfit_djs`. + :func:`~pypeit.core.fitting.robust_fit`. trace_wgt (`numpy.ndarray`_, optional): Weights to apply to the PCA coefficient of each trace during the fit. Weights are independent of the PCA component. 
See `weights` parameter of - :func:`pypeit.core.pca.fit_pca_coefficients`. Shape must + :func:`~pypeit.core.pca.fit_pca_coefficients`. Shape must be :math:`(N_{\rm trace},)`. function (:obj:`str`, optional): Type of function used to fit the data. lower (:obj:`float`, optional): Number of standard deviations used for rejecting data **below** the mean residual. If None, no rejection is - performed. See :func:`utils.robust_polyfit_djs`. + performed. See :func:`~pypeit.core.fitting.robust_fit`. upper (:obj:`float`, optional): Number of standard deviations used for rejecting data **above** the mean residual. If None, no rejection is - performed. See :func:`utils.robust_polyfit_djs`. + performed. See :func:`~pypeit.core.fitting.robust_fit`. maxrej (:obj:`int`, optional): Maximum number of points to reject during fit iterations. - See :func:`utils.robust_polyfit_djs`. + See :func:`~pypeit.core.fitting.robust_fit`. maxiter (:obj:`int`, optional): Maximum number of rejection iterations allows. To force no rejection iterations, set to 0. diff --git a/pypeit/utils.py b/pypeit/utils.py index a23b831a2d..8d3bce10ea 100644 --- a/pypeit/utils.py +++ b/pypeit/utils.py @@ -531,15 +531,15 @@ def nan_mad_std(data, axis=None, func=None): Args: data (array-like): Data array or object that can be converted to an array. - axis (int, sequence of int, None, optional): + axis (int, tuple, optional): Axis along which the robust standard deviations are computed. The default (`None`) is to compute the robust standard deviation of the flattened array. Returns: - float, `numpy.ndarray`: The robust standard deviation of the + float, `numpy.ndarray`_: The robust standard deviation of the input data. If ``axis`` is `None` then a scalar will be - returned, otherwise a `~numpy.ndarray` will be returned. + returned, otherwise a `numpy.ndarray`_ will be returned. """ return stats.mad_std(data, axis=axis, func=func, ignore_nan=True) @@ -930,17 +930,19 @@ def fast_running_median(seq, window_size): scipy.ndimage.median_filter with the reflect boundary condition, but is ~ 100 times faster. - Args: - seq (list or 1-d numpy array of numbers): - window_size (int): size of running window. - - Returns: - `numpy.ndarray`_: median filtered values - Code originally contributed by Peter Otten, made to be consistent with scipy.ndimage.median_filter by Joe Hennawi. Now makes use of the Bottleneck library https://pypi.org/project/Bottleneck/. + + Args: + seq (list, `numpy.ndarray`_): + 1D array of values + window_size (int): + size of running window. + + Returns: + `numpy.ndarray`_: median filtered values """ # Enforce that the window_size needs to be smaller than the sequence, otherwise we get arrays of the wrong size # upon return (very bad). Added by JFH. Should we print out an error here? @@ -1241,7 +1243,7 @@ def yamlify(obj, debug=False): obj : :class:`object` An object suitable for yaml serialization. For example `numpy.ndarray`_ is converted to :class:`list`, - :class:`numpy.int64` is converted to :class:`int`, etc. + ``numpy.int64`` is converted to :class:`int`, etc. """ # TODO: Change to np.floating? if isinstance(obj, (np.float64, np.float32)): @@ -1421,7 +1423,7 @@ def lhs(n, samples=None, criterion=None, iterations=None, seed_or_rng=12345): Returns ------- - H : 2d-array + H : `numpy.ndarray`_ An n-by-samples design matrix that has been normalized so factor values are uniformly spaced between zero and one. 
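As a quick illustration of the two ``pypeit.utils`` routines whose docstrings are cleaned up in the hunks above, here is a minimal sketch; the input arrays are made up, and only the signatures documented above (``nan_mad_std(data, axis=None, func=None)`` and ``fast_running_median(seq, window_size)``) are assumed.

.. code-block:: python

    import numpy as np
    from pypeit import utils

    # NaN-tolerant robust scatter estimate; axis=None returns a scalar
    data = np.array([1.0, 1.2, 0.9, np.nan, 1.1])
    sigma = utils.nan_mad_std(data)

    # Fast running median of a 1D sequence with a 5-pixel window
    seq = np.arange(20, dtype=float)
    smooth = utils.fast_running_median(seq, 5)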
@@ -1588,12 +1590,12 @@ def _pdist(x): Parameters ---------- - x : 2d-array + x : `numpy.ndarray`_ An m-by-n array of scalars, where there are m points in n dimensions. Returns ------- - d : array + d : `numpy.ndarray`_ A 1-by-b array of scalars, where b = m*(m - 1)/2. This array contains all the pair-wise point distances, arranged in the order (1, 0), (2, 0), ..., (m-1, 0), (2, 1), ..., (m-1, 1), ..., (m-1, m-2). diff --git a/pypeit/wavecalib.py b/pypeit/wavecalib.py index 98b9d6220c..6d77918174 100644 --- a/pypeit/wavecalib.py +++ b/pypeit/wavecalib.py @@ -409,18 +409,18 @@ class BuildWaveCalib: Class to guide wavelength calibration Args: - msarc (:class:`pypeit.images.pypeitimage.PypeItImage`): + msarc (:class:`~pypeit.images.pypeitimage.PypeItImage`): Arc image, created by the ArcImage class - slits (:class:`pypeit.slittrace.SlitTraceSet`): + slits (:class:`~pypeit.slittrace.SlitTraceSet`): Slit edges - spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): + spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): The `Spectrograph` instance that sets the instrument used to take the observations. Used to set :attr:`spectrograph`. - par (:class:`pypeit.par.pypeitpar.WaveSolutionPar`): - The parameters used for the wavelength solution - Uses ['calibrations']['wavelengths'] - meta_dict (dict: optional): + par (:class:`~pypeit.par.pypeitpar.WavelengthSolutionPar`): + The parameters used for the wavelength solution. + Uses ``['calibrations']['wavelengths']``. + meta_dict (dict, optional): Dictionary containing meta information required for wavelength calibration. Specifically for non-fixed format echelles this dict must contain the following keys: @@ -431,20 +431,20 @@ class BuildWaveCalib: det (int, optional): Detector number - msbpm (ndarray, optional): + msbpm (`numpy.ndarray`_, optional): Bad pixel mask image qa_path (str, optional): For QA Attributes: - steps : list + steps (list): List of the processing steps performed - wv_calib : dict + wv_calib (dict): Primary output. Keys 0, 1, 2, 3 are solution for individual previously slit steps - arccen (ndarray): + arccen (`numpy.ndarray`_): (nwave, nslit) Extracted arc(s) down the center of the slit(s) - maskslits : ndarray (nslit); bool + maskslits (`numpy.ndarray`_): Slits to ignore because they were not extracted. WARNING: Outside of this Class, it is best to regenerate the mask using make_maskslits() @@ -454,7 +454,8 @@ class BuildWaveCalib: require that we write it to disk with self.msarc.image nonlinear_counts (float): Specifies saturation level for the arc lines - wvc_bpm (`numpy.ndarray`_): Mask for slits attempted to have a wv_calib solution + wvc_bpm (`numpy.ndarray`_): + Mask for slits attempted to have a wv_calib solution """ # TODO: Is this used anywhere? @@ -756,9 +757,10 @@ def echelle_2dfit(self, wv_calib, debug=False, skip_QA=False): Flag to skip construction of the nominal QA plots. Returns: - list: list of :class:`pypeit.fitting.PypeItFit`: objects containing information from 2-d fit. - Frequently a list of 1 fit. The main exception is for - a mosaic when one sets echelle_separate_2d=True + list : List of :class:`~pypeit.core.fitting.PypeItFit` objects + containing information from 2-d fit. Frequently a list of 1 fit. 
+ The main exception is for a mosaic when one sets + echelle_separate_2d=True """ if self.spectrograph.pypeline != 'Echelle': msgs.error('Cannot execute echelle_2dfit for a non-echelle spectrograph.') @@ -857,18 +859,19 @@ def extract_arcs(self, slitIDs=None): Wrapper to arc.get_censpec() - Args: - slitIDs (:obj:`list`, optional): - A list of the slit IDs to extract (if None, all slits will be extracted) - - Returns: - tuple: Returns the following: - - self.arccen: ndarray, (nspec, nslit): arc spectrum for - all slits - - self.arc_maskslit: ndarray, bool (nsit): boolean array - containing a mask indicating which slits are good - True = masked (bad) - + Parameters + ---------- + slitIDs : :obj:`list`, optional + A list of the slit IDs to extract (if None, all slits will be + extracted) + + Returns + ------- + arccen : `numpy.ndarray`_ + arc spectrum for all slits, shape is (nspec, nslit): + wvc_bpm : `numpy.ndarray`_ + boolean array containing a mask indicating which slits are good. True + = masked (bad). """ # Do it on the slits not masked in self.slitmask arccen, arccen_bpm, arc_maskslit = arc.get_censpec( diff --git a/pypeit/wavemodel.py b/pypeit/wavemodel.py index 7e7c0bbd10..58fed838b2 100644 --- a/pypeit/wavemodel.py +++ b/pypeit/wavemodel.py @@ -1,5 +1,8 @@ """ Module to create models of arc lines. + +.. include:: ../include/links.rst + """ import os @@ -31,7 +34,7 @@ def blackbody(wavelength, T_BB=250., debug=False): Parameters ---------- - wavelength : np.array + wavelength : `numpy.ndarray`_ wavelength vector in microns T_BB : float black body temperature in Kelvin. Default is set to: @@ -39,10 +42,10 @@ def blackbody(wavelength, T_BB=250., debug=False): Returns ------- - blackbody : np.array + blackbody : `numpy.ndarray`_ spectral radiance of the black body in cgs units: B_lambda = 2.*h*c^2/lambda^5.*(1./(exp(h*c/(lambda*k_b*T_BB))-1.) - blackbody_counts : np.array + blackbody_counts : `numpy.ndarray`_ Same as above but in flux density """ @@ -85,10 +88,12 @@ def addlines2spec(wavelength, wl_line, fl_line, resolution, Parameters ---------- - wavelength : np.array + wavelength : `numpy.ndarray`_ wavelength vector of the input spectrum - wl_line, fl_line : np.arrays - wavelength and flux of each individual line + wl_line : `numpy.ndarray`_ + wavelength of each individual line + fl_line : `numpy.ndarray`_ + flux of each individual line resolution : float resolution of the spectrograph. In other words, the lines will have a FWHM equal to: @@ -96,12 +101,12 @@ def addlines2spec(wavelength, wl_line, fl_line, resolution, scale_spec : float rescale all the normalization of the final spectrum. Default scale_spec=1. - debug : boolean + debug : bool If True will show debug plots Returns ------- - line_spec : np.array + line_spec : `numpy.ndarray`_ Spectrum with lines """ @@ -142,10 +147,11 @@ def oh_lines(): Returns ------- - wavelength, amplitude : np.arrays - Wavelength [in microns] and amplitude of the OH lines. + wavelength : `numpy.ndarray`_ + Wavelength [in microns] of the OH lines. + amplitude : `numpy.ndarray`_ + Amplitude of the OH lines. 
""" - msgs.info("Reading in the Rousselot (2000) OH line list") oh = np.loadtxt(data.get_skisim_filepath('rousselot2000.dat'), usecols=(0, 1)) @@ -158,16 +164,16 @@ def transparency(wavelength, debug=False): Parameters ---------- - wavelength : np.array + wavelength : `numpy.ndarray`_ wavelength vector in microns - debug : boolean + debug : bool If True will show debug plots Returns ------- - transparency : np.array - Transmission of the sky over the considered wavelength rage. - 1. means fully transparent and 0. fully opaque + transparency : `numpy.ndarray`_ + Transmission of the sky over the considered wavelength range. 1. means + fully transparent and 0. fully opaque """ msgs.info("Reading in the atmospheric transmission model") @@ -219,11 +225,11 @@ def h2o_lines(): Returns ------- - wavelength, flux : np.arrays - Wavelength [in microns] and flux of the H2O atmospheric - spectrum. + wavelength : `numpy.ndarray`_ + Wavelength [in microns] of the H2O atmospheric spectrum. + flux : `numpy.ndarray`_ + Flux of the H2O atmospheric spectrum. """ - msgs.info("Reading in the water atmsopheric spectrum") h2o = np.loadtxt(data.get_skisim_filepath('HITRAN.txt'), usecols=(0, 1)) @@ -239,11 +245,11 @@ def thar_lines(): Returns ------- - wavelength, flux : np.arrays - Wavelength [in angstrom] and flux of the ThAr lamp - spectrum. + wavelength : `numpy.ndarray`_ + Wavelength [in microns] of the ThAr lamp spectrum. + flux : `numpy.ndarray`_ + Flux of the ThAr lamp spectrum. """ - msgs.info("Reading in the ThAr spectrum") thar = data.load_thar_spec() @@ -280,7 +286,7 @@ def nearIR_modelsky(resolution, waveminmax=(0.8,2.6), dlam=40.0, If flgd='True' it is a bin in velocity (km/s). If flgd='False' it is a bin in linear space (microns). Default is: 40.0 (with flgd='True') - flgd : boolean + flgd : bool if flgd='True' (default) wavelengths are created with equal steps in log space. If 'False', wavelengths will be created wit equal steps in linear space. @@ -302,15 +308,16 @@ def nearIR_modelsky(resolution, waveminmax=(0.8,2.6), dlam=40.0, WAVE_WATER : float wavelength (in microns) at which the H2O are inclued. Default: WAVE_WATER = 2.3 - debug : boolean + debug : bool If True will show debug plots Returns ------- - wave, sky_model : np.arrays - wavelength (in Ang.) and flux of the final model of the sky. + wave : `numpy.ndarray`_ + Wavelength (in Ang.) of the final model of the sky. + sky_model : `numpy.ndarray`_ + Flux of the final model of the sky. """ - # Create the wavelength array: wv_min = waveminmax[0] wv_max = waveminmax[1] @@ -439,20 +446,22 @@ def optical_modelThAr(resolution, waveminmax=(3000.,10500.), dlam=40.0, If flgd='True' it is a bin in velocity (km/s). If flgd='False' it is a bin in linear space (microns). Default is: 40.0 (with flgd='True') - flgd : boolean + flgd : bool if flgd='True' (default) wavelengths are created with equal steps in log space. If 'False', wavelengths will be created wit equal steps in linear space. thar_outfile : str name of the fits file where the model sky spectrum will be stored. default is 'None' (i.e., no file will be written). - debug : boolean + debug : bool If True will show debug plots Returns ------- - wave, thar_model : np.arrays - wavelength (in Ang.) and flux of the final model of the ThAr lamp emission. + wave : `numpy.ndarray`_ + Wavelength (in Ang.) of the final model of the ThAr lamp emission. + thar_model : `numpy.ndarray`_ + Flux of the final model of the ThAr lamp emission. 
""" # Create the wavelength array: @@ -543,21 +552,21 @@ def conv2res(wavelength, flux, resolution, central_wl='midpt', Parameters ---------- - wavelength : np.array + wavelength : `numpy.ndarray`_ wavelength - flux : np.array + flux : `numpy.ndarray`_ flux resolution : float resolution of the spectrograph central_wl if 'midpt' the central pixel of wavelength is used, otherwise the central_wl will be used. - debug : boolean + debug : bool If True will show debug plots Returns ------- - flux_convolved :np.array + flux_convolved : `numpy.ndarray`_ Resulting flux after convolution px_sigma : float Size of the sigma in pixels at central_wl @@ -603,7 +612,8 @@ def conv2res(wavelength, flux, resolution, central_wl='midpt', def iraf_datareader(database_dir, id_file): - """Reads in a line identification database created with IRAF + """ + Reads in a line identification database created with IRAF identify. These are usually locate in a directory called 'database'. This read pixel location and wavelength of the lines that have been id with IRAF. Note that the first pixel in IRAF @@ -612,16 +622,18 @@ def iraf_datareader(database_dir, id_file): Parameters ---------- - database_dir : string + database_dir : str directory where the id files are located. - id_file : string + id_file : str filename that is going to be read. Returns ------- - pixel, line_id : np.arrays - Position of the line in pixel and ID of the line. - For IRAF output, these are usually in Ang. + pixel : `numpy.ndarray`_ + Position of the line in pixel of the line. For IRAF output, these are + usually in Ang. + line_id : `numpy.ndarray`_ + ID of the line. """ lines_database = [] @@ -657,9 +669,9 @@ def create_linelist(wavelength, spec, fwhm, sigdetec=2., Parameters ---------- - wavelength : np.array + wavelength : `numpy.ndarray`_ wavelength - spec : np.array + spec : `numpy.ndarray`_ spectrum fwhm : float fwhm in pixels used for filtering out arc lines that are too @@ -679,7 +691,7 @@ def create_linelist(wavelength, spec, fwhm, sigdetec=2., iraf_frmt : bool if True, the file is written in the IRAF format (i.e. wavelength, ion name, amplitude). - convert_air_to_vac (bool): + convert_air_to_vac : bool If True, convert the wavelengths of the created linelist from air to vacuum """ @@ -734,7 +746,7 @@ def create_OHlinelist(resolution, waveminmax=(0.8,2.6), dlam=40.0, flgd=True, ni If flgd='True' it is a bin in velocity (km/s). If flgd='False' it is a bin in linear space (microns). Default is: 40.0 (with flgd='True') - flgd : boolean + flgd : bool if flgd='True' (default) wavelengths are created with equal steps in log space. If 'False', wavelengths will be created wit equal steps in linear space. @@ -758,7 +770,7 @@ def create_OHlinelist(resolution, waveminmax=(0.8,2.6), dlam=40.0, flgd=True, ni iraf_frmt : bool if True, the file is written in the IRAF format (i.e. wavelength, ion name, amplitude). - debug : boolean + debug : bool If True will show debug plots """ @@ -808,7 +820,7 @@ def create_ThArlinelist(resolution, waveminmax=(3000.,10500.), dlam=40.0, flgd=T If flgd='True' it is a bin in velocity (km/s). If flgd='False' it is a bin in linear space (angstrom). Default is: 40.0 (with flgd='True') - flgd : boolean + flgd : bool if flgd='True' (default) wavelengths are created with equal steps in log space. If 'False', wavelengths will be created wit equal steps in linear space. 
@@ -832,9 +844,9 @@ def create_ThArlinelist(resolution, waveminmax=(3000.,10500.), dlam=40.0, flgd=T iraf_frmt : bool if True, the file is written in the IRAF format (i.e. wavelength, ion name, amplitude). - debug : boolean + debug : bool If True will show debug plots - convert_air_to_vac (bool): + convert_air_to_vac : bool If True, convert the wavelengths of the created linelist from air to vacuum """ diff --git a/pypeit/wavetilts.py b/pypeit/wavetilts.py index 73509ac121..d4fc963379 100644 --- a/pypeit/wavetilts.py +++ b/pypeit/wavetilts.py @@ -79,7 +79,7 @@ class WaveTilts(calibframe.CalibFrame): 'find Tiltimg file when running pypeit_chk_tilts()'), 'tilt_traces': dict(otype=table.Table, descr='Table with the positions of the ' 'traced and fitted tilts for all the slits. ' - 'see :func:`make_tbl_tilt_traces` for more details. ') + 'see :func:`~pypeit.wavetilts.BuildWaveTilts.make_tbl_tilt_traces` for more details. ') } def __init__(self, coeffs, nslit, spat_id, spat_order, spec_order, func2d, bpmtilts=None, @@ -96,7 +96,7 @@ def _bundle(self): """ Bundle the data in preparation for writing to a fits file. - See :func:`pypeit.datamodel.DataContainer._bundle`. Data is + See :func:`~pypeit.datamodel.DataContainer._bundle`. Data is always written to a 'TILTS' extension. """ _d = super(WaveTilts, self)._bundle(ext='TILTS') @@ -116,7 +116,7 @@ def is_synced(self, slits): Barfs if not Args: - slits (:class:`pypeit.slittrace.SlitTraceSet`): + slits (:class:`~pypeit.slittrace.SlitTraceSet`): """ if not np.array_equal(self.spat_id, slits.spat_id): @@ -131,6 +131,7 @@ def fit2tiltimg(self, slitmask, flexure=None): Args: slitmask (`numpy.ndarray`_): + ?? flexure (float, optional): Spatial shift of the tilt image onto the desired frame (typically a science image) @@ -254,16 +255,16 @@ class BuildWaveTilts: Class to guide arc/sky tracing Args: - mstilt (:class:`pypeit.images.buildimage.TiltImage`): + mstilt (:class:`~pypeit.images.buildimage.TiltImage`): Tilt image. QA file naming inherits the calibration key (``calib_key``) from this object. - slits (:class:`pypeit.slittrace.SlitTraceSet`): + slits (:class:`~pypeit.slittrace.SlitTraceSet`): Slit edges - spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): + spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): Spectrograph object - par (:class:`pypeit.par.pypeitpar.WaveTiltsPar` or None): + par (:class:`~pypeit.par.pypeitpar.WaveTiltsPar` or None): The parameters used to fuss with the tilts - wavepar (:class:`pypeit.par.pypeitpar.WaveSolutionPar` or None): + wavepar (:class:`~pypeit.par.pypeitpar.WavelengthSolutionPar` or None): The parameters used for the wavelength solution det (int): Detector index qa_path (:obj:`str`, optional): @@ -274,22 +275,24 @@ class BuildWaveTilts: Attributes: - spectrograph (:class:`pypeit.spectrographs.spectrograph.Spectrograph`): - steps : list - mask : ndarray, bool - True = Ignore this slit - all_trcdict : list of dict - All trace dict's - tilts : ndarray - Tilts for a single slit/order - all_ttilts : list of tuples - Tuple of tilts ndarray's - final_tilts : ndarray - Final tilts image + spectrograph (:class:`~pypeit.spectrographs.spectrograph.Spectrograph`): + ?? + steps (list): + ?? 
+ mask (`numpy.ndarray`_): + boolean array; True = Ignore this slit + all_trcdict (list): + All trace dict's + tilts (`numpy.ndarray`_): + Tilts for a single slit/order + all_tilts (list): + Tuple of tilts `numpy.ndarray`_ objects + final_tilts (`numpy.ndarray`_): + Final tilts image gpm (`numpy.ndarray`_): - Good pixel mask - Eventually, we might attach this to self.mstilt although that would then - require that we write it to disk with self.mstilt.image + Good pixel mask. Eventually, we might attach this to self.mstilt + although that would then require that we write it to disk with + self.mstilt.image """ # TODO This needs to be modified to take an inmask @@ -382,12 +385,16 @@ def find_lines(self, arcspec, slit_cen, slit_idx, bpm=None, debug=False): Wrapper to tracewave.tilts_find_lines() Args: - arcspec: - slit_cen: + arcspec (): + ?? + slit_cen (): + ?? slit_idx (int): Slit index, zero-based bpm (`numpy.ndarray`_, optional): + ?? debug (bool, optional): + ?? Returns: tuple: 2 objectcs @@ -438,20 +445,18 @@ def fit_tilts(self, trc_tilt_dict, thismask, slit_cen, spat_order, spec_order, s Args: trc_tilt_dict (dict): Contains information from tilt tracing - slit_cen (ndarray): (nspec,) Central trace for this slit + slit_cen (`numpy.ndarray`_): (nspec,) Central trace for this slit spat_order (int): Order of the 2d polynomial fit for the spatial direction spec_order (int): Order of the 2d polytnomial fit for the spectral direction slit_idx (int): zero-based, integer index for the slit in question - - Optional Args: - show_QA: bool, default = False + show_QA (bool, optional): show the QA instead of writing it out to the outfile - doqa: bool, default = True + doqa (bool, optional): Construct the QA plot Returns: - `numpy.ndarray`_: coeff: ndarray (spat_order + 1, spec_order+1) - Array containing the coefficients for the 2d legendre polynomial fit + `numpy.ndarray`_: Array containing the coefficients for the 2d + legendre polynomial fit. Shape is (spat_order + 1, spec_order+1). """ # Index self.all_fit_dict[slit_idx], self.all_trace_dict[slit_idx] \ @@ -472,7 +477,6 @@ def trace_tilts(self, arcimg, lines_spec, lines_spat, thismask, slit_cen, Trace the tilts Args: - arcimg (`numpy.ndarray`_): Arc image. Shape is (nspec, nspat). lines_spec (`numpy.ndarray`_): @@ -491,7 +495,8 @@ def trace_tilts(self, arcimg, lines_spec, lines_spat, thismask, slit_cen, Integer index indicating the slit in question. Returns: - dict: Dictionary containing information on the traced tilts required to fit the filts. + dict: Dictionary containing information on the traced tilts required + to fit the filts. """ trace_dict = tracewave.trace_tilts(arcimg, lines_spec, lines_spat, thismask, slit_cen, @@ -512,7 +517,7 @@ def model_arc_continuum(self, debug=False): The method uses the arc spectra extracted using :attr:`extract_arcs` and fits a characteristic low-order continuum for each slit/order using - :func:`pypeit.util.robust_polyfit_djs` and the parameters + :func:`~pypeit.core.fitting.robust_fit` and the parameters `cont_function`, `cont_order`, and `cont_rej` from :attr:`par`. The characteristic continuum is then rescaled to match the continuum at each spatial position in the @@ -532,7 +537,7 @@ def model_arc_continuum(self, debug=False): Run the method in debug mode. Returns: - numpy.ndarray: Returns a 2D image with the same shape as + `numpy.ndarray`_: Returns a 2D image with the same shape as :attr:`mstilt` with the model continuum. """ # TODO: Should make this operation part of WaveTiltsPar ... 
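Since several of the ``WaveTilts``/``BuildWaveTilts`` docstrings above describe how the tilt model is stored and evaluated, a minimal usage sketch may help. The calibration file names are placeholders, and the ``from_file`` and ``slit_img`` accessors are the standard ``DataContainer``/``SlitTraceSet`` methods assumed here rather than shown in this patch; only ``is_synced`` and ``fit2tiltimg`` are documented in the hunks above.

.. code-block:: python

    from pypeit.slittrace import SlitTraceSet
    from pypeit.wavetilts import WaveTilts

    # Placeholder calibration file names; substitute the outputs of your own reduction
    slits = SlitTraceSet.from_file('Slits_A_1_DET01.fits')
    waveTilts = WaveTilts.from_file('Tilts_A_1_DET01.fits')

    # Check that the two calibrations describe the same slits, then evaluate the
    # 2D tilt fits to build a tilts image in the frame defined by the slit mask
    waveTilts.is_synced(slits)
    tilts = waveTilts.fit2tiltimg(slits.slit_img(), flexure=None)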
@@ -937,12 +942,15 @@ def show_tilts_mpl(tilt_img, tilt_traces, show_traces=False, left_edges=None, tilt_img (`numpy.ndarray`_): TiltImage tilt_traces (`astropy.table.Table`_): - Table containing the traced and fitted tilts. - See :func:`make_tbl_tilt_traces` for information on the table columns. + Table containing the traced and fitted tilts. See + :func:`~pypeit.wavetilts.BuildWaveTilts.make_tbl_tilt_traces` for + information on the table columns. show_traces (bool, optional): Show the traced tilts - left_edges, right_edges (`numpy.ndarray`_, optional): - Left and right edges of the slits + left_edges (`numpy.ndarray`_, optional): + Left edges of the slits + right_edges (`numpy.ndarray`_, optional): + Right edges of the slits slit_ids (`numpy.ndarray`_, optional): Slit IDs cut (tuple, optional):