From f5585caf8a823049e15b91c397ae430b1beeb0b3 Mon Sep 17 00:00:00 2001 From: JonKing93 Date: Fri, 21 Aug 2020 13:42:39 -0700 Subject: [PATCH] Created alpha testing repo --- 1. Grid files/error_check.m | 395 ---- 1. Grid files/gridfile-dependency-list.txt | 199 -- .../@ensemble/add.m | 35 - .../@ensemble/checkEnsFile.m | 22 - .../@ensemble/ensemble.m | 126 -- .../@ensemble/load.m | 51 - .../@ensemble/loadMetadata.m | 13 - .../@ensemble/updateMetadata.m | 12 - .../@ensemble/useMembers.m | 40 - .../@ensemble/useStateIndices.m | 33 - .../@ensemble/useVars.m | 48 - .../@ensembleMetadata/closestLatLonIndices.m | 136 -- .../@ensembleMetadata/coordinates.m | 47 - .../@ensembleMetadata/dimCheck.m | 20 - .../@ensembleMetadata/ensembleMetadata.m | 146 -- .../@ensembleMetadata/getClosestLatLonIndex.m | 18 - .../@ensembleMetadata/getLatLonSequence.m | 80 - .../@ensembleMetadata/getTimeSequence.m | 15 - .../@ensembleMetadata/lookup.m | 93 - .../@ensembleMetadata/timepoints.m | 52 - .../@ensembleMetadata/useH.m | 25 - .../@ensembleMetadata/useMembers.m | 31 - .../@ensembleMetadata/useStateIndices.m | 65 - .../@ensembleMetadata/useVars.m | 47 - .../@ensembleMetadata/varCheck.m | 19 - .../@ensembleMetadata/varIndices.m | 23 - .../@stateDesign/add.m | 86 - .../@stateDesign/append.m | 32 - .../@stateDesign/buildEnsemble.m | 85 - .../@stateDesign/changeDimType.m | 15 - .../@stateDesign/checkIndices.m | 32 - .../@stateDesign/copy.m | 78 - .../@stateDesign/couple.m | 66 - .../@stateDesign/coupledVariables.m | 20 - .../@stateDesign/draw.m | 21 - .../@stateDesign/edit.m | 127 -- .../@stateDesign/ensDimension.m | 109 -- .../@stateDesign/ensembleSize.m | 7 - .../@stateDesign/findDimIndices.m | 19 - .../@stateDesign/findVarIndices.m | 18 - .../@stateDesign/getNaNflag.m | 27 - .../@stateDesign/getVariables.m | 24 - .../@stateDesign/info.m | 137 -- .../@stateDesign/initializeDraws.m | 25 - .../@stateDesign/limitMembers.m | 28 - .../@stateDesign/makeDraws.m | 23 - .../@stateDesign/matchMetadata.m | 47 - .../@stateDesign/notifyChangedType.m | 50 - .../@stateDesign/notifySecondaryCoupling.m | 25 - .../@stateDesign/notifySecondaryOverlap.m | 33 - .../@stateDesign/overlap.m | 51 - .../@stateDesign/remove.m | 34 - .../@stateDesign/removeOverlap.m | 25 - .../@stateDesign/resetChangedDim.m | 30 - .../@stateDesign/saveDraws.m | 15 - .../@stateDesign/stateDesign.m | 224 --- .../@stateDesign/stateDimension.m | 32 - .../@stateDesign/trim.m | 22 - .../@stateDesign/uncouple.m | 37 - .../@stateDesign/varIndices.m | 39 - .../@stateDesign/varMetadata.m | 42 - .../@stateDesign/weightedMean.m | 74 - .../@stateDesign/write.m | 62 - .../@varDesign/buildEnsemble.m | 95 - .../@varDesign/dataIndices.m | 44 - .../@varDesign/varDesign.m | 105 - .../Mean Adjustment/getMeanAdjustment.m | 56 - .../@PSM/Bias Correction/helpBiasCorrectors.m | 47 - 3. PSMs/@PSM/PSM.m | 115 -- 3. PSMs/@PSM/convertUnits.m | 14 - 3. PSMs/@PSM/review.m | 21 - 3. PSMs/@PSM/run.m | 31 - 3. PSMs/@PSM/setBiasCorrector.m | 31 - 3. PSMs/@PSM/setStateIndices.m | 10 - 3. PSMs/@PSM/setUnitConversion.m | 34 - 3. PSMs/Bias Correctors/biasCorrector.m | 84 - 3. PSMs/Bias Correctors/meanCorrector.m | 67 - 3. PSMs/Bias Correctors/nullCorrector.m | 26 - 3. PSMs/Bias Correctors/renormCorrector.m | 71 - 3. PSMs/Demo for developers/README.txt | 6 - 3. PSMs/Demo for developers/myPSM.m | 241 --- 3. 
PSMs/Demo for developers/psmName.m | 56 - .../@deloPSM/deloPSM.m | 49 - .../@deloPSM/errorCheckPSM.m | 8 - .../@deloPSM/getStateIndices.m | 12 - .../@deloPSM/runForwardModel.m | 44 - .../@linearPSM/errorCheckPSM.m | 14 - .../@linearPSM/getStateIndices.m | 68 - .../@linearPSM/linearModel.m | 28 - .../@linearPSM/linearPSM.m | 74 - .../@linearPSM/runForwardModel.m | 19 - .../@mgcaPSM/errorCheckPSM.m | 8 - .../@mgcaPSM/getStateIndices.m | 7 - .../@mgcaPSM/mgcaPSM.m | 82 - .../@mgcaPSM/runForwardModel.m | 44 - .../@ukPSM/UK_forward_model.m | 44 - .../@ukPSM/bayes_posterior_v2.mat | Bin 54887 -> 0 bytes .../@ukPSM/errorCheckPSM.m | 11 - .../@ukPSM/getStateIndices.m | 37 - .../@ukPSM/runForwardModel.m | 16 - .../@ukPSM/seasonalPolygon.m | 35 - .../Specific Forward Models/@ukPSM/ukPSM.m | 69 - .../@vstempPSM/errorCheckPSM.m | 14 - .../@vstempPSM/getStateIndices.m | 25 - .../@vstempPSM/runForwardModel.m | 11 - .../@vstempPSM/vstemp.m | 144 -- .../@vstempPSM/vstempPSM.m | 57 - .../VSlite (defunct)/VSLite4dash.m | 553 ------ .../VSlite (defunct)/VSLite_v2_5_1.m | 606 ------ .../estimate_vslite_params_v2_3.m | 1730 ----------------- .../VSlite (defunct)/vslitePSM.m | 98 - 3. PSMs/Specific Forward Models/trivialPSM.m | 33 - 4. DA Analyses/@dash/calculateYe.m | 35 - 4. DA Analyses/@dash/checkReconH.m | 42 - 4. DA Analyses/@dash/dash.m | 81 - 4. DA Analyses/@dash/decompose.m | 20 - 4. DA Analyses/@dash/inflate.m | 23 - 4. DA Analyses/@dash/localizationWeights.m | 21 - 4. DA Analyses/@dash/processYeR.m | 105 - 4. DA Analyses/@dash/regrid.m | 71 - 4. DA Analyses/@dash/regridTripolar.m | 82 - 4. DA Analyses/@dash/restrictVarsToPSMs.m | 74 - 4. DA Analyses/@dash/spatialLocalization.m | 108 - 4. DA Analyses/@dash/temporalLocalization.m | 82 - 4. DA Analyses/@dash/uncertainMean.m | 116 -- 4. DA Analyses/@dash/version.m | 3 - 4. DA Analyses/@dashFilter/dashFilter.m | 22 - 4. DA Analyses/@dashFilter/setValues.m | 130 -- 4. DA Analyses/@kalmanFilter/adjustH.m | 9 - 4. DA Analyses/@kalmanFilter/appendYe.m | 47 - 4. DA Analyses/@kalmanFilter/checkValues.m | 44 - 4. DA Analyses/@kalmanFilter/jointENSRF.m | 143 -- 4. DA Analyses/@kalmanFilter/jointKalman.m | 45 - 4. DA Analyses/@kalmanFilter/kalmanFilter.m | 117 -- .../@kalmanFilter/reconstructVars.m | 77 - 4. DA Analyses/@kalmanFilter/run.m | 101 - 4. DA Analyses/@kalmanFilter/serialENSRF.m | 132 -- 4. DA Analyses/@kalmanFilter/serialKalman.m | 36 - 4. DA Analyses/@kalmanFilter/settings.m | 157 -- 4. DA Analyses/@kalmanFilter/unappendYe.m | 32 - 4. DA Analyses/@particleFilter/bigpf.m | 84 - 4. DA Analyses/@particleFilter/normexp.m | 68 - .../@particleFilter/particleFilter.m | 87 - 4. DA Analyses/@particleFilter/pf.m | 61 - 4. DA Analyses/@particleFilter/pfWeights.m | 35 - 4. DA Analyses/@particleFilter/run.m | 31 - 4. DA Analyses/@particleFilter/settings.m | 76 - 4. DA Analyses/@sensorSites/sensorSites.m | 73 - 4. DA Analyses/@sensorTest/assessPlacement.m | 33 - 4. DA Analyses/@sensorTest/optimalSensor.m | 102 - 4. DA Analyses/@sensorTest/run.m | 21 - 4. DA Analyses/@sensorTest/sensorTest.m | 81 - 4. DA Analyses/@sensorTest/setValues.m | 52 - 4. 
DA Analyses/@sensorTest/settings.m | 41 - .../blendCovariance.m | 4 - .../calibrationRatio.m | 3 - .../ensembleCovariances.m | 8 - .../jointOfflineKalmanFilter/ensrf.m | 78 - .../ensrfCovariance.m | 21 - .../jointOfflineKalmanFilter/ensrfUpdates.m | 86 - .../jointOfflineKalmanFilter/ensrf_outline.m | 139 -- .../inflateCovariance.m | 6 - .../jointOfflineKalmanFilter.m | 33 - .../jointOfflineKalmanFilter/kalmanAdjusted.m | 5 - .../kalmanDenominator.m | 4 - .../jointOfflineKalmanFilter/kalmanGain.m | 3 - .../localizeCovariance.m | 4 - .../posteriorCalculations.m | 19 - .../preallocateENSRF.m | 52 - .../updateDeviations.m | 4 - .../jointOfflineKalmanFilter/updateMean.m | 14 - 4. DA Analyses/posteriorMean.m | 35 - {1. Grid files/@gridfile => @gridfile}/add.m | 0 .../@gridfile => @gridfile}/buildSources.m | 0 .../buildSourcesForFiles.m | 0 .../checkAllowedDims.m | 0 .../checkMetadataField.m | 0 .../checkMetadataStructure.m | 0 .../checkSourcesMatchGrid.m | 0 .../collectFullPaths.m | 0 .../collectPrimitives.m | 0 .../commaDelimitedDims.m | 0 .../convertSourceToPrimitives.m | 0 .../@gridfile => @gridfile}/defineMetadata.m | 0 .../@gridfile => @gridfile}/expand.m | 0 .../@gridfile => @gridfile}/findFileSources.m | 0 .../@gridfile => @gridfile}/gridfile.m | 0 .../hasDuplicateRows.m | 0 {1. Grid files/@gridfile => @gridfile}/info.m | 0 {1. Grid files/@gridfile => @gridfile}/load.m | 0 .../@gridfile => @gridfile}/metadata.m | 0 {1. Grid files/@gridfile => @gridfile}/new.m | 0 .../@gridfile => @gridfile}/padPrimitives.m | 0 .../@gridfile => @gridfile}/processMetadata.m | 0 .../@gridfile => @gridfile}/remove.m | 0 .../@gridfile => @gridfile}/renameSources.m | 0 .../@gridfile => @gridfile}/repeatedLoad.m | 0 .../@gridfile => @gridfile}/review.m | 0 .../@gridfile => @gridfile}/rewriteMetadata.m | 0 {1. Grid files/@gridfile => @gridfile}/save.m | 0 .../@gridfile => @gridfile}/sourceFilepath.m | 0 .../@gridfile => @gridfile}/update.m | 0 .../updateMetadataField.m | 0 Global Utilities/checkFile.m | 44 - Global Utilities/convertToV7_3.m | 79 - Global Utilities/gaspariCohn.m | 45 - Global Utilities/getDimIDs.m | 40 - Global Utilities/haversine.m | 35 - Global Utilities/isstrflag.m | 13 - Global Utilities/isstrlist.m | 6 - Global Utilities/loadKeep.m | 25 - Global Utilities/progressbar/progressbar.m | 365 ---- .../progressbar/progressbar_license.txt | 24 - Global Utilities/samplingMatrix.m | 100 - Global Utilities/scsIndices.m | 9 - Global Utilities/subdim.m | 33 - Tutorials/T1_fileIO.m | 381 ---- Tutorials/T2_designStateVectors.m | 298 --- Tutorials/T3_buildEnsemble.m | 199 -- Tutorials/T4_PSMs.m | 145 -- Tutorials/T5_Data_Assimilation.m | 193 -- 1. Grid files/dataSource.m => dataSource.m | 0 1. Grid files/matSource.m => matSource.m | 0 1. Grid files/ncSource.m => ncSource.m | 0 224 files changed, 14058 deletions(-) delete mode 100644 1. Grid files/error_check.m delete mode 100644 1. Grid files/gridfile-dependency-list.txt delete mode 100644 2. State Vectors and Ensembles/@ensemble/add.m delete mode 100644 2. State Vectors and Ensembles/@ensemble/checkEnsFile.m delete mode 100644 2. State Vectors and Ensembles/@ensemble/ensemble.m delete mode 100644 2. State Vectors and Ensembles/@ensemble/load.m delete mode 100644 2. State Vectors and Ensembles/@ensemble/loadMetadata.m delete mode 100644 2. State Vectors and Ensembles/@ensemble/updateMetadata.m delete mode 100644 2. State Vectors and Ensembles/@ensemble/useMembers.m delete mode 100644 2. 
State Vectors and Ensembles/@ensemble/useStateIndices.m delete mode 100644 2. State Vectors and Ensembles/@ensemble/useVars.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/closestLatLonIndices.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/coordinates.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/dimCheck.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/ensembleMetadata.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/getClosestLatLonIndex.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/getLatLonSequence.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/getTimeSequence.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/lookup.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/timepoints.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/useH.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/useMembers.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/useStateIndices.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/useVars.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/varCheck.m delete mode 100644 2. State Vectors and Ensembles/@ensembleMetadata/varIndices.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/add.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/append.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/buildEnsemble.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/changeDimType.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/checkIndices.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/copy.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/couple.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/coupledVariables.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/draw.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/edit.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/ensDimension.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/ensembleSize.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/findDimIndices.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/findVarIndices.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/getNaNflag.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/getVariables.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/info.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/initializeDraws.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/limitMembers.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/makeDraws.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/matchMetadata.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/notifyChangedType.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/notifySecondaryCoupling.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/notifySecondaryOverlap.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/overlap.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/remove.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/removeOverlap.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/resetChangedDim.m delete mode 100644 2. 
State Vectors and Ensembles/@stateDesign/saveDraws.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/stateDesign.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/stateDimension.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/trim.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/uncouple.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/varIndices.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/varMetadata.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/weightedMean.m delete mode 100644 2. State Vectors and Ensembles/@stateDesign/write.m delete mode 100644 2. State Vectors and Ensembles/@varDesign/buildEnsemble.m delete mode 100644 2. State Vectors and Ensembles/@varDesign/dataIndices.m delete mode 100644 2. State Vectors and Ensembles/@varDesign/varDesign.m delete mode 100644 3. PSMs/@PSM/Bias Correction/Mean Adjustment/getMeanAdjustment.m delete mode 100644 3. PSMs/@PSM/Bias Correction/helpBiasCorrectors.m delete mode 100644 3. PSMs/@PSM/PSM.m delete mode 100644 3. PSMs/@PSM/convertUnits.m delete mode 100644 3. PSMs/@PSM/review.m delete mode 100644 3. PSMs/@PSM/run.m delete mode 100644 3. PSMs/@PSM/setBiasCorrector.m delete mode 100644 3. PSMs/@PSM/setStateIndices.m delete mode 100644 3. PSMs/@PSM/setUnitConversion.m delete mode 100644 3. PSMs/Bias Correctors/biasCorrector.m delete mode 100644 3. PSMs/Bias Correctors/meanCorrector.m delete mode 100644 3. PSMs/Bias Correctors/nullCorrector.m delete mode 100644 3. PSMs/Bias Correctors/renormCorrector.m delete mode 100644 3. PSMs/Demo for developers/README.txt delete mode 100644 3. PSMs/Demo for developers/myPSM.m delete mode 100644 3. PSMs/Demo for developers/psmName.m delete mode 100644 3. PSMs/Specific Forward Models/@deloPSM/deloPSM.m delete mode 100644 3. PSMs/Specific Forward Models/@deloPSM/errorCheckPSM.m delete mode 100644 3. PSMs/Specific Forward Models/@deloPSM/getStateIndices.m delete mode 100644 3. PSMs/Specific Forward Models/@deloPSM/runForwardModel.m delete mode 100644 3. PSMs/Specific Forward Models/@linearPSM/errorCheckPSM.m delete mode 100644 3. PSMs/Specific Forward Models/@linearPSM/getStateIndices.m delete mode 100644 3. PSMs/Specific Forward Models/@linearPSM/linearModel.m delete mode 100644 3. PSMs/Specific Forward Models/@linearPSM/linearPSM.m delete mode 100644 3. PSMs/Specific Forward Models/@linearPSM/runForwardModel.m delete mode 100644 3. PSMs/Specific Forward Models/@mgcaPSM/errorCheckPSM.m delete mode 100644 3. PSMs/Specific Forward Models/@mgcaPSM/getStateIndices.m delete mode 100644 3. PSMs/Specific Forward Models/@mgcaPSM/mgcaPSM.m delete mode 100644 3. PSMs/Specific Forward Models/@mgcaPSM/runForwardModel.m delete mode 100644 3. PSMs/Specific Forward Models/@ukPSM/UK_forward_model.m delete mode 100644 3. PSMs/Specific Forward Models/@ukPSM/bayes_posterior_v2.mat delete mode 100644 3. PSMs/Specific Forward Models/@ukPSM/errorCheckPSM.m delete mode 100644 3. PSMs/Specific Forward Models/@ukPSM/getStateIndices.m delete mode 100644 3. PSMs/Specific Forward Models/@ukPSM/runForwardModel.m delete mode 100644 3. PSMs/Specific Forward Models/@ukPSM/seasonalPolygon.m delete mode 100644 3. PSMs/Specific Forward Models/@ukPSM/ukPSM.m delete mode 100644 3. PSMs/Specific Forward Models/@vstempPSM/errorCheckPSM.m delete mode 100644 3. PSMs/Specific Forward Models/@vstempPSM/getStateIndices.m delete mode 100644 3. PSMs/Specific Forward Models/@vstempPSM/runForwardModel.m delete mode 100644 3. 
PSMs/Specific Forward Models/@vstempPSM/vstemp.m delete mode 100644 3. PSMs/Specific Forward Models/@vstempPSM/vstempPSM.m delete mode 100644 3. PSMs/Specific Forward Models/VSlite (defunct)/VSLite4dash.m delete mode 100644 3. PSMs/Specific Forward Models/VSlite (defunct)/VSLite_v2_5_1.m delete mode 100644 3. PSMs/Specific Forward Models/VSlite (defunct)/estimate_vslite_params_v2_3.m delete mode 100644 3. PSMs/Specific Forward Models/VSlite (defunct)/vslitePSM.m delete mode 100644 3. PSMs/Specific Forward Models/trivialPSM.m delete mode 100644 4. DA Analyses/@dash/calculateYe.m delete mode 100644 4. DA Analyses/@dash/checkReconH.m delete mode 100644 4. DA Analyses/@dash/dash.m delete mode 100644 4. DA Analyses/@dash/decompose.m delete mode 100644 4. DA Analyses/@dash/inflate.m delete mode 100644 4. DA Analyses/@dash/localizationWeights.m delete mode 100644 4. DA Analyses/@dash/processYeR.m delete mode 100644 4. DA Analyses/@dash/regrid.m delete mode 100644 4. DA Analyses/@dash/regridTripolar.m delete mode 100644 4. DA Analyses/@dash/restrictVarsToPSMs.m delete mode 100644 4. DA Analyses/@dash/spatialLocalization.m delete mode 100644 4. DA Analyses/@dash/temporalLocalization.m delete mode 100644 4. DA Analyses/@dash/uncertainMean.m delete mode 100644 4. DA Analyses/@dash/version.m delete mode 100644 4. DA Analyses/@dashFilter/dashFilter.m delete mode 100644 4. DA Analyses/@dashFilter/setValues.m delete mode 100644 4. DA Analyses/@kalmanFilter/adjustH.m delete mode 100644 4. DA Analyses/@kalmanFilter/appendYe.m delete mode 100644 4. DA Analyses/@kalmanFilter/checkValues.m delete mode 100644 4. DA Analyses/@kalmanFilter/jointENSRF.m delete mode 100644 4. DA Analyses/@kalmanFilter/jointKalman.m delete mode 100644 4. DA Analyses/@kalmanFilter/kalmanFilter.m delete mode 100644 4. DA Analyses/@kalmanFilter/reconstructVars.m delete mode 100644 4. DA Analyses/@kalmanFilter/run.m delete mode 100644 4. DA Analyses/@kalmanFilter/serialENSRF.m delete mode 100644 4. DA Analyses/@kalmanFilter/serialKalman.m delete mode 100644 4. DA Analyses/@kalmanFilter/settings.m delete mode 100644 4. DA Analyses/@kalmanFilter/unappendYe.m delete mode 100644 4. DA Analyses/@particleFilter/bigpf.m delete mode 100644 4. DA Analyses/@particleFilter/normexp.m delete mode 100644 4. DA Analyses/@particleFilter/particleFilter.m delete mode 100644 4. DA Analyses/@particleFilter/pf.m delete mode 100644 4. DA Analyses/@particleFilter/pfWeights.m delete mode 100644 4. DA Analyses/@particleFilter/run.m delete mode 100644 4. DA Analyses/@particleFilter/settings.m delete mode 100644 4. DA Analyses/@sensorSites/sensorSites.m delete mode 100644 4. DA Analyses/@sensorTest/assessPlacement.m delete mode 100644 4. DA Analyses/@sensorTest/optimalSensor.m delete mode 100644 4. DA Analyses/@sensorTest/run.m delete mode 100644 4. DA Analyses/@sensorTest/sensorTest.m delete mode 100644 4. DA Analyses/@sensorTest/setValues.m delete mode 100644 4. DA Analyses/@sensorTest/settings.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/blendCovariance.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/calibrationRatio.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/ensembleCovariances.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/ensrf.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/ensrfCovariance.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/ensrfUpdates.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/ensrf_outline.m delete mode 100644 4. 
DA Analyses/jointOfflineKalmanFilter/inflateCovariance.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/jointOfflineKalmanFilter.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/kalmanAdjusted.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/kalmanDenominator.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/kalmanGain.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/localizeCovariance.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/posteriorCalculations.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/preallocateENSRF.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/updateDeviations.m delete mode 100644 4. DA Analyses/jointOfflineKalmanFilter/updateMean.m delete mode 100644 4. DA Analyses/posteriorMean.m rename {1. Grid files/@gridfile => @gridfile}/add.m (100%) rename {1. Grid files/@gridfile => @gridfile}/buildSources.m (100%) rename {1. Grid files/@gridfile => @gridfile}/buildSourcesForFiles.m (100%) rename {1. Grid files/@gridfile => @gridfile}/checkAllowedDims.m (100%) rename {1. Grid files/@gridfile => @gridfile}/checkMetadataField.m (100%) rename {1. Grid files/@gridfile => @gridfile}/checkMetadataStructure.m (100%) rename {1. Grid files/@gridfile => @gridfile}/checkSourcesMatchGrid.m (100%) rename {1. Grid files/@gridfile => @gridfile}/collectFullPaths.m (100%) rename {1. Grid files/@gridfile => @gridfile}/collectPrimitives.m (100%) rename {1. Grid files/@gridfile => @gridfile}/commaDelimitedDims.m (100%) rename {1. Grid files/@gridfile => @gridfile}/convertSourceToPrimitives.m (100%) rename {1. Grid files/@gridfile => @gridfile}/defineMetadata.m (100%) rename {1. Grid files/@gridfile => @gridfile}/expand.m (100%) rename {1. Grid files/@gridfile => @gridfile}/findFileSources.m (100%) rename {1. Grid files/@gridfile => @gridfile}/gridfile.m (100%) rename {1. Grid files/@gridfile => @gridfile}/hasDuplicateRows.m (100%) rename {1. Grid files/@gridfile => @gridfile}/info.m (100%) rename {1. Grid files/@gridfile => @gridfile}/load.m (100%) rename {1. Grid files/@gridfile => @gridfile}/metadata.m (100%) rename {1. Grid files/@gridfile => @gridfile}/new.m (100%) rename {1. Grid files/@gridfile => @gridfile}/padPrimitives.m (100%) rename {1. Grid files/@gridfile => @gridfile}/processMetadata.m (100%) rename {1. Grid files/@gridfile => @gridfile}/remove.m (100%) rename {1. Grid files/@gridfile => @gridfile}/renameSources.m (100%) rename {1. Grid files/@gridfile => @gridfile}/repeatedLoad.m (100%) rename {1. Grid files/@gridfile => @gridfile}/review.m (100%) rename {1. Grid files/@gridfile => @gridfile}/rewriteMetadata.m (100%) rename {1. Grid files/@gridfile => @gridfile}/save.m (100%) rename {1. Grid files/@gridfile => @gridfile}/sourceFilepath.m (100%) rename {1. Grid files/@gridfile => @gridfile}/update.m (100%) rename {1. 
Grid files/@gridfile => @gridfile}/updateMetadataField.m (100%) delete mode 100644 Global Utilities/checkFile.m delete mode 100644 Global Utilities/convertToV7_3.m delete mode 100644 Global Utilities/gaspariCohn.m delete mode 100644 Global Utilities/getDimIDs.m delete mode 100644 Global Utilities/haversine.m delete mode 100644 Global Utilities/isstrflag.m delete mode 100644 Global Utilities/isstrlist.m delete mode 100644 Global Utilities/loadKeep.m delete mode 100644 Global Utilities/progressbar/progressbar.m delete mode 100644 Global Utilities/progressbar/progressbar_license.txt delete mode 100644 Global Utilities/samplingMatrix.m delete mode 100644 Global Utilities/scsIndices.m delete mode 100644 Global Utilities/subdim.m delete mode 100644 Tutorials/T1_fileIO.m delete mode 100644 Tutorials/T2_designStateVectors.m delete mode 100644 Tutorials/T3_buildEnsemble.m delete mode 100644 Tutorials/T4_PSMs.m delete mode 100644 Tutorials/T5_Data_Assimilation.m rename 1. Grid files/dataSource.m => dataSource.m (100%) rename 1. Grid files/matSource.m => matSource.m (100%) rename 1. Grid files/ncSource.m => ncSource.m (100%) diff --git a/1. Grid files/error_check.m b/1. Grid files/error_check.m deleted file mode 100644 index 7c3a483d..00000000 --- a/1. Grid files/error_check.m +++ /dev/null @@ -1,395 +0,0 @@ -%% This does an error check on the gridfile package. -clear; -clc; - -% Start by creating a new gridfile -full = 'D:\Climate Data\Models\CESM LME\TREFHT, Near surface air temperature\Raw\b.e11.BLMTRC5CN.f19_g16.002.cam.h0.TREFHT.085001-184912.nc'; -f1 = 'b.e11.BLMTRC5CN.f19_g16.002.cam.h0.TREFHT.085001-184912.nc'; -f2 = 'T-run2-1850,2005-time,lon,lat.mat'; - - -%% Test metadata definition -lat = ncread(full,'lat'); -lon = ncread(full,'lon'); -time = (datetime(850,1,15):calmonths(1):datetime(2010,12,15))'; -run = 2; - -% Duplicate dimension name -try - gridfile.defineMetadata('lat', lat, 'lat', lon); -catch - fprintf('Successfully caught duplicate dimension name.\n'); -end - -% Unallowed metadata -try - gridfile.defineMetadata('lat', {1234}); -catch - fprintf('Successfully caught unallowed metadata.\n'); -end -try - gridfile.defineMetadata('lat', rand(5,5,5)); -catch - fprintf('Caught non-matrix metadata.\n'); -end -try - gridfile.defineMetadata('lat', [lat(1), lat(1)]'); -catch - fprintf('Caught duplicate metadata.\n'); -end - -% Correct build -meta = gridfile.defineMetadata('lat', lat, 'lon', lon, 'time', time, 'run', run); -fprintf('Built metadata.\n'); - -%% Create new file - -if isfile('myfile.grid') - delete('myfile.grid'); -end -if isfile('merge.grid') - delete('merge.grid'); -end - -% Check .grid append -file = 'myfile'; -gridfile.new(file, meta); -if isfile('myfile.grid') - fprintf('Appended extension\n'); -end - -% Catch unallowed atts -try - gridfile.new(file, meta, 5); -catch - fprintf('Caught unallowed attributes.\n'); -end -atts = struct('Model','LME','Units','Kelvin'); -try - gridfile.new(file, meta, [atts, atts]); -catch - fprintf('Caught multiple attributes.\n'); -end - -% Don't overwrite file -try - gridfile.new(file, meta, atts); -catch - fprintf('Caught file overwrite.\n'); -end - -% Overwrite -gridfile.new(file, meta, atts, true); -fprintf('Successfully overwrote file.\n'); - -%% Create .grid object -file = 'myfile.grid'; -grid = gridfile(file); -if isa(grid, 'gridfile') - fprintf('Built gridfile object.\n'); -end - -%% Add data sources - -meta = gridfile.defineMetadata('lat',lat,'lon',lon,'time',time(1:12000),'run',2); -dimOrder = ["lon","lat","time"]; - -% Unrecognized type 
-try - grid.add('blarn', full, 'TREFHT', dimOrder, meta); -catch - fprintf('Caught unrecognized type.\n'); -end - -% Incorrect type -try - grid.add('mat', full, 'TREFHT', dimOrder, meta); -catch - fprintf('Caught incorrect type.\n'); -end - -% Add full off of path -addpath(genpath("D:\Climate Data")); -rmpath(genpath("D:\Climate Data")); -grid.add('nc', full, 'TREFHT', dimOrder, meta); -fprintf('Found full path\n'); -grid.remove(full); - -% Find name on path -addpath(genpath("D:\Climate Data")); -grid.add('nc', f1, 'TREFHT', dimOrder, meta); -fprintf('Found on active path.\n'); -grid.remove(full); - -% Incorrect variable -try - grid.add('nc', full, 'blarn', dimOrder, meta); -catch - fprintf('Caught incorrect variable\n'); -end - -% Unrecognized metadata -bad = gridfile.defineMetadata('lat', lat, 'lon', lon+600, 'time', time, 'run', run); -try - grid.add('nc', full, 'TREFHT', dimOrder, bad); -catch - fprintf('Caught unrecognized metadata.\n'); -end - -% Different order -bad = lon; -bad(1) = lon(2); -bad(2) = lon(1); -bad = gridfile.defineMetadata('lat',lat,'lon', bad,'time',time,'run',run); -try - grid.add('nc',full,'TREFHT',dimOrder, bad); -catch - fprintf('Caught incorrect order.'); -end - -% Missing dimensions -bad = gridfile.defineMetadata('lon', lon,'time',time,'run',run); -try - grid.add('nc', full, 'TREFHT', dimOrder, bad); -catch - fprintf('Caught missing dimensions.\n'); -end - -% Incorrect dimension order -try - grid.add('nc',full,'TREFHT', ["time","lon","lat"], meta); -catch - fprintf('Caught incorrect rows / wrong dimension order.\n'); -end - -% Allow missing singletons -allowed = gridfile.defineMetadata('lat',lat,'lon',lon,'time',time(1:12000)); -grid.add('nc', full, 'TREFHT', dimOrder, allowed); -fprintf('Allowed singletons\n'); -grid.remove(full); - -% Add post-processing -val1 = ncread(full, 'TREFHT', [1 1 1], [1 1 1]); -grid.add('nc', full, 'TREFHT', dimOrder, meta, 'convert', [1 -273.15], 'fill', val1); - -meta.time = time(12001:13872); -range = [186 300]; -grid.add('mat', f2, 'T', ["time","lon","lat"], meta, 'validRange', range, 'convert', [1 -273.15]); -fprintf('Added post-processing.\n'); - -fprintf('Built grid. 
\n'); - -% Merge dimensions -latgrid = repmat( lat', [numel(lon), 1]); -longrid = repmat( lon, [1 numel(lat)]); -coord = [latgrid(:), longrid(:)]; -mergemeta = gridfile.defineMetadata('coord', coord, 'time', time, 'run', run); -merge = gridfile.new('merge', mergemeta, atts); - -mergemeta.time = time(1:12000); -merge.add('nc', full, 'TREFHT', ["coord","coord","time"], mergemeta,'convert', [1 -273.15], 'fill', val1); -mergemeta.time = time(12001:13872); -merge.add('mat', f2, 'T', ["time","coord","coord"], mergemeta, 'validRange', range, 'convert', [1 -273.15]); -fprintf('Merged dimensions.\n'); - -%% Load metadata -meta = grid.metadata; -if ~isfield(meta, 'attributes') - error('bad'); -end -fprintf('Included attributes\n'); - -%% Display info - -grid.info -fprintf('Displayed info.\n'); -grid.info(f2); -fprintf('Included source from file.\n'); -grid.info(2); -fprintf('Included source from index.\n'); -grid.info('all'); -fprintf('Included all sources.\n'); -[gridInfo, sourceInfo] = grid.info('all'); -fprintf('Returned info structures\n'); - -%% Load - -% Data from files -T1 = ncread(full, 'TREFHT'); -T2 = matfile(f2); -T2 = T2.T; - -T1(T1==val1) = NaN; -T2(T2range(2)) = NaN; -T1 = T1 - 273.15; -T2 = T2 - 273.15; -fileT = cat(3, T1, permute(T2, [2 3 1]), NaN(144, 96, 60)); - -% Complete grid -[T, Tmeta] = grid.load; -fprintf('Loaded grid.\n'); - -% Check values -if ~isequaln( T, fileT ) - error('bad'); -end -fprintf('Checked values.\n'); - -% Check for NaN infilling -if ~all(isnan(T(:,:,13873:end)),'all') - error('bad'); -end -fprintf('Infilled NaNs\n'); - -% Request dimension order -[T, Tmeta] = grid.load(["run","lat","time","lon"]); -if ~isequal(size(T), [1 96 13932 144]) - error('bad'); -end -fprintf('Ordered dimensions.\n'); - -% Check reordering values -if ~isequaln(T, permute(fileT, [4 2 3 1]) ) - error('bad'); -end -fprintf('Checked values.\n'); - -% Subset data -lons = [2 6 3 19]; -lats = meta.lat>0; -times = year(meta.time)>1800 & year(meta.time)<1900; -[T, Tmeta] = grid.load( ["lon", "lat", "time"], {lons, lats, times} ); -fprintf('Loaded subset\n'); - -% Check subset values -filesub = fileT(lons, lats, times); -if ~isequaln( T, filesub ) - error('bad'); -end -fprintf('Checked values\n'); - -% Check subset metadata values -if ~isequal(meta.lon(lons), Tmeta.lon) - error('bad'); -end -fprintf('Checked metadata values\n'); - -% Exclude a source -times = year(meta.time)>1800 & year(meta.time)< 1806; -[T, Tmeta] = grid.load( ["lon", "lat", "time"], {lons, lats, times} ); -fprintf('Excluded source\n'); - -% Check subset values -filesub = fileT(lons, lats, times); -if ~isequaln( T, filesub ) - error('bad'); -end -fprintf('Checked values\n'); - -% Merge values -[T, Tmeta] = merge.load; -fprintf('Merged dimensions\n'); - -% Check values -siz = size(fileT); -filesub = reshape(fileT, [siz(1)*siz(2), siz(3)]); -if ~isequaln(T, filesub) - error('bad'); -end -fprintf('Checked values\n'); - -% Merge subset -times = year(meta.time)>1800 & year(meta.time)<1900; -coords = [1 9 6 15]; -[T, Tmeta] = merge.load( ["coord", "time"], {coords, times}); -fprintf('Merged subset\n'); - -% Check values -siz = size(fileT); -filesub = reshape(fileT, [siz(1)*siz(2), siz(3)]); -filesub = filesub(coords, times); -if ~isequaln(T, filesub) - error('bad'); -end -fprintf('Checked values\n'); - -%% Rewrite metadata - -% Catch incorrect size -try - grid.rewriteMetadata( 'run', (1:4)' ); -catch - fprintf('Caught incorrect rows\n'); -end - -% Rewrite -newval = 3; -grid.rewriteMetadata('run', newval); -meta = grid.metadata; 
-if ~isequal(meta.run, newval) - error('bad'); -end -fprintf('Rewrote metadata\n'); - -%% Remove -% (Checked implicitly during the "add" error checks) - -%% Expand - -% Catch undefined dimension -try - grid.expand('var', 'blarn'); -catch - fprintf('Caught undefined dimension.\n'); -end - -% Catch incorrect columns -try - grid.expand('run', [3 5]); -catch - fprintf('Caught incorrect columns\n'); -end - -% Expand -expvals = [4;5]; -grid.expand('run', expvals); -fprintf('Expanded grid\n'); - -% Check metadata -meta = grid.metadata; -if ~isequal(meta.run, [newval; expvals]) - error('bad'); -end -fprintf('Checked metadata\n'); - -% Check size -if ~isequal(grid.size(6), 3) - error('bad'); -end -fprintf('Checked size\n'); - -%% Rename sources - -[path, name, ext] = fileparts(full); -newfile = strcat(path,'\rename\', name, ext); -movefile(full, newfile); - -% Catch incorrect new file -try - grid.renameSources(newfile, f2); -catch - fprintf('Caught incorrect rename\n'); -end - -% Rename -grid.renameSources; -fprintf('Renamed sources\n'); - -% Check file path -folders = split(grid.source.file(1,:),'/'); -if ~strcmp(newfile, fullfile(folders{:}) ) - error('bad'); -end -fprintf('Checked file name\n'); - -movefile(newfile, full); \ No newline at end of file diff --git a/1. Grid files/gridfile-dependency-list.txt b/1. Grid files/gridfile-dependency-list.txt deleted file mode 100644 index 86d55f0f..00000000 --- a/1. Grid files/gridfile-dependency-list.txt +++ /dev/null @@ -1,199 +0,0 @@ -defineMetadata - dash.assertStrFlag - dash.isstrflag - checkMetadataField - hasDuplicateRows -new - dash.assertStrFlag - dash.isstrflag - checkMetadataStructure - checkMetadataField - hasDupicateRows - dash.errorStringList - dash.dimensionNames - processMetadata - gridfile - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists - update -metadata - gridfile - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists -gridfile - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists - update -add - update - dash.parseInputs - dash.assertStrList - dash.isstrlist - dash.errorStringList - dataSource.new - matSource - dataSource.checkVariable - dataSource - dash.assertStrList - dash.isstrlist - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists - ncSource - dataSource.checkVariable - dataSource - dash.assertStrList - dash.isstrlist - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists - checkAllowedDims - dash.errorStringList - checkMetadataStructure - checkMetadataField - hasDupicateRows - dash.errorStringList - convertSourceToPrimitives - commaDelimitedDims - sourceFilepath - dash.relativePath - dash.unixStylePath - padPrimitives - save - update -remove - update - dash.assertStrFlag - dash.isstrflag - findFileSources - collectPrimitives - collectPrimitives - save - update -renameSources - update - collectFullPaths - collectPrimitives - dash.assertStrList - dash.isstrlist - findFileSources - collectPrimitives - collectPrimitives - buildSourcesForFiles - collectPrimitives - dataSource.new - matSource - dataSource.checkVariable - dataSource - dash.assertStrList - dash.isstrlist - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists - ncSource - dataSource.checkVariable - dataSource - dash.assertStrList - dash.isstrlist - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists - checkSourcesMatchGrid - collectPrimitives - sourceFilepath - dash.relativePath - dash.unixStylePath - padPrimitives - save - update -rewriteMetadata - update - dash.assertStrFlag - dash.isstrflag - checkAllowedDims - 
dash.errorStringList - checkMetadataField - hasDuplicateRows - updateMetadataField - processMetadata - save - update -expand - update - dash.assertStrFlag - dash.isstrflag - checkAllowedDims - dash.errorStringList - checkMetadataField - hasDuplicateRows - hasDuplicateRows - updateMetadataField - processMetadata - save - update -info - update - dash.isstrlist - findFileSources - collectPrimitives - dash.assertPositiveIntegers - buildSources - collectFullPaths - collectPrimitives - buildSourcesForFiles - collectPrimitives - dataSource.new - matSource - dataSource.checkVariable - dataSource - dash.assertStrList - dash.isstrlist - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists - ncSource - dataSource.checkVariable - dataSource - dash.assertStrList - dash.isstrlist - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists -load - update - dash.assertStrList - dash.isstrlist - checkAllowedDims - dash.errorStringList - dash.assertNumericVectorN - dash.assertPositiveIntegers - repeatedLoad - buildSources - collectFullPaths - collectPrimitives - buildSourcesForFiles - collectPrimitives - dataSource.new - matSource - dataSource.checkVariable - dataSource - dash.assertStrList - dash.isstrlist - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists - ncSource - dataSource.checkVariable - dataSource - dash.assertStrList - dash.isstrlist - dash.assertStrFlag - dash.isstrflag - dash.checkFileExists - dataSource.read - dash.equallySpacedIndices - ncSource.load - matSource.load \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensemble/add.m b/2. State Vectors and Ensembles/@ensemble/add.m deleted file mode 100644 index 3f77ee63..00000000 --- a/2. State Vectors and Ensembles/@ensemble/add.m +++ /dev/null @@ -1,35 +0,0 @@ -function[] = add( obj, nEns ) -%% Adds additional ensemble members to a .ens file -% -% obj.add( nEns ) -% Adds a specified number of new members. -% -% ----- Inputs ----- -% -% nEns: The number of ensemble members to add. A scalar, positive integer. - -% Error check -if ~isnumeric(nEns) || ~isscalar(nEns) || nEns<=0 || mod(nEns,1)~=0 - error('nEns must be a positive scalar integer.'); -end - -% Add more draws to each set of coupled variables -cv = obj.design.coupledVariables; -for set = 1:numel(cv) - obj.design = obj.design.makeDraws( cv{set}, nEns, obj.random ); -end - -% Write to file -obj.design.write( obj.file, obj.random, obj.writenan, false ); - -% Update the ensemble object. -m = load( obj.file, '-mat', 'ensSize', 'design', 'hasnan' ); -obj.ensSize = m.ensSize; -obj.design = m.design; -obj.hasnan = m.hasnan; -obj.metadata = ensembleMetadata( obj.design ); -if isequal(obj.loadMembers, 1:obj.ensSize(2)-nEns ) - obj.loadMembers = 1:obj.ensSize(2); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensemble/checkEnsFile.m b/2. State Vectors and Ensembles/@ensemble/checkEnsFile.m deleted file mode 100644 index ff3984c6..00000000 --- a/2. State Vectors and Ensembles/@ensemble/checkEnsFile.m +++ /dev/null @@ -1,22 +0,0 @@ -function[ens] = checkEnsFile( ~, file ) -%% Error checks a .ens file and returns a matfile object. - -% Check that the file exists and is .ens -checkFile( file, 'extension', '.ens', 'exist', true ); - -% Load a matfile and check the fields are not corrupted -ens = matfile(file); - -fields = who(ens); -if ~ismember('complete', fields) || ~ens.complete - error('The file "%s" was not written successfully. 
Try rewriting.', file); -end - -requiredFields = ["M", "design", "random","ensSize","hasnan","writenan"]; -for f = 1:numel(requiredFields) - if ~ismember( requiredFields(f), fields ) - error('The file "%s" is missing the "%s" field. It may be corrupted. Consider re-writing.', file, requiredFields(f) ); - end -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensemble/ensemble.m b/2. State Vectors and Ensembles/@ensemble/ensemble.m deleted file mode 100644 index f116a912..00000000 --- a/2. State Vectors and Ensembles/@ensemble/ensemble.m +++ /dev/null @@ -1,126 +0,0 @@ -classdef ensemble < handle -% ensemble -% Builds and loads ensembles. -% -% ensemble Properties: -% file - The .ens file associated with the ensemble. -% writenan - Whether NaN values are permitted to write to file. -% (Currently only supports true) -% hasnan - Whether each ensemble member contains NaN values. -% ensSize - The size of the ensemble -% random - Whether the ensemble members are random or ordered -% design - The stateDesign associated with the ensemble. -% -% ensemble Methods: -% load - Loads desired ensemble members. -% add - Adds additional ensemble members to the stateDesign. -% useVars - Only load specific variables -% useVarsForPSMs - Only load the elements of specified variables needed -% to run PSMs -% useMembers - Only load specific ensemble members -properties (SetAccess = private) - file; % The .ens file associated with the ensemble - metadata; % Ensemble metadata - - % Values also in the .ens file - writenan; % Whether NaN values have been written to file. - hasnan; % Whether a variable in an ensemble member has NaN values - ensSize; % The size of saved ensemble - loadSize; % The size of the loaded ensemble - random; % Whether the ensemble is ordered or random - design; % The state design associated with the ensemble - - % Load specifications - loadMembers; % Which ensemble members to load - loadH; % Which state indices to load -end - -% Constructor -methods - function obj = ensemble( file ) - % Creates a new ensemble object from a .ens file - % - % obj = ensemble( file ) - % Returns the ensemble object for a .ens file. - % - % ----- Inputs ----- - % - % file: A .ens file with a saved ensemble. - % - % ----- Outputs ----- - % - % obj: A new ensemble object - - % Check that the file exists. - checkFile( file, 'extension', '.ens', 'exist', true ); - obj.file = string( which( file ) ); - - % Load the fields - try - required = {'random','ensSize','hasnan','writenan','design','complete'}; - m = load( file, '-mat', required{:} ); - catch - error('The ensemble file may be corrupted.'); - end - - % Check for failed write or missing fields - if ~isfield(m, 'complete') || ~isscalar(m.complete) || ~islogical(m.complete) || ~m.complete - error('The file %s is not valid. It may have failed during a write operation.', file ); - end - fileFields = fields(m); - if any( ~ismember(fileFields, required) ) - error('File %s does not contain all required fields. It may be corrupted.'); - end - - % Set the properties - obj.random = m.random; - obj.ensSize = m.ensSize; - obj.loadSize = m.ensSize; - obj.hasnan = m.hasnan; - obj.writenan = m.writenan; - obj.design = m.design; - - % Create ensemble metadata - obj.metadata = ensembleMetadata( m.design ); - - % By default, load everything - obj.loadMembers = 1:obj.ensSize(2); - obj.loadH = []; - end -end - -% User methods -methods - - % Adds additional ensemble members to an ensemble. 
- add( obj, nAdd ); - - % Loads an ensemble from a .ens file - [M, meta] = load( obj ); - - % Specifies which ensemble members to load - useMembers( obj, members ); - - % Specifies which variables to load - useVars( obj, vars ); - - % Specify which state indices to load - useStateIndices( obj, H ); - - % Reset the ensemble to loading everything - reset( obj ); -end - -% To be removed -methods - meta = loadMetadata( obj ); -end - -% Internal utilities -methods - - % Checks a .ens file exists and is not corrupted. Returns a matfile - ens = checkEnsFile( ~, file ); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensemble/load.m b/2. State Vectors and Ensembles/@ensemble/load.m deleted file mode 100644 index fc5ffcbf..00000000 --- a/2. State Vectors and Ensembles/@ensemble/load.m +++ /dev/null @@ -1,51 +0,0 @@ -function[M, ensMeta] = load( obj ) -%% Loads an ensemble from a .ens file. -% -% [M, ensMeta] = obj.load -% Load the ensemble and associated metadata. -% -% ----- Outputs ----- -% -% M: The loaded ensemble -% -% ensMeta: Metadata for the loaded ensemble. - -% Error check. Get the matfile -ens = obj.checkEnsFile( obj.file ); - -% Get load / keep for columns -[members, order] = sort( obj.loadMembers ); -nMembers = numel(members); -[scsCol, keepCols] = loadKeep( members ); -cols = scsIndices( scsCol ); - -% Get load / keep for rows -loadH = obj.loadH; -if isempty(loadH) - loadH = 1:obj.ensSize(1); -end -[scsRow, keepRows] = loadKeep( find(loadH) ); -rows = scsIndices( scsRow ); - -% Attempt to load the entire panel -try - M = ens.M(rows, cols); - M = M(:, keepCols); - -% If the panel is too big for memory, load ensemble members iteratively. -catch - M = NaN( numel(rows), nMembers ); - progressbar(0); - for k = 1:nMembers - M(:,m) = ens.M( rows, members(m) ); - progressbar(m/nMembers); - end -end - -% Remove unncessary rows. Reorder ensemble members from scs with reverse sort -M = M(keepRows, sort(order)); - -% Return metadata -ensMeta = obj.metadata; - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensemble/loadMetadata.m b/2. State Vectors and Ensembles/@ensemble/loadMetadata.m deleted file mode 100644 index 440e691e..00000000 --- a/2. State Vectors and Ensembles/@ensemble/loadMetadata.m +++ /dev/null @@ -1,13 +0,0 @@ -function[meta] = loadMetadata( obj ) -% Returns the metadata for the variables and ensemble members to be loaded. -% -% meta = obj.loadMetadata -% -% ----- Outputs ----- -% -% meta: Metadata for the loaded variables and ensemble members - -% Warn of future removal -warning('ensemble.loadMetadata is no longer necessary and will be removed in a future release. Please update your code to "ensemble.metadata"'); -meta = obj.metadata; -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensemble/updateMetadata.m b/2. State Vectors and Ensembles/@ensemble/updateMetadata.m deleted file mode 100644 index cca6772a..00000000 --- a/2. State Vectors and Ensembles/@ensemble/updateMetadata.m +++ /dev/null @@ -1,12 +0,0 @@ -function[] = updateMetadata( obj ) -% Update ensemble metadata when load parameters are specified. - -% Build from new to avoid double calls -newMeta = ensembleMetadata( obj.design ); -newMeta = newMeta.useMembers( obj.loadMembers ); -if ~isempty(obj.loadH) - newMeta = newMeta.useStateIndices( obj.loadH ); -end -obj.metadata = newMeta; - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensemble/useMembers.m b/2. 
State Vectors and Ensembles/@ensemble/useMembers.m deleted file mode 100644 index c9d7c378..00000000 --- a/2. State Vectors and Ensembles/@ensemble/useMembers.m +++ /dev/null @@ -1,40 +0,0 @@ -function[] = useMembers( obj, members ) -% Specify which ensemble members to load. -% -% obj.useMembers( members ) -% Specify which ensemble members to load. -% -% obj.useMembers( 'all' ) -% Load all ensemble members -% -% ----- Inputs ----- -% -% members: A set of indices. Either a vector of linear indices, or a -% logical vector with nEns elements. - -% Values for reset flag -nEns = obj.ensSize(2); -if strcmpi( members, 'all' ) - members = 1:nEns; -end - -% Error check -if ~isvector(members) - error('members must be a vector.'); -elseif islogical(members) && length(members)~=nEns - error('When members is a logical vector, it must have one element for each ensemble member (%.f)', nEns ); -elseif ~isnumeric(members) || ~isreal(members) || any(members<1) || any(mod(members,1)~=0) || any(members>nEns) - error('members must be a set of positive integers on the interval [1, %.f]', nEns); -end - -% Update load parameters -if islogical(members) - members = find(members); -end -obj.loadMembers = members(:)'; - -% Update metadata -obj.loadSize(2) = numel(members); -obj.updateMetadata; - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensemble/useStateIndices.m b/2. State Vectors and Ensembles/@ensemble/useStateIndices.m deleted file mode 100644 index 3923920d..00000000 --- a/2. State Vectors and Ensembles/@ensemble/useStateIndices.m +++ /dev/null @@ -1,33 +0,0 @@ -function[] = useStateIndices( obj, H ) -% Specify which state indices to load from a .ens file. -% -% obj.useStateIndices( H ) -% Specify which indices to load. -% -% obj.useStateIndices( 'all' ) -% Load all state indices. -% -% ----- Inputs ----- -% -% H: The state indices to load. A logical vector with one element per state -% index. - -% Values for the reset flag -nState = obj.ensSize(1); -if strcmpi( H, 'all' ) - H = true(nState,1); -end - -% Error check -if ~isvector(H) || ~islogical(H) || numel(H)~=nState - error('H must be a logical vector with %.f elements.', nState ); -end - -% Update load parameters -obj.loadH = H; - -% Update metadata -obj.loadSize(1) = sum(obj.loadH); -obj.updateMetadata; - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensemble/useVars.m b/2. State Vectors and Ensembles/@ensemble/useVars.m deleted file mode 100644 index dd6a32c4..00000000 --- a/2. State Vectors and Ensembles/@ensemble/useVars.m +++ /dev/null @@ -1,48 +0,0 @@ -function[] = useVars( obj, vars ) -% Specifies which variables to load from a .ens file. -% -% obj.useVars( vars ) -% Specify which variables to load. -% -% obj.useVars( 'all' ) -% Load all variables. -% -% ----- Inputs ----- -% -% vars: The names of the variables to load. A string vector, cellstring -% vector, or character row vector. - -% Values for reset flag -if isstrflag(vars) && strcmpi( vars, 'all' ) - vars = meta.varName; -end - -% Error check. -if ~isstrlist(vars) - error('vars must be a string vector, cellstring vector, or character row vector.'); -end -allMeta = ensembleMetadata( obj.design ); -v = allMeta.varCheck(vars); - -% Collect variable indices -nVars = numel(v); -indices = cell( nVars, 1 ); -for k = 1:nVars - indices{k} = allMeta.varIndices( allMeta.varName(v(k)) ); -end -indices = cell2mat(indices); - -% Combine with any previously specified load indices. 
Update load -% parameters -varH = false( obj.ensSize(1), 1 ); -varH(indices) = true; -if ~isempty(obj.loadH) - varH = varH | obj.loadH; -end -obj.loadH = varH; - -% Update metadata -obj.loadSize(1) = sum(obj.loadH); -obj.updateMetadata; - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/closestLatLonIndices.m b/2. State Vectors and Ensembles/@ensembleMetadata/closestLatLonIndices.m deleted file mode 100644 index 8237fc94..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/closestLatLonIndices.m +++ /dev/null @@ -1,136 +0,0 @@ -function[H] = closestLatLonIndices( obj, coords, varNames, varargin ) -%% Gets the state indices closest to a set of coordinates. -% -% H = obj.closestLatLonIndices( coords, varNames ) -% Finds the closest state vector elements for specified variables. -% -% H = obj.closestLatLonIndices( ..., dim1, meta1, ... dimN, metaN ) -% Finds the closest H for specific elements along specified dimensions. -% Searches each index associated with elements in the specified metadata. -% -% For Example: -% >> H = obj.closestLatLonIndices( ..., 'lev', [100; 200; 300] ) -% -% would find three state elements. The closest on the level with metadata -% equal to 100, the closest on level 200, and the closest on level 300. -% -% Similarly: -% >> H = obj.closestLatLonIndices( ..., 'lev', [100; 200], 'run', [1; 2] ) -% -% would find 4 state elements. The closest on level 100 in runs 1 and 2, -% and the closest on level 200 in runs 1 and 2. -% -% ----- Inputs ----- -% -% coords: A set of lat-lon coordinates. First column is lat, -% second is lon (nSite x 2). -% -% varNames: A set of variable names. Must be a string column. -% -% dimN: The name of the Nth dimension with specific indices to search -% -% metaN: The metadata of indices along which to search for dimension N -% -% ----- Outputs ----- -% -% H: State vector indices. (nIndices x nSite) - -% Parse inputs using generalized dimension names -[dimID,~,~,lon,lat,~,~,~,tri] = getDimIDs; -nDim = numel(dimID); -norestrict = [lon;lat;tri]; - -metaValue = cell( nDim, 1 ); -empty = repmat( {[]}, [nDim, 1] ); - -[metaValue{:}] = parseInputs( varargin, cellstr(dimID), empty, empty ); -varNames = string(varNames); - -% Get the number of indices to search in each dimension. Throw error if -% indices were provided for lat, lon, or tri -nEls = zeros( nDim, 1 ); -for d = 1:nDim - if ismember(dimID(d), norestrict) && ~isempty(metaValue{d}) - error('Search values are not allowed for the %s dimension.', norestrict(d) ); - end - nEls(d) = size( metaValue{d}, 1 ); -end - -% Get the N-D subscript of each combination of dimensions. -searchDim = find( nEls~=0 ); -nDim = numel(searchDim); -nEls = nEls( searchDim ); -subDex = subdim( (1:prod(nEls))', nEls ); - -% Preallocate the H indices -nSite = size( coords, 1 ); -nComb = 1; -if ~isempty(subDex) - nComb = size(subDex,1); -end -nVar = numel(varNames); -H = NaN( nComb*nVar, nSite ); - -% Get the v-index for each variable. Extract metadata for each sequence -% element and find distances to coordinates -for var = 1:numel(varNames) - v = obj.varCheck( varNames(var) ); - latlon = obj.getLatLonSequence( varNames(var) ); - dist = haversine( coords, latlon ); - - % Initialize the dimension search - searchIndex = cell(nDim,1); - subSearch = subDex; - nModulus = NaN( 1, nDim ); - - % For each dimension with search values, get the indices where the - % metadata is located. 
Subscript to N-D - for dim = 1:nDim - d = searchDim(dim); - stateMeta = obj.stateMeta.(obj.varName(v)).(dimID(d)); - - if size(metaValue{d}, 2) ~= size(stateMeta,2) - error('The metadata search values do not have the same number of rows as the ensemble metadata.'); - end - - [~, searchIndex{dim}] = ismember( metaValue{d}, stateMeta, 'rows' ); - if any( searchIndex{dim} == 0 ) - error('Variable %s does not have matching metadata along the %s dimension.', varNames(var), dimID(d) ); - end - subSearch(:,dim) = searchIndex{dim}(subDex(:,dim)); - nModulus(dim) = prod( obj.varSize(v,1:d-1) ); - end - - % On a complete grid, sequence lengths are preserved. Modulate the - % closest site over all search dimentions - if ~obj.partialGrid(v) - [~, site] = min( dist, [], 2 ); - nAdd = 0; - if ~isempty( subSearch ) - nAdd = sum( (subSearch-1).*nModulus, 2 ); - end - H((var-1).*nComb+(1:nComb),:) = (obj.varLimit(v,1) - 1) + nAdd + site'; - - % On a partial grid, cycle through the subsearches. - else - nEls = size(dist,2); - for s = 1:nComb - - % Get the linear indices of the subsearch sequence. - sub = ones(1, numel(dimID)); - sub(searchDim) = subSearch(s,:); - sub = num2cell(sub); - ind = sub2ind( obj.varSize(v,:), sub{:}) + (0:nEls-1); - - % Use the partialH for the sequence to reduce the distances. - % Find the minimum and get the H index - partial = obj.partialH{v}(ind); - seqdist = dist(:, partial); - [~, site] = min( seqdist, [], 2 ); - H((var-1).*nComb+s, :) = (obj.varLimit(v,1)-1) + sum(obj.partialH{v}(1:ind(1)-1)) + site; - end - end -end - -end - \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/coordinates.m b/2. State Vectors and Ensembles/@ensembleMetadata/coordinates.m deleted file mode 100644 index b439ebc9..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/coordinates.m +++ /dev/null @@ -1,47 +0,0 @@ -function[coord] = coordinates( obj ) -% Gets point lat-lon coordinates for the entire ensemble. -% -% coord = obj.coordinates -% -% *** Returns NaN coordinates for spatial means and variables lacking -% lat-lon coordinate information. -% -% ----- Outputs ----- -% -% coord: Lat-lon coordinates of each state vector element. (nState x 2) - -% Preallocate -coord = NaN( obj.varLimit(end), 2 ); - -% Try to extract coordinates from each variable. Just leave as NaN if it -% fails, or if they are spatial means. -for v = 1:numel(obj.varName) - try - latlon = obj.getLatLonSequence( obj.varName(v) ); - - % Replicate over a complete grid - nIndex = prod(obj.varSize(v,:)); - nRep = nIndex ./ size(latlon,1); - latlon = repmat( latlon, [nRep,1] ); - - % Reduce if a partial grid - if obj.partialGrid(v) - latlon = latlon(obj.partialH{v}, :); - end - trycat = true; - catch - fprintf('Warning: Unable to determine coordinates for variable %s.\n', obj.varName(v)); - trycat = false; - end - - % Try concatenating the data - if trycat - try - coord( obj.varIndices(obj.varName(v)), : ) = latlon; - catch - fprintf('Warning: Cannot concatenate coordinates for variable %s.\n', obj.varName(v) ); - end - end -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/dimCheck.m b/2. State Vectors and Ensembles/@ensembleMetadata/dimCheck.m deleted file mode 100644 index 4f5c84e1..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/dimCheck.m +++ /dev/null @@ -1,20 +0,0 @@ -function dims = dimCheck( obj, dims ) -% Checks if a set of dimensions are in the ensemble metadata. -% Returns them as a string array. 
- -% Check that dims is either a character row, cellstring, or -% string -if ~isstrlist(dims) - error('dims must be a character row vector, cellstring, or string array.'); -end - -% Convert to string for simplicity -dims = string(dims); - -% Check that the dims are actually in the ensemble metadata -goodDims = fields( obj.stateMeta.(obj.varName(1)) ); -if any( ~ismember( dims, goodDims ) ) - error('"%s" is not a dimension in the ensemble metadata.', dims( find(~ismember(dims,goodDims),1) ) ); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/ensembleMetadata.m b/2. State Vectors and Ensembles/@ensembleMetadata/ensembleMetadata.m deleted file mode 100644 index 0badbdb8..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/ensembleMetadata.m +++ /dev/null @@ -1,146 +0,0 @@ -classdef ensembleMetadata - % ensembleMetadata - % Manages metadata for a state vector ensemble - % - % ensembleMetadata Methods: - % ensembleMetadata - Creates a new ensemble metadata object - % lookup - Returns metadata at specified indices - % varIndices - Returns the state vector indices associated with a variable - % useVars - Returns ensemble metadata for a state vector comprised of specified variables - % closestLatLonIndices - Returns state vector indices closest to a - % set of lat-lon coordinates. - % - % ensembleMetadata Utility Methods: - % dimCheck - Checks that a set of dimensions are in the metadata - % varCheck - Checks that a set of variables are in the metadata - - % ----- Written By ----- - % Jonathan King, University of Arizona, 2019 - - % The user is not permitted to access any of the properties. These - % values must be set by the constructor and looked up elsewhere. - properties (SetAccess = private) - varName % Variable name - varLimit % Index limits in state vector - varSize % Size of gridded data for each variable - stateMeta % Metadata for each state element - ensMeta % Metadata for each ensemble member - - design % The stateDesign associated with the ensemble - ensSize % The number of state elements and members - partialGrid % Whether a variable has a complete grid - partialH % Which elements to use in a partial grid - nEls % Number of elements in partial grids (NaN for complete grids) - end - - - % Constructor block - methods - function obj = ensembleMetadata( inArg ) - % Constructor for an ensemble metadata object - % - % obj = ensembleMetadata( design ) - % Creates an ensemble metadata object for a state vector design. - % - % obj = ensembleMetadata( ensFile ) - % Returns the ensemble metadata object for the ensemble saved in a - % .ens file. - % - % ----- Inputs ----- - % - % design: A state vector design object. - % - % ensFile: The name of a .ens file. Either a character vector - % or a string. - % - % ----- Outputs ----- - % - % obj: The new ensemble metadata object. - - % If the input is a state design, just use it directly. - if isa(inArg, 'stateDesign') - obj.design = inArg; - if numel(inArg) > 1 - error('design must be a scalar object. 
The current design is a stateDesign array.'); - end - - % For an ens file input, error check before extracting desing - else - checkFile( inArg, 'extension', '.ens', 'exist', true ); - m = matfile( inArg ); - obj.design = m.design; - end - - % Record the variable names, index limits, dimensional sizes, - % used metadata - obj.varName = obj.design.varName; - [obj.varLimit, obj.varSize] = obj.design.varIndices; - [obj.stateMeta, obj.ensMeta] = obj.design.varMetadata; - - % Info for incomplete grids - nVar = numel(obj.varName); - obj.partialGrid = false(nVar,1); - obj.partialH = cell( nVar, 1 ); - obj.nEls = NaN( nVar, 1 ); - - % Get the size of the ensemble - obj.ensSize = obj.design.ensembleSize; - end - end - - % Methods for the user - methods - - % Looks up metadata for a variable - meta = lookup( obj, dims, inArg ); - - % Return the indices associated with a variable - H = varIndices( obj, varName ); - - % Reduces to specified variables - ensMeta = useVars( obj, vars ); - - % Reduces to specified ensemble members - ensMeta = useMembers( obj, members ); - - % Reduces to specific state indices - ensMeta = useStateIndices( obj, H ); - end - - % Indexing - methods - - % Checks that dimensions are in the metadata, converts to string - dims = dimCheck( obj, dims ); - - % Checks that variables are in the metadata. Returns variable index - v = varCheck( obj, vars ); - - % Which values should be kept from the original ensemble - H = useH( obj ); - end - - % Lat-lon lookup for PSM development - methods - - % Gets lat-lon metadata for one sequence element of a variable - latlon = getLatLonSequence( obj, varName ); - - % Time metadata for one sequence element - time = getTimeSequence( obj, varName ); - - % Finds the closest state vector elements to a lat-lon coordinate - H = closestLatLonIndices( obj, coords, varNames, varargin ); - - % !!!! Patch for rename to closestLatLonIndices, this will be removed in a future release. - H = getClosestLatLonIndex( obj, coords, varNames, varargin ) - - % Gets the lat lon metadata for the entire ensemble - latlon = coordinates( obj ); - - % Gets timepoints for the entire ensemble - times = timepoints( obj ); - - end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/getClosestLatLonIndex.m b/2. State Vectors and Ensembles/@ensembleMetadata/getClosestLatLonIndex.m deleted file mode 100644 index 62c8b1b5..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/getClosestLatLonIndex.m +++ /dev/null @@ -1,18 +0,0 @@ -function[H] = getClosestLatLonIndex( obj, coords, varNames, varargin ) -% This function redirects to closestLatLonIndices. It is meant as a patch -% to the rename to "closestLatLonIndices" of v3.4.0. It will be removed in -% a future release. -warning(['ensembleMetadata.getClosestLatLonIndex has been renamed to ensembleMetadata.closestLatLonIndex and ',... - 'will be removed in a future release. Please consider updating your code.']); - -if ~exist('coords','var') - coords = []; -end -if ~exist('varNames', 'var') - varNames = []; -end -if ~exist( 'varargin', 'var') - varargin = {}; -end -H = obj.closestLatLonIndices( coords, varNames, varargin{:} ); -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/getLatLonSequence.m b/2. State Vectors and Ensembles/@ensembleMetadata/getLatLonSequence.m deleted file mode 100644 index 5a012fbb..00000000 --- a/2. 
State Vectors and Ensembles/@ensembleMetadata/getLatLonSequence.m +++ /dev/null @@ -1,80 +0,0 @@ -function[latlon] = getLatLonSequence( obj, varName ) -%% Gets the set of lat-lon coordinates for one sequence element of a -% variable in an ensemble. -% -% latlon = obj.getLatLonSequence( varName ) -% -% ----- Inputs ----- -% -% varName: The name of the variable - -% Get the dimension names -[~,~,~,lon,lat,~,~,~,tri] = getDimIDs; -dims = [lon;lat;tri]; - -% Get the variable index. Ensure it is not a partial grid -v = obj.varCheck(varName); -varName = string(varName); - -% Check whether there is any metadata in each dimension -hasmeta = true(3,1); -for d = 1:numel(dims) - if isscalar( obj.stateMeta.(varName).(dims(d)) ) && isnan( obj.stateMeta.(varName).(dims(d)) ) - hasmeta(d) = false; - end -end - -% Ensure that only one of lat-lon, and tri have metadata -if all(hasmeta) - error('Variable %s has both tripolar and lat-lon metadata.', varName ); -elseif all( ~hasmeta ) - error('Variable %s has neither tripolar nor lat-lon metadata.', varName ); -elseif ~all( hasmeta(1:2) ) && ~all( ~hasmeta(1:2) ) - error('Only one of the lat and lon dimensions of variable %s has metadata.', varName ); -end - -% If tripolar -if hasmeta(3) - - % Get the metadata - nEls = obj.varSize(v,3); - latlon = obj.stateMeta.(obj.varName(v)).(tri)(1:nEls,:,:); - - % Error check the metadata - if ~ismatrix( latlon ) - error('The %s data is a spatial mean.', dims(3) ); - elseif size(latlon,2)~=2 || ~isnumeric(latlon) - error('%s metadata for variable %s must be a 2 column matrix of numeric values.', dims(3), varName); - end - -% Otherwise, if lat and lon -else - - % Sub-index the metadata for a complete grid - nEls = prod( obj.varSize(v,[1 2]) ); - subDex = subdim( (1:nEls)', obj.varSize(v, [1 2]) ); - - % Get the metadata - lat = obj.stateMeta.(obj.varName(v)).(lat)(subDex(:,2),:,:); - lon = obj.stateMeta.(obj.varName(v)).(lon)(subDex(:,1),:,:); - - % Error check - if ~ismatrix(lat) - error('Variable %s is a spatial mean along the %s dimension.', varName, dims(2) ); - elseif ~isvector(lat) || ~isnumeric(lat) - error('%s metadata for variable %s must be a vector of numeric values.', dims(2), varName) - elseif ~ismatrix(lon) - error('Variable %s is a spatial mean along the %s dimension.', varName, dims(1) ); - elseif ~isvector(lon) || ~isnumeric(lon) - error('%s metadata for variable %s must be a vector of numeric values.', dims(1), varName); - end - - % Concatenate, give useful error message if failure - try - latlon = cat(2, lat, lon); - catch - error('The lat and lon metadata cannot be concatenated.'); - end -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/getTimeSequence.m b/2. State Vectors and Ensembles/@ensembleMetadata/getTimeSequence.m deleted file mode 100644 index 6d250a57..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/getTimeSequence.m +++ /dev/null @@ -1,15 +0,0 @@ -function[time] = getTimeSequence( obj, varName ) -% Gets the time metadata for one sequence element of a variable in an -% ensemble. -% -% time = obj.getTimeSequence( varName ) - -% Dimension names, variable index -[~,~,~,~,~,~,time] = getDimIDs; -v = obj.varCheck(varName); - -% Sub-index the metadata for a complete grid -nEls = obj.varSize(v,5); -time = obj.stateMeta.(obj.varName(v)).(time)(1:nEls,:,:); - -end diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/lookup.m b/2. 
State Vectors and Ensembles/@ensembleMetadata/lookup.m deleted file mode 100644 index 27a4bd0b..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/lookup.m +++ /dev/null @@ -1,93 +0,0 @@ -function[meta] = lookup( obj, dims, inArg ) -% Returns ensemble metadata at specific indices and dimensions. -% -% meta = obj.lookup( dims, H ) -% Returns the metadata along specified dimensions at state indices 'H'. -% -% meta = obj.lookup( dims, varName ) -% Returns the metadata along specified dimensions at all indices for -% the specified variable. -% -% ***Note: H and varName may not reference more than 1 -% variable. -% -% ----- Inputs ----- -% -% dims: A set of dimension names. Either a single character -% vector, cellstring, or string array,. -% -% H: A vector of indices in the state vector. May not contain -% indices for more than 1 variable. -% -% varName: The name of a variable. -% -% ----- Outputs ----- -% -% meta: -% If a single dimension is specified, a matrix of metadata. -% Each row corresponds to a specific index. -% -% If multiple dimensions are specified, returns a structure. -% The fields of the structure are the metadata matrices for -% each queried dimension. - -% If the input is a variable name, just get the indices -if isstrflag( inArg) - v = obj.varCheck( inArg ); - H = obj.varIndices( inArg ); - -% But don't allow multiple variable names -elseif isstrlist( inArg ) - error('varName can only refer to a single variable.'); - -% Assume anything else is a set of indices. Error check -else - H = inArg; - if ~isvector(inArg) || ~isnumeric(inArg) || any(inArg < 1) || any(mod(inArg,1)~=0) - error('H indices must be a numeric vector of positive integers.'); - elseif any( H > obj.varLimit(end,2) ) - error('H contains indices longer than the state vector.'); - end - H = H(:); - - % Then get the associated variables and ensure there is only 1 - [v, ~] = find( H' >= obj.varLimit(:,1) & H' <= obj.varLimit(:,2) ); - v = unique(v); - if numel(v)~=1 - error('H cannot reference more than a single variable.'); - end -end -dims = obj.dimCheck( dims ); - -% For a partial grid, need to load all variable indices. But for a complete -% grid, we can subscript the metadata -loadH = H; -if obj.partialGrid(v) - loadH = obj.varLimit(1) + (0:prod(obj.varSize(v,:))-1)'; - H = H - obj.varLimit(v,1) + 1; -end - -% Adjust load H to just the variable and subscript -loadH = loadH - obj.varLimit(v,1) + 1; -subDex = subdim( loadH, obj.varSize(v,:) ); - -% Get the metadata structure at each loaded index for each dimension -meta = struct(); -for d = 1:numel(dims) - col = ismember(obj.design.var(v).dimID, dims(d)); - meta.(dims(d)) = obj.stateMeta.(obj.varName(v)).(dims(d))(subDex(:,col),:,:); - - % If on a partial grid, reduce by partialH. Then extract the requested - % indices - if obj.partialGrid(v) - meta.(dims(d)) = meta.(dims(d))(obj.partialH{v},:,:); - meta.(dims(d)) = meta.(dims(d))(H,:,:); - end -end - -% If a single dimension was requested, return the array directly -if numel(dims) == 1 - meta = meta.(dims); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/timepoints.m b/2. State Vectors and Ensembles/@ensembleMetadata/timepoints.m deleted file mode 100644 index 09c3cd10..00000000 --- a/2. 
State Vectors and Ensembles/@ensembleMetadata/timepoints.m +++ /dev/null @@ -1,52 +0,0 @@ -function[times] = timepoints( obj ) -% Gets time metadata for the entire ensemble -% -% times = obj.timepoints - -% Find the first variable with non-NaN metadata -[~,~,~,~,~,~,time] = getDimIDs; -hasmeta = false; -for v = 1:numel(obj.varName) - if ~isnan( obj.stateMeta.(obj.varName(v)).(time) ) - hasmeta = true; - break; - end -end - -% Preallocate -siz = [obj.varLimit(end), 1]; -if ~hasmeta || ~isdatetime( obj.stateMeta.(obj.varName(v)).(time) ) - times = NaN( siz ); -else - times = NaT( siz ); -end - -% Try to extract time points for each variable -for v = 1:numel(obj.varName) - try - times = obj.getTimeSequence( obj.varName(v) ); - catch - warning('Unable to determine coordinates for variable %s.', obj.varName(v) ); - end - - % Replicate over a complete grid - nIndex = prod(obj.varSize(v,:)); - nRep = nIndex ./ size(times,1); - times = repmat( times, [nRep,1] ); - - % Reduce if a partial grid - if obj.partialGrid(v) - times = times(obj.partialH{v}); - end - - % Concatenate - try - times( obj.varIndices(obj.varName(v)) ) = repmat( times, [nRep, 1] ); - catch - warning('Cannot concatenate time points for variable %s.', obj.varName(v)); - end -end - -end - - \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/useH.m b/2. State Vectors and Ensembles/@ensembleMetadata/useH.m deleted file mode 100644 index 37aa1af8..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/useH.m +++ /dev/null @@ -1,25 +0,0 @@ -function[H] = useH( obj ) -%% Collects the full set of H indices (relative to the original saved ensemble) - -% Fresh metadata. Preallocate H -ensMeta = ensembleMetadata( obj.design ); -H = false( ensMeta.ensSize(1), 1 ); - -% For each variable in the original ensemble, get the indices -for k = 1:numel(ensMeta.varName) - varIndex = ensMeta.varIndices( ensMeta.varName(k) ); - [ismem, v] = ismember( ensMeta.varName(k), obj.varName ); - - % Exists and is partially gridded - if ismem && obj.partialGrid(v) - H(varIndex) = obj.partialH{v}; - - % Exists in full - elseif ismem - H(varIndex) = true; - end - - % Otherwise, defaults to false -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/useMembers.m b/2. State Vectors and Ensembles/@ensembleMetadata/useMembers.m deleted file mode 100644 index 24b9f0a3..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/useMembers.m +++ /dev/null @@ -1,31 +0,0 @@ -function[obj] = useMembers( obj, members ) -% Limits ensemble metadata to specific ensemble members. -% -% ensMeta = obj.useMembers( members ) -% Reduces the ensemble metadata for a specific set of ensemble members. -% -% ----- Inputs ----- -% -% members: A vector of linear indices of ensemble members. -% -% ----- Outputs ----- -% -% ensMeta: The reduced ensemble metadata - -% Get the number of ensemble members in the original ensemble -siz = obj.design.ensembleSize; -nEns = siz(2); - -% Error check -if ~isvector(members) || ~isnumeric(members) || ~isreal(members) || any(members<1) || any( mod(members,1)~=0 ) || any(members>nEns) - error('members must be a vector of positive integers on the interval [1 %.f].', nEns ); -end - -% Update the metadata -obj.ensSize(2) = numel(members); - -design = obj.design.limitMembers( members ); -newMeta = ensembleMetadata( design ); -obj.ensMeta = newMeta.ensMeta; - -end \ No newline at end of file diff --git a/2. 
State Vectors and Ensembles/@ensembleMetadata/useStateIndices.m b/2. State Vectors and Ensembles/@ensembleMetadata/useStateIndices.m deleted file mode 100644 index a4429d1a..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/useStateIndices.m +++ /dev/null @@ -1,65 +0,0 @@ -function[ensMeta] = useStateIndices( obj, H ) -% Limits ensemble metadata to specific state indices. -% -% ensMeta = obj.useStateIndices( H ) -% Reduces the ensemble metadata to specified state indices. -% -% ----- Inputs ----- -% -% H: A logical vector indicating which state indices to use. All indices -% are relative to the original (saved) ensemble. -% -% ----- Outputs ----- -% -% ensMeta: The reduced ensemble metadata - -% Number of state elements in original ensemble. -siz = obj.design.ensembleSize; -nState = siz(1); - -% Error check -if ~isvector(H) || ~islogical(H) || numel(H)~=nState - error('H must be a logical vector with %.f elements.', nState ); -end - -% Get the metadata for the complete ensemble. -ensMeta = ensembleMetadata( obj.design ); -nVar = numel(ensMeta.varName); - -% Update the metadata for each variable -H = find(H); -for v = nVar:-1:1 - varIndices = H>=ensMeta.varLimit(v,1) & H<=ensMeta.varLimit(v,2); - - % The variable is in not in the state indices - if ~any( varIndices ) - ensMeta.varSize(v,:) = []; - ensMeta.stateMeta = rmfield( ensMeta.stateMeta, ensMeta.varName(v) ); - ensMeta.ensMeta = rmfield( ensMeta.ensMeta, ensMeta.varName(v) ); - ensMeta.varLimit(v,:) = []; - ensMeta.partialGrid(v) = []; - ensMeta.partialH(v) = []; - ensMeta.nEls(v) = []; - ensMeta.varName(v) = []; - - - % The variable is in the indices. Check for a partial grid - else - nEls = sum(varIndices); - if nEls < prod(ensMeta.varSize(v,:)) - ensMeta.partialGrid(v) = true; - ensMeta.partialH{v} = ismember( ensMeta.varIndices(ensMeta.varName(v)), H ); - ensMeta.nEls(v) = nEls; - end - end -end - -% Recalculate limits and ensemble size -nEls = ensMeta.varLimit(:,2) - ensMeta.varLimit(:,1) + 1; -nEls(ensMeta.partialGrid) = ensMeta.nEls(ensMeta.partialGrid); -lastIndex = cumsum( nEls ); -firstIndex = [1; lastIndex(1:end-1)+1]; -ensMeta.varLimit = [firstIndex, lastIndex]; -ensMeta.ensSize(1) = lastIndex(end); - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/useVars.m b/2. State Vectors and Ensembles/@ensembleMetadata/useVars.m deleted file mode 100644 index c9b0206d..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/useVars.m +++ /dev/null @@ -1,47 +0,0 @@ -function[ensMeta] = useVars( obj, vars ) -% Limits ensemble metadata to specific variables. -% -% ensMeta = obj.useVars( vars ) -% Reduces the ensemble metadata to the specified variables. -% -% ----- Inputs ----- -% -% vars: A list of variable names. A string vector, cellstring vector, or -% character row vector. -% -% ----- Outputs ----- -% -% ensMeta: The reduced ensemble metadata - -% Error check -if ~isstrlist( vars ) - error('vars must be a string vector, cellstring vector, or character row vector.'); -end -ensMeta = ensembleMetadata( obj.design ); -v = ensMeta.varCheck( vars ); -nVar = numel(v); - -% Get the state indices used in the current ensemble -Hcurr = obj.useH; - -% Get state indices of all specified variables -Hvar = false( ensMeta.ensSize(1), 1 ); -for k = 1:nVar - varIndex = ensMeta.varIndices( ensMeta.varName(v(k)) ); - Hvar(varIndex) = true; -end - -% If there are any partial grids, this follows useStateIndices. Do the new -% variables in full, as well as all previous indices. 
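% A minimal sketch of useVars, assuming "ensMeta" is an ensembleMetadata
% object whose state vector contains variables named "T" and "P"; the names
% are placeholders.
subMeta = ensMeta.useVars( ["T", "P"] );   % metadata restricted to two variables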
-if any( obj.partialGrid ) - Hnew = Hcurr | Hvar; - -% But if all grids are complete, remove any unspecified variables -else - Hnew = Hvar; -end - -% Update -ensMeta = obj.useStateIndices( Hnew ); - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/varCheck.m b/2. State Vectors and Ensembles/@ensembleMetadata/varCheck.m deleted file mode 100644 index 16fdbf50..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/varCheck.m +++ /dev/null @@ -1,19 +0,0 @@ -function v = varCheck( obj, vars ) -% Checks if a set of variables are in the ensemble metadata. If they are, -% returns the variable indices -% -% vars: A cellstring, character row vector, or string array - -% Error check input. Convert to string for internal use. -if ~isstrlist(vars) - error('vars must be a character row vector, cellstring, or string array.'); -end -vars = string(vars); - -% Check they exist. Return indices -[ismem, v] = ismember( vars, obj.varName ); -if any( ~ismem ) - error('"%s" is not a variable in the ensemble metadata.', vars(find(~ismem,1)) ); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@ensembleMetadata/varIndices.m b/2. State Vectors and Ensembles/@ensembleMetadata/varIndices.m deleted file mode 100644 index 0e7eeb5d..00000000 --- a/2. State Vectors and Ensembles/@ensembleMetadata/varIndices.m +++ /dev/null @@ -1,23 +0,0 @@ -function[H] = varIndices( obj, varName ) -% Returns the state element indices associated with a particular variable. -% -% H = obj.varIndices( varName ) -% -% ----- Inputs ----- -% -% varName: The name of a variable. -% -% ----=- Outputs ----- -% -% H: The state indices associated with the variable - -% Check this is a single variable in the metadata. Get its index -if ~isstrflag(varName) - error('varName must be a character row or string scalar.'); -end -v = obj.varCheck( varName ); - -% Get the indices -H = (obj.varLimit(v,1) : obj.varLimit(v,2))'; - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/add.m b/2. State Vectors and Ensembles/@stateDesign/add.m deleted file mode 100644 index 8055a1e0..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/add.m +++ /dev/null @@ -1,86 +0,0 @@ -function[obj] = add( obj, varName, file, autoCouple ) -%% Adds a variable to a state vector design. -% -% design = obj.add( varName, file, ) -% Adds a variable to a state vector design. -% -% design = obj.add( varName, file, autoCouple ) -% Specify whether to automatically couple the variable to new variables -% added to the state vector. Default is true. -% -% ----- Inputs ----- -% -% obj: A state vector design. -% -% varName: The name of the variable. A string scalar or character row -% vector. -% -% file: The name of the gridfile containing data for the variable. A string -% scalar or character row vector. -% -% autoCouple: Indicates whether the variable should be automatically -% coupled to new variables added to the state vector. A logical -% scalar. Default is true. -% -% ----- Outputs ----- -% -% design: The updated state vector design. - -% ----- Written By ----- -% Jonathan King, University of Arizona, 2019 - -% Set unspecified fields -if ~exist( 'autoCouple', 'var' ) - autoCouple = true; -end - -% Type check the inputs. Convert strings to "string". 
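% A minimal sketch of the add call documented above, assuming "design" is a
% stateDesign object and "temperature.grid" is an existing gridfile on the
% Matlab path; both names are placeholders.
design = design.add( "T", "temperature.grid" );              % auto-coupled (default)
design = design.add( "Tfixed", "temperature.grid", false );  % not auto-coupled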
Prevent duplicate -% variable names -[varName, file] = setup( obj, varName, file, autoCouple ); - -% Initialize a new varDesign -newVar = varDesign(file, varName); - -% Add the variable to the state vector -obj.var = [obj.var; newVar]; -obj.varName = [obj.varName; varName]; - -% Ensure the dimension order is the same for all variables -if ~isequal( newVar.dimID, obj.var(1).dimID ) - error('The order of dimensions in variable %s does not match the order for variable %s.', varName, obj.varName(1) ); -end - -% Set default coupling and overlap -obj.autoCouple(end+1) = autoCouple; -obj.isCoupled(end+1,end+1) = true; -obj.allowOverlap(end+1) = false; - -% If autocoupling, get the other variables and couple -if autoCouple - v = find( obj.autoCouple )'; - obj = obj.couple( obj.varName(v) ); -end - -end - -% Does type checking on the inputs -function[varName, file] = setup( obj, varName, file, autoCouple ) - -if ~isstrflag(varName) - error('varName must be a string.'); -elseif ~isstrflag( file ) - error('file must be a string.'); -elseif ~islogical(autoCouple) || ~isscalar(autoCouple) - error('autoCouple must be a scalar logical.'); -end - -varName = string(varName); -file = string( which(file) ); - -if ~isvarname(varName) - error('varName must be valid Matlab variable name (starts with a letter, composed only of letters, digits, and underscores).'); -elseif ~isempty(obj.varName) && ismember(varName, obj.varName) - error('Cannot repeat variable names.'); -end - -end diff --git a/2. State Vectors and Ensembles/@stateDesign/append.m b/2. State Vectors and Ensembles/@stateDesign/append.m deleted file mode 100644 index c44803f0..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/append.m +++ /dev/null @@ -1,32 +0,0 @@ -function[obj] = append( obj, design ) -%% Appends an existing state design to the current design. -% -% newDesign = obj.cat( design ) -% -% ----- Inputs ----- -% -% design: A second state vector design -% -% ----- Outputs ----- -% -% newDesign: The concatenated design - -% Check that the input is a state design and that there are no repeat names -if ~isa( design, 'stateDesign' ) || ~isscalar(design) - error('design must be a scalar stateDesign object.'); -elseif any( ismember( design.varName, obj.varName) ) - error('design cannot contain variable names in the current state design.'); -end - -% Add the new variable data -obj.var = [obj.var; design.var]; -obj.varName = [obj.varName; design.varName]; -obj.allowOverlap = [obj.allowOverlap, design.allowOverlap]; - -% Couple all the autocouple variables. Add the new coupling matrix -nNew = numel( design.var ); -obj.isCoupled( end+(1:nNew), end+(1:nNew) ) = design.isCoupled; -obj.autoCouple = [obj.autoCouple, design.autoCouple]; -obj = obj.couple( obj.varName(obj.autoCouple) ); - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/buildEnsemble.m b/2. State Vectors and Ensembles/@stateDesign/buildEnsemble.m deleted file mode 100644 index 0276c95e..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/buildEnsemble.m +++ /dev/null @@ -1,85 +0,0 @@ -function[ens] = buildEnsemble( obj, nEns, file, random, overwrite, writeNaN ) -%% Builds an ensemble from a state vector design. -% -% ens = obj.buildEnsemble( nEns, file ) -% Builds an ensemble with nEns ensemble members and saves it to file. -% -% ens = obj.buildEnsemble( nEns, file, random ) -% Specifies whether ensemble members should be selected sequentially, or -% drawn at random. Default is random. 
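% A minimal sketch of the append call documented above, assuming "designA"
% and "designB" are stateDesign objects with no shared variable names.
designA = designA.append( designB );    % concatenates the variables of designB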
-% -% ens = obj.buildEnsemble( nEns, file, random, overwrite ) -% Specify whether the function may overwrite pre-existing files. -% -% ens = obj.buildEnsemble( nEns, file, random, overwrite, writeNaN ) -% Specifies whether ensemble members containing NaN should be written to -% file. Default is true. -% -% ----- Inputs ----- -% -% nEns: The number of ensemble members. A positive integer. -% -% file: A filename. Must end with a .ens extension. -% -% random: A scalar logical indicating whether ensemble members should be -% drawn randomly or sequentially. Default is true (random). -% -% overwrite: A scalar logical indicating whether pre-existing files may be -% overwritten. Default is false. -% -% writeNaN: A scalar logical indicating whether ensemble members containing -% NaN elements should be written to the .ens file. Default is true. -% -% ----- Outputs ----- -% -% ens: An ensemble object. - -% Set defaults -if ~exist('random','var') || isempty(random) - random = true; -end -if ~exist('overwrite','var') || isempty(overwrite) - overwrite = false; -end -if ~exist('writeNaN','var') || isempty(writeNaN) - writeNaN = true; -end - -% Error check the inputs. -if ~isnumeric(nEns) || ~isscalar(nEns) || mod(nEns,1)~=0 || nEns<=0 - error('nEns must be a positive, scalar integer.'); -elseif ~isscalar(random) || ~islogical(random) - error('random must be a scalar logical.'); -elseif ~isscalar(overwrite) || ~islogical(overwrite) - error('overwrite must be a scalar logical.'); -elseif ~isscalar( writeNaN) || ~islogical( writeNaN ) - error('writeNaN must be a scalar logical.'); -end -checkFile( file, 'extension', '.ens' ); -if ~overwrite && exist(fullfile(pwd,file),'file') - error('file "%s" already exists in the current directory.', file); -end -file = fullfile(pwd,file); - -% !!! TODO -if ~writeNaN - error('writeNaN must be true. (An update is in the works...) Sorry!'); -end - -% Trim ensemble indices so that only complete sequences are allowed -obj = obj.trim; - -% Restrict ensemble indices of coupled variables to intersecting metadata. -cv = obj.coupledVariables; -for set = 1:numel(cv) - obj = obj.matchMetadata( cv{set} ); - - % Select the ensemble members - obj = obj.makeDraws( cv{set}, nEns, random ); -end - -% Write the ensemble to a .ens file. Return the associated ensemble object -obj.write( file, random, writeNaN, true ); -ens = ensemble( file ); - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/changeDimType.m b/2. State Vectors and Ensembles/@stateDesign/changeDimType.m deleted file mode 100644 index 774a1293..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/changeDimType.m +++ /dev/null @@ -1,15 +0,0 @@ -function[obj] = changeDimType( obj, v, d ) -% Change a dimension from state to ens or vice versa - -% Flip the dimension -obj.var(v).isState(d) = ~obj.var(v).isState(d); - -% Get coupled, but exclude self -cv = find( obj.isCoupled(v,:) ); -cv = cv( cv ~= v ); - -% Reset values, notify user -obj = obj.resetChangedDim(cv, d); -obj.notifyChangedType( v, d, cv ); - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/checkIndices.m b/2. State Vectors and Ensembles/@stateDesign/checkIndices.m deleted file mode 100644 index 22fb87cf..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/checkIndices.m +++ /dev/null @@ -1,32 +0,0 @@ -function[index] = checkIndices( obj, index, v, d ) -% Error check indices and place in a common format for internal use. 
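% A minimal sketch of buildEnsemble as documented above, assuming "design" is
% a completed stateDesign; the ensemble size and file name are placeholders.
ens = design.buildEnsemble( 100, 'myEnsemble.ens' );              % random draws
ens = design.buildEnsemble( 100, 'myEnsemble.ens', false, true ); % sequential, allow overwrite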
- -% If a logical, must be a vector the length of the dimension -if islogical(index) - - % Must be a vector the length of the dimension - if ~isvector(index) || numel(index)~=obj.var(v).dimSize(d) - error('Logical indices must be a vector the length of the dimension.'); - end - - % Convert to linear - index = find( index ); - - -% Otherwise, should be linear indices. Do some error checking. -else - if isempty( index ) - error('The indices for the %s dimension of variable %s are empty.', obj.var(v).dimID(d), obj.varName(v)); - elseif ~isnumeric(index) || ~isvector(index) - error('The indices for the %s dimension of variable %s are not numeric vectors.', obj.var(v).dimID(d), obj.varName(v)); - elseif any(index<=0) || any( mod(index,1)~=0 ) - error('The indices for the %s dimension of variable %s are not positive integers.', obj.var(v).dimID(d), obj.varName(v)); - elseif any( index>obj.var(v).dimSize(d) ) - error('Some indices for the %s dimension in variable %s are larger than the dimension size.', obj.var(v).dimID(d), obj.varName(v)); - end -end - -% Convert to sorted column -index = sort( index(:) ); - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/copy.m b/2. State Vectors and Ensembles/@stateDesign/copy.m deleted file mode 100644 index a3502ac3..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/copy.m +++ /dev/null @@ -1,78 +0,0 @@ -function[obj] = copy( obj, fromVar, toVars ) -%% Copies design specifications from a template variable to a set of other variables. -% ***Note: The variables will NOT be coupled. -% -% design = obj.copy( fromVar, toVars ) -% Copies design specifications from a variable to other variables. Copied -% state and ensemble indices are set via metadata matching and NOT as a -% direct copy of linear indices. -% -% ----- Inputs ----- -% -% obj: A state vector design -% -% fromVar: The template variable. A string scalar. -% -% toVars: The variables into which indices will be copied. Either a -% character row vector, cellstring, or string vector. -% -% ----- Outputs ----- -% -% design: The updated state vector design - -% ----- Written By ----- -% Jonathan King, University of Arizona, 2019 - -% Get the template variable and copying variables -tv = obj.findVarIndices( fromVar ); -v = obj.findVarIndices( toVars ); - -% Only allow a single template variable. Get a quick reference to make the -% code readable -if numel(tv) > 1 - error('fromVar must be a string scalar.'); -end -tvar = obj.var(tv); - -% For each dimension -for d = 1:numel(tvar.dimID) - - % Get the indexed metadata for the template variable - meta = tvar.meta.(tvar.dimID(d))( tvar.indices{d},: ); - - % For each copying variable - for k = 1:numel(v) - var = obj.var(v(k)); - - % Get the indices with matching metadata. If both have NaN - % metadata, then this is a singleton dimension with index 1. - currMeta = var.meta.(tvar.dimID(d)); - index = 1; - if ~(isscalar(currMeta) && isnan(currMeta)) || ~(isscalar(meta) && isnan(meta)) - [~, index] = intersect( currMeta, meta, 'rows', 'stable' ); - end - - % Edit state dimensions. Must have the same number of state indices - if tvar.isState(d) - if numel(index) ~= size(meta,1) - error('The %s variable does not have metadata matching all state indices of the template %s variable in the %s dimension.', var.name, tvar.name, tvar.dimID(d)); - end - obj = obj.stateDimension( v(k), d, 'index', index, 'mean', tvar.takeMean(d), 'nanflag', tvar.nanflag{d} ); - - - % Edit ensemble dimension. Must have some ensemble indices. 
- else - if isempty(index) - error('The %s variable does not have metadata matching any of the metadata for the template variable %s in the %s dimension.', var.name, tvar.name, tvar.dimID(d) ); - end - obj = obj.ensDimension( v(k), d, 'index', index, 'seq', tvar.seqDex{d}, 'meta', tvar.seqMeta{d}, 'mean', tvar.meanDex{d}, 'nanflag', tvar.nanflag{d} ); - end - - % Set overlap permissions - if d == 1 - obj.allowOverlap(v(k)) = obj.allowOverlap(tv); - end - end -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/couple.m b/2. State Vectors and Ensembles/@stateDesign/couple.m deleted file mode 100644 index 85420b3a..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/couple.m +++ /dev/null @@ -1,66 +0,0 @@ -function[obj] = couple( obj, varNames ) -%% Couples specified variables. -% -% design = obj.couple( varNames ) -% Couples a set of variables. Ensemble dimensions, overlap permissions, and -% autoCouple status will be set to that of the first listed variable. -% -% If the dimension type of a dimension is changed (from state to ensemble -% or vice versa) for a variable, then all mean and sequence design -% specifications will be deleted. The user will be notified of these changes. -% -% ----- Inputs ----- -% -% obj: A stateDesign object. -% -% varNames: A set of variable names. Either a cellstring or string array. -% -% ----- Outputs ----- -% -% design: The updated state vector design. - -% ----- Written By ----- -% Jonathan King, University of Arizona, 2019 - -% Get the indices of the variables in the design -v = obj.findVarIndices( varNames ); - -% Get the set of all variables coupled to the specified variables. Preserve -% the order so that variable 1 remains the template for overlap, ensemble -% dimensions, and autocoupling -[~, vall] = find( obj.isCoupled( v, : ) ); -vall = unique( [v; vall], 'stable' ); -nVars = numel(vall); - -% Notify the user when variables not in the initial list are also coupled. -obj.notifySecondaryCoupling( v, vall ); - -% Get the overlap, ensemble dimensions, and autocoupling settings -overlap = obj.allowOverlap(v(1)); -isState = obj.var(v(1)).isState; -autoCouple = obj.autoCouple(v(1)); - -% For each variable -for k = 1:nVars - var = vall(k); - - % Couple with all the other variables - obj.isCoupled( var, vall ) = true; - obj.isCoupled( vall, var ) = true; - - % For any dimensions changing type, delete mean and sequence data. - % Notify the user. - flipDim = find( isState ~= obj.var(var).isState ); - if ~isempty(flipDim) - for d = 1:numel(flipDim) - obj = obj.resetChangedDim( var, flipDim(d) ); - end - obj.notifyChangedType( var, flipDim ); - end - - % Set the autocoupler and overlap - obj.autoCouple(var) = autoCouple; - obj.allowOverlap(var) = overlap; -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/coupledVariables.m b/2. State Vectors and Ensembles/@stateDesign/coupledVariables.m deleted file mode 100644 index ba1da5ea..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/coupledVariables.m +++ /dev/null @@ -1,20 +0,0 @@ -function[cv] = coupledVariables( obj ) -%% Get the variable indices of each set of coupled varialbes. - -% Couple variables to themselves -nVar = numel(obj.var); -obj.isCoupled( 1:nVar+1:end ) = true; - -% Get each set of coupled variables. Once a variable is added to a set, -% remove it from the list of unset variables. 
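% A minimal sketch of the couple call documented above, assuming "design" is
% a stateDesign with variables "T", "P", and "SLP" (placeholder names). Per
% the help text, the first listed variable acts as the template for ensemble
% dimensions, overlap, and autoCouple settings.
design = design.couple( ["T", "P", "SLP"] );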
-nSets = size( unique( obj.isCoupled, 'rows' ), 1); -cv = cell(nSets,1); -unset = 1:nVar; - -for s = 1:nSets - set = find( obj.isCoupled( unset(1), : ) ); - cv{s} = set; - unset( ismember(unset,set) ) = []; -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/draw.m b/2. State Vectors and Ensembles/@stateDesign/draw.m deleted file mode 100644 index 64373532..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/draw.m +++ /dev/null @@ -1,21 +0,0 @@ -function[subDraws, undrawn] = draw( ~, nDraws, subDraws, undrawn, random, ensSize ) -% Make a selection of draws. - -% Error if ensemble cannot complete -if nDraws > numel(undrawn) - error('Cannot find enough ensemble members. Try using a smaller ensemble.'); -end - -% Draw in a random or ordered method -if random - drawIndex = randperm( numel(undrawn), nDraws ); -else - drawIndex = 1:nDraws; -end -draws = undrawn(drawIndex); -undrawn( drawIndex ) = []; - -% Add to the end of the draws array -subDraws( end-nDraws+1:end, : ) = subdim( draws, ensSize ); - -end diff --git a/2. State Vectors and Ensembles/@stateDesign/edit.m b/2. State Vectors and Ensembles/@stateDesign/edit.m deleted file mode 100644 index 7a748f27..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/edit.m +++ /dev/null @@ -1,127 +0,0 @@ -function[obj] = edit( obj, varName, dim, dimType, varargin ) -%% Edits the design specifications of a variable in the state vector. -% -% BASIC USAGE: -% -% design = obj.edit( varName, dim, dimType, ... flags/values ) -% Edits a variable in the state design. Edits a particular dimension, and -% specifies whether the dimension is a state or ensemble dimension. Various -% flags and additional arguments further modify the design. -% -% By default, all dimensions are set to state dimensions, and all indices -% are enabled. Optional arguments are detailed below. -% -% -% STATE DIMENSIONS: obj.edit( varName, dim, 'state', ... ) -% -% obj.edit( ..., 'index', stateIndex ) -% Specifies state indices for the dimension. -% -% obj.edit( ..., 'mean', takeMean ) -% Specify whether to take a mean over the dimension. -% -% -% ENSEMBLE DIMENSIONS: obj.edit( varName, dim, 'ens', ... ) -% -% obj.edit( ..., 'index', ensIndex ) -% Specifies ensemble indices for the dimension. -% -% obj.edit( ..., 'mean', meanIndex ) -% Specifies sequential indices over which to take a mean. -% -% obj.edit( ..., 'seq', seqIndex, 'meta', seqMeta ) -% Specifies sequence indices and associated metadata for the dimension. -% -% obj.edit( ..., 'overlap', allowOverlap ) -% Specify whether the variable (and associated coupled variables) permit -% overlapping, non-duplicate sequences. By default, overlap is prohibited. -% -% -% EITHER: -% -% obj.edit( ..., 'mean', true/meanIndex, 'nanflag', nanflag ) -% Specify how to treat NaN elements when taking a mean. -% -% -% ----- Inputs ----- -% -% obj: A state vector design -% -% varName: The name of the variable to edit -% -% dim: The name of the dimension to edit -% -% dimType: The type of dimension. -% 'state': A state dimension. -% 'ens': An ensemble dimension. -% -% -% State dimension arguments: -% -% stateIndex: A list of state indices. Either linear indices, or a -% logical vector the length of the dimension. -% -% takeMean: A scalar logical indicating whether to take a mean over a -% state dimension. -% -% -% Ensemble dimension arguments: -% -% ensIndex: A list of ensemble indices. Either linear indices, or a -% logical vector the length of the dimension. 
-% -% meanIndex: A list of mean indices. Either linear indices, or a -% logical vector the length of the dimension. -% -% seqIndex: A list of sequence indices. Either linear indices, or a -% logical vector the length of the dimension. -% -% seqMeta: Metadata associated with each sequence element. Either a -% matrix with one row per sequence element, or a vector with -% one element per sequence element. -% -% allowOverlap: A scalar logical indicating whether overlap is permitted -% for the variable and associated coupled variables. -% -% -% Either dimension arguments: -% -% nanflag: A flag indicating how to treat NaN elements in a mean. -% 'includenan' (Default): Include NaN values. -% 'omitnan': Remove NaN values before taking means.% -% -% ----- Outputs ----- -% -% design: The updated state vector design. - -% ----- Written By ----- -% Jonathan King, University of Arizona, 2019 - -% (This is really just an interface so the user doesn't need to remember -% the name of two different fxns. The main purpose is to provide a central -% location for the "help" information, and to feed inputs into -% stateDimension or ensDimension, which do the actual work.) - -% Error check -if ~isstrflag(varName) - error('varName must be a string scalar.'); -elseif ~isstrflag(dim) - error('dim must be a string scalar.'); -elseif ~isstrflag(dimType) - error('dimType must be a string scalar.'); -elseif ~ismember( dimType, ["state","ens"] ) - error('dimType must either be "state" or "ens".'); -end - -% Variable and dimenion on which to operate -v = obj.findVarIndices( varName ); -d = obj.findDimIndices( v, dim ); - -% Edit a state or ensemble dimension -if strcmpi( dimType, "state" ) - obj = obj.stateDimension( v, d, varargin{:} ); -else - obj = obj.ensDimension( v, d, varargin{:} ); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/ensDimension.m b/2. State Vectors and Ensembles/@stateDesign/ensDimension.m deleted file mode 100644 index 63e8ea87..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/ensDimension.m +++ /dev/null @@ -1,109 +0,0 @@ -function[obj] = ensDimension( obj, v, d, varargin ) -% Internal function that edits ensemble dimensions -% v: Variable index in state Design -% -% d: Dimension index in varDesign - -% Parse inputs. Use pre-existing values as defaults so that multiple calls -% don't overwrite values -[index, seq, seqMeta, mean, nanflag] = parseInputs( varargin, ... - {'index', 'seq', 'meta', 'mean', 'nanflag'}, ... - {obj.var(v).indices{d}, obj.var(v).seqDex{d}, obj.var(v).seqMeta{d}, ... - obj.var(v).meanDex{d}, obj.var(v).nanflag{d}}, {[],[],[],[],[]}); - -% Minor error checking. Get defaults for seq and mean (0) when empty. -[seq, mean] = setup( seq, mean, varargin(1:2:end-1) ); - -% Check and set the indices (sorted linear column). Note the 0/1 indexing for seq and mean -obj.var(v).indices{d} = obj.checkIndices( index, v, d ); -obj.var(v).seqDex{d} = obj.checkIndices( seq+1, v, d ) - 1; -obj.var(v).meanDex{d} = obj.checkIndices( mean+1, v, d ) - 1; - -% Error check sequence metadata. Set default NaN for singleton -obj.var(v).seqMeta{d} = checkSeqMeta( seqMeta, seq ); - -% Can only take a mean if meanDex has more than 1 element. 
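% A minimal sketch of the edit syntax documented above, assuming "design" is
% a stateDesign with a variable "T" that has "lat" and "time" dimensions; the
% index values are placeholders. Sequence and mean indices for ensemble
% dimensions appear to be 0-referenced offsets from each ensemble index.
design = design.edit( "T", "lat", "state", 'index', 1:48, 'mean', true );
juneIndices = 6:12:1200;                   % placeholder ensemble indices
design = design.edit( "T", "time", "ens", 'index', juneIndices, ...
                      'mean', [0 1 2], 'nanflag', 'includenan' );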
-obj.var(v).takeMean(d) = true; -if numel(mean) == 1 - obj.var(v).takeMean(d) = false; -end - -% Finally, implement NaN flag behavior -obj.var(v).nanflag{d} = obj.getNaNflag( v, d, nanflag, varargin(1:2:end-1) ); - -% If changing dimension type, change for all coupled variables -% Delete coupled sequence and mean, notify user. -if obj.var(v).isState(d) - obj = obj.changeDimType( v, d ); -end - -end - -function [seq, mean] = setup( seq, mean, inArgs ) - -% If we switched from a state to an ens dimension, seq could be -% unspecified. Need to set the default. But also don't let a user set -% and empty seq. Later, we'll want to use checkIndices, but we should first -% block logicals. -if isempty(seq) - if ismember('seq', inArgs) - error('Cannot specify empty sequence indices.'); - end - seq = 0; -elseif islogical(seq) - error('Sequence indices cannot be a logical.'); -end - -% If we switched from state to ens, then meanDex could be empty. Give it a -% default value. However, don't let a user specify empty indices. Later, -% we'll want to use checkIndices, but we should first block logicals. -if isempty(mean) - if ismember( 'mean', inArgs) - error('Cannot specify empty mean indices.'); - end - mean = 0; -elseif islogical(mean) - error('mean indices cannot be logicals, they must be linear indices.'); -end - -end - -function[seqMeta] = checkSeqMeta( seqMeta, seq ) - -% If a row vector the length of seq, convert to column -if isrow(seqMeta) && length(seqMeta)==numel(seq) - seqMeta = seqMeta'; -end - -% If we switched from state to ens, then seqMeta will be empty -- check if -% the sequence is a single index, if so we can use NaN metadata. But if it -% is more than a single index, throw an error. -% -% Otherwise, heavy error checking on saved / input values. -if isempty(seqMeta) - if numel(seq) > 1 - error('Must provide sequence metadata.'); - end - seqMeta = NaN; - -elseif size(seqMeta,1) ~= numel(seq) - error('Sequence metadata must have one row per sequence index.'); -elseif ~ischar(seqMeta) && ~islogical(seqMeta) && ~isnumeric(seqMeta) && ~iscellstr(seqMeta) && ~isstring(seqMeta) - error('Sequence metadata must be a numeric, char, string, or cellstring data type.'); -elseif isnumeric(seqMeta) && any(isnan(seqMeta(:))) && numel(seq)>1 - error('Sequence metadata may not contain NaN.'); -elseif ~ismatrix(seqMeta) - error('Sequence metadata must be a matrix.'); -end - -% Use "strings" internally -if ischar(seqMeta) || isstring(seqMeta) - seqMeta = string(seqMeta); -end - -% Check all is unique -if size(seqMeta,1) ~= size( unique(seqMeta,'rows'), 1 ) - error('The sequence metadata contains duplicate values.'); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/ensembleSize.m b/2. State Vectors and Ensembles/@stateDesign/ensembleSize.m deleted file mode 100644 index e2af4753..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/ensembleSize.m +++ /dev/null @@ -1,7 +0,0 @@ -function[siz] = ensembleSize( obj ) -% Gets the size of the ensemble associated with the design -nEns = size( obj.var(1).drawDex, 1 ); -varLimit = obj.varIndices; -nState = varLimit(end); -siz = [nState, nEns]; -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/findDimIndices.m b/2. State Vectors and Ensembles/@stateDesign/findDimIndices.m deleted file mode 100644 index cde70105..00000000 --- a/2. 
State Vectors and Ensembles/@stateDesign/findDimIndices.m +++ /dev/null @@ -1,19 +0,0 @@ -function[d] = findDimIndices( obj, v, dim ) -% Get the dimension indices of named dimensions. - -% Ensure that dim is a string scalar. Convert to "string" for internal use -if ~isstrlist( dim ) - error('dim must be a character row vector, cellstring, or string vector.'); -end -dim = string(dim); - -% Get the indices -[ismem, d] = ismember( dim, obj.var(v).dimID ); - -% Throw error if not a dimension -if any(~ismem) - error('%s is not a dimension in the state design.', dim(find(~ismem,1)) ); -end - -end - diff --git a/2. State Vectors and Ensembles/@stateDesign/findVarIndices.m b/2. State Vectors and Ensembles/@stateDesign/findVarIndices.m deleted file mode 100644 index 1d0a3c7f..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/findVarIndices.m +++ /dev/null @@ -1,18 +0,0 @@ -function[v] = findVarIndices( obj, varName ) -% Get the dimension indices of named variables - -% Ensure that varName is a set of names. Convert to "string" for internal use -if ~isstrlist(varName) - error('varName must be a character row vector, cellstring, or string vector.'); -end -varName = string(varName); - -% Get the indices of the names in the state design -[ismem, v] = ismember( varName(:), obj.varName ); - -% Throw error if any variables are not in the state design. -if any( ~ismem ) - error('Variable %s is not in the state design', varName(find(~ismem,1)) ); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/getNaNflag.m b/2. State Vectors and Ensembles/@stateDesign/getNaNflag.m deleted file mode 100644 index bb7e284b..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/getNaNflag.m +++ /dev/null @@ -1,27 +0,0 @@ -function[nanflag] = getNaNflag( obj, v, d, nanflag, inArgs ) -% Implements nanflag behavior for complex edits. - -% Quick reference -takeMean = obj.var(v).takeMean(d); - -% If setting takeMean to false -- if the user provided a -% nanflag, throw error. For anything else, set the nanflag to []. -if ~takeMean - if ismember('nanflag', inArgs) && ~isempty(nanflag) - error('Cannot specify a NaN flag when not taking a mean.'); - elseif ~isempty(nanflag) - fprintf('No longer taking a mean. Deleting NaN flag for dimension %s of variable %s.\n', obj.var(v).dimID(d), obj.varName(v) ); - end - nanflag = []; - -% If setting takeMean to true -- if nanflag is [], default to includenan, -% otherwise error check -else - if isempty(nanflag) - nanflag = "includenan"; - elseif ~isstrflag(nanflag) || ~ismember(nanflag, ["omitnan","includenan"]) - error('nanflag must be either "omitnan" or "includenan".'); - end -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/getVariables.m b/2. State Vectors and Ensembles/@stateDesign/getVariables.m deleted file mode 100644 index bb227f84..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/getVariables.m +++ /dev/null @@ -1,24 +0,0 @@ -function[obj] = getVariables( obj, varNames ) -%% Gets the part of a state design associated with specified variables -% -% design = obj.getVariables( varNames ) -% -% ----- Inputs ----- -% -% varNames: A list of variable names. -% -% ----- Outputs ---- -% -% design: The stateDesign associated with the specified variables. 
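% A minimal sketch of getVariables, assuming "design" is a stateDesign that
% contains variables named "T" and "P" (placeholder names).
subDesign = design.getVariables( ["T", "P"] );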
- -% Get the variables, check they are allowed -v = obj.findVarIndices( varNames ); - -% Get the part of the design associated with the variables -obj.var = obj.var(v); -obj.varName = obj.varName(v); -obj.allowOverlap = obj.allowOverlap(v); -obj.isCoupled = obj.isCoupled(v,v); -obj.autoCouple = obj.autoCouple(v); - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/info.m b/2. State Vectors and Ensembles/@stateDesign/info.m deleted file mode 100644 index c7f0ddb1..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/info.m +++ /dev/null @@ -1,137 +0,0 @@ -function[] = info( obj, varNames, dims, showMeta ) -%% Displays information about a sate vector design. -% -% obj.info -% Outputs information on every dimension of every variable in a state -% vector design. -% -% obj.info( varNames ) -% Outputs information on every dimension for specified variables in a state -% vector design. -% -% obj.info( varNames, dims ) -% Outputs information on specific dimensions for specified variables. -% -% obj.info( varNames, dims, showMeta ) -% Specify whether to display metadata for each dimension. -% -% ----- Inputs ----- -% -% obj: A stateDesign object. -% -% varNames: A set of variable names. Either a character row vector, -% cellstring, or string vector. -% -% dims: A list of dimension names. Either a character row vector, -% cellstring, or string vector. -% -% showMeta: A scalar logical. Specifies whether to display dimension -% metadata. - -% ----- Written By ----- -% Jonathan king, University of Arizona, 2019 - -% Set defaults -if ~exist('varNames','var') || isempty(varNames) - varNames = obj.varName; -end -if ~exist('dims','var') || isempty(dims) - dims = obj.var(1).dimID; -end -if ~exist('showMeta','var') || isempty(showMeta) - showMeta = false; -end - -% Error check the inputs. 
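% A minimal sketch of the info calls documented above, assuming "design" is
% a stateDesign with a variable "T" and dimensions "lat" and "time"
% (placeholder names).
design.info;                               % summarize every variable
design.info( "T" );                        % one variable, all dimensions
design.info( "T", ["lat","time"], true );  % specific dimensions, with metadata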
Get the indices of the variables and dimensions -[v, dim] = setup( obj, varNames, dims, showMeta ); - -% Display design name and variables header -fprintf('State Vector Design: %s\n', obj.name ); -fprintf('Variables:\n'); - -% For each variable -for k = 1:numel(v) - var = obj.var( v(k) ); - - % Name, gridfile - fprintf('\t%s\n', obj.var(v(k)).name ); - fprintf( '\t\tGridfile: %s\n', var.file ); - - % Coupled variables, overlap status, dimension header - cv = find( obj.isCoupled(v(k),:) ); - cv = cv(cv~=v(k)); % Remove self - fprintf([ '\t\tCoupled Variables: ', sprintf('%s, ', obj.varName(cv)), '\b\b\n' ]); - - overlap = "Forbidden"; - if obj.allowOverlap(v(k)) - overlap = "Allowed"; - end - fprintf('\t\tEnsemble overlap: %s\n', overlap); - - fprintf('\t\tDimensions:\n'); - - % For each dimension - for j = 1:numel(dim) - d = dim(j); - - % Name, size, type, mean, nanflag - fprintf('\t\t\t%s\n', var.dimID(d)); - fprintf('\t\t\t\tNumber of indices: %0.f\n', numel(var.indices{d}) ); - - dimType = "State"; - if ~var.isState(d) - dimType = "Ensemble"; - end - fprintf('\t\t\t\tType: %s', dimType); - - takeMean = "True"; - if ~var.takeMean(d) - takeMean = "False"; - end - fprintf('\t\t\t\tMean: %s', takeMean); - - if var.takeMean - fprintf('\t\t\t\tNaN Flag: %s\n', var.nanflag{d} ); - end - - % If an ensemble dimension, show sequence and mean indices - if ~var.isState(d) - if ~isequal( var.seqDex{d}, 0 ) - fprintf( ['\t\t\t\tSequence Indices: ', sprintf('%i, ',var.seqDex{d}), '\b\b\n'] ); - end - - if ~isequal( var.meanDex{d}, 0 ) - fprintf( ['\t\t\t\tMean Indices: ', sprintf('%i, ', var.meanDex{d}), '\b\b\n'] ); - end - end - - % Show metadata if desired - if showMeta - fprintf('\t\t\t\t%s Index Metadata:\n', dimType); - disp( var.meta.(var.dimID(d))(var.indices{d},:) ); - fprintf('\b'); - - if ~isempty( var.seqMeta{d} ) && ~isnan( var.seqMeta{d} ) - fprintf('\t\t\t\tSequence Metadata:\n'); - disp( var.seqMeta{d} ); - fprintf('\b'); - end - end - - % Extra space for next line - fprintf('\n'); - end -end - -end - -function[v,d] = setup( obj, varNames, dims, showMeta ) - -v = obj.findVarIndices( varNames ); -d = obj.findDimIndices( v(1), dims ); - -if ~islogical(showMeta) || ~isscalar(showMeta) - error('showMeta must be a scalar logical.'); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/initializeDraws.m b/2. State Vectors and Ensembles/@stateDesign/initializeDraws.m deleted file mode 100644 index 8e99cf4a..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/initializeDraws.m +++ /dev/null @@ -1,25 +0,0 @@ -function[overlap, ensSize, undrawn, subDraws] = initializeDraws( obj, cv, nDraws ) -% Initialize a set of new draws - -% Get the overlap and ensemble size information -var1 = obj.var( cv(1) ); -overlap = obj.allowOverlap( cv(1) ); - -ensDim = find( ~var1.isState ); -nEnsDim = numel(ensDim); - -ensSize = NaN( 1, nEnsDim ); -for d = 1:nEnsDim - ensSize(d) = numel( var1.indices{ ensDim(d) } ); -end - -% Preallocate the draw arrays -if obj.new - undrawn = (1:prod(ensSize))'; - subDraws = NaN( nDraws, nEnsDim ); -else - undrawn = var1.undrawn; - subDraws = [var1.drawDex; NaN(nDraws, nEnsDim)]; -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/limitMembers.m b/2. State Vectors and Ensembles/@stateDesign/limitMembers.m deleted file mode 100644 index 6e66d977..00000000 --- a/2. 
State Vectors and Ensembles/@stateDesign/limitMembers.m +++ /dev/null @@ -1,28 +0,0 @@ -function[obj] = limitMembers( obj, members ) -% Reduces a state design to specified ensemble members. -% -% design = obj.limitMembers( members ) -% -% ----- Inputs ----- -% -% members: A vector of linear indices of ensemble members. -% -% ----- Outputs ----- -% -% design: The reduced state design - -% Error check -if isempty( obj.var ) - error('The state design does not have any variables.'); -elseif isempty( obj.var(1).drawDex ) - error('The state design does not have any ensemble members.'); -elseif ~isvector(members) || ~isnumeric(members) || ~isreal(members) || any(members<1) || any( mod(members,1)~=0 ) || any(members>numel(obj.var(1).drawDex)) - error('members must be a vector of positive integers that do not exceed %.f.', numel(obj.var(1).drawDex) ); -end - -% Limit the ensemble members -for v = 1:numel( obj.var ) - obj.var(v).drawDex = obj.var(v).drawDex(members,:); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/makeDraws.m b/2. State Vectors and Ensembles/@stateDesign/makeDraws.m deleted file mode 100644 index b910c998..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/makeDraws.m +++ /dev/null @@ -1,23 +0,0 @@ -function[obj] = makeDraws( obj, cv, nEns, random ) -% Select a set of ensemble draws - -% Initialize values in preparation for making draws. Note if there are -% previous draws and add to them. -nDraws = nEns; -[overlap, ensSize, undrawn, subDraws] = obj.initializeDraws( cv, nDraws ); - -% Make draws. Remove overlapping draws if necessary. Continue until -% the ensemble is complete or impossible. -while nDraws > 0 - [subDraws, undrawn] = obj.draw( nDraws, subDraws, undrawn, random, ensSize ); - - if ~overlap - [subDraws] = obj.removeOverlap( subDraws, cv ); - nDraws = sum( isnan( subDraws(:,1) ) ); - end -end - -% Save the draws for each variable -obj = obj.saveDraws( cv, subDraws, undrawn ); - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/matchMetadata.m b/2. State Vectors and Ensembles/@stateDesign/matchMetadata.m deleted file mode 100644 index 8d3ef49a..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/matchMetadata.m +++ /dev/null @@ -1,47 +0,0 @@ -function[obj] = matchMetadata( obj, cv ) -%% Restricts ensemble indices of coupled variables to matching metadata. - -% Get the first variable and ensemble dimensions -var1 = obj.var( cv(1) ); -ensDim = var1.dimID( ~var1.isState ); -ensIndex = find( ~var1.isState ); -nVar = numel(cv); - -% For each ensemble dimension, run through the set of variables and remove -% any non-intersecting metadata -for dim = 1:numel(ensDim) - d = ensIndex(dim); - meta = var1.meta.(ensDim(dim))( var1.indices{d}, : ); - - for v = 2:nVar - var = obj.var( cv(v) ); - varMeta = var.meta.(ensDim(dim))(var.indices{d}, :); - meta = intersect( meta, varMeta, 'rows', 'stable' ); - end - - % Throw an error if no metadata remains - if isempty(meta) - overlapError(obj, cv, ensDim(dim)); - end - - % Now we have the metadata intersect. Run through each variable and - % remove ensemble indices that are not in this intersect. - for v = 1:nVar - var = obj.var( cv(v) ); - varMeta = var.meta.(ensDim(dim))(var.indices{d}, :); - [~, keep] = intersect( varMeta, meta, 'rows', 'stable' ); - obj.var( cv(v) ).indices{d} = var.indices{d}(keep); - end -end - -end - -% A fancy error message. 
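% A minimal sketch of limitMembers as documented above, assuming "design" is
% a stateDesign whose ensemble members have already been drawn.
design50 = design.limitMembers( 1:50 );    % keep only the first 50 members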
-function[] = overlapError(design, cv, dim) - -coupled = sprintf('%s, ', design.var(cv).name); -error( ['The ensemble indices of the %s dimension of coupled variables: %s', ... - 'have no overlapping metadata.', newline, ... - 'Check that the ensemble indices point to the same values, and that the .grid metadata is in a common format.'], ... - dim, coupled ); -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/notifyChangedType.m b/2. State Vectors and Ensembles/@stateDesign/notifyChangedType.m deleted file mode 100644 index 773e23f9..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/notifyChangedType.m +++ /dev/null @@ -1,50 +0,0 @@ -function[] = notifyChangedType( obj, v, d, cv ) -% Alert user if dimension changes type automatically -% v: The variable index -% -% d: The dimension index -% -% cv: Optional coupled variables - -% If coupled variables are provided, only notify -if ~exist('cv','var') || (exist('cv','var') && ~isempty(cv)) - - % Get plurals for dimensions and variables - ds = "s"; - dverb = "are"; - if numel(d) == 1 - ds = ""; - dverb = "is"; - end - - cs = "s"; - cgroup = "these"; - if exist('cv','var') && numel(cv)==1 - cs = ""; - cgroup = "this"; - end - - vs = ""; - vgroup = "this"; - if exist('cv', 'var') - vs = cs; - vgroup = cgroup; - end - - line1 = [sprintf('Dimension%s ',ds), sprintf('"%s", ', obj.var(v).dimID(d)), ... - sprintf('\b\b %s changing type for variable "%s".\n', dverb, obj.varName(v)) ]; - - line2 = ''; - if exist('cv', 'var') - line2 = [sprintf('Coupled variable%s ',cs), sprintf('"%s", ', obj.varName(cv)), ... - sprintf('\b\b will also be altered.\n')]; - end - - line3 = sprintf('Deleting sequence and mean design specifications for %s variable%s.\n\n', ... - vgroup, vs ); - - - fprintf([line1, line2, line3]); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/notifySecondaryCoupling.m b/2. State Vectors and Ensembles/@stateDesign/notifySecondaryCoupling.m deleted file mode 100644 index bf782de2..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/notifySecondaryCoupling.m +++ /dev/null @@ -1,25 +0,0 @@ -function[] = notifySecondaryCoupling( obj, v, vall ) -% Alert user if unnamed variables are automatically coupled -% v: Initial variable indices. -% -% vall: The total set of variables. - -% Get the variables not in the initial list -sv = vall( ~ismember(vall, v) ); - -% Notify if additional variables are to be coupled -if ~isempty(sv) - s = "s"; - verb = "are"; - if numel(sv) == 1 - s = ""; - verb = "is"; - end - - % Notify - fprintf([sprintf('The variable%s ',s), sprintf('"%s", ', obj.varName(sv)), '\b\b\n',... - sprintf('%s already coupled to ',verb), sprintf('"%s", ', obj.varName(v)), '\b\b\n', ... - 'and will also be coupled.\n\n']); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/notifySecondaryOverlap.m b/2. State Vectors and Ensembles/@stateDesign/notifySecondaryOverlap.m deleted file mode 100644 index b0223f66..00000000 --- a/2. 
State Vectors and Ensembles/@stateDesign/notifySecondaryOverlap.m +++ /dev/null @@ -1,33 +0,0 @@ -function[] = notifySecondaryOverlap( obj, v, vall, tf ) -% Alert user if overlap permissions of unnamed variables are automatically altered -% v: Initial variable indices -% -% vall: Total set of variables -% -% tf: Whether to permit or forbid overlap - -% Get the variables not in the initial list -sv = vall( ~ismember(vall, v) ); - -% Notify if additional variables have overlap adjusted -if ~isempty(sv) - s = "s"; - verb = "are"; - if numel(sv) == 1 - s = ""; - verb = "is"; - end - - if tf - type = "allow"; - else - type = "forbid"; - end - - % Notify - fprintf([sprintf('The variable%s ',s), sprintf('"%s", ', obj.varName(sv)), '\b\b\n',... - sprintf('%s coupled to ',verb), sprintf('"%s", ', obj.varName(v)), '\b\b\n', ... - sprintf('and will also be updated to %s overlap.\n\n', type) ]); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/overlap.m b/2. State Vectors and Ensembles/@stateDesign/overlap.m deleted file mode 100644 index 4bfddae4..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/overlap.m +++ /dev/null @@ -1,51 +0,0 @@ -function[obj] = overlap( obj, tf, varNames ) -% Sets whether ensemble members of variables can contain partially -% duplicated data. -% -% design = obj.overlap( tf ) -% design = obj.overlap( tf, 'all' ) -% Sets overlap permission for every variable to true or false. -% -% design = obj.overlap( tf, varNames ) -% Sets the overlap permission for specific variables to true or false. -% Coupled variables will also be adjusted. -% -% ----- Inputs ----- -% -% tf: A scalar logical indicating whether to allow overlap. By default, -% variables do not allow overlap. -% -% varNames: A list of variable for which to adjust overlap permissions. -% -% ----- Outputs ----- -% -% design: The updated stateDesign object. - -% Set defaults, error check -if ~exist('varNames','var') || isempty(varNames) - varNames = "all"; -elseif ~isstrlist(varNames) - error('varNames must be a string vector, cellstring vector, or character row vector.'); -end -varNames = string(varNames); -if ~isscalar(tf) || ~islogical(tf) - error('tf must be a scalar logical.'); -elseif ismember("all",varNames) && numel(varNames)>1 - error('Cannot use "all" with variable names.'); -end - -% If "all", just use all variables. Otherwise get the variable indices. -vall = 1:numel(obj.var); -if numel(varNames) > 1 || ~strcmp(varNames,'all') - v = obj.findVarIndices(varNames); - - % Find coupled variables and notify the user - [~,vall] = find( obj.isCoupled(v,:) ); - vall = unique( [v; vall], 'stable' ); - obj.notifySecondaryOverlap( v, vall, tf ); -end - -% Update the overlap permissions -obj.allowOverlap( vall ) = tf; - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/remove.m b/2. State Vectors and Ensembles/@stateDesign/remove.m deleted file mode 100644 index f2314c45..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/remove.m +++ /dev/null @@ -1,34 +0,0 @@ -function[obj] = remove(obj, varNames) -%% Deletes variables from a state vector design. -% -% design = obj.remove( varNames ) -% Deletes a set of variables from a state vector. -% -% ----- Inputs ----- -% -% obj: A state vector design. -% -% varNames: A list of variable names. Either a character row, cellstring, -% or string vector. -% -% ----- Outputs ----- -% -% design: The updated state vector design. 
- -% ----- Written By ----- -% Jonathan King, University of Arizona, 2019 - -if ~isempty( varNames ) - % Get the variable indices - v = obj.findVarIndices( varNames ); - - % Delete from design fields - obj.var(v) = []; - obj.varName(v) = []; - obj.isCoupled(v,:) = []; - obj.isCoupled(:,v) = []; - obj.autoCouple(v) = []; - obj.allowOverlap(v) = []; -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/removeOverlap.m b/2. State Vectors and Ensembles/@stateDesign/removeOverlap.m deleted file mode 100644 index af4f8c8e..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/removeOverlap.m +++ /dev/null @@ -1,25 +0,0 @@ -function[subDraws] = removeOverlap( obj, subDraws, cv ) -% Remove overlapping draws for a set of coupled variables - -% Iterate through all the variables (because -% the variables may have different spacing in different dimensions). -for v = 1:numel(cv) - var = obj.var( cv(v) ); - - % Get the data indices from which values are loaded in the .grid file - [dataIndex, nSeq] = var.dataIndices( subDraws ); - - % Get the indices of repeated / overlapping sampling indices. Overwrite - % overlapping draws with NaN and move to the end of the array. - [~, uniqIndex] = unique( dataIndex, 'rows', 'stable' ); - overlap = (1:size(dataIndex,1))'; - overlap = overlap( ~ismember(overlap, uniqIndex) ); - - badDraw = unique( ceil( overlap / nSeq ) ); - subDraws( badDraw, : ) = NaN; - - failed = ismember( 1:size(subDraws,1), badDraw ); - subDraws = [ subDraws(~failed,:); subDraws(failed,:) ]; -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/resetChangedDim.m b/2. State Vectors and Ensembles/@stateDesign/resetChangedDim.m deleted file mode 100644 index a40a94fb..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/resetChangedDim.m +++ /dev/null @@ -1,30 +0,0 @@ -function[obj] = resetChangedDim( obj, var, d ) -% Flip type, delete mean and sequence data. Notify user. - -% For each variable -for k = 1:numel(var) - v = var(k); - - % Change the type - obj.var(v).isState(d) = ~obj.var(v).isState(d); - - % Always remove mean - obj.var(v).takeMean(d) = false; - obj.var(v).nanflag{d} = []; - - % If now a state dimension, completely delete mean and sequence data - if obj.var(v).isState(d) - obj.var(v).meanDex{d} = []; - obj.var(v).seqDex{d} = []; - obj.var(v).seqMeta{d} = []; - - % But if an ensemble, use the defaults - else - obj.var(v).meanDex{d} = 0; - obj.var(v).seqDex{d} = 0; - obj.var(v).seqMeta{d} = NaN; - end -end - -end - diff --git a/2. State Vectors and Ensembles/@stateDesign/saveDraws.m b/2. State Vectors and Ensembles/@stateDesign/saveDraws.m deleted file mode 100644 index 06b3edca..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/saveDraws.m +++ /dev/null @@ -1,15 +0,0 @@ -function[obj] = saveDraws( obj, cv, subDraws, undrawn ) -% Save the draws for each set of coupled variables. - -% Get the ensemble dimensions -var1 = obj.var( cv(1) ); -ensDim = find( ~var1.isState ); - -% Save the values -for v = 1:numel(cv) - obj.var( cv(v) ).drawDex = subDraws; - obj.var( cv(v) ).undrawn = undrawn; - obj.var( cv(v) ).drawDim = ensDim; -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/stateDesign.m b/2. State Vectors and Ensembles/@stateDesign/stateDesign.m deleted file mode 100644 index 7d907dd8..00000000 --- a/2. 
State Vectors and Ensembles/@stateDesign/stateDesign.m +++ /dev/null @@ -1,224 +0,0 @@ -classdef stateDesign - % stateDesign - % Stores design specifications and parameters for a state vector, and - % builds ensembles. - % - % stateDesign Methods: - % stateDesign - Creates a new stateDesign object. - % add - Adds a new variable - % edit - Edits a the design specifications of a dimension of a variable. - % weightedMean - Specify weights for weighted means. - % copy - Copies design specifications from a template variable to other variables. - % info - Displays information about the state vector. - % buildEnsemble - Create an ensemble from the design. - % - % Advanced stateDesign Methods: - % overlap - Adjust overlap permissions of variables - % uncouple - Uncouples specified variables - % couple - Couples specified variables - % remove - Deletes specified variables from the design. - % getVariables - Gets the design associated with specific variables - % append - Appends a second stateDesign to the current object - - % ----- Written By ----- - % Jonathan King, University of Arizona, 2019 - - % User can see, but not touch. - properties (SetAccess = private) - name; % An identifier for the state vector. - var; % The array of variable designs - varName; % The names of the variables in the design. - allowOverlap; % Whether the variable permits overlapping non-duplicate sequences - isCoupled; % Notes whether variables are coupled - autoCouple; % Whether the variable should be automatically coupled to new variables. - new; % Whether this is an unwritten stateDesign - end - - % Constructor block. - methods - function obj = stateDesign( name ) - % Constructor for a stateDesign object. - % - % obj = stateDesign( name ) - % Creates an empty stateDesign object and assigns an - % identifying name. - % - % ----- Inputs ----- - % - % name: An identifying name for the stateDesign. Either a - % string scalar or character row vector. - % - % ----- Outputs ----- - % - % obj: The new, empty stateDesign object. - - % Check that the name is allowed - if ~isstrflag(name) - error('The stateDesign name must be a string.'); - end - - % Set the name - obj.name = name; - - % Initialize logical arrays - obj.isCoupled = logical([]); - obj.autoCouple = logical([]); - - % No ensembles yet.. - obj.new = true; - end - end - - % Basic user methods. - methods - - % Adds a new variable to the state vector. - obj = add( obj, varName, file, autoCouple ); - - % Edits the design specifications of a variable in the state vector. - obj = edit( obj, varName, dim, dimType, varargin ); - - % Specify weights for a weighted mean - obj = weightedMean( var, dims, weights, nanflag ); - - % Copies indices from one variable to other variables. - obj = copy( obj, fromVar, toVars ); - - % Displays information about the state vector - info( obj, varName, dims, showMeta ); - - % Create an ensemble from the design. - ens = buildEnsemble( obj, nEns, file, random, writeNaN ); - - end - - % Advanced user methods - methods - - % Adjusts overlap permissions - obj = overlap( obj, tf, varNames ); - - % Couples specified variables. - obj = couple( obj, varNames ); - - % Uncouples specified variables. - obj = uncouple( obj, varNames ); - - % Removes a set of variables from the state vector. 
- obj = remove( obj, varNames ); - - % Appends a separate state design - design = append( obj, design ); - - % Gets the part of a state design associated with specific variables - design = getVariables( obj, varNames ); - - end - - % Methods to modify the design - methods - - %% Index search and name error checking - - % Find the index of a variable in the list of variables in the - % state vector. - v = findVarIndices( obj, varName ); - - % Find the index of a dimension in the list of variables - d = findDimIndices( obj, v, dim ); - - %% Dimension indices - - % Edit design for a state dimension - obj = stateDimension( obj, varName, dim, varargin ); - - % Edit design for an ensemble dimension - obj = ensDimension( obj, varName, dim, varargin ); - - % Process indices for internal use - index = checkIndices( obj, index, v, d ); - - % Implements nanflag behavior for complex edits. - nanflag = getNaNflag( obj, v, d, nanflag, inArgs ) - - %% Coupling - - % Flips a dimension and applies to all coupled variables. - obj = changeDimType( obj, v, d ); - - % Flips type, deletes mean and sequence data, notifies user. - obj = resetChangedDim( obj, var, d ); - - end - - % Methods to notify user of automatic design changes - methods - - % Notify the user when sequence and mean data are deleted for - % coupled dimensions that change type. - notifyChangedType( obj, v, d, cv ); - - % Notify the user when secondary variables are coupled - notifySecondaryCoupling( obj, v, vall ); - - % Notify when coupled variables have overlap permissions altered - notifySecondaryOverlap( obj, v, vall, tf ); - - end - - % Methods for ensemble metadata objects - methods - - % Get the metadata associated with each variable - [stateMeta, ensMeta] = varMetadata( obj ); - - % Get the size of the ensemble - [siz] = ensembleSize( obj ); - - % Returns the state vector index limits and dimensional size of - % each variable. - [varLimits, varSize, isState, nMean] = varIndices( obj ); - - % Reduce the number of ensemble members used - design = limitMembers( obj, members ); - - end - - % Methods for generating ensembles - methods - - % Returns the variable indices of each set of coupled variables. - cv = coupledVariables( obj ); - - % Removes ensemble indices that don't allow a full sequence - obj = trim( obj ); - - % Restricts a set of coupled variables to ensemble indices with - % matching metadata - obj = matchMetadata( obj, cv ); - - % Select draws - obj = makeDraws( obj, cv, nEns, random ); - - % Writes the ensemble to file - [] = write( obj, file, random, writenan, new ) - end - - % Methods for making draws - methods - - % Initializes an array of draws for a design. - [overlap, ensSize, undrawn, subDraws] = initializeDraws( obj, cv, nDraws ); - - % Selects a set of N-D subscripted draws - [subDraws, undrawn] = draw( obj, nDraws, subDraws, undrawn, random, ensSize ); - - % Removes overlapping draws from an ensemble - subDraws = removeOverlap( obj, subDraws, cv ) - - % Saves finalized draws to variables - obj = saveDraws( obj, cv, subDraws, undrawn ); - - end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/stateDimension.m b/2. State Vectors and Ensembles/@stateDesign/stateDimension.m deleted file mode 100644 index a6837d33..00000000 --- a/2. 
State Vectors and Ensembles/@stateDesign/stateDimension.m +++ /dev/null @@ -1,32 +0,0 @@ -function[obj] = stateDimension( obj, v, d, varargin ) -% Internal function for editing state dimensions -% v: Variable index in state Design -% -% d: Dimension index in varDesign - -% Parse inputs. Use the pre-existing values as the defaults. (This way a -% second call to obj.edit, as may happen in the console, doesn't overwrite) -[index, takeMean, nanflag] = parseInputs( varargin, {'index', 'mean', 'nanflag'}, ... - {obj.var(v).indices{d}, obj.var(v).takeMean(d), obj.var(v).nanflag{d}}, {[],[],[]}); - -% Error check and set takeMean and the nanflag -obj.var(v).takeMean(d) = errCheck(takeMean); -obj.var(v).nanflag{d} = obj.getNaNflag( v, d, nanflag, varargin(1:2:end-1) ); - - % Error check, process, and record indices. (Sorted column, linear) -obj.var(v).indices{d} = obj.checkIndices( index, v, d ); - -% If changing dimension type, change for all coupled variables -% Delete coupled sequence and mean, notify user. -if ~obj.var(v).isState(d) - obj = obj.changeDimType( v, d ); -end - -end - -function[takeMean] = errCheck(takeMean) - if ~isscalar(takeMean) || ~islogical(takeMean) - error('takeMean must be a scalar logical.'); - end -end - diff --git a/2. State Vectors and Ensembles/@stateDesign/trim.m b/2. State Vectors and Ensembles/@stateDesign/trim.m deleted file mode 100644 index b03de872..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/trim.m +++ /dev/null @@ -1,22 +0,0 @@ -function[obj] = trim( obj ) -%% Trims ensemble dimensions to only allow complete sequences - -% Get the ensemble dimensions for each variable -for v = 1:numel( obj.var ) - var = obj.var(v); - ensDims = find( ~var.isState ); - - % And remove any indices for which a full sequence would surpass the - % length of the dimension - for dim = 1:numel(ensDims) - d = ensDims(dim); - - dimLength = var.dimSize(d); - seqLength = max(var.seqDex{d}) + max(var.meanDex{d}); - - remove = var.indices{d} > dimLength - seqLength; - obj.var(v).indices{d}(remove) = []; - end -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/uncouple.m b/2. State Vectors and Ensembles/@stateDesign/uncouple.m deleted file mode 100644 index e62c4a70..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/uncouple.m +++ /dev/null @@ -1,37 +0,0 @@ -function[obj] = uncouple( obj, varNames ) -%% Uncouples variables in a state vector design from all other variables. -% -% design = obj.uncouple( varNames ) -% Uncouples a set of variables from all other variables. Disables the -% autocoupler. -% -% ---- Inputs ----- -% -% obj: A state vector design -% -% varNames: The list of variables to uncouple. Either a cellstring or -% string vector. -% -% ----- Outputs ----- -% -% design: The updated state vector design. - -% ----- Written By ----- -% Jonathan King, University of Arizona, 2019 - -% Get the indices of the variables in the state vector design -v = obj.findVarIndices( varNames ); - -% Uncouple each variable from all others -for k = 1:numel(v) - obj.isCoupled( v(k), : ) = false; - obj.isCoupled( :, v(k) ) = false; - - % But the variable should remain coupled with itself.... - obj.isCoupled( v(k), v(k) ) = true; - - % Remove the autocoupler - obj.autoCouple( v(k) ) = false; -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/varIndices.m b/2. State Vectors and Ensembles/@stateDesign/varIndices.m deleted file mode 100644 index 3b8f7c24..00000000 --- a/2. 
State Vectors and Ensembles/@stateDesign/varIndices.m +++ /dev/null @@ -1,39 +0,0 @@ -function[varLimits, varSize, isState, nMean] = varIndices( obj ) -%% Returns the state vector index limits and dimensional size of each variable - -% Get the variables -vars = obj.var(:); - -% Preallocate -nVar = numel( vars ); -nDim = numel( vars(1).dimID ); -varSize = NaN( nVar, nDim ); -isState = true( nVar, nDim ); -nMean = ones( nVar, nDim ); - -% Get the size of each variable in each dimension. Number of state indices, -% or number of sequence indices (adjusted for means) -for v = 1:nVar - - for d = 1:nDim - if vars(v).isState(d) && ~vars(v).takeMean(d) % State dimension, no mean - varSize(v,d) = numel( vars(v).indices{d} ); - elseif vars(v).isState(d) % State dimension, with mean - varSize(v,d) = 1; - nMean(v,d) = numel( vars(v).indices{d} ); - else % Ensemble dimensions - varSize(v,d) = numel( vars(v).seqDex{d} ); - isState(v,d) = false; - if vars(v).takeMean(d) % Ensemble dimension with mean - nMean(v,d) = numel( vars(v).meanDex{d} ); - end - end - end -end - -% Record the limits -nEls = prod( varSize, 2 ); -lastIndex = cumsum(nEls); -varLimits = [lastIndex-nEls+1, lastIndex]; - -end diff --git a/2. State Vectors and Ensembles/@stateDesign/varMetadata.m b/2. State Vectors and Ensembles/@stateDesign/varMetadata.m deleted file mode 100644 index 2cec6a59..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/varMetadata.m +++ /dev/null @@ -1,42 +0,0 @@ -function[stateMeta, ensMeta] = varMetadata( obj ) -% Gets the ensemble metadata for each variable - -% Use a structure to store output -stateMeta = struct; -ensMeta = struct; - -% Run through each dimension of each variable, reading metadata -nVar = numel(obj.var); -nDim = numel(obj.var(1).dimID); -for v = 1:nVar - var = obj.var(v); - ensDim = 1; - for d = 1:nDim - - % For state dimensions, get metadata at state indices. If taking a - % mean, propagate down the third dimension. - if var.isState(d) - dimMeta = var.meta.(var.dimID(d))( var.indices{d}, : ); - if var.takeMean(d) - dimMeta = permute( dimMeta, [3 2 1] ); - end - - % For ensemble dimensions, use the sequence metadata. If there were - % draws, record them as ensemble metadata - else - dimMeta = var.seqMeta{d}; - - if ~isempty( var.drawDex ) - ensMeta.(obj.varName(v)).(var.dimID(d)) = ... - var.meta.(var.dimID(d))( var.indices{d}(var.drawDex(:,ensDim)), : ); - ensDim = ensDim + 1; - end - end - - % Update the statemetadata regardless of dimension type - stateMeta.(obj.varName(v)).(var.dimID(d)) = dimMeta; - end -end - -end - \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/weightedMean.m b/2. State Vectors and Ensembles/@stateDesign/weightedMean.m deleted file mode 100644 index 2e1716e9..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/weightedMean.m +++ /dev/null @@ -1,74 +0,0 @@ -function[obj] = weightedMean( obj, var, dims, weights, nanflag ) -%% Specifies weights to use when taking a mean. -% -% design = obj.weightedMean( var, dims, weights ) -% Uses a weighted mean for specified dimensions of a particular variable. -% -% design = obj.weightedMean( var, dims, weights, nanflag ) -% Indicates how to treat NaN values in a mean. Default is to include nan. -% -% ----- Inputs ----- -% -% var: The name of a variable. A string. -% -% dims: The dimensions over which to apply the weighted mean. A string -% vector. -% -% weights: The weights to apply. For dims = ["dim1", "dim2", ... 
"dimN"], -% this is an array of size [length(dim1), length(dim2), ... length(dimN)]. -% -% nanflag: A string scalar indicating whether to include NaN values -% "includenan": Use NaN values when computing a mean -% "omitnan": Remove NaN values when computing a mean -% -% ----- Outputs ----- -% -% design: The updated stateDesign object. - -% Set defaults. Get indices -if ~exist('nanflag','var') || isempty(nanflag) - nanflag = "includenan"; -end -v = obj.findVarIndices( var ); -dims = obj.findDimIndices( v, dims ); - -% Error check -[~,dimSize] = obj.varIndices; -dimSize = dimSize(v,:); -if ~isstrflag(var) - error('var must be a string scalar or character row vector.'); -elseif ~isstrflag(nanflag) || ~ismember(nanflag, ["includenan","omitnan"]) - error('nanflag must either be "includenan" or "omitnan".'); -elseif ~isnumeric(weights) || ~isreal(weights) - error('Weights must be a numeric, real array.'); -elseif ~isequal( size(weights), dimSize(dims) ) - errString = ['[', sprintf('%.f x ', dimSize(dims)), sprintf('\b\b\b]')]; - error('The size of the weights array must match the length of each dimension in the state vector: %s.', errString); -elseif ~isempty( obj.var(v).weightDims ) && any( obj.var(v).weightDims(:,dims), 'all' ) - [~, dim] = find( obj.var(v).weightDims(:,dims) ); - error(['The dimensions ', sprintf('"%s", ', obj.var(v).dimID(dim)), 'are already being used in weighted means.']); -end - - - -% Permute the weights to match the order of dimensions in the grid file -[dims, reorder] = sort( dims ); -weights = permute( weights, reorder ); -resize = ones( 1, numel(obj.var(v).dimID) ); -resize(dims) = size(weights); -weights = reshape( weights, resize ); - -% Set takeMean and nanflag for all relevant dimensions -for d = 1:numel(dims) - obj.var(v).takeMean( dims(d) ) = true; - obj.var(v).nanflag{ dims(d) } = nanflag; -end - -% Record the weights -newDims = false( 1, numel(obj.var(v).dimID) ); -newDims( dims ) = true; -obj.var(v).weightDims(end+1,:) = newDims; - -obj.var(v).weights = cat(1, obj.var(v).weights, {weights} ); - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@stateDesign/write.m b/2. State Vectors and Ensembles/@stateDesign/write.m deleted file mode 100644 index 932ba497..00000000 --- a/2. State Vectors and Ensembles/@stateDesign/write.m +++ /dev/null @@ -1,62 +0,0 @@ -function[] = write( obj, file, random, writenan, new ) -% Writes the ensemble to file. - -% Handle pre-existing or non-existing files. -if new && exist(fullfile(pwd,file),'file') - delete(file); -elseif ~new && ~exist('file','file') - error('The file "%s" in the ensemble object does not exist. 
It may have been deleted or removed from the active path.', file); -end - -% Get the matfile and fill in values as appropriate -ens = matfile(file,'Writable', true); -ens.complete = false; -ensSize = obj.ensembleSize; -if new - nNew = ensSize(2); - nWritten = 0; - ens.M( 1:ensSize(1), 1:nNew ) = NaN; - ens.random = random; - ens.writenan = writenan; - ens.hasnan = []; - hasnan = false( numel(obj.var), ensSize(2)); -else - nWritten = ens.ensSize(1,2); - nNew = ensSize(2) - nWritten; - ens.M( :, nWritten + (1:nNew) ) = NaN; - hasnan = false( numel(obj.var), nNew); -end - -% Determine the unique gridFiles -nVar = numel( obj.var ); -gridFiles = cell(nVar, 1); -[gridFiles{:}] = deal( obj.var.file ); -gridFiles = string( gridFiles ); -uniqGrids = unique( gridFiles ); -nGrid = numel( uniqGrids ); - -% Preallocate passed values for data sources in shared gridFiles -[~, passIndex] = ismember( gridFiles, uniqGrids ); -passVals = cell( nGrid, 1 ); -for g = 1:nGrid - grid = load( uniqGrids(g), '-mat', 'nSource' ); - passVals{g} = cell( 1, grid.nSource+1 ); -end - -% Get the ensemble for each variable. Record NaN members and write to file -[varLimit, varSize, ~, nMean] = obj.varIndices; -for v = 1:numel( obj.var ) - [M, passVals{passIndex(v)}] = ... - obj.var(v).buildEnsemble( nWritten, varSize(v,:), nMean(v,:), passVals{passIndex(v)} ); - hasnan(v,:) = any( isnan(M), 1 ); - ens.M( varLimit(v,1):varLimit(v,2), nWritten+(1:nNew) ) = M; -end - -% Finish adding values to the .ens file -ens.hasnan = [ens.hasnan, hasnan]; -ens.ensSize = ensSize; -obj.new = false; -ens.design = obj; -ens.complete = true; - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@varDesign/buildEnsemble.m b/2. State Vectors and Ensembles/@varDesign/buildEnsemble.m deleted file mode 100644 index 2517951c..00000000 --- a/2. State Vectors and Ensembles/@varDesign/buildEnsemble.m +++ /dev/null @@ -1,95 +0,0 @@ -function[M, passVals] = buildEnsemble( var, nWritten, varSize, nMean, passVals ) -% Builds part of a prior for a single variable - -% Preallocate the ensemble for the variable -nNew = size(var.drawDex,1) - nWritten; -nState = prod( varSize ); -M = NaN( nState, nNew ); - -% Preallocate scs and keep -nDim = numel(var.dimID); -scs = NaN(3, nDim); -keep = cell(nDim,1); - -% Get scs and keep for the state dimensions -stateDim = find( var.isState ); -for d = 1:numel(stateDim) - [scs(:,stateDim(d)), keep{stateDim(d)}] = loadKeep( var.indices{stateDim(d)} ); -end - -% Get the add indices and unordering for the ensemble dimensions -ensDim = find( ~var.isState ); -addIndex = cell( numel(ensDim), 1 ); -unorder = repmat( {':'}, [nDim, 1]); -for d = 1:numel(ensDim) - addIndex{d} = var.seqDex{ensDim(d)}' + var.meanDex{ensDim(d)}; - [addIndex{d}, i] = sort( addIndex{d}(:) ); - [~, unorder{ensDim(d)}] = sort( i ); -end - -% Get the sequence number associated with each ensemble dimension element. 
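% (A minimal worked case, with hypothetical sizes: for a single ensemble
% dimension holding 3 sequence elements and a 2-element mean, the loop
% below produces seq = [1; 1; 2; 2; 3; 3], so each of the 6 loaded
% elements is tagged with the sequence, 1 through 3, that it belongs to.)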
-nSeq = prod( varSize(ensDim) ); -nDup = [1, varSize(ensDim)]; -nRep = [nMean(ensDim), 1]; -seq = 1; -k = 1; -for d = 1:numel(nDup) - kNew = nDup(d) * k; - addk = repmat( 0:k:kNew-k, [size(seq,1),1] ); - k = kNew; - - seq = repmat( seq, [nDup(d),1] ); - seq = seq + addk(:); - seq = repmat( seq, [nRep(d),1] ); -end - -% Get a sequence sorting order and unpermute order for the data reshape -[~, seqOrder] = sort( seq ); -seqOrder = [ repmat({':'}, [numel(stateDim),1]); seqOrder ]; -[~, unpermute] = sort( [stateDim;ensDim] ); -unpermute = [unpermute; max(unpermute)+1]; - -% Load each ensemble member -progressbar( char(var.name) ); -for mc = 1:nNew - draw = nWritten + mc; - - % Get scs and keep for the ensemble dimensions - for d = 1:numel(ensDim) - ensMember = var.drawDex(draw, d); - indices = var.indices{ensDim(d)}(ensMember) + addIndex{d}; - [scs(:,ensDim(d)), keep{ensDim(d)}] = loadKeep( indices ); - end - - % Load data, discard unneeded values. Unsort add indices - [data, passVals] = gridFile.read( var.file, scs, passVals ); - data = data( keep{:} ); - data = data( unorder{:} ); - - % Reshape sequences along the end+1 dimension - data = reshape( data, [ varSize(stateDim).*nMean(stateDim), prod([nMean(ensDim),varSize(ensDim)]) ] ); - data = data( seqOrder{:} ); - data = reshape( data, [varSize(stateDim).*nMean(stateDim), nMean(ensDim), nSeq] ); - data = permute( data, unpermute ); - - % Take weighted means - dims = 1:numel(var.dimID); - for w = 1:numel( var.weights ) - wdim = find( var.weightDims(w,:) ); - data = sum( data.*var.weights{w}, wdim, var.nanflag{wdim(1)} ) ./ sum(var.weights{w}, 'all'); - dims( ismember(dims,wdim) ) = []; - end - - % Take normal means over the remaining dimensions - for d = 1:numel( dims ) - if var.takeMean(dims(d)) - data = mean( data, dims(d), var.nanflag{dims(d)} ); - end - end - - % Store ensemble member as state vector. - M(:,mc) = data(:); - progressbar( mc / nNew ); -end - -end \ No newline at end of file diff --git a/2. State Vectors and Ensembles/@varDesign/dataIndices.m b/2. State Vectors and Ensembles/@varDesign/dataIndices.m deleted file mode 100644 index e0d931d0..00000000 --- a/2. State Vectors and Ensembles/@varDesign/dataIndices.m +++ /dev/null @@ -1,44 +0,0 @@ -function[dataIndex, nSeq] = dataIndices( var, subDraws ) -%% Determines all of the grid file indices that will be loaded for a -% variable with a given set of ND subscripted draws - -ensDim = find( ~var.isState ); -nDim = numel(ensDim); - -% Preallocate ensemble indices for draws, sequence elements in each -% dimension, size of the sequence -notnan = ~isnan( subDraws(:,1) ); -nDraws = sum( notnan ); -ensIndices = NaN( nDraws, nDim ); - -seqElements = cell( 1, nDim ); -nEls = NaN( 1, nDim ); - -% For each ensemble dimension, get the ensemble indices. Add them to the -% sequence and mean indices to get the set of sequence elements. -for d = 1:nDim - ensIndices(:,d) = var.indices{ensDim(d)}( subDraws(notnan,d) ); - - seq = var.seqDex{ensDim(d)} + var.meanDex{ensDim(d)}'; - seqElements{d} = seq(:); - nEls(d) = numel(seq); -end - -% Get sequence subscript indices for N-D. Use them to subscript the -% sequence elements -nSeq = prod(nEls); -subIndices = subdim( (1:nSeq)', nEls ); - -subSequences = NaN( nSeq, nDim ); -for d = 1:nDim - subSequences(:,d) = seqElements{d}( subIndices(:,d) ); -end - -% Combine the ensemble indices and sequence elements to get the set of -% indices from which data is read from the grid files. 
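% A self-contained sketch of the combination step that follows, using
% made-up numbers: one ensemble dimension, two draws whose ensemble
% indices are 10 and 50, and sequence offsets 0:2. The variable names
% mirror the code below; the sizes are purely illustrative.
nDraws = 2;  nSeq = 3;  nDim = 1;
ensIndices   = [10; 50];                                % nDraws x nDim
subSequences = [0; 1; 2];                               % nSeq x nDim
subSequences = repmat( subSequences, [nDraws, 1] );     % offsets repeated for every draw
ensIndices   = repmat( ensIndices(:)', [nSeq, 1] );     % each draw repeated for every offset
ensIndices   = reshape( ensIndices, [nSeq*nDraws, nDim] );
dataIndex    = ensIndices + subSequences;               % [10;11;12;50;51;52]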
-subSequences = repmat( subSequences, [nDraws, 1] ); -ensIndices = repmat( ensIndices(:)', [nSeq, 1] ); -ensIndices = reshape( ensIndices, [nSeq*nDraws, nDim] ); -dataIndex = ensIndices + subSequences; - -end diff --git a/2. State Vectors and Ensembles/@varDesign/varDesign.m b/2. State Vectors and Ensembles/@varDesign/varDesign.m deleted file mode 100644 index b0086304..00000000 --- a/2. State Vectors and Ensembles/@varDesign/varDesign.m +++ /dev/null @@ -1,105 +0,0 @@ -classdef varDesign - % varDesign - % This is a custom structure that holds design parameters for a single - % variable in a state vector. - % - % This class should not be used by users. Instead, modify design - % parameters via the "stateDesign" class. - - % ----- Written By ----- - % Jonathan King, University of Arizona, 2019 - - - % The values needed for each variable. - properties - name; % Variable name - - % Grid file properties - file; % File name - dimID; % Dimensional ordering - dimSize; % Dimension size - meta; % Metadata - - % State or Ensemble properties - isState; % Whether a dimension is a state dimension. - indices; % The allowed indices for state or ensemble dimensions - takeMean; % Toggle to take a mean - nanflag; % How to treat NaN - - % Indices - seqDex; % The indices used to get dimensional sequences - seqMeta; % The metadata value for sequence members - meanDex; % The indices used to take a mean - - % Ensemble draws - drawDex; - drawDim; - undrawn; - - % Weighted means - weightDims; % The dimensions associated with each set of weights - weights; % The weights to use when taking a mean - end - - % Constructor - methods - function obj = varDesign( file, name ) - - % Get the name. Convert to string for internal use. - if ~isstrflag(name) - error('Variable name must be a string scalar.'); - elseif ~isstrflag(file) - error('File name must be a string scalar.'); - end - obj.name = string(name); - obj.file = string(file); - - % Get the grid file metadata - [meta, dimID, dimSize] = gridFile.meta( obj.file ); - - % Ensure that the gridfile contains all known IDs - allID = getDimIDs; - if any(~ismember(allID, dimID)) - missing = allID( find(~ismember(allID, dimID),1) ); - error(['The gridfile %s is missing the %s dimension.\n',... - 'The function getDimIDs.m may have been edited after the gridfile was created.'], file, missing); - end - - % Set metadata - obj.dimID = dimID; - obj.dimSize = dimSize; - obj.meta = meta; - - % Get the number of dimensions - nDim = numel(dimID); - - % Preallocate dimensional quantities - obj.isState = true(nDim,1); - obj.indices = cell(nDim,1); - obj.takeMean = false(nDim,1); - obj.nanflag = cell(nDim,1); - obj.seqDex = cell(nDim,1); - obj.seqMeta = cell(nDim,1); - obj.meanDex = cell(nDim,1); - - obj.drawDex = []; - obj.undrawn = []; - - for d = 1:nDim - obj.indices{d} = (1:dimSize(d))'; - end - end - end - - % Utilities - methods - - % Determine all grid file indices that will be loaded for a set of - % draws. - [dataIndex, nSeq] = dataIndices( var, subDraws ) - - % Builds the ensemble for the variable - [M, passVals] = buildEnsemble( var, nWritten, varSize, nMean, passVals ) - end - -end \ No newline at end of file diff --git a/3. PSMs/@PSM/Bias Correction/Mean Adjustment/getMeanAdjustment.m b/3. PSMs/@PSM/Bias Correction/Mean Adjustment/getMeanAdjustment.m deleted file mode 100644 index 811f679e..00000000 --- a/3.
PSMs/@PSM/Bias Correction/Mean Adjustment/getMeanAdjustment.m +++ /dev/null @@ -1,56 +0,0 @@ -function[addConstant] = getMeanAdjustment( Xt, Xs, nanflag ) -%% Gets the additive constant needed to adjust the mean of a source dataset -% to match the mean of a target dataset. -% -% [addConstant] = getMeanAdjustment( Xt, Xs ) -% Determines the additive constants needed to adjust the means of the rows -% of Xs to match the means of the rows of Xt. -% -% [addConstant] = getMeanAdjustment( Xt, Xs, nanflag ) -% Specifies how to treat NaN values in the datasets. -% -% ----- Inputs ----- -% -% Xt: The target ("true") dataset. Each row is treated independently. (nVar x nTime1) -% -% Xs: The source dataset. Typically the model prior. Each row is treated -% independently. (nVar x nTime2) -% -% nanflag: A string specifying how to treat NaNs in the datsets. Default -% behavior is to include NaN in means. -% 'includenan': Includes NaN values in means. -% 'omitnan': Removes NaN values before computing means. -% -% ----- Outputs ----- -% -% addConstant: The additive constants needed to adjust the means of the rows -% of Xs to match the means of the rows of Xt. - -% ----- Written By ----- -% Jonathan King, University of Arizona, 2019 - -% Set default -if ~exist('nanflag','var') - nanflag = 'includenan'; -end - -% Error check -if ~ismatrix(Xt) || ~isnumeric(Xt) - error('Xt must be a numeric matrix.'); -elseif ~ismatrix(Xs) || ~isnumeric(Xs) - error('Xs must be a numeric matrix.'); -elseif size(Xt,1) ~= size(Xs,1) - error('The number of rows in Xs must match the number of rows of Xt.'); -elseif ~ismember( nanflag, ["omitnan","includenan"] ) - error('Unrecognized nanflag.'); -end - -% Get the mean of each row -meanT = mean(Xt, 2, nanflag); -meanS = mean(Xs, 2, nanflag); - -% Get the additive constants -addConstant = meanT - meanS; - -end - diff --git a/3. PSMs/@PSM/Bias Correction/helpBiasCorrectors.m b/3. PSMs/@PSM/Bias Correction/helpBiasCorrectors.m deleted file mode 100644 index 7e8a359d..00000000 --- a/3. PSMs/@PSM/Bias Correction/helpBiasCorrectors.m +++ /dev/null @@ -1,47 +0,0 @@ -function[] = helpBiasCorrectors( name ) -%% This prints information on available bias correctors to the console. -% It also provides information for specific correctors. -% -% helpBiasCorrectors -% Provides the names of available bias correctors along with a brief -% description. -% -% helpBiasCorrectors( name ) -% Provides more detailed information on a specific bias corrector. - -% List the available correctors -if nargin == 0 - fprintf( sprintf( ['Available Bias Correctors:', newline, ... - '\tMean: Adjusts the mean of the values from the model prior given to a PSM to', ... - newline, sprintf('\t\tmatch the mean of a target dataset.'), ... - newline, newline... - '\tRenorm: Adjusts the mean and standard deviation of values given to a PSM to', ... - newline, sprintf('\t\tmatch a target dataset.'), newline ] ) ); - -% Mean adjustment instructions -elseif strcmpi(name, 'mean') - fprintf( sprintf( ['Mean Adjustment bias corrector how-to:', newline, ... - '\tStart by determining the additive constant needed to adjust the mean of', newline,... - '\tthe prior to match the target. This may be done offline, or using the', newline, ... - '\tfunction "getMeanAdjustment.m".', newline, newline, ... - '\tActivate the bias corrector using the .useMeanCorrector method of the PSM.', newline, ... 
- '\tUsage is:', newline, '\t\t >> myPSM.useMeanCorrector( add );', newline ])); - - -% Renormalization instructions -elseif strcmpi(name, 'renorm') - fprintf( sprintf( ['Renorm bias corrector how-to:', newline, ... - '\tStart by determining the additive and multiplicative constants needed to', newline, ... - '\tadjust the mean and variance of the prior to match the target. This may be', newline, ... - '\tdone offline, or by using the function "getRenormalization.m".', newline, newline, ... - '\tActivate the bias corrector using the .useRenormCorrector method of the PSM.', newline, ... - '\tUsage is:', newline, '\t\t >> myPSM.useRenormCorrector( timesConstant, addConstant )', newline] ) ); - - -% Error case -else - error('Unrecognized name'); -end - -end - \ No newline at end of file diff --git a/3. PSMs/@PSM/PSM.m b/3. PSMs/@PSM/PSM.m deleted file mode 100644 index 9d7c75b2..00000000 --- a/3. PSMs/@PSM/PSM.m +++ /dev/null @@ -1,115 +0,0 @@ -classdef (Abstract) PSM < handle - % PSM - % Implements an interface for proxy system models to interact with unit - % converting, bias correction, and data analysis functions. - % - % PSM Methods: - % setUnitConversion - Set values used for unit conversion. - % setBiasCorrector - Specify a bias corrector to use. - % - % Abstract PSM Methods: (These must be implemented in individual PSMs) - % getStateIndices - Determine which state vector elements are needed to run - % errorCheckPSM - Internal error checking - % runForwardModel - Run a specific forward model - - % ----- Written By ----- - % Jonathan King, University of Arizona, 2019 - - properties - H; % Sampling indices - addUnit; % Additive unit conversion - multUnit; % Multiplicative unit conversion - biasCorrection = nullCorrector; % A bias corrector - end - - % Abstract methods unique to individual PSMs - methods (Abstract = true) - - getStateIndices( obj, ensMeta ); - % This method determines the state vector elements that are needed - % to run an instance of a PSM for a particular proxy. - % - % The indices of those state vector elements are then saved as - % the "H" property. (This is why the method has no outputs. The - % outputs are saved within the "H" property of each PSM.) - % - % This method will require the ensemble metadata as an input. When - % this method is implemented in concrete PSM classes, it may also - % use additional input arguments. - - errorCheckPSM( obj ); - % This method implements internal error checking for specific PSM. - % - % It is intended to check whether or not a PSM is ready to be used - % for data assimilation. - % - % Dash calls this method for each PSM before starting a data - % assimilation in order to ensure that the user did not forget any - % steps when building their PSMs. - - runForwardModel( obj, M, t, d ); - % This is the function used by dash to run individual forward - % models. - % - % It has 3 INPUTS - % M: A set of values extracted from the state vectors in the - % model ensemble. Most PSMs will only need this input. The - % size will be (nSamplingIndices x nEns) - % - % t: The index of the time step being processed in the assimilation. - % - % This will be unnecessary for most standard PSMs, but could - % be necessary for time-dependent forward models. For example, - % a model that incorporates biological evolution over time - % could use different calibrations for different assimilation - % time steps. - % - % d: The index of the observation being processed. - % - % This will rarely be necessary for PSMs. 
It would be - % useful for developers using the "handle" capabilities of a - % PSM to assign multiple proxy records to a single PSM - % instance. This might be desirable for PSMs with a high - % computational cost of initialization. - % - % The method also has 2 OUTPUTS: - % Ye: These are the proxy estimates generated by running the - % forward model. Size is (1 x nEns) - % - % R: This is an estimate of estimate variance generated by the - % forward model. This is an optional output; it is fine to - % write PSMs that do not calculate R. Dash will only require - % this output when the user does not specify a value for R at - % the beginning of assimilation. - end - - % Interface methods that also apply unit conversion and bias correction - methods - - % Interfaces bias correction, unit conversion, dash, with unique PSMs - [Ye, R] = run( obj, M, t, d ); - - % Interface error checking for bias correction, unit conversion, - % dash, and unique PSMs. - review( obj, nState ); - - % Set state indices manually - setStateIndices( obj, H ); - - end - - % Unit conversion and bias correction - methods - - % Set the additive and multiplicative unit conversions - setUnitConversion( obj, varargin ); - - % Error check unit conversion - reviewUnitConversion( obj, H ); - - % Select a bias corrector - setBiasCorrector( obj, type, varargin ); - - end - -end \ No newline at end of file diff --git a/3. PSMs/@PSM/convertUnits.m b/3. PSMs/@PSM/convertUnits.m deleted file mode 100644 index 41117a40..00000000 --- a/3. PSMs/@PSM/convertUnits.m +++ /dev/null @@ -1,14 +0,0 @@ -function[M] = convertUnits( obj, M ) -% Converts units via M* = AM + B -% -% M = obj.convertUnits( M ) - -if ~isempty(obj.multUnit) - M = M .* obj.multUnit; -end - -if ~isempty(obj.addUnit) - M = M + obj.addUnit; -end - -end \ No newline at end of file diff --git a/3. PSMs/@PSM/review.m b/3. PSMs/@PSM/review.m deleted file mode 100644 index 02548524..00000000 --- a/3. PSMs/@PSM/review.m +++ /dev/null @@ -1,21 +0,0 @@ -function[] = review( obj, nState ) -% Interfaces internal error check with dash -% -% obj.reviewPSM( nState ) - -% H and unit conversion error checking -if isempty(obj.H) - error('The PSM does not have sampling indices, H.'); -elseif ~isnumeric(obj.H) || ~iscolumn(obj.H) || ~isreal(obj.H) || any(obj.H<0) || any(mod(obj.H,1)~=0) || any(obj.H>nState) - error('H must be a numeric column vector of integers on the interval [1, %.f].', nState ); -elseif ~isempty(obj.addUnit) && ~isequal(size(obj.H),size(obj.addUnit)) - error('addUnit is not the length of H.'); -elseif ~isempty(obj.multUnit) && ~isequal(size(obj.H),size(obj.multUnit)) - error('multUnit is not the length of H.'); -end - -% Error check the bias corrector and specific PSM -obj.biasCorrection.review( obj.H ); -obj.errorCheckPSM; - -end \ No newline at end of file diff --git a/3. PSMs/@PSM/run.m b/3. PSMs/@PSM/run.m deleted file mode 100644 index 1f5a4e2f..00000000 --- a/3. PSMs/@PSM/run.m +++ /dev/null @@ -1,31 +0,0 @@ -function[Ye, R] = run( obj, M, t, d ) -% Interfaces PSMs between unit conversion, bias correction, dash, and the -% unique PSM instance. -% -% [Ye, R] = obj.runPSM( M, t, d ) -% -% ----- Inputs ----- -% -% M: The portion of the ensemble required to run the PSM. (nH x nEns) -% -% t: A time index. Use NaN for most purposes. -% -% d: An observation index. Use NaN for most purposes. -% -% ----- Outputs ----- -% -% Ye: Model estimates (1 x nEns) -% -% R: R values associated with the observations. Includes dynamically -% generated R. 
- -% All PSMs should start by converting units -M = obj.convertUnits( M ); - -% They should then apply any bias correction -M = obj.biasCorrection.biasCorrect( M ); - -% And finally, run the forward model -[Ye, R] = obj.runForwardModel( M, t, d ); - -end diff --git a/3. PSMs/@PSM/setBiasCorrector.m b/3. PSMs/@PSM/setBiasCorrector.m deleted file mode 100644 index b989e374..00000000 --- a/3. PSMs/@PSM/setBiasCorrector.m +++ /dev/null @@ -1,31 +0,0 @@ -function[] = setBiasCorrector( obj, type, varargin ) -% Selects a bias corrector -% -% obj.setBiasCorrector( 'none' ) -% Does not apply any bias correction during DA. The default setting. -% -% obj.setBiasCorrector( 'mean', inArgs ) -% Uses a mean adjustment bias corrector. Please see the constructor for -% meanCorrector for input arguments. -% -% obj.setBiasCorrector( 'renorm', inArgs ) -% Uses a renormalization bias corrector. Please see the constructor of -% renormCorrector for input arguments. - -% Null corrector -if strcmpi( type, 'none' ) - obj.biasCorrection = nullCorrector; - -% Mean adjustment -elseif strcmpi( type, 'mean' ) - obj.biasCorrection = meanCorrector( varargin{:} ); - -% Renormalization -elseif strcmpi(type, 'renorm') - obj.biasCorrection = renormCorrector( varargin{:} ); - -else - error('Unrecognized bias corrector.'); -end - -end \ No newline at end of file diff --git a/3. PSMs/@PSM/setStateIndices.m b/3. PSMs/@PSM/setStateIndices.m deleted file mode 100644 index 474d8406..00000000 --- a/3. PSMs/@PSM/setStateIndices.m +++ /dev/null @@ -1,10 +0,0 @@ -function[] = setStateIndices( obj, H ) -% Sets the state vector indices manually. -% -% obj.setStateIndices( H ) -% -% ----- Inputs ----- -% -% H: State vector indices -obj.H = H; -end \ No newline at end of file diff --git a/3. PSMs/@PSM/setUnitConversion.m b/3. PSMs/@PSM/setUnitConversion.m deleted file mode 100644 index 9f951eb4..00000000 --- a/3. PSMs/@PSM/setUnitConversion.m +++ /dev/null @@ -1,34 +0,0 @@ -function[] = setUnitConversion( obj, varargin ) -% Set unit conversion parameters for a PSM -% -% obj.setUnitConversion( ..., 'times', C ) -% Specifies multiplicative constants to apply for unit conversion. -% -% obj.setUnitConversion( ..., 'add', C ) -% Specifies additive constants to apply for unit conversion. -% -% ***Note: You cannot call this function before generating state indices. - -% Need to check length, don't allow before state indices -if isempty( obj.H ) - error('Cannot set unit conversion values until state indices (H) have been generated.'); -end - -% Parse, error check -[add, mult] = parseInputs( varargin, {'add', 'times'}, {[],[]}, {[],[]} ); -nH = numel(obj.H); -if ~isempty(add) && ( ~isvector(add) || ~isnumeric(add) || length(add)~=nH ) - error('The additive unit conversion values must be a numeric vector with one element per state index (%.f).', nH); -elseif ~isempty(mult) && (~isvector(mult) || ~isnumeric(mult) || length(mult)~=nH) - error('The multiplicative unit conversion values must be a numeric vector with one element per state index (%.f).', nH); -end - -% Convert to column -add = add(:); -mult = mult(:); - -% Set the values -obj.addUnit = add; -obj.multUnit = mult; - -end \ No newline at end of file diff --git a/3. PSMs/Bias Correctors/biasCorrector.m b/3. PSMs/Bias Correctors/biasCorrector.m deleted file mode 100644 index 74dd17b0..00000000 --- a/3.
PSMs/Bias Correctors/biasCorrector.m +++ /dev/null @@ -1,84 +0,0 @@ -classdef (Abstract) biasCorrector < handle - % biasCorrector - % Implements behavior for bias correction algorithms - - properties - type = "none"; - end - - % Constructor - methods - function obj = biasCorrector() - end - end - - % Abstract methods implemented by individual correctors - methods (Abstract) - - % Error checks the corrector - review( obj, H ); - - % Applies bias correction to an ensemble - M = biasCorrect( obj, M ); - - end - - % Renorm utility - methods (Static) - function[timesUnit, addUnit] = getRenormalization( Xt, Xs, nanflag ) - % Gets multiplicative and additive constants needed to match a - % source dataset to the mean and variance of a target dataset. - % - % [timesUnit, addUnit] = biasCorrector.getRenormalization( Xt, Xs, nanflag ) - % - % ----- Inputs ----- - % - % Xt: The target dataset. Each row is treated independently. (nVar x nTime1) - % - % Xs: The source dataset. Typically the model prior. Each row is treated - % independently. (nVar x nTime2) - % - % nanflag: A string specifying how to treat NaNs in the datsets. Default - % behavior is to include NaN in means. - % 'includenan': Includes NaN values in means. - % 'omitnan': Removes NaN values before computing means. - % - % ----- Outputs ----- - % - % timesUnit: The multiplicative constants needed to adjust the standard - % deviations of the rows of Xs to match the rows of Xt. - % - % addUnit: The additive constants needed to adjust the means of the rows - % of Xs to match the means of the rows of Xt. - - % Set default - if ~exist('nanflag','var') - nanflag = 'includenan'; - end - - % Error check - if ~ismatrix(Xt) || ~isnumeric(Xt) || ~isreal(Xt) - error('Xt must be a real, numeric matrix.'); - elseif ~ismatrix(Xs) || ~isnumeric(Xs) || ~isreal(Xs) - error('Xs must be a real, numeric matrix.'); - elseif size(Xt,1) ~= size(Xs,1) - error('The number of rows in Xs must match the number of rows of Xt.'); - elseif ~ismember( nanflag, ["omitnan","includenan"] ) - error('Unrecognized nanflag.'); - end - - % Get the means and standard deviations - meanS = mean(Xs, 2, nanflag); - meanT = mean(Xt, 2, nanflag); - - stdS = std( Xs, 0, 2, nanflag ); - stdT = std( Xt, 0, 2, nanflag ); - - % Get the renormalization constants - timesUnit= (stdT ./ stdS); - addUnit = meanT - (stdT .* meanS ./ stdS); - end - end - -end - \ No newline at end of file diff --git a/3. PSMs/Bias Correctors/meanCorrector.m b/3. PSMs/Bias Correctors/meanCorrector.m deleted file mode 100644 index ec46f8fc..00000000 --- a/3. PSMs/Bias Correctors/meanCorrector.m +++ /dev/null @@ -1,67 +0,0 @@ -classdef meanCorrector < biasCorrector - % meanCorrector - % Implements bias correction via adjustment of mean - % - % meanCorrector Methods: - % meanCorrector - Creates a mean adjustment bias corrector. - % biasCorrect - Applies bias correction - % review - Error checks the bias corrector - - properties - addUnit; - end - - % Constructor - methods - function obj = meanCorrector( Xt, Xs, nanflag ) - % Creates a mean adjustment bias corrector. - % - % obj = meanCorrector( Xt, Xs ) - % Uses a target and source dataset to determine an additive - % constant to apply to inputs. - % - % obj = meanCorrector( Xt, Xs, nanflag ) - % Specify how to treat NaN. Default is 'includenan'. - % - % ----- Inputs ----- - % - % Xt: A target dataset. The source dataset will be adjusted to - % match the mean of this target. (nVar x nEns) - % - % Xs: A source dataset. Typically from a model prior. 
(nVar x nEns) - % - % nanflag: A string specifying how to treat NaN values - % 'includenan' (Default) - % 'omitnan' - - % Set default - if ~exist('nanflag','var') || isempty(nanflag) - nanflag = 'includenan'; - end - - % Get the mean adjustment - [~, addUnit] = biasCorrector.getRenormalization( Xt, Xs, nanflag ); - - % Save values - obj.addUnit = addUnit; - obj.type = "mean"; - end - end - - % Review and bias correct - methods - - % Error checks - function[] = review( obj, H ) - if numel(H) ~= numel(obj.addUnit) - error('The number of state indices (H) does not match the number of variables in the bias corrector.'); - end - end - - % Applies the bias correction - function[M] = biasCorrect( obj, M ) - M = M + obj.addUnit; - end - end - -end \ No newline at end of file diff --git a/3. PSMs/Bias Correctors/nullCorrector.m b/3. PSMs/Bias Correctors/nullCorrector.m deleted file mode 100644 index c491c4af..00000000 --- a/3. PSMs/Bias Correctors/nullCorrector.m +++ /dev/null @@ -1,26 +0,0 @@ -classdef nullCorrector < biasCorrector - % nullCorrector - % Implements no bias correction - - properties - end - - methods - % Constructor - function obj = nullCorrector - obj.type = "none"; - end - - % Null Error checking - function[] = review(~,~) - end - - % Null bias correction - function[M] = biasCorrect(~,M) - end - end - -end - - - \ No newline at end of file diff --git a/3. PSMs/Bias Correctors/renormCorrector.m b/3. PSMs/Bias Correctors/renormCorrector.m deleted file mode 100644 index 3ccf97df..00000000 --- a/3. PSMs/Bias Correctors/renormCorrector.m +++ /dev/null @@ -1,71 +0,0 @@ -classdef renormCorrector < biasCorrector - % renormCorrector - % Implements bias correction by adjusting mean and variance - % - % renormCorrector Methods: - % renormCorrector - Creates a renormalization bias corrector. - % biasCorrect - Applies bias correction - % review - Error checks the bias corrector - - properties - addUnit; - timesUnit; - end - - % Constructor - methods - function obj = renormCorrector( Xt, Xs, nanflag ) - % Creates a renormalization bias corrector - % - % obj = renormCorrector( Xt, Xs ) - % Uses a target and source dataset to determine an additive and - % multiplicative constant to apply to inputs. - % - % obj = renormCorrector( Xt, Xs, nanflag ) - % Specify how to treat NaN. Default is 'includenan'. - % - % ----- Inputs ----- - % - % Xt: A target dataset. The source dataset will be adjusted to - % match the mean and standard deviation of this target. (nVar x nEns) - % - % Xs: A source dataset. Typically from a model prior. (nVar x nEns) - % - % nanflag: A string specifying how to treat NaN values - % 'includenan' (Default) - % 'omitnan' - - % Set default - if ~exist('nanflag','var') || isempty(nanflag) - nanflag = 'includenan'; - end - - % Get the constants - [timesUnit, addUnit] = biasCorrector.getRenormalization( Xt, Xs, nanflag ); - - % Save values - obj.addUnit = addUnit; - obj.timesUnit = timesUnit; - obj.type = "renorm"; - end - end - - % Run and review - methods - - % Error checks - function[] = review( obj, H ) - if numel(H) ~= numel(obj.addUnit) - error('The number of state indices (H) does not match the number of variables in the bias corrector.'); - end - end - - % Apply the correction - function[M] = biasCorrect( obj, M ) - M = M .* obj.timesUnit; - M = M + obj.addUnit; - end - - end - -end \ No newline at end of file diff --git a/3. PSMs/Demo for developers/README.txt b/3. PSMs/Demo for developers/README.txt deleted file mode 100644 index 661a3586..00000000 --- a/3.
PSMs/Demo for developers/README.txt +++ /dev/null @@ -1,6 +0,0 @@ -There are two PSM demo classes provided here. - -myPSM is a verbose demo intended for first time designers of a PSM. - -psmName is a succinct demo intended for developers who want a quick -reference guide. \ No newline at end of file diff --git a/3. PSMs/Demo for developers/myPSM.m b/3. PSMs/Demo for developers/myPSM.m deleted file mode 100644 index 7006da65..00000000 --- a/3. PSMs/Demo for developers/myPSM.m +++ /dev/null @@ -1,241 +0,0 @@ -%% This is a verbose template for designing a new PSM - -% The first line of code says that this file describes a 'myPSM' object -% -% The "myPSM < PSM" section says that the 'myPSM' object is a specific type -% of PSM. This means that "myPSM" is required to implement all the -% functions from the PSM interface. -% -% To create your own PSM, change "myPSM" to the desired name of your PSM. -classdef myPSM < PSM - - % This section describes the properties of myPSM - % - % These are the variables that are stored in each individual instance - % of a myPSM. If I make multiple myPSM objects (because I have multiple - % proxies that use a myPSM), every myPSM object will have these - % variables. However, the values stored in these variables can be - % different for each individual myPSM object. - % - % For example, I might make two myPSM objects, one for site A and one - % for site B. Say I make a "coordinates" property. Then myPSM A and - % myPSM B will both store values associated with the coordinates of a - % site. But the actual coordinates stored by myPSM A will be different - % from the coordinates stored by myPSM B. - % - % (A note on terminology: from this point onward, I will refer to - % individual PSM objects as an "instance" of a particular PSM.) - % - % In this template, I list some possible properties. However, these are - % non-exhaustive and only suggestions. The specific properties needed - % for each PSM will vary. - properties - - coordinates; % Site coordinates (Used to select optimal sites from the state vector) - - myProp1; % Some properties needed to run a myPSM - myProp2; - myProp3; - - % You can set a default value for a property by setting it equal to - % the default value in the "properties" block. - myPropX = 100; % A property with a default value of 100 - end - - % This next section describes the methods of a myPSM - % - % These are the individual functions specific to a myPSM. They MUST - % include the functions specified by the "PSM" interface, but can also - % include other functions needed to make the PSM work. - methods - - % CONSTRUCTOR - % - % All PSMs must include a special function called a constructor. - % This is a function with the same name as the myPSM. It is - % responsible for actually creating and initializing each instance - % of a myPSM. It returns a single output (traditionally named "obj" - % (for "object"), which is the specific instance of a myPSM. - function obj = myPSM( coord, propVal1, input3, varargin ) - - % The primary responsibility of the constructor method is to - % intialize all the properties for the PSM. Let's do that now - % - % Use dot indexing to access the value in a property or to - % change the value in a property. This is very similar to a - % "struct" object (a structure). - obj.coordinates = coord; - obj.myProp1 = propVal1; - - % Get the value of myProp2, which requires some additional - % calculations. 
- obj.myProp2 = myFunction( input3 ); - - % Say the value of myProp3 depends in some way on running the - % forward model on the prior model ensemble. For example, - % "myProp3" could be a standardization determined by running - % the PSM on the first model time step. - % - % We'll want to calculate myProp3 later, so we'll just - % initialize it as empty now. - obj.myProp3 = []; - - % This is also a good place to change any default properties. - % Here is an example of some code that could change the default - % value in myProp3. - % - % If the user gives a 5th input, it will replace the default - % value of myPropX. - if ~isempty(varargin) - obj.myPropX = varargin; - end - end - - % Let's say we still need to determine the value of myProp3. Here - % is a method that will do that. - % - % To run a method, we will use dot indexing. This is a little - % different from normal functions. - % To run "myMethod", we will need to create an actual instance of a - % "myPSM" (using the constructor), and then call the function using - % dot indexing, so - % - % >> A = myPSM( coord, propVal1, input3 ) - % >> A.myMethod( input1, input2, input3) - % - % Looking at the function line, we can see that there is a fourth - % input called "obj", but that I did not give "myMethod" an input - % named "obj" when I called the method. This is because "obj" is a - % reference to A, the specific instance of a "myPSM" that called - % the method. - % - % The "obj" input, the reference to the PSM that called the method, - % is automatically sent to ALL METHODS (with the exception of the - % constructor) as the first input. So when writing a method, the - % first input must always be "obj". - % - % Looking at the function line, we can see that there are no - % outputs. This is because "myProp3" is a property of "myPSM". - % Essentially, it is a saved variable. By running this method, we - % will save the variable in the "myProp3" property and not need to - % return anything. - function[] = myMethod( obj, input1, input2, input3 ) - - % We can call functions within the method - value = myFunction( input1, input2 ); - - % We can reference properties using dot indexing - value2 = myFunction2( obj.propVal2 ); - - % And we can call other methods, also using dot-indexing - value3 = obj.anotherMethod( input3 ); - - % Finally, we'll want to set the value of propVal3 - obj.propVal3 = value3; - end - - - % GET STATE INDICES - % - % This is one of the functions required by the PSM interface. - % This function decides which elements in the state vector should - % be used to run the PSM. Specifically, it saves the indices of the - % desired elements within the state vector (hence 'state indices'). - % - % The specifics of this will be different for each PSM. However, I - % have provided a general function to select state indices based on - % lat-lon coordinates that should be sufficient for most simple - % PSMs. - % - % Again, note that the first input is the mandatory "obj". You can - % use a different name than "obj" if you prefer a different name, - % but naming the reference "obj" is a standard convention for - % object-oriented programming with MATLAB. - % - % This method has no outputs because the state indices will be - % saved in the "H" property. You may notice that I did not create - % an "H" property in the myPSM. This is because "PSM" the interface - % that describes all PSMs, declares "H" as a property. So all PSMs - % will have the "H" property created automatically. 
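% (A sketch for illustration only; "ensMeta" and the variable names below
% are hypothetical placeholders, not part of the original template.)
% Using the same dot indexing shown for myMethod above, a typical call
% might look like:
%
% >> A = myPSM( coord, propVal1, input3 );
% >> A.getStateIndices( ensMeta, "T", "P" );   % saves the state indices in A.H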
- % - % It is strongly recommended that you always save the state indices - % in the same order for each instance of a PSM. For example, if - % your PSM needed temperature and precipitation values from June, - % July, and August, you should always save the state indices in the - % same order, perhaps: - % - % >> obj.H = [T-June, T-July, T-Aug, P-June, P-July, P-Aug] - % - % regardless of the order of the variables in the ensemble, and - % regardless of the order of any inputs given to the method. - function[] = getStateIndices( obj, ensMeta, var1Name, var2Name, varargin ) - obj.H = someFunction( ensMeta, varNames, varargin ); - end - - - - % ERROR CHECK PSM - % - % This is a method required by the PSM interface. It is used to - % error check a PSM and ensure that it is ready for data - % assimilation. - % - % You shouldn't change the inputs to this method because it is - % built in to the data assimilation code with no input (excepting - % "obj") - function[] = errorCheckPSM( obj ) - - % Do error checking on values specific to the PSM - if obj.propVal1 < 0 - error('propVal1 should be positive!'); - elseif isnan( obj.propValX ) - error('propValX cannott be NaN!'); - elseif isempty( obj.propVal2) - error('propVal2 cannot be empty!'); - end - - % Etcetera - end - - - - % RUN FORWARD MODEL - % - % This is the final method required by the PSM interface. It - % does the heavy lifting of actually running a PSM. It has two - % outputs: Ye - The proxy estimate for each ensemble member, and - % R - Estimated observation error. - % - % Note that "R" is an optional output. Dash will only ask for "R" - % if the user does not specify R at the beginning of data - % assimilation. So it's fine to not include any code that - % calculates R in the PSM; this will simply require the user to - % always specify R. - % - % The inputs are "obj", M - the values in the ensemble at the - % sampling values, t - the particular time step being processed in - % the data assimilation, and d - the index of the proxy being - % estimated. - % - % Most PSMs will not need the d input. If you are a developer - % wishing to utilize the "handle" properties of a PSMs, see the - % developer documentation. - % - % Using the "t" input is also - % uncommon, but could be necessary if your PSM is time dependent. - % For example, if you wanted to incorporate the effects of - % evolution on a biological calibration curve over time, you may wish to know - % the current time step in order to select the appropriate curve. - % - % Overwhelmingly, most PSMs should only need the "M" input, so that - % is the setup for this demo. We don't need the "t" and "d" inputs - % for this method, but the PSM interface requires that this method - % accepts four inputs. We can use the tilde operator (~) as a - % placeholder for inputs that we don't need for our function. - function[Ye, R] = runForwardModel( obj, M, ~, ~ ) - - % Run the PSM forward model via some function. - [Ye, R] = myForwardModel( M ); - end - end -end \ No newline at end of file diff --git a/3. PSMs/Demo for developers/psmName.m b/3. PSMs/Demo for developers/psmName.m deleted file mode 100644 index c2a8f053..00000000 --- a/3. 
PSMs/Demo for developers/psmName.m +++ /dev/null @@ -1,56 +0,0 @@ -%% This is a less verbose psm template - -classdef psmName < PSM - - properties - coords; % Site coordinates - - someProp; % another properties - default_100 = 100; % Property with default value - end - - methods - - %% Constructor - function obj = psmName( coord, prop, varargin ) - obj.coords = coord; - obj.someProp = prop; - - % Change values of default - if nargin == 4 - obj.default_100 = 100; - end - end - - %% Get state indices - function[] = getStateIndices( obj, ensMeta, varNames ) - obj.H = someFunction( ensMeta, varNames ); - end - - %% Review PSM - function[] = errorCheckPSM( obj ) - - % Some error checking examples - if obj.coord(1) < -90 || obj.coord(1) > 90 - error('Latitude must be on [-90 90]'); - end - if isnan( obj.someProp ) - error('someProp cannot be NaN'); - end - - end - - - - - %% Run the PSM - function[Ye, R] = runForwardModel( obj, M, ~, ~ ) - - % Run the forward model - Ye = myForwardModel( M, obj.someProp ); - - % Optionally calculate R - R = someFunction( Ye ); - end - end -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@deloPSM/deloPSM.m b/3. PSMs/Specific Forward Models/@deloPSM/deloPSM.m deleted file mode 100644 index 96bd725e..00000000 --- a/3. PSMs/Specific Forward Models/@deloPSM/deloPSM.m +++ /dev/null @@ -1,49 +0,0 @@ -classdef deloPSM < PSM - - properties - %lat and lon coordinates of site - coord; - %you need to give the PSM the species of foraminifera. - Species; - %array of seasonal min and max values. note large dummy values for - %annual models - GrowthSeas = [3.6 29.2; 22.5 31.9; 6.7 21.1; -0.9 15.3; 20.2 30.6; -5 50; -5 50]; - %names of allowable species - SpeciesNames = {'bulloides','ruber','incompta','pachy','sacculifer','all','all_sea'}; - %string array of Bayesian parameters. These are the default values. - bayes = ["poolann_params.mat";"poolsea_params.mat";"hiersea_params.mat"]; - end - - % Constructor - methods - % Constructor. This creates an instance of a PSM - function obj = deloPSM( lat, lon, Species, varargin ) - % Get optional inputs - [bayes] = parseInputs(varargin, {'Bayes'}, {[]}, {[]}); - - % Set the coordinates - obj.coord = [lat lon]; - % Set species - obj.Species = Species; - % Set optional arguments - if ~isempty(bayes) - obj.bayes = bayes; - end - end - end - - % PSM methods - methods - - % Get the state vector elements needed to run the PSM - getStateIndices( obj, ensMeta, sstName, deloName, monthMeta, varargin ); - - % Internal error checking - errorCheckPSM( obj ); - - % Generate Ye and R from ensemble - [delo,R] = runForwardModel( obj, M, ~, ~ ) - - end - -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@deloPSM/errorCheckPSM.m b/3. PSMs/Specific Forward Models/@deloPSM/errorCheckPSM.m deleted file mode 100644 index 5e01d4ad..00000000 --- a/3. PSMs/Specific Forward Models/@deloPSM/errorCheckPSM.m +++ /dev/null @@ -1,8 +0,0 @@ -function[] = errorCheckPSM( obj ) - if ~isvector( obj.H ) || length(obj.H)~=13 - error('H is not the right size.'); - end - if ~ismember(obj.Species,obj.SpeciesNames) - error('Species not recognized'); - end -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@deloPSM/getStateIndices.m b/3. PSMs/Specific Forward Models/@deloPSM/getStateIndices.m deleted file mode 100644 index fdfeb29a..00000000 --- a/3. 
PSMs/Specific Forward Models/@deloPSM/getStateIndices.m +++ /dev/null @@ -1,12 +0,0 @@ -function[] = getStateIndices( obj, ensMeta, sstName, deloName, monthMeta, varargin ) -% Gets state indices for a deloPSM -% -% obj.getStateIndices( ensMeta, sstName, deloName, monthMeta, varargin ) - - % Concatenate the variable names - % varNames = [string(sstName), string(deloName)]; - % Get the time dimension - [~,~,~,~,~,~,timeID] = getDimIDs; - obj.H = ensMeta.closestLatLonIndices( obj.coord, sstName, timeID, monthMeta, varargin{:} ); - obj.H(13) = ensMeta.closestLatLonIndices( obj.coord, deloName); -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@deloPSM/runForwardModel.m b/3. PSMs/Specific Forward Models/@deloPSM/runForwardModel.m deleted file mode 100644 index 899a4b81..00000000 --- a/3. PSMs/Specific Forward Models/@deloPSM/runForwardModel.m +++ /dev/null @@ -1,44 +0,0 @@ -function[delo,R] = runForwardModel( obj, M, ~, ~ ) - %get ensemble means of SST and SSS for growth seasons - SSTens = mean(M(1:12,:),2); - %find the species - species_ind = strcmp(obj.Species,obj.SpeciesNames); - growth = obj.GrowthSeas(species_ind,:); - %minimum months for seasonal average - min_month = 3; - %get months in range - indt=SSTens >= growth(1) & SSTens <= growth(2); - gots_t=find(indt); - %get months outside of range - nots_t=find(~indt); -while length(gots_t) < min_month - diffs(1,:)=abs(SSTens(nots_t)-growth(1)); - diffs(2,:)=abs(SSTens(nots_t)-growth(2)); - %find the closest value - closest=min(diffs,[],[1 2]); - %get its location - [~,y]=find(diffs==closest); - %update gots - gots_t = [gots_t;nots_t(y)]; - %update nots - nots_t(y)=[]; - clear diffs -end - %average months for seasonal T - SST=mean(M(gots_t,:),1); - %delosw should be the 13th entry in M - d18Osw=M(13,:); - % Run the forward model. Output is 1500 possible estimates for - % each ensemble member (nEns x 1000) - delo = bayfox_forward(SST,d18Osw,obj.Species,obj.bayes); - - % Estimate R from the variance of the model for each ensemble - % member. (scalar) - R = mean( var(delo,[],2), 1); - - % Take the mean of the 1500 possible values for each ensemble - % member as the final estimate. (1 x nEns) - delo = mean(delo,2); - % transpose for Ye - delo = delo'; -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@linearPSM/errorCheckPSM.m b/3. PSMs/Specific Forward Models/@linearPSM/errorCheckPSM.m deleted file mode 100644 index 713f9b68..00000000 --- a/3. PSMs/Specific Forward Models/@linearPSM/errorCheckPSM.m +++ /dev/null @@ -1,14 +0,0 @@ -function[] = errorCheckPSM( obj ) -% Error checks a linear PSM -if isempty(obj.slopes) || isempty(obj.intercept) || isempty(obj.Hlim) - error('The slope, intercept and Hlim cannot be empty.'); -elseif any(isnan(obj.slopes)) || any(isnan(obj.intercept)) || any(isnan(obj.Hlim)) - error('The slope, intercept, and Hlim cannot be NaN.'); -elseif ~ismatrix( obj.Hlim ) || size(obj.Hlim,2)~=2 - error('Hlim must be a 2-column matrix.'); -elseif ~isvector(obj.slopes) || length(obj.slopes) ~= size(obj.Hlim,1) - error('slope and be a vector with the same number of elements as rows in Hlim.'); -elseif ~isscalar(obj.intercept) - error('The intercept must be a scalar.'); -end -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@linearPSM/getStateIndices.m b/3. PSMs/Specific Forward Models/@linearPSM/getStateIndices.m deleted file mode 100644 index a8ae85df..00000000 --- a/3. 
PSMs/Specific Forward Models/@linearPSM/getStateIndices.m +++ /dev/null @@ -1,68 +0,0 @@ -function[] = getStateIndices( obj, ensMeta, varNames, searchParams ) -% Gets the state indices required to run a multivariate linear PSM -% -% obj.getStateIndices( ensMeta, varNames ) -% Finds the state vector element closest to each listed variable. -% -% obj.getStateIndices( ensMeta, varNames, searchParams ) -% Specifies additional search parameters. -% -% ----- Inputs ----- -% -% ensMeta: An ensemble metadata object -% -% varNames: The name of the variable associated with each linear slope -% A cellstring or string vector. (nSlope x 1) -% -% searchParams: Additional search parameters for each variable. A cell -% vector with one element for each slope. Each element is a cell -% with formatting: {dim1, meta1, ... dimN, metaN}. When multiple search -% indices are specified for a single variable, the mean of all -% associated state elements is used as the variable. -% See PSM.getClosestLatLonIndex for details on dimN and metaN. -% -% *** For Example: -% >> varNames = ["SST"; "T"] -% >> searchParams = { -% {'lev', 1}; -% {'lev', 100, 'time', ["May";"June";"July] } -% } -% -% would find use the SST element on the level with metadata 1 for -% the first slope, and the mean of the closest T elements on the level -% with metadata 100 in May, June, and July for the second slope. - -% Set defaults -if ~exist('searchParams','var') || isempty(searchParams) - searchParams = repmat( {{}}, [numel(obj.slopes), 1] ); -end - -% Error check -if ~isa( ensMeta, 'ensembleMetadata' ) || ~isscalar(ensMeta) - error('ensMeta must be a scalar ensembleMetadata object.'); -elseif ~isstrlist(varNames) - error('varNames must be a cellstring or string vector.'); -elseif ~iscell(searchParams) || ~isvector(searchParams) || numel(searchParams)~=numel(obj.slopes) - error('searchParams must be a cell vector with %.f elements.', numel(obj.slopes) ); -end -for v = 1:numel(searchParams) - if ~iscell( searchParams{v} ) || (~isempty(searchParams{v}) && ~isvector(searchParams{v}) ) - error('Element %.f of searchParams is not a cell vector.', v ); - end -end -varNames = string(varNames); - -% Initialize values -obj.H = []; -obj.Hlim = NaN( numel(obj.slopes), 2 ); - -% Get the state indices for each variable -k = 0; -for v = 1:numel(varNames) - Hvar = ensMeta.closestLatLonIndices( obj.coord, varNames(v), searchParams{v}{:} ); - obj.H = [obj.H; Hvar]; - obj.Hlim(v,:) = k + [1, numel(Hvar)]; - k = k + numel(Hvar); -end - -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@linearPSM/linearModel.m b/3. PSMs/Specific Forward Models/@linearPSM/linearModel.m deleted file mode 100644 index df1fc9a5..00000000 --- a/3. PSMs/Specific Forward Models/@linearPSM/linearModel.m +++ /dev/null @@ -1,28 +0,0 @@ -function[Y] = linearModel( X, slopes, intercept ) -% The linear model function used by a linearPSM -% -% Y = linearPSM.linearModel( X, slopes, intercept ) -% -% ----- Inputs ----- -% -% X: A set of data values. (nVar x nEns) -% -% slopes: The linear multiplicative constants (nVar x 1) -% -% intercept: The linear additive constant. A scalar. 
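% ----- Example -----
%
% (A hypothetical illustration; the numeric values below are made up.)
%
% >> X = [20 22 21; 3 5 4];              % 2 variables x 3 ensemble members
% >> Y = linearPSM.linearModel( X, [0.5; -0.1], 1 )
%
% returns Y = 0.5*X(1,:) - 0.1*X(2,:) + 1, a (1 x 3) row vector.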
- -% Error check -if ~ismatrix(X) || ~isnumeric(X) - error('X must be a numeric matrix'); -elseif ~iscolumn(slopes) || length(slopes)~=size(X,1) || ~isnumeric(slopes) - error('slopes must be a numeric column vector with one element per row in X.'); -elseif ~isnumeric(intercept) || ~isscalar(intercept) - error('intercept must be a numeric scalar.'); -end - -% Apply the model -Y = sum( X.*slopes, 1 ) + intercept; - -end - - \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@linearPSM/linearPSM.m b/3. PSMs/Specific Forward Models/@linearPSM/linearPSM.m deleted file mode 100644 index 84b32e4c..00000000 --- a/3. PSMs/Specific Forward Models/@linearPSM/linearPSM.m +++ /dev/null @@ -1,74 +0,0 @@ -classdef linearPSM < PSM - % linearPSM - % Implements a multivariate linear proxy system model. - % - % linearPSM Methods: - % linearPSM - Creates a new linearPSM object - % getStateIndices - Find the state vector elements needed to run the PSM - % runForwardModel - Runs the multivariate linear forward model. - % linearModel - Static function implementing the linear model - - properties - coord; % Proxy site coordinates, [lat, lon] - slopes; % The multiplicative constant to apply to each variable - intercept; % The additive constant - Hlim; % The limits of the state elements associated with each slope - end - - % Constructor - methods - function obj = linearPSM( lat, lon, slopes, intercept ) - % Creates a linear PSM object - % - % obj = linearPSM( lat, lon, slopes, intercept ) - % Creates a new multivariate linear PSM. Note that slopes may - % apply to the mean of several state vector elements. See - % getStateIndices for details on this functionality. - % - % ----- Inputs ----- - % - % slopes: A vector containing the multiplicative constant to - % apply to each subsequent variable. - % - % intercept: The scalar additive constant - % - % lat: Latitude coordinate of the proxy site - % - % lon: Longitude coordinate of the proxy site - - % Error check - if ~isvector(slopes) || ~isnumeric(slopes) || ~isreal(slopes) - error('slopes must be a numeric vector.') - elseif ~isscalar(intercept) || ~isnumeric(intercept) || ~isreal(intercept) - error('intercept must be a numeric scalar.'); - elseif ~isscalar(lat) || ~isscalar(lon) || ~isnumeric(lat) || ~isnumeric(lon) - error('lat and lon must be numeric scalars.'); - end - - % Save - obj.slopes = slopes(:); - obj.intercept = intercept; - obj.coord = [lat, lon]; - end - end - - % PSM methods - methods - - % State indices - getStateIndices( obj, ensMeta, varNames, searchParams ); - - % Error checking - errorCheckPSM( obj ); - - % Run the forward model - [Ye, R] = runForwardModel( obj, M, ~, ~ ); - - end - - % Static call to linear model function - methods (Static) - Y = linearModel( X, slopes, intercept ); - end - -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@linearPSM/runForwardModel.m b/3. PSMs/Specific Forward Models/@linearPSM/runForwardModel.m deleted file mode 100644 index 381d5ca5..00000000 --- a/3. 
PSMs/Specific Forward Models/@linearPSM/runForwardModel.m +++ /dev/null @@ -1,19 +0,0 @@ -function[Ye, R] = runForwardModel( obj, M, ~, ~ ) -% Runs a linear PSM -% -% [Ye, R] = obj.runForwardModel( M ) - -% Preallocate -nVar = numel(obj.slopes); -var = NaN( nVar, size(M,2) ); - -% Obtain any variables that are means of multiple state elements -for v = 1:nVar - var(v,:) = mean( M(obj.Hlim(v,1):obj.Hlim(v,2), :), 1 ); -end - -% Calculate Ye and R -Ye = linearPSM.linearModel( var, obj.slopes, obj.intercept ); -R = []; - -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@mgcaPSM/errorCheckPSM.m b/3. PSMs/Specific Forward Models/@mgcaPSM/errorCheckPSM.m deleted file mode 100644 index 9dbfeab2..00000000 --- a/3. PSMs/Specific Forward Models/@mgcaPSM/errorCheckPSM.m +++ /dev/null @@ -1,8 +0,0 @@ -function[] = errorCheckPSM( obj ) - if ~isvector( obj.H ) || length(obj.H)~=24 - error('H is not the right size.'); - end - if ~ismember(obj.Species,obj.SpeciesNames) - error('Species not recognized'); - end -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@mgcaPSM/getStateIndices.m b/3. PSMs/Specific Forward Models/@mgcaPSM/getStateIndices.m deleted file mode 100644 index c3fada2f..00000000 --- a/3. PSMs/Specific Forward Models/@mgcaPSM/getStateIndices.m +++ /dev/null @@ -1,7 +0,0 @@ -function[] = getStateIndices( obj, ensMeta, sstName, sssName, monthName, varargin ) - % Concatenate the variable names - varNames = [string(sstName), string(sssName)]; - % Get the time dimension - [~,~,~,~,~,~,timeID] = getDimIDs; - obj.H = ensMeta.closestLatLonIndices( obj.coord, varNames, timeID, monthName, varargin{:} ); -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@mgcaPSM/mgcaPSM.m b/3. PSMs/Specific Forward Models/@mgcaPSM/mgcaPSM.m deleted file mode 100644 index ec0cb7c1..00000000 --- a/3. PSMs/Specific Forward Models/@mgcaPSM/mgcaPSM.m +++ /dev/null @@ -1,82 +0,0 @@ -classdef mgcaPSM < PSM - - properties - %lat and lon coordinates of site - coord; - %pH needs to be passed to the PSM since it isn't modeled - pH; - %ditto with omega - Omega; - %you need to enter the cleaning method. - Clean; - %you need to give the PSM the species of foraminifera. - Species; - %age is needed for seawater correction in deep time. Otherwise it - %is fine to pass a dummy value of zero. - Age = 0; - %flag for seawater correction, default is none. - SeaCorr = 0; - %Prior mean to limit forward model, default is 4; - PriorMean = 4; - %Prior std to limit forward model, default is 4 (very wide). - PriorStd = 4; - %array of seasonal min and max values. note large dummy values for - %annual models - GrowthSeas = [3.6 29.2; 22.5 31.9; 6.7 21.1; -0.9 15.3; 20.2 30.6; -5 50; -5 50]; - %names of allowable species - SpeciesNames = {'bulloides','ruber','incompta','pachy','sacculifer','all','all_sea'}; - %string array of Bayesian parameters. These are the default values. - bayes = ["pooled_model_params.mat";"pooled_sea_model_params.mat";"species_model_params.mat"]; - end - - % Constructor - methods - % Constructor. 
This creates an instance of a PSM - function obj = mgcaPSM( lat, lon, pH, Omega, Clean, Species, varargin ) - % Get optional inputs - [age, sw, pmean, pstd, bayes] = parseInputs(varargin, {'Age','SeaCorr','PriorMean','PriorStd','Bayes'}, {[],[],[],[],[]}, {[],[],[],[],[]}); - - % Set the coordinates - obj.coord = [lat lon]; - % Set pH - obj.pH = pH; - % Set Omega - obj.Omega = Omega; - % Set Clean - obj.Clean = Clean; - % Set species - obj.Species = Species; - % Set optional arguments - if ~isempty(age) - obj.Age = age; - end - if ~isempty(sw) - obj.SeaCorr = sw; - end - if ~isempty(pmean) - obj.PriorMean = pmean; - end - if ~isempty(pstd) - obj.PriorStd = pstd; - end - if ~isempty(bayes) - obj.bayes = bayes; - end - end - end - - % PSM methods - methods - - % Find the state vector elements needed to run the PSM - getStateIndices( obj, ensMeta, sstName, sssName, monthName, varargin ); - - % Internal error checking - errorCheckPSM( obj ); - - % Run the forward model and get Ye and R - [mg,R] = runForwardModel( obj, M, ~, ~ ) - - end - -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@mgcaPSM/runForwardModel.m b/3. PSMs/Specific Forward Models/@mgcaPSM/runForwardModel.m deleted file mode 100644 index 17e66be4..00000000 --- a/3. PSMs/Specific Forward Models/@mgcaPSM/runForwardModel.m +++ /dev/null @@ -1,44 +0,0 @@ -% Now run the forward model -function[mg,R] = runForwardModel( obj, M, ~, ~ ) - %get ensemble means of SST and SSS for growth seasons - SSTens = mean(M(1:12,:),2); - %find the species - species_ind = strcmp(obj.Species,obj.SpeciesNames); - growth = obj.GrowthSeas(species_ind,:); - %minimum months for seasonal average - min_month = 3; - %get months in range - indt=SSTens >= growth(1) & SSTens <= growth(2); - gots_t=find(indt); - %get months outside of range - nots_t=find(~indt); -while length(gots_t) < min_month - diffs(1,:)=abs(SSTens(nots_t)-growth(1)); - diffs(2,:)=abs(SSTens(nots_t)-growth(2)); - %find the closest value - closest=min(diffs,[],[1 2]); - %get its location - [~,y]=find(diffs==closest); - %update gots - gots_t = [gots_t;nots_t(y)]; - %update nots - nots_t(y)=[]; - clear diffs -end - %average months for seasonal T - SST=mean(M(gots_t,:),1); - SSS=mean(M(gots_t+12,:),1); - % Run the forward model. Output is 1500 possible estimates for - % each ensemble member (nEns x 1000) - mg = baymag_forward(obj.Age,SST,obj.Omega,SSS,obj.pH,obj.Clean,obj.Species,obj.SeaCorr,obj.PriorMean,obj.PriorStd,obj.bayes); - - % Estimate R from the variance of the model for each ensemble - % member. (scalar) - R = mean( var(mg,[],2), 1); - - % Take the mean of the 1500 possible values for each ensemble - % member as the final estimate. (1 x nEns) - mg = mean(mg,2); - % transpose for Ye - mg = mg'; -end diff --git a/3. PSMs/Specific Forward Models/@ukPSM/UK_forward_model.m b/3. PSMs/Specific Forward Models/@ukPSM/UK_forward_model.m deleted file mode 100644 index 7cef4017..00000000 --- a/3. PSMs/Specific Forward Models/@ukPSM/UK_forward_model.m +++ /dev/null @@ -1,44 +0,0 @@ -function [uk] = UK_forward_model(ssts, bayesFile) -% Models uk'37 from SSTs using the BAYSPLINE calibration. -% -% uk = UK_forward_model( ssts, bayes ) -% -% ----- Inputs ----- -% -% ssts: A vector of sea surface temperatures (Celsius) (1 x N) or (N x 1) -% -% bayes: A Bayesian posterior to use for the calibration. -% -% ----- Outputs ----- -% -% uk: A set of 1500 uk'37 estimates for each SST. (1500 x N) - -bayes = load(bayesFile); -% Convert ssts to column vector. 
-ssts=ssts(:); - -%NOTE: calibration is seasonal for the following regions: North Pacific (Jun-Aug), -%North Atlantic (Aug-Oct), Mediterrenean (Nov-May). If the data lie in the -%following polygons then you should provide seasonal SSTs: - -%set up spline parameters with set knots -order=3;%spline order, 3 for quadratic -kn = augknt(bayes.knots,order); %knots - -%spmak assembles the b-spline with the given knots and current coeffs -bs_b=spmak(kn,bayes.bdraws); - -%fnxtr linearly extrapolates the spline to evaluate values at SSTs -%outside the calibration range (0-30). w/o this, B-spline will return a NaN -%at SSTs out of this range. -bs=fnxtr(bs_b); -%evaluate the mean value of the spline for your SST obs: -mean_now=fnval(bs,ssts); -%draw from the distribution: - -% Facilitates comparison of different methods. -rng('default'); - -uk=normrnd(mean_now,repmat(sqrt(bayes.tau2),1,length(ssts))); - -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@ukPSM/bayes_posterior_v2.mat b/3. PSMs/Specific Forward Models/@ukPSM/bayes_posterior_v2.mat deleted file mode 100644 index 0b66d33fab09a0d8262cf667751376fefd0c62b6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 54887 zcma%>Q*b2=kcB6gsFIU!Y9 zAz?x`dPYJyAyqnaD_b*KLPc967juA<9XFw(h=!CrJ1Z@rh?AL-i4q#6x zZDdc#%tXk{%+1EZ&BQ^-%*ez=`2Wo}6k1T_cgmQRFC5 ze@!D)1+Xu~lyMSHQl_X`3s93#(K9XLoL@3$9V3{{X;a4%$d$?_G=(KeZhB_BPxG6B z&MP~!8#cbZw;!ITFfcHwDKIdRiNzsvKe0a_*!VOgF4&(R|Ct(o`E;MqkduT} zq9bScNPi^a<;FV|{hAR`N(>%i{?}G>WMq8|N2{geKuZ~5ks$%Z*c6g}-jMh#Nnr%% zEGug$-H+B?VyMX<-2uxhS=-M#?-AmtFxK}phZ`gidv>XbUd*|9_Unr(ypdqVV&~1k zIy{%H*WZ25#U{=@`()U1l@Lj z1jz94$YCcB3?6&-S=2BB$q2=JC>5s?ZEbCOiv}4ZVq5uL(!+w}`qLp*Q(AKGa$VV_@;&a$&*+&* z-q1PNaBUWxQ{g_&lM6{I*phlA{#%Q=U?;p4o{+O^mOn~ooJ|SgU1pCd7~JK;Pv%nd zU6(!~rxP_iW(klhH)XmA^q4U*-K)laWrIvF^Wno13r5a;o~JL_x_1)0)+?6Dr*#n< z9}V;{8os8X`Qj9UB(@!o&$M&(YBg&fnh>Z2NoK%_0w(@))eT{vTOQ$Qc-FwhK1Kd%%CE zk0HIz$dv%phrpRp8_(KBnD{lu495Tb?jK;*i!>r}`m>mAThiPKExOD#jA<)7F#aHN z%Ms1OXj8Otv-S&3IEwD)xMh)1tqOV1W=O=b@pd-B9)p>UltAu&?e*%8#)4Km;D|84 zVnYrd+8?VJFJpFe%C?^We4jR?O>{zK*cq9_tf*X@K0xAW8+`ZL77vg8vx7Ses*}+1 zr_UceNlwoE`i)F5XR9u4*9r1vm!Fk}HMB2jvht!#;BVVLtEayuJrd@nMV^$L60&cM z4)0fDp!{YjjeNe!M6aFX(Rls+>{oOx5Dv7)8`jx{eZGHU%jYo`0g8$p zgeNo_+oR{mF&_XP7)J2tyX;(cYuc3)vo#UKt@YGp#RlUd4aXN9Z(RHxqWbypHzFec zT0s$s%%I?6XqtlzMwd{aB}YF7gWs zr-yyf!4~j)l~UX`MizcYLveB#;p}fJpx$giNPl@jez!}Tx&RxpDN7<2v%A$gF2-wY z4diS-OP#CK#IU6+LD^d~dd<9xDRjEQ%eCQwa^yt4do9-Mv#TYiaR6ws9-aU1OrMbN zN`o4~yei9?$hBS;TWIqcYRw!O(e^lxV;Ar@Jvry9m-mJKe!$^vEcl3Snd(Cf)$8+& zaM-m%|2hSb-W^aX(oIYCQ=PPXA`~q`0Lu1GiM3OH!%}tyJvG+O#^NJ)9w2y$CY-g`S)~+){L-w z?LoAIC&7c6Xk$5-tR|c8qetarzv4>$LG?GQ!;R=y>r`Txbol)Q1{=l#sLPZ0@K^^* zM73&@naS>uZmI+kyd(huc7&E-XiK?Yn7;zM`S$lJz$Y&>`r{uD-I$*@m!XcYT6}nA zP(F|k9B&9Jciqtj7irv2{$nwF{jWdr>D?I}SiI}aW>pta-<`TD%2lyZw+u8*&Ol$u z7*CUWFi>qKa#Q48jbHyzF8rQ)y)dOhCM#88u?Rcb<Q-uj~FLWq8f zzY*`VEw=X|U&ZQUxF;!85OFVq@%|VOu+1C~o!*!hYMrL=N5bgPVOh-!NB<)^#UJ#E5r^A`K%u*rx#66|`&^6=84#7w`lskYOv$lU&h`er zqK#`_^EE9hw^Y~vkk<1R7#e^dM~vuYyA@H9dckfg zq<1EHo=SOg$(tAF@oS*{S}Lu+jn8=3Y>epCt&IY<5rQ&E_44mX zjgud*cpy!uJ#t-VXib_LD0%~;eF~1|Q5=pxd4rRt+aLl`3xw{-synZ_;F77pnkO31 z+dQ+4UCfZ+lmNQ_0GRXjV#F?^DW2@c+bQA=DGNkh=OIM^GR)Bd1-@;4%05Fio#i7X z0(pDbb2`(-LWR1eE-8VfZ!UwH{>a#RjS*#m;dW%nu6M7g=-AcWv|TCU%RzsKKI0Zd zOf%(mJra%cwjNo4xKI>CHsK$%qBO{L@pPbPiN9uL3M$3OgJBsLFUF$WXw}^<*Q}($ zk5geRfmco_Q7=SlmJNjF{^hqj&=59H15xjOilm@P(EOsn1gD7;6&GNOCiwj#c z_luW}b%{JCwDr8zP2cUfurL#&b0aE}LE^KsWm`Vjb&c*TG?4Y*POa(l3nYHW$7%jL zdVg^0t$vLxD!%ow<8bt$C-P;5EziZ|uT^HIng=TU8<6xyDm9;sRIIo11hH 
[Binary patch data for @ukPSM/bayes_posterior_v2.mat (54887 bytes) omitted; the encoded contents are not human-readable.]
zUP}VmSZGwSo(_ld#uslOPc)gzT&EY~e3lswc496#d}(m+hfkQ>v`>W$wjqzk-|t;H zL4e(!w1vkJus6*g)cMS}C%BGuXv5;8_=3-==j*V)Re_F$eG4Bg=Q0J+uTaTx6l-amfX zTAUyEdN$ZPVaH$Oj;6##Kjf=AkuoiXej04!m`X_yMxS;xmi>(P(scPj?)q!N}`~foMlM zIO|=yA%oAU#Z`99@(cx@N9kD{IJY(JoDO1`vtHMW+hhyS;aKe8i{r?hb{*mjf3~Bq zB}Eq|p>KUUBP(efjGX*9p3fcqe6i^;ZDj_1u-7D+7jy2SiF?E6{;6b+BU!HHq8A?Jdv+4$9p&inE9fP6ua`KTtglZ%2`=( z;K%oGS)vu8U#C=O$2CZkpsr-Oe+;>HQjH@s5c`>T=+2R8^tEl%$#yUIVvcWT@qLH; zE~Ze)&#FR!NU2)?QPhjcXb#gA^tX49qN*8?BQqFHKXbd{xz0&AW!<7cO2RpR1o>Q) z_2Ur={d>kBQ!Ou$fXip*7^$al-W8VxSUv6<#g8yfWR^L~6q z6y`4j!Th*M8)M%>yT?-G&g zBk^hML$SrW%b&(@9<;toh=*gY*x|D;9S9)k8DeMgJc-IfMI`3KdmX&bqkSo`We;2m z!t=B<8R@STM6Obt*z^teJTi9fw?F#GNcHmbI^@Le(H|k=(YPmbr}cjZ5g;fN_4a)? z>RrYD6>1ThXg17_%r66sNac?8rC5%%WW!7{QaE-H5ZKHv@ zpT20|7zzIa009604VHH}mTefum4rw|3K69gvdL&%Mj`Y?ii*m6-_MH@l9@e1NEsnQ zp}tZyjC_PjQAsk21}Y@0BD1XT`pfY=2aju<=kGd&(=w`~wONq!ir+~;gno%NgCqQ=89QE`aS{WqZCXEqsNAeDQMkKIWIayfpg!h&sUuo zSe8`p>M+TI`mzgBYfA{YpZ?-+?@wV%RgH5hn}Fq~sIlE66wWRB>6O|`p!LVYKQVo6aQ|cq| zYZpsPFol7YLHR;MBLoUUm4lU~DEx7<>-WD&p+GX}ao=MCouW1FrkxaGZrX@fU8m5n z>T2&td_VR(B#Z~=vdnI%RT}H!(J#r^vyp&n(W{k0*so^9UZY=z6dotvHS%<(u$mc5 z7+uA{&C-1P*<=a}HwAF_b27j(i+qrmPawRm?m^iE0pIK^Dtqz0%aZfHH$ULtho1b~ z)5L=4P7bX@q6E~m=BWxW6vTf$b^ZtE#Hw(GtnCzth4SYm(FFbqrmCj85%^QHDY*M1 z0hN*Ic~zDa;(oi{x0+&sS|IZs}rpioz}e8W!y ze!;KLe|f=xrAO?Ak}oXCkXsrlVb4HkB3o5rJAobT+n!yUPa$ELC*$uq0_7si_iOgOoLdzzT~2r}@)(m?)gIs-FYL0^Ra5{UolvF&pW1)00DN*RAB@H_sS zK8XEhFWD&V)kk5ub%>^xHHDJP#GAE&1+|~v%0n!Wig2|LC_A?zML2X8cAz3Y)psujG5nzzLDLgB#2!=nm>!Pne=0?^~*o zEkPjTKeOmO+}j-gkD$rbY&d0k#PJ3%13R)>x&vb<9A;OEt-$&RMBhy&WmDjYoD3Mw zVxV17jUCrTfpx)+2=H3VZ_HQAoK>M5+sGH>eFp)k{?z*TaAfpcRV z7xU&&w-Y};ZbE&8jGLY5ea65&gZb?KK>|nj=cx(I5U_tLrs5VsLD4)$BjYK7ujRGp zj!qJgTqf}1;x7tar}JYPaIXsYlFD6bDR3>5QLr2)K;Jt(`goSYiyODp;?H0oPwibc zzNX->>?>=R#{i$jjSX$LDdYy+pBsHip!AuX0_z9^hT2?YzHx5R0`#F5vvA2!H*$zb?@Cr@!?*KOv4Pnv zFxBza{%B9Zw72D1gAaw>64odAelw73v*_uKC>A_$u--rWoPlpet(u1=QD5nC4lEH0 z!*cthHyk6-e#+6fWjpeAk-75%)VZ;3v%*v-3mkuLn_DrJqlKH<@&zjv@n`$`(rzxVq1n^(Ga09hte^QJ8{C0Xb)?O<}CE z+$LcW1-TL9Pq&ou*>d}Z9^_npPjk8p>UpLqt~VkR^`0JB;BXuH753no0^YBQCh6qd zr8wV~W}a`p=uhhq4!vCz>bnGAscI6~ceye(GM~agtlGl?1qwBbb?qgeQ&_Q7sbOq+#9mCrpd9C6On3cq^AX3{+!3p;MY3D;c=05Z=yznt`Ct zFYIpNUE3d$qSZf6Aj0VampbZOJVi>|kPH2+DPyIwpMfWXRcnQ|vLX5H-GTJw6k6q0 z_!1ui{=a{e*1e%{;zZV(U1b!cwIYK0tO#&)7=(x8-Hd)|)LV`7@&B-)m&X)!5tr3v zhtKNQ7HCh05jbU+?Jdv8z@=r*no4Z+U7CosyCMUDkdPROJjl&UdNqu9zM|7~9j1Mlxeb7gorziop z^*5T2p#ExjdXl&c37q`Wa+h~8f!{N_s~%lvKzD+pc^L0+o3e;Ti6a5$Pow+K%_0wY z6;^HdkAWh?7RiTpm`~yzI~2Sz*VM5d7`;Fpjt3QR8d3OuqwmQea>eE9aFJOPg(sPs zGWTrIKM@H@-5eAGMT5ng1t^?RY!t}Fe31G+zHUa91t&G5{7vu<%AWnIII@aDi`e(! 
diff --git a/3. PSMs/Specific Forward Models/@ukPSM/errorCheckPSM.m b/3. PSMs/Specific Forward Models/@ukPSM/errorCheckPSM.m deleted file mode 100644 index 93f080f7..00000000 --- a/3. PSMs/Specific Forward Models/@ukPSM/errorCheckPSM.m +++ /dev/null @@ -1,11 +0,0 @@ -function[] = errorCheckPSM( obj ) -% Error checks a UK37 PSM - -% Need 12 state vector elements -if ~isvector( obj.H) || length(obj.H)~=12 - error('H must contain 12 elements.'); -elseif isempty( obj.bayesFile ) - error('Missing Bayesian posterior file.'); -end - -end diff --git a/3. PSMs/Specific Forward Models/@ukPSM/getStateIndices.m b/3. PSMs/Specific Forward Models/@ukPSM/getStateIndices.m deleted file mode 100644 index 40ab611e..00000000 --- a/3. PSMs/Specific Forward Models/@ukPSM/getStateIndices.m +++ /dev/null @@ -1,37 +0,0 @@ -function[] = getStateIndices( obj, ensMeta, sstName, monthMeta, varargin ) -% Finds state vector elements needed to run a UK37 PSM -% -% obj.getStateIndices( ensMeta, sstName, monthMeta ) -% Finds the closest SST state vector element in each of 12 -% months. -% -% obj.getStateIndices( ..., dimN, metaN ) -% Searches for the closest element along specific elements of -% other dimensions. For example, only search the top level of a -% 3D ocean grid. See PSM.getClosestLatLonIndex for details. -% -% ----- Inputs ----- -% -% ensMeta: An ensemble metadata object -% -% sstName: The name of the SST variable. A string -% -% monthMeta: Metadata for each of the 12 months. Must have 12 -% rows. -% -% dimN, metaN: Please see PSM.getClosestLatLonIndex for details - -% Error check -if ~isa(ensMeta, 'ensembleMetadata') || ~isscalar(ensMeta) - error('ensMeta must be a scalar ensembleMetadata object.'); -elseif ~isstrflag(sstName) - error('The name of the SST variable must be a string scalar or character row vector.'); -elseif size(monthMeta,1) ~= 12 - error('monthMeta must have 12 rows (one for each month).'); -end - -% Finds the closest SST variable in each of the specified months -[~,~,~,~,~,~,time] = getDimIDs; -obj.H = ensMeta.closestLatLonIndices( obj.coord, sstName, time, monthMeta, varargin{:} ); - -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@ukPSM/runForwardModel.m b/3. PSMs/Specific Forward Models/@ukPSM/runForwardModel.m deleted file mode 100644 index 2aefed89..00000000 --- a/3.
PSMs/Specific Forward Models/@ukPSM/runForwardModel.m +++ /dev/null @@ -1,16 +0,0 @@ -function[uk, R] = runForwardModel( obj, M, ~, ~ ) -% Runs the UK forward model. Searches coordinate polygons for seasonal -% areas - -% Get the appropriate season for the region -ind = obj.seasonalPolygon; -SST = mean( M(ind,:), 1 ); - -% Run the forward model, estimate R from the variance of the estimate -uk = ukPSM.UK_forward_model( SST, obj.bayesFile ); -R = mean( var(uk,[],1), 2 ); - -uk = mean( uk, 1 ); -end - - diff --git a/3. PSMs/Specific Forward Models/@ukPSM/seasonalPolygon.m b/3. PSMs/Specific Forward Models/@ukPSM/seasonalPolygon.m deleted file mode 100644 index 05b0096e..00000000 --- a/3. PSMs/Specific Forward Models/@ukPSM/seasonalPolygon.m +++ /dev/null @@ -1,35 +0,0 @@ -function[ind] = seasonalPolygon( obj ) -% Defines seasonal polygons for a UK37 PSM - -% Mediterranean polygon: Nov-May -poly_m_lat=[36.25; 47.5; 47.5; 30; 30]; -poly_m_lon=[-5.5; 3; 45; 45; -5.5]; - -% North Atlantic polygon: Aug-Oct -poly_a_lat=[48; 70; 70; 62.5; 58.2; 48]; -poly_a_lon=[-55; -50; 20; 10; -4.5; -4.5]; - -% North Pacific polygon: Jun-Aug -poly_p_lat=[45; 70; 70; 52.5; 45]; -poly_p_lon=[135; 135; 250; 232; 180]; - -% Convert to -180 to 180 for Med. and Atl. -lon180=obj.coord(2); -lon180(lon180>180)=lon180(lon180>180)-360; - -% Convert to 0 to 360 for Pacific -lon360=obj.coord(2); -lon360(lon360<0)=360+lon360(lon360<0); - -% Get the appropriate season -if inpolygon(lon180,obj.coord(1),poly_m_lon,poly_m_lat) - ind = [1 2 3 4 5 11 12]; -elseif inpolygon(lon180,obj.coord(1),poly_a_lon,poly_a_lat) - ind = [8 9 10]; -elseif inpolygon(lon360,obj.coord(1),poly_p_lon,poly_p_lat) - ind = [6 7 8]; -else - ind = (1:12)'; -end - -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@ukPSM/ukPSM.m b/3. PSMs/Specific Forward Models/@ukPSM/ukPSM.m deleted file mode 100644 index bb6ae266..00000000 --- a/3. PSMs/Specific Forward Models/@ukPSM/ukPSM.m +++ /dev/null @@ -1,69 +0,0 @@ -classdef ukPSM < PSM - % ukPSM - % Implements proxy model for Uk'37 - % - % ukPSM Methods: - % ukPSM - Creates a new ukPSM object - % getStateIndices - Finds state vector elements needed to run the PSM - % runForwardModel - Runs a UK37 PSM - % UK_forward_model - Static function implementing the UK37 forward model function. - - properties - bayesFile = 'bayes_posterior_v2.mat'; % The file with the Bayesian posterior - coord; % Lat lon coordinates of the proxy site - end - - % Constructor - methods - function obj = ukPSM( lat, lon, varargin ) - % Creates a new ukPSM object - % - % obj = ukPSM( lat, lon ) - % Creates a ukPSM at the specified coordinates - % - % obj = ukPSM( lat, lon, 'bayesFile', file ) - % Specifies a file to use for loading the Bayesian posterior. - % Default is 'bayes_posterior_v2.mat' - % - % ----- Inputs ----- - % - % lat: Site latitude. A scalar - % - % lon: SIte longitude. A scalar - % - % file: The name of the file with a bayesian posterior. - - % Get the posterior file - [file] = parseInputs( varargin, {'bayesFile'}, {[]}, {[]} ); - if ~isempty(file) - if ~exist(file, 'file') - error('The specified bayes file cannot be found. 
It may be misspelled or not on the active path.'); - end - obj.bayesFile = file; - end - - % Set the coordinates - obj.coord = [lat, lon]; - end - end - - % PSM methods - methods - - % State indices - getStateIndices( obj, ensMeta, sstName, monthMeta, varargin ); - - % Error checking - errorCheckPSM( obj ); - - % Run the forward model - [uk, R] = runForwardModel( obj, M, ~, ~ ); - - end - - % Static call to the forward model function - methods (Static) - uk = UK_forward_model( ssts, bayes ); - end - -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@vstempPSM/errorCheckPSM.m b/3. PSMs/Specific Forward Models/@vstempPSM/errorCheckPSM.m deleted file mode 100644 index 23cefa85..00000000 --- a/3. PSMs/Specific Forward Models/@vstempPSM/errorCheckPSM.m +++ /dev/null @@ -1,14 +0,0 @@ -function[] = errorCheckPSM( obj ) -% Error checks a vstempPSM -if ~isnumeric(obj.T1) || ~isnumeric(obj.T2) || ~isscalar(obj.T1) || ~isscalar(obj.T2) - error('T1 and T2 must both be numeric scalars.'); -elseif ~isvector( obj.coord ) || numel(obj.coord)~=2 - error('coord must be a 2 element vector.'); -elseif obj.coord(1) > 90 || obj.coord(1) < -90 - error('The latitude of the PSM must be on the interval [-90 90].'); -elseif obj.T2 < obj.T1 - error('T2 must be greater than T1.'); -elseif ~isvector(obj.intwindow) || numel(obj.intwindow)~=numel(obj.H) - error('intwindow must be a vector with one element per state index (H)'); -end -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@vstempPSM/getStateIndices.m b/3. PSMs/Specific Forward Models/@vstempPSM/getStateIndices.m deleted file mode 100644 index e04e2fd0..00000000 --- a/3. PSMs/Specific Forward Models/@vstempPSM/getStateIndices.m +++ /dev/null @@ -1,25 +0,0 @@ -function[] = getStateIndices( obj, ensMeta, Tname, monthMeta, varargin ) -% Gets state indices for a vstempPSM -% -% obj.getStateIndices( ensMeta, Tname, monthMeta ) -% Finds the closest temperature element in the specified months. -% -% obj.getStateIndices( ..., dimN, metaN ) -% Specifies additional search parameters. See PSM.getClosestLatLonIndex for -% details. -% -% ----- Inputs ----- -% -% ensMeta: An ensemble metadata object -% -% Tname: The name of the temperature variable -% -% monthMeta: Metadata for the required months. Each row is one month. - -% Get the closest indices -[~,~,~,~,~,~,time] = getDimIDs; - -obj.H = ensMeta.closestLatLonIndices( obj.coord, Tname, time, ... - monthMeta, varargin{:} ); - -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@vstempPSM/runForwardModel.m b/3. PSMs/Specific Forward Models/@vstempPSM/runForwardModel.m deleted file mode 100644 index d0c0d0a7..00000000 --- a/3. PSMs/Specific Forward Models/@vstempPSM/runForwardModel.m +++ /dev/null @@ -1,11 +0,0 @@ -% Run the PSM -function[Ye,R] = runForwardModel( obj, M, ~, ~ ) - - % Infill missing months with NaN - T = NaN( 12, size(M,2) ); - T( obj.intwindow, : ) = M; - - % Run the model - Ye = vstempPSM.vstemp( obj.coord(1), obj.T1, obj.T2, T, 'intwindow', obj.intwindow ); - R = NaN; -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/@vstempPSM/vstemp.m b/3. PSMs/Specific Forward Models/@vstempPSM/vstemp.m deleted file mode 100644 index 36a2afed..00000000 --- a/3.
PSMs/Specific Forward Models/@vstempPSM/vstemp.m +++ /dev/null @@ -1,144 +0,0 @@ -function[width] = vstemp( phi, T1, T2, T, varargin ) -%% A temperature-only version of VS-Lite -% -% width = vstemp( phi, T1, T2, T ) -% Generates ring widths from monthly temperature and insolation. Returns an -% un-normalized chronology. Uses all months to calculate ring widths. -% -% width = vstemp( phi, T1, T2, T, 'intwindow', intwindow ) -% Only uses specified months to calculate ring widths. -% -% ----- Inputs ----- -% -% phi: Latitude in degrees. -% -% T1: Temperature threshold below which growth is 0. -% -% T2: Temperature threshold above which growth is maximal. -% -% T: Monthly temperatures. For northern hemisphere sites, in order from -% Jan - Dec. For southern hemisphere sites, July - June. (12 x N) -% -% intwindow: Vector of integers indicating months of seasonal sensitivity. -% Integers correspond to the rows of T. (For example: [5 6] corresponds to -% May and June for a NH site, but Nov. and Dec. for a SH site). (nMonth x 1) -% -% ----- Outputs ----- -% -% width: Unnormalized ring widths. - -% ----- Written By ----- -% Jonathan King, University of Arizona, 2019 -% (Mostly just copied from the vslite code, with some better documentation) - -% Intwindow -intwindow = parseInputs( varargin, {'intwindow'}, {1:12}, {[]} ); - -% Error checking and setup -errCheck( phi, T1, T2, T, intwindow ); - -% Compute gE, the scaled monthly proxy for insolation: -gE = Compute_gE(phi); - -% Flip gE to July - June for SH sites -if phi < 0 - gE = gE( [7:12, 1:6] ); -end - -% Get the months below T1 and above T2 -lessT1 = T < T1; -greatT2 = T > T2; - -% Preallocate the temperature response -gT = NaN( size(T) ); - -% Get the temperature growth responses -gT( lessT1 ) = 0; -gT( greatT2 ) = 1; -gT( ~lessT1 & ~greatT2 ) = ( T(~lessT1 & ~greatT2) - T1 ) ./ (T2 - T1); - -% Compute growth rate (temperature response scaled by insolation) -Gr = gE .* gT; - -% Get ring width as the sum of the seasonally sensitive months -width = sum( Gr(intwindow,:), 1 ); - -end - -%% Error check the inputs. -function[] = errCheck( phi, T1, T2, T, intwindow ) - -if ~isscalar( phi ) - error('phi is not a scalar.'); -elseif ~isscalar(T1) - error('T1 is not a scalar'); -elseif ~isscalar(T2) - error('T2 is not a scalar'); -elseif T1 > T2 - error('T1 is greater than T2'); -elseif ~ismatrix(T) || size(T,1)~=12 - error('T must be a matrix with 12 rows.'); -elseif ~isvector(intwindow) || ~isnumeric(intwindow) || any( mod(intwindow,1)~=0 ) || any(intwindow>12) || any(intwindow<1) - error('intwindow must be a vector of integers on the interval [1, 12]'); -elseif numel(unique(intwindow))~=numel(intwindow) - error('intwindow contains duplicate values.'); -end - -end - -%% Compute response to light limitation -function [gE] = Compute_gE(phi) -%% Computes gE from latitude -% -% gE = Compute_gE( phi ) -% -% ----- Inputs ----- -% -% phi: Latitude in degrees. (Scalar) -% -% ----- Outputs ----- -% -% gE: Insolation growth response in each month. Ordered from Jan - Dec. 
(12 x 1) - -% ----- Written By ----- -% SETW 3/8/13 -% -% Cleaned by Jonathan King, 5/22/19 - -% Error check -if ~isscalar( phi ) - error('phi must be a scalar'); -elseif phi > 90 || phi < -90 - error('phi must be on the interval [-90, 90].'); -end - -% Preallocate -gE = NaN(12,1); - -% Get latitude in radians -latr = phi*pi/180; - -% Use solar declination to compute normalized day length -sd = asin(sin(pi*23.5/180) * sin(pi * (((1:365) - 80)/180)))'; - -y = -tan(ones(365,1).* latr) .* tan(sd); -y( y >= 1 ) = 1; -y( y <= -1 ) = -1; - -hdl = acos(y); -dtsi = (hdl.* sin(ones(365,1).*latr).*sin(sd))+(cos(ones(365,1).*latr).*cos(sd).*sin(hdl)); - -ndl=dtsi./max(dtsi); - -% Get the cumulative number of days at the end of each month -ndays = [0 31 28 31 30 31 30 31 31 30 31 30 31]; -cdays = cumsum(ndays); - -% Get the monthly insolation growth modifier. (The sum of normalized day -% length over each month.) -for t = 1:12 - gE(t) = mean( ndl( cdays(t)+1:cdays(t+1), 1) ); -end - -end - diff --git a/3. PSMs/Specific Forward Models/@vstempPSM/vstempPSM.m b/3. PSMs/Specific Forward Models/@vstempPSM/vstempPSM.m deleted file mode 100644 index 2279c9a8..00000000 --- a/3. PSMs/Specific Forward Models/@vstempPSM/vstempPSM.m +++ /dev/null @@ -1,57 +0,0 @@ -classdef vstempPSM < PSM - % vstempPSM - % Implements a PSM with a temperature-only version of VS-Lite - % - % vstempPSM Methods: - % vstempPSM - Creates a new vstempPSM object - % getStateIndices - Find state vector elements needed to run the model - % runForwardModel - Runs the PSM on an ensemble of input values - % vstemp - Static call to the vstemp function - - properties - coord; % The [lat, lon] location of the proxy site - T1; % The lower temperature threshold below which growth = 0 - T2; % The upper temperature threshold above which growth = 1 - intwindow; % The monthly integration window - end - - % Constructor - methods - function obj = vstempPSM( lat, lon, T1, T2, varargin ) - - % Parse, error check, set default - [intwindow] = parseInputs( varargin, {'intwindow'}, {[]}, {[]} ); - if isempty(intwindow) - intwindow = (1:12)'; - end - - % Set values - obj.coord = [lat, lon]; - obj.T1 = T1; - obj.T2 = T2; - obj.intwindow = intwindow; - end - end - - % PSM methods - methods - - % State indices - getStateIndices( obj, ensMeta, Tname, monthMeta, varargin ) - - % Error checking - errorCheckPSM(obj); - - % Runs the forward model - [Ye, R] = runForwardModel( obj, M, ~, ~ ); - - end - - % Static call to vstemp function - methods (Static) - width = vstemp( phi, T1, T2, T, varargin ); - end - -end - \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/VSlite (defunct)/VSLite4dash.m b/3. PSMs/Specific Forward Models/VSlite (defunct)/VSLite4dash.m deleted file mode 100644 index 3ff7c04d..00000000 --- a/3. PSMs/Specific Forward Models/VSlite (defunct)/VSLite4dash.m +++ /dev/null @@ -1,553 +0,0 @@ -function [trw] = VSLite4dash(phi,T1,T2,M1,M2,T,P,standard,Tclim,varargin) -%% VS-Lite optimized for the Dash framework -% -% trw = VSLite4dash(phi,T1,T2,M1,M2,T,P,standard,Tclim) -% gives just the simulated tree-ring widths as output. -% -% [...] = VSLite4dash( ..., 'lbparams', lbparams ) -% Specifies the parameters of the leaky bucket model. -% -% [...] = VSLite4dash( ..., 'intwindow', intwindow ) -% Specifies the growth response window. This is the window used to determine -% seasonal sensitivity. -% -% [...] = VSLite4dash( ..., 'hydroclim', H ) -% Specifies whether P or M is provided as input.
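% ----- Example (illustrative sketch) -----
%
% A minimal sketch of a call, assuming hypothetical 12 x nEns arrays T (deg. C)
% and P (mm); the ensemble size and all threshold values below are placeholders,
% not recommended settings:
%
%   T = 15 + 8*randn(12, 100);      % monthly temperatures for 100 ensemble members
%   P = 60 + 25*rand(12, 100);      % accumulated monthly precipitation (mm)
%   Tclim = mean(T, 2);             % monthly temperature climatology for the leaky bucket
%   trw = VSLite4dash(35, 2, 18, 0.02, 0.4, T, P, [], Tclim);                   % no standardization
%   trw = VSLite4dash(35, 2, 18, 0.02, 0.4, T, P, [], Tclim, 'intwindow', 6:8); % June-August sensitivity only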
-% -% ----- Inputs ----- -% -% phi: Site latitude in degrees N -% -% T1: Scalar temperature threshold below which growth response is 0. (in Degrees C) -% T2: Scalar temperature threshold above which growth response is 1. (degrees C) -% M1: Scalar soil moisture threshold below which growth response is 0. (in v/v) -% M2: Scalar soil moisture threshold above which growth response is 1. (in v/v) -% *** Note that T1, T2, M1, and M2 may be estimated using estimate_vslite_params_v2_5.m -% -% T = (12 x nEns) matrix of ordered mean monthly temperatures (in -% degrees C). For a Northern Hemisphere site, the months should be in order -% from January to December in the year of growth. For a Southern Hemisphere -% site the months should be in order from July in the starting year of -% growth to June of the next year. -% -% P = (nMonth x nEns) matrix of ordered accumulated monthly precipitation -% (in mm). Follows the same month ordering scheme as T. -% -% standard: A 2x1 numeric vector. First element is the mean width to use in -% ring width standardization. Second element is the standard deviation. -% Leave empty for no standardization. -% -% Tclim: A monthly temperature climatology to use for the leaky bucket -% model. (12 x 1). -% -% lbparams: Parameters of the Leaky Bucket model of soil moisture. An -% 8 x 1 vector in the following order. -% Mmax: scalar maximum soil moisture content (in v/v), -% default value is 0.76 -% Mmin: scalar minimum soil moisture (in v/v), default -% value is 0.01 -% alph: scalar runoff parameter 1 (in inverse months), -% default value is 0.093 -% m_th: scalar runoff parameter 3 (unitless), default -% value is 4.886 -% mu_th: scalar runoff parameter 2 (unitless), default -% value is 5.80 -% rootd: scalar root/"bucket" depth (in mm), default -% value is 1000 -% M0: initial value for previous month's soil moisture at -% t = 1 (in v/v), default value is 0.2 -% substep: logical 1 or 0; perform monthly substepping in -% leaky bucket (1) or not (0). Default value is 0. -% -% intwindow: A 2x1 vector indicating which month's growth responses should -% be integrated. -% -% H: A single character indicating whether P (precipitation) or M (soil -% moisture) is provided as input. If M, then the leaky bucket model is -% disabled and the value of M is used to calculate growth. - - -% ----- References ----- -% -% For more detailed documentation, see: -% 1) Tolwinski-Ward et al., An efficient forward model of the climate -% controls on interannual variation in tree-ring width, Climate Dynamics (2011) -% DOI: 10.1007/s00382-010-0945-5 -% -% 2) Tolwinski-Ward et al., Erratum to: An efficient forward model of the climate -% controls on interannual variation in tree-ring width, Climate Dynamics (2011) -% DOI: 10.1007/s00382-011-1062-9 -% -% 3) Tolwinski-Ward et al., Bayesian parameter estimation and -% interpretation for an intermediate model of tree-ring width, Clim. Past -% (2013), DOI: 10.5194/cp-9-1-2013 -% -% 4) Documentation available with the model at http://www.ncdc.noaa.gov/paleo/softlib/softlib.html - -% Parse the inputs and set the defaults -[lbparams, intwindow, hydroclim] = parseInputs( varargin, {'lbparams','intwindow','hydroclim'}, ... - {[0.76, 0.01, 0.093, 4.886, 5.80, 1000, 0.2, 0], 1:12, 'P'}, ... 
- {[],[],{'P','M'}} ); - -% Error check the inputs -errCheck( phi, T1, T2, M1, M2, T, P, standard, lbparams, intwindow ); - -% Use the original VSLite variable names -Mmax = lbparams(1); -Mmin = lbparams(2); -alph = lbparams(3); -m_th = lbparams(4); -mu_th = lbparams(5); -rootd = lbparams(6); -M0 = lbparams(7); -substep = lbparams(8); - -% Preallocate growth responses and soil moisture variables. -nEns = size(T,2); -gT = NaN(12,nEns); -gM = NaN(12,nEns); -M = NaN(12,nEns); - -% Permute SH months to match the original VS-Lite scheme in which the array -% always proceeds from January to December. (This is neccesary because it -% affects the insolation gE growth term). -if phi < 0 - T = T([7:12, 1:6], :); - P = P([7:12, 1:6], :); - intwindow(intwindow>6) = intwindow(intwindow>6)-6; - intwindow(intwindow<6) = intwindow(intwindow<6)+6; -end - -% Load in or estimate soil moisture: -if strcmp(hydroclim,'M') - % Read in soil moisture: - M = P; -else - % Compute soil moisture: - if substep == 1 - M = leakybucket_submonthly(nEns,phi,T,P,Mmax,Mmin,alph,m_th,mu_th,rootd,M0,Tclim); - elseif substep == 0 - M = leakybucket_monthly(nEns,phi,T,P,Mmax,Mmin,alph,m_th,mu_th,rootd,M0,Tclim); - elseif substep ~=1 && substep ~= 0 - disp('''substep'' must either be set to 1 or 0.'); - return - end -end - -% Compute gE, the scaled monthly proxy for insolation: -gE = Compute_gE(phi); - -% Permute gE for the southern hemisphere -if phi < 0 - gE = gE([7:12, 1:6]); -end - -% Get the months that are below T1 and above T2 -lessT1 = T < T1; -greatT2 = T > T2; - -% Get the temperature growth responses -gT( lessT1 ) = 0; -gT( greatT2 ) = 1; -gT( ~lessT1 & ~greatT2 ) = ( T(~lessT1 & ~greatT2) - T1 ) ./ (T2 - T1); - -% Get the months that are below M1 and above M2 -lessM1 = M < M1; -greatM2 = M > M2; - -% Get the moisture growth responses -gM(lessM1) = 0; -gM(greatM2) = 1; -gM(~lessM1 & ~greatM2) = ( M(~lessM1 & ~greatM2) - M1 ) ./ (M2 - M1); - -% Compute Growth rate -Gr = gE .* min( gT, gM ); - -% Grow the tree rings -width = sum( Gr(intwindow,:), 1); - -% Standardize the proxy series if a standardization is provided -if ~isempty(standard) - trw = (width - standard(1)) / standard(2); - -% Otherwise return the raw widths (for the setStandardization method) -else - trw = width; -end - -end - - -%% Subroutines - -% Error check -function[] = errCheck( phi, T1, T2, M1, M2, T, P, standard, lbparams, intwindow ) - -if ~isscalar(phi) || phi<-90 || phi>90 - error('phi must be a scalar on the interval [-90, 90]'); -end -if ~isscalar(T1) || ~isscalar(T2) || ~isscalar(M1) || ~isscalar(M2) - error('T1, T2, M1, and M2 must all be scalars.'); -end -if size(T,1)~=12 - error('T must have 12 rows. (One for each month).'); -elseif size(P,1)~=12 - error('P must have 12 rows. (One for each month).'); -elseif size(T,2) ~= size(P,2) - error('T and P must have the same number of columns. 
(One for each ensemble member.'); -elseif ~isempty(standard) && (numel(standard)~=2 || ~isnumeric(standard)) - error('standard must be a numeric vector with two elements or empty.'); -elseif numel(lbparams)~=8 || ~isvector(lbparams) || ~isnumeric(lbparams) - error('lbparams must be a numeric vector with 8 elements.'); -end - -% Int window -if ~isvector(intwindow) || ~isnumeric(intwindow) || any(intwindow<1) || any(intwindow>12) - error('intwindow must be a numeric vector on the interval [1, 12]'); -elseif any( mod(intwindow,1)~=0 ) - error('All elements in intwindow must be integers.'); -elseif numel(unique(intwindow)) ~= numel(intwindow) - error('intwindow cannot contain duplicate elements.'); -end - -end - -% Leaky Bucket with substepping -function [M,potEv,ndl,cdays] = leakybucket_submonthly(nEns,phi,T,P,... - Mmax,Mmin,alph,m_th,mu_th,rootd,M0,Tclim) -% leackybucket_submonthly.m - Simulate soil moisture; substeps within monthly timesteps -% to better capture nonlinearities and improve moisture estimates. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Usage: [M,potEv,ndl,cdays] = leakybucket_submonthly(syear,eyear,phi,T,P,... -% Mmax,Mmin,alph,m_th,mu_th,rootd,M0) -% outputs simulated soil moisture and potential evapotranspiration. -% -% Inputs: -% syear = start year of simulation. -% eyear = end year of simulation. -% phi = latitude of site (in degrees N) -% T = (12 x nEns) matrix of ordered mean monthly temperatures (in degEes C) -% P = (12 x nEns) matrix of ordered accumulated monthly precipitation (in mm) -% Mmax = scalar maximum soil moisture held by the soil (in v/v) -% Mmin = scalar minimum soil moisture (for error-catching) (in v/v) -% alph = scalar runoff parameter 1 (in inverse months) -% m_th = scalar runoff parameter 3 (unitless) -% mu_th = scalar runoff parameter 2 (unitless) -% rootd = scalar root/"bucket" depth (in mm) -% M0 = initial value for previous month's soil moisture at t = 1 (in v/v) -% -% Outputs: -% M = soil moisture computed via the CPC Leaky Bucket model (in v/v, 12 x nEns) -% potEv = potential evapotranspiration computed via Thornthwaite's 1947 scheme (in mm) -% -% SETW+ N. Graham and K. Georgakakos 2011 - -% modified by Nick G. and K. Georgakakos - to sub-step the monthly steps. Also this version has added -% soil moisture initial conditions for restarts, or spin-up. Hands back monthly soil moisture -% and summer soil moisture as well - see varargout. Nick G. 2011/06 -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -% Storage for growth response output variables (size [12 x Nyears]): -M = NaN(12,nEns); -potEv = NaN(12,nEns); - -% ADDED BY NICK -if(M0 < 0.) 
- M0=200/rootd; -end - -% Compute normalized daylength (neglecting small difference in calculation for leap-years) -latr = phi*pi/180; % change to radians -ndays = [0 31 28 31 30 31 30 31 31 30 31 30 31]; -cdays = cumsum(ndays); -sd = asin(sin(pi*23.5/180) * sin(pi * (((1:365) - 80)/180)))'; % solar declination -y = -tan(ones(365,1).* latr) .* tan(sd); -if ~isempty(find(y>=1,1)) - y(y>=1) = 1; -end -if ~isempty(find(y<=-1,1)) - y(y<=-1) = -1; -end -hdl = acos(y); -dtsi = (hdl.* sin(ones(365,1).*latr).*sin(sd))+(cos(ones(365,1).*latr).*cos(sd).*sin(hdl)); -ndl=dtsi./max(dtsi); % normalized day length - -% calculate mean monthly daylength (used for evapotranspiration in soil moisture calcs) -jday = cdays(1:12) +.5*ndays(2:13); -m_star = 1-tand(phi)*tand(23.439*cos(jday*pi/182.625)); -mmm = NaN*ones(1,12); -for mo = 1:12 - if m_star(mo) < 0 - mmm(mo) = 0; - elseif m_star(mo) >0 && m_star(mo) < 2 - mmm(mo) = m_star(mo); - elseif m_star(mo) > 2 - mmm(mo) = 2; - end -end -nhrs = 24*acosd(1-mmm)/180; % the number of hours in the day in the middle of the month -L = (ndays(2:13)/30).*(nhrs/12); % mean daylength in each month. - -% Pre-calculation of istar and I, using input T to compute the climatology: -Tm=Tclim; -if length(Tm) ~=12 - error(['problem with creating T climatology for computing istar ' ... - 'and I']) -elseif length(Tm) ==12 - istar=(Tm./5).^1.514;istar(Tm<0)=0; - I=sum(istar); -end -% precalculation of the exponent alpha in the Thornwaite (1948) equation: -a = (6.75e-7)*I^3 - (7.71e-5)*I^2 + (1.79e-2)*I + .49; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%% -- year cycle -- %%%% -% syear = start (first) year of simulation -% eyear = end (last) year of simulation -% cyear = year the model is currently working on -% iyear = index of simulation year - -for cyear=1:nEns % begin cycling over years - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - for t = 1:12 % begin cycling over months in a year - %%%%% Compute potential evapotranspiration for current month after Thornthwaite: - Ep = NaN; - if T(t,cyear) < 0 - Ep = 0; - elseif T(t,cyear)>=0 && T(t,cyear) < 26.5 - Ep = 16*L(t)*(10*T(t,cyear)/I)^a; - elseif T(t,cyear) >= 26.5 - Ep = -415.85 + 32.25*T(t,cyear) - .43* T(t,cyear)^2; - end - potEv(t,cyear) = Ep; - %%%%% Now calculate soil moisture according to the CPC Leaky Bucket model - %%%%% (see J. Huang et al, 1996). Set n-steps according to 2 mm increments - %%%%% have to update alpha and Ep as well - 2 mm increments came from - %%%%% testing by K. Georgakakos, but one could use 5 or more, with less "accurate" results. - %%%%% Stepping is necessary because the parametization is linearized around init condition. - %%%%%%%%%%%%%%%%% - dp = 2.0; % mm of precip per increment - nstep = floor(P(t,cyear)/dp)+1; % number of sub-monthly substeps - Pinc = P(t,cyear)/nstep; % precip per substep - alphinc = alph/nstep; % runoff rate per substep time interval - Epinc = Ep/nstep; % potential evapotrans per substep. 
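% Note: each pass through the substep loop below applies the linearized CPC
% leaky-bucket water balance:
%
%   Etrans = Epinc * sm0/Mmax                                       (evapotranspiration)
%   G      = mu_th*alphinc/(1+mu_th) * sm0*rootd                    (percolation loss)
%   R      = Pinc*(sm0/Mmax)^m_th + alphinc/(1+mu_th) * sm0*rootd   (surface + subsurface runoff)
%   sm1    = sm0 + (Pinc - Etrans - R - G)/rootd
%
% with the result clamped to the interval [Mmin, Mmax] after each substep.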
- %%%%%%%%%%%%%%%%% - % handling for sm_init - if (t > 1) - M0=M(t-1,cyear); - elseif (t == 1 && cyear > 1) - M0=M(12,cyear-1); - end - sm0=M0; - - for istep=1:nstep - % evapotranspiration: - Etrans = Epinc*sm0*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alphinc/(1+mu_th)*sm0*rootd; - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = Pinc*(sm0*rootd/(Mmax*rootd))^m_th + (alphinc/(1+mu_th))*sm0*rootd; - dWdt = Pinc - Etrans - R - G; - sm1 = sm0 + dWdt/rootd; - % - sm0=max(sm1,Mmin); - sm0=min(sm0,Mmax); - end - M(t,cyear) = sm0; - % error-catching: - if M(t,cyear) <= Mmin; M(t,cyear) = Mmin; end - if M(t,cyear) >= Mmax; M(t,cyear) = Mmax; end - if isnan(M(t,cyear))==1; M(t,cyear) = Mmin; end - end % end month (t) cycle -end % end year cycle - -end - -% Leaky Bucket without substepping -function [M,potEv,ndl,cdays] =... - leakybucket_monthly(nEns,phi,T,P,Mmax,Mmin,alph,m_th,mu_th,rootd,M0,Tclim) -% leackybucket_monthly.m - Simulate soil moisture with coarse monthly time step. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Usage: [M,potEv,ndl,cdays] = leakybucket_monthly(syear,eyear,phi,T,P,Mmax,Mmin,alph,m_th,mu_th,rootd,M0) -% outputs simulated soil moisture and potential evapotranspiration. -% -% Inputs: -% syear = start year of simulation. -% eyear = end year of simulation. -% phi = latitude of site (in degrees N) -% T = (12 x nEns) matrix of ordered mean monthly temperatures (in degEes C) -% P = (12 x nEns) matrix of ordered accumulated monthly precipitation (in mm) -% Mmax = scalar maximum soil moisture held by the soil (in v/v) -% Mmin = scalar minimum soil moisture (for error-catching) (in v/v) -% alph = scalar runoff parameter 1 (in inverse months) -% m_th = scalar runoff parameter 3 (unitless) -% mu_th = scalar runoff parameter 2 (unitless) -% rootd = scalar root/"bucket" depth (in mm) -% M0 = initial value for previous month's soil moisture at t = 1 (in v/v) -% -% Outputs: -% M = soil moisture computed via the CPC Leaky Bucket model (in v/v, 12 x nEns) -% potEv = potential evapotranspiration computed via Thornthwaite's 1947 scheme (in mm) -% -% SETW 2011 -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -% Storage for output variables (size [12 x Nyears]): -M = NaN(12,nEns); -potEv = NaN(12,nEns); - -% Compute normalized daylength (neglecting small difference in calculation for leap-years) -latr = phi*pi/180; % change to radians -ndays = [0 31 28 31 30 31 30 31 31 30 31 30 31]; -cdays = cumsum(ndays); -sd = asin(sin(pi*23.5/180) * sin(pi * (((1:365) - 80)/180)))'; % solar declination -y = -tan(ones(365,1).* latr) .* tan(sd); -if ~isempty(find(y>=1,1)) - y(y>=1) = 1; -end -if ~isempty(find(y<=-1,1)) - y(y<=-1) = -1; -end -hdl = acos(y); -dtsi = (hdl.* sin(ones(365,1).*latr).*sin(sd))+(cos(ones(365,1).*latr).*cos(sd).*sin(hdl)); -ndl=dtsi./max(dtsi); % normalized day length - -% calculate mean monthly daylength (used for evapotranspiration in soil moisture calcs) -jday = cdays(1:12) +.5*ndays(2:13); -m_star = 1-tand(phi)*tand(23.439*cos(jday*pi/182.625)); -mmm = NaN*ones(1,12); -for mo = 1:12 - if m_star(mo) < 0 - mmm(mo) = 0; - elseif m_star(mo) >0 && m_star(mo) < 2 - mmm(mo) = m_star(mo); - elseif m_star(mo) > 2 - mmm(mo) = 2; - end -end -nhrs = 24*acosd(1-mmm)/180; % the number of hours in the day in the middle of the month -L = (ndays(2:13)/30).*(nhrs/12); % mean daylength in each month. 
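% Note: the block below implements Thornthwaite's potential evapotranspiration
% scheme from the monthly temperature climatology Tclim:
%
%   i_m = (Tm/5)^1.514, set to 0 where Tm < 0    (monthly heat index)
%   I   = sum of the 12 monthly heat indices
%   a   = 6.75e-7*I^3 - 7.71e-5*I^2 + 1.79e-2*I + 0.49
%   Ep  = 16*L(t)*(10*T/I)^a   for 0 <= T < 26.5 deg. C
%
% where L(t) is the mean-daylength factor computed above.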
- -% Pre-calculation of istar and I, using input T to compute the climatology: -Tm = Tclim; -if length(Tm) ~=12 - error(['problem with creating T climatology for computing istar ' ... - 'and I']) -elseif length(Tm) ==12 - istar = (Tm./5).^1.514; istar(Tm<0)=0; - I=sum(istar); -end -% precalculation of the exponent alpha in the Thornwaite (1948) equation: -a = (6.75e-7)*I^3 - (7.71e-5)*I^2 + (1.79e-2)*I + .49; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%% -- year cycle -- %%%% -% syear = start (first) year of simulation -% eyear = end (last) year of simulation -% cyear = year the model is currently working on -% iyear = index of simulation year - -for cyear=1:nEns % begin cycling over years - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - for t = 1:12 % begin cycling over months in a year - %%%%% Compute potential evapotranspiration for current month after Thornthwaite: - Ep = NaN; - if T(t,cyear) < 0 - Ep = 0; - elseif T(t,cyear)>=0 && T(t,cyear) < 26.5 - Ep = 16*L(t)*(10*T(t,cyear)/I)^a; - elseif T(t,cyear) >= 26.5 - Ep = -415.85 + 32.25*T(t,cyear) - .43* T(t,cyear)^2; - end - potEv(t,cyear) = Ep; - %%%%% Now calculate soil moisture according to the CPC Leaky Bucket model - %%%%% (see J. Huang et al, 1996). - if t > 1 - % evapotranspiration: - Etrans = Ep*M(t-1,cyear)*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alph/(1+mu_th)*M(t-1,cyear)*rootd; - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = P(t,cyear)*(M(t-1,cyear)*rootd/(Mmax*rootd))^m_th +... - (alph/(1+mu_th))*M(t-1,cyear)*rootd; - dWdt = P(t,cyear) - Etrans - R - G; - M(t,cyear) = M(t-1,cyear) + dWdt/rootd; - elseif t == 1 && cyear > 1 - % evapotranspiration: - Etrans = Ep*M(12,cyear-1)*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alph/(1+mu_th)*M(12,cyear-1)*rootd; - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = P(t,cyear)*(M(12,cyear-1)*rootd/(Mmax*rootd))^m_th +... - (alph/(1+mu_th))*M(12,cyear-1)*rootd; - dWdt = P(t,cyear) - Etrans - R - G; - M(t,cyear) = M(12,cyear-1) + dWdt/rootd; - elseif t == 1 && cyear == 1 - if M0 < 0; M0 = .20; end - % evapotranspiration (take initial soil moisture value to be 200 mm) - Etrans = Ep*M0*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alph/(1+mu_th)*(M0*rootd); - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = P(t,cyear)*(M0*rootd/(Mmax*rootd))^m_th + (alph/(1+mu_th))*M0*rootd; - dWdt = P(t,cyear) - Etrans - R - G; - M(t,cyear) = M0 + dWdt/rootd; - end - % error-catching: - if M(t,cyear) <= Mmin; M(t,cyear) = Mmin; end - if M(t,cyear) >= Mmax; M(t,cyear) = Mmax; end - if isnan(M(t,cyear))==1; M(t,cyear) = Mmin; end - end % end month (t) cycle - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -end % end year cycle -end - -% Insolation growth response -function [gE] = Compute_gE(phi) -% Just what it sounds like... computes just gE from latitude a la VS-Lite, -% but without all the other stuff. 
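% Note: gE(t) is the mean normalized day length for month t. Day length is
% computed from solar declination for each of 365 days, normalized by its
% annual maximum, and then averaged over the days of each calendar month.
% Only the latitude phi is required as input.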
-% -% Usage: gE = Compute_gE(phi) -% -% SETW 3/8/13 - -% -gE = NaN(12,1); -% -% Compute normalized daylength (neglecting small difference in calculation for leap-years) -latr = phi*pi/180; % change to radians -ndays = [0 31 28 31 30 31 30 31 31 30 31 30 31]; -cdays = cumsum(ndays); -sd = asin(sin(pi*23.5/180) * sin(pi * (((1:365) - 80)/180)))'; % solar declination -y = -tan(ones(365,1).* latr) .* tan(sd); -if ~isempty(find(y>=1,1)) - y(y>=1) = 1; -end -if ~isempty(find(y<=-1,1)) - y(y<=-1) = -1; -end -hdl = acos(y); -dtsi = (hdl.* sin(ones(365,1).*latr).*sin(sd))+(cos(ones(365,1).*latr).*cos(sd).*sin(hdl)); -ndl=dtsi./max(dtsi); % normalized day length - -% calculate mean monthly daylength (used for evapotranspiration in soil moisture calcs) -jday = cdays(1:12) +.5*ndays(2:13); -m_star = 1-tand(phi)*tand(23.439*cos(jday*pi/182.625)); -mmm = NaN*ones(1,12); -for mo = 1:12 - if m_star(mo) < 0 - mmm(mo) = 0; - elseif m_star(mo) >0 && m_star(mo) < 2 - mmm(mo) = m_star(mo); - elseif m_star(mo) > 2 - mmm(mo) = 2; - end -end -%nhrs = 24*acosd(1-mmm)/180; % the number of hours in the day in the middle of the month -%L = (ndays(2:13)/30).*(nhrs/12); -% -for t = 1:12 - gE(t) = mean(ndl(cdays(t)+1:cdays(t+1),1)); -end -%%%%%%%%%%%%%%% -end \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/VSlite (defunct)/VSLite_v2_5_1.m b/3. PSMs/Specific Forward Models/VSlite (defunct)/VSLite_v2_5_1.m deleted file mode 100644 index 91819093..00000000 --- a/3. PSMs/Specific Forward Models/VSlite (defunct)/VSLite_v2_5_1.m +++ /dev/null @@ -1,606 +0,0 @@ -function [trw,varargout] = VSLite_v2_5_1(phi,T1,T2,M1,M2,T,P,varargin) -% VSLite_v2_5.m - Simulate tree ring width index given monthly climate inputs. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Basic Usage: -% trw = VSLite_v2_5(syear,eyear,phi,T1,T2,M1,M2,T,P) -% gives just simulated tree ring as ouput. -% -% [trw,gT,gM,gE,Gr,M] = -% VSLite_v2_5(syear,eyear,phi,T1,T2,M1,M2,T,P)) also includes -% growth response to temperature, growth response to soil moisture, -% scaled insolation index, overall growth function = gE*min(gT,gM), -% and soil moisture estimate in outputs. -% -% Basic Inputs: -% syear = start year of simulation. -% eyear = end year of simulation. -% phi = latitude of site (in degrees N) -% T1 = scalar temperature threshold below which temp. growth response is zero (in deg. C) -% T2 = scalar temperature threshold above which temp. growth response is one (in deg. C) -% M1 = scalar soil moisture threshold below which moist. growth response is zero (in v/v) -% M2 = scalar soil moisture threshold above which moist. growth response is one (in v/v) -% (Note that optimal growth response parameters T1, T2, M1, M2 may be estimated -% using code estimate_vslite_params_v2_5.m also freely available at -% the NOAA NCDC Paleoclimatology software library.) -% T = (12 x Nyrs) matrix of ordered mean monthly temperatures (in degEes C) -% P = (12 x Nyrs) matrix of ordered accumulated monthly precipitation (in mm) -% -% Advanced Inputs (must be specified as property/value pairs): -% 'lbparams': Parameters of the Leaky Bucket model of soil moisture. 
-% These may be specified in an 8 x 1 vector in the following -% order (otherwise the default values are read in): -% Mmax: scalar maximum soil moisture content (in v/v), -% default value is 0.76 -% Mmin: scalar minimum soil moisture (in v/v), default -% value is 0.01 -% alph: scalar runoff parameter 1 (in inverse months), -% default value is 0.093 -% m_th: scalar runoff parameter 3 (unitless), default -% value is 4.886 -% mu_th: scalar runoff parameter 2 (unitless), default -% value is 5.80 -% rootd: scalar root/"bucket" depth (in mm), default -% value is 1000 -% M0: initial value for previous month's soil moisture at -% t = 1 (in v/v), default value is 0.2 -% substep: logical 1 or 0; perform monthly substepping in -% leaky bucket (1) or not (0)? Default value is 0. -% 'intwindow': Integration window. Which months' growth responses should -% be intregrated to compute the annual ring-width index? -% Specified as a 2 x 1 vector of integer values. Both -% elements are given in integer number of months since January -% (July) 1st of the current year in the Northern (Southern) -% hemisphere, and specify the beginning and end of the integration -% window, respectively. Defaults is [1 ; 12] (eg. integrate -% response to climate over the corresponding calendar year, -% assuming location is in the northern hemisphere). -% 'hydroclim': Value is a single character either taking value ['P'] or ['M']. -% If ['M'], then 9th input is interpreted as an estimate of -% soil moisture content (in v/v) rather than as precipitation. -% Model default is to read in precipitation and use the CPC's -% Leaky Bucket model of hydrology to estimate soil moisture, -% however if soil moisture observations or a more sophisticated -% estimate of moisture accounting for snow-related processes -% is available, then using these data directly are recommended -% (and will also speed up code). -% -% For more detailed documentation, see: -% 1) Tolwinski-Ward et al., An efficient forward model of the climate -% controls on interannual variation in tree-ring width, Climate Dynamics (2011) -% DOI: 10.1007/s00382-010-0945-5 -% -% 2) Tolwinski-Ward et al., Erratum to: An efficient forward model of the climate -% controls on interannual variation in tree-ring width, Climate Dynamics (2011) -% DOI: 10.1007/s00382-011-1062-9 -% -% 3) Tolwinski-Ward et al., Bayesian parameter estimation and -% interpretation for an intermediate model of tree-ring width, Clim. Past -% (2013), DOI: 10.5194/cp-9-1-2013 -% -% 4) Documentation available with the model at http://www.ncdc.noaa.gov/paleo/softlib/softlib.html -% -% Revision History -% v0.1 - Original coding at monthly timestep from full daily timestep model (SETW, 4/09) -% v1.0 - Changed soil moisture module to the CPC Leaky Bucket model (SETW, 5/09) -% v1.1 - No upper parametric bounds for gT, gW as in full model; no density module (SETW, 9/09) -% v1.2 - Added adjustable integration window parameters (SETW, 1/10) -% v2.0 - Minor debugging for Octave compatibility, final version for publication (SETW, 10/10) -% v2.1 - Error in evapotranspiration calculation corrected (SETW, 7/11) -% v2.2 - Add switch to allow for monthly sub-stepping in soil moisture computation (SETW, N.Graham, K.Georgakaos, 9/11) -% v2.3 - Add switch to allow moisture M to be given as input rather than estimated -% from T and P; add variable input options and improve -% commenting (SETW, 7/13) -% v2.4 MNE debugged for using soil moisture inputs at l. 
97-131 -% v2.5 Nick Graham (7/31/14) pointed out mistake in calculation of istar and -% I at l. 350-352 and l. 484-486 in version 2.3, following Huang et al (1996), Equ. 3a: -% -% i = (Tm/5) ** 1.514 -% -% - Here Tm is the climatological long-term monthly mean temperature -% for month m - calculated over some suitable period. so there are -% implicitly 12 values of i, and these are summed to give the -% climatological value I for that site - -% -% I = sum over 1-12 i(m) -% -% Note that this value could be calculated prior to beginning the -% actual simulation. -% -% implemented by MNE (8/6/14). -% -% v2.5: also added Gr as an output, as varargout(5), and shuffling -% varargout(4-8) to varargout(5-9). -% -% v2.?: Jonathan King: Fixed leaky bucket inputs and replaced years with -% nEns - -% Parameters of the Leaky Bucket model: -Mmax = 0.76; -Mmin = 0.01; -alph = 0.093; -m_th = 4.886; -mu_th = 5.80; -rootd = 1000; -M0 = 0.2; -substep = 0; -% Integration window parameters: -I_0 = 1; -I_f = 12; -% Hydroclimate variable: -hydroclim = 'P'; - -% Read in advanced inputs if user-specified: -if nargin > 9 - for i = 1:nargin-10 - namein = varargin{i}; - switch namein - case 'lbparams' - Mmax = varargin{i+1}(1); - Mmin = varargin{i+1}(2); - alph = varargin{i+1}(3); - m_th = varargin{i+1}(4); - mu_th = varargin{i+1}(5); - rootd = varargin{i+1}(6); - M0 = varargin{i+1}(7); - substep = varargin{i+1}(8); - case 'intwindow' - I_0 = varargin{i+1}(1); - I_f = varargin{i+1}(2); - case 'hydroclim' - hydroclim = varargin{i+1}; - end - end -end -%%% Pre-allocate storage for outputs: %%%% - -nEns = size(T,2); -Gr = NaN(12,nEns); -gT = NaN(12,nEns); -gM = NaN(12,nEns); -M = NaN(12,nEns); -potEv = NaN(12,nEns); -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Load in or estimate soil moisture: -if strcmp(hydroclim,'M') - % Read in soil moisture: - M = P; -else - % Compute soil moisture: - if substep == 1 - M = leakybucket_submonthly(nEns,phi,T,P,Mmax,Mmin,alph,m_th,mu_th,rootd,M0); - elseif substep == 0 - M = leakybucket_monthly(nEns,phi,T,P,Mmax,Mmin,alph,m_th,mu_th,rootd,M0); - elseif substep ~=1 && substep ~= 0 - disp('''substep'' must either be set to 1 or 0.'); - return - end -end -% Compute gE, the scaled monthly proxy for insolation: -gE = Compute_gE(phi); -% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Now compute growth responses to climate, and simulate proxy: -%%%%%%%%%%%%%%%% -% syear = start (first) year of simulation -% eyear = end (last) year of simulation -% cyear = year the model is currently working on -% iyear = index of simulation year -% Compute monthly growth response to T & M, and overall growth response G: -for cyear=1:nEns % begin cycling over years - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - for t = 1:12 % begin cycling over months in a year - %%% Calculate Growth Response functions gT(t) and gM(t) - % First, temperature growth response: - x = T(t,cyear); - if (x < T1) - gT(t,cyear) = 0; - elseif (x >= T1) && (x <= T2) - gT(t,cyear) = (x - T1)/(T2 - T1); - elseif (x >= T2) - gT(t,cyear) = 1; - end - % Next, Soil moisture growth response: - x = M(t,cyear); - if (x < M1) - gM(t,cyear) = 0; - elseif (x >= M1) && (x <= M2) - gM(t,cyear) = (x - M1)/(M2 - M1); - elseif (x >= M2) - gM(t,cyear) = 1; - end - end % end month (t) cycle - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - % Compute overall growth rate: - Gr(:,cyear) = 
gE.*min(gT(:,cyear),gM(:,cyear)); -end % end year cycle -%%%%%%%%%%%%%% Compute proxy quantity from growth responses %%%%%%%%%%%%%%% -width = NaN*ones(nEns,1); -if phi>0 % if site is in the Northern Hemisphere: - if I_0<0 % if we include part of the previous year in each year's modeled growth: - startmo = 13+I_0; - endmo = I_f; - % use average of growth data across modeled years to estimate first year's growth due - % to previous year: - width(1) = sum(Gr(1:endmo,1)) + sum(mean(Gr(startmo:12,:),2)); - for cyear = 2:nEns - width(cyear) = sum(Gr(startmo:12,cyear-1)) + sum(Gr(1:endmo,cyear)); - end - else % no inclusion of last year's growth conditions in estimates of this year's growth: - startmo = I_0+1; - endmo = I_f; - for cyear = 1:nEns - width(cyear) = sum(Gr(startmo:endmo,cyear)); - end - end -elseif phi<0 % if site is in the Southern Hemisphere: - % (Note: in the Southern Hemisphere, ring widths are dated to the year in which growth began!) - startmo = 7+I_0; % (eg. I_0 = -4 in SH corresponds to starting integration in March of cyear) - endmo = I_f-6; % (eg. I_f = 12 in SH corresponds to ending integraion in June of next year) - for cyear = 1:nEns-1 - width(cyear) = sum(Gr(startmo:12,cyear)) + sum(Gr(1:endmo,cyear+1)); - end - % use average of growth data across modeled years to estimate last year's growth due - % to the next year: - width(nEns) = sum(Gr(startmo:12,nEns))+... - sum(mean(Gr(1:endmo,:),2)); -end -% -trw = ((width-mean(width))/std(width))'; % proxy series is standardized width. -% -if nargout >=1 - varargout(1) = {gT}; - varargout(2) = {gM}; - varargout(3) = {gE}; - varargout(4) = {Gr}; - varargout(5) = {M}; - varargout(6) = {potEv}; - varargout{7} = {width}; - varargout{8} = {mean(width)}; - varargout{9} = {std(width)}; -end -% -end -% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%%% SUBROUTINES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%% LEAKY BUCKET WITH SUBSTEPPING %%%%%%%%%%%%%%%%%%%%%%%% -function [M,potEv,ndl,cdays] = leakybucket_submonthly(nEns,phi,T,P,... - Mmax,Mmin,alph,m_th,mu_th,rootd,M0) -% leackybucket_submonthly.m - Simulate soil moisture; substeps within monthly timesteps -% to better capture nonlinearities and improve moisture estimates. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Usage: [M,potEv,ndl,cdays] = leakybucket_submonthly(syear,eyear,phi,T,P,... -% Mmax,Mmin,alph,m_th,mu_th,rootd,M0) -% outputs simulated soil moisture and potential evapotranspiration. -% -% Inputs: -% syear = start year of simulation. -% eyear = end year of simulation. 
-% phi = latitude of site (in degrees N) -% T = (12 x Nyrs) matrix of ordered mean monthly temperatures (in degEes C) -% P = (12 x Nyrs) matrix of ordered accumulated monthly precipitation (in mm) -% Mmax = scalar maximum soil moisture held by the soil (in v/v) -% Mmin = scalar minimum soil moisture (for error-catching) (in v/v) -% alph = scalar runoff parameter 1 (in inverse months) -% m_th = scalar runoff parameter 3 (unitless) -% mu_th = scalar runoff parameter 2 (unitless) -% rootd = scalar root/"bucket" depth (in mm) -% M0 = initial value for previous month's soil moisture at t = 1 (in v/v) -% -% Outputs: -% M = soil moisture computed via the CPC Leaky Bucket model (in v/v, 12 x Nyrs) -% potEv = potential evapotranspiration computed via Thornthwaite's 1947 scheme (in mm) -% -% SETW+ N. Graham and K. Georgakakos 2011 - -% modified by Nick G. and K. Georgakakos - to sub-step the monthly steps. Also this version has added -% soil moisture initial conditions for restarts, or spin-up. Hands back monthly soil moisture -% and summer soil moisture as well - see varargout. Nick G. 2011/06 -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -% Storage for growth response output variables (size [12 x Nyears]): -M = NaN(12,nEns); -potEv = NaN(12,nEns); - -% ADDED BY NICK -if(M0 < 0.) - M0=200/rootd; -end - -% Compute normalized daylength (neglecting small difference in calculation for leap-years) -latr = phi*pi/180; % change to radians -ndays = [0 31 28 31 30 31 30 31 31 30 31 30 31]; -cdays = cumsum(ndays); -sd = asin(sin(pi*23.5/180) * sin(pi * (((1:365) - 80)/180)))'; % solar declination -y = -tan(ones(365,1).* latr) .* tan(sd); -if ~isempty(find(y>=1,1)) - y(y>=1) = 1; -end -if ~isempty(find(y<=-1,1)) - y(y<=-1) = -1; -end -hdl = acos(y); -dtsi = (hdl.* sin(ones(365,1).*latr).*sin(sd))+(cos(ones(365,1).*latr).*cos(sd).*sin(hdl)); -ndl=dtsi./max(dtsi); % normalized day length - -% calculate mean monthly daylength (used for evapotranspiration in soil moisture calcs) -jday = cdays(1:12) +.5*ndays(2:13); -m_star = 1-tand(phi)*tand(23.439*cos(jday*pi/182.625)); -mmm = NaN*ones(1,12); -for mo = 1:12 - if m_star(mo) < 0 - mmm(mo) = 0; - elseif m_star(mo) >0 && m_star(mo) < 2 - mmm(mo) = m_star(mo); - elseif m_star(mo) > 2 - mmm(mo) = 2; - end -end -nhrs = 24*acosd(1-mmm)/180; % the number of hours in the day in the middle of the month -L = (ndays(2:13)/30).*(nhrs/12); % mean daylength in each month. - -% Pre-calculation of istar and I, using input T to compute the climatology: -Tm=nanmean(T',1); -if length(Tm) ~=12 - error(['problem with creating T climatology for computing istar ' ... 
- 'and I']) -elseif length(Tm) ==12 - istar=(Tm./5).^1.514;istar(Tm<0)=0; - I=sum(istar); -end -% precalculation of the exponent alpha in the Thornwaite (1948) equation: -a = (6.75e-7)*I^3 - (7.71e-5)*I^2 + (1.79e-2)*I + .49; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%% -- year cycle -- %%%% -% syear = start (first) year of simulation -% eyear = end (last) year of simulation -% cyear = year the model is currently working on -% iyear = index of simulation year - -for cyear=1:nEns % begin cycling over years - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - for t = 1:12 % begin cycling over months in a year - %%%%% Compute potential evapotranspiration for current month after Thornthwaite: - if T(t,cyear) < 0 - Ep = 0; - elseif T(t,cyear)>=0 && T(t,cyear) < 26.5 - Ep = 16*L(t)*(10*T(t,cyear)/I)^a; - elseif T(t,cyear) >= 26.5 - Ep = -415.85 + 32.25*T(t,cyear) - .43* T(t,cyear)^2; - end - potEv(t,cyear) = Ep; - %%%%% Now calculate soil moisture according to the CPC Leaky Bucket model - %%%%% (see J. Huang et al, 1996). Set n-steps according to 2 mm increments - %%%%% have to update alpha and Ep as well - 2 mm increments came from - %%%%% testing by K. Georgakakos, but one could use 5 or more, with less "accurate" results. - %%%%% Stepping is necessary because the parametization is linearized around init condition. - %%%%%%%%%%%%%%%%% - dp = 2.0; % mm of precip per increment - nstep = floor(P(t,cyear)/dp)+1; % number of sub-monthly substeps - Pinc = P(t,cyear)/nstep; % precip per substep - alphinc = alph/nstep; % runoff rate per substep time interval - Epinc = Ep/nstep; % potential evapotrans per substep. - %%%%%%%%%%%%%%%%% - % handling for sm_init - if (t > 1) - M0=M(t-1,cyear); - elseif (t == 1 && cyear > 1) - M0=M(12,cyear-1); - end - sm0=M0; - - for istep=1:nstep - % evapotranspiration: - Etrans = Epinc*sm0*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alphinc/(1+mu_th)*sm0*rootd; - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = Pinc*(sm0*rootd/(Mmax*rootd))^m_th + (alphinc/(1+mu_th))*sm0*rootd; - dWdt = Pinc - Etrans - R - G; - sm1 = sm0 + dWdt/rootd; - % - sm0=max(sm1,Mmin); - sm0=min(sm0,Mmax); - end - M(t,cyear) = sm0; - % error-catching: - if M(t,cyear) <= Mmin; M(t,cyear) = Mmin; end - if M(t,cyear) >= Mmax; M(t,cyear) = Mmax; end - if isnan(M(t,cyear))==1; M(t,cyear) = Mmin; end - end % end month (t) cycle -end % end year cycle - -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%% LEAKY BUCKET WITHOUT SUBSTEPPING %%%%%%%%%%%%%%%%%%%%% -function [M,potEv,ndl,cdays] =... - leakybucket_monthly(nEns,phi,T,P,Mmax,Mmin,alph,m_th,mu_th,rootd,M0) -% leackybucket_monthly.m - Simulate soil moisture with coarse monthly time step. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Usage: [M,potEv,ndl,cdays] = leakybucket_monthly(syear,eyear,phi,T,P,Mmax,Mmin,alph,m_th,mu_th,rootd,M0) -% outputs simulated soil moisture and potential evapotranspiration. -% -% Inputs: -% syear = start year of simulation. -% eyear = end year of simulation. 
-% phi = latitude of site (in degrees N) -% T = (12 x Nyrs) matrix of ordered mean monthly temperatures (in degEes C) -% P = (12 x Nyrs) matrix of ordered accumulated monthly precipitation (in mm) -% Mmax = scalar maximum soil moisture held by the soil (in v/v) -% Mmin = scalar minimum soil moisture (for error-catching) (in v/v) -% alph = scalar runoff parameter 1 (in inverse months) -% m_th = scalar runoff parameter 3 (unitless) -% mu_th = scalar runoff parameter 2 (unitless) -% rootd = scalar root/"bucket" depth (in mm) -% M0 = initial value for previous month's soil moisture at t = 1 (in v/v) -% -% Outputs: -% M = soil moisture computed via the CPC Leaky Bucket model (in v/v, 12 x Nyrs) -% potEv = potential evapotranspiration computed via Thornthwaite's 1947 scheme (in mm) -% -% SETW 2011 -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -% Storage for output variables (size [12 x Nyears]): -M = NaN(12,nEns); -potEv = NaN(12,nEns); - -% Compute normalized daylength (neglecting small difference in calculation for leap-years) -latr = phi*pi/180; % change to radians -ndays = [0 31 28 31 30 31 30 31 31 30 31 30 31]; -cdays = cumsum(ndays); -sd = asin(sin(pi*23.5/180) * sin(pi * (((1:365) - 80)/180)))'; % solar declination -y = -tan(ones(365,1).* latr) .* tan(sd); -if ~isempty(find(y>=1,1)) - y(y>=1) = 1; -end -if ~isempty(find(y<=-1,1)) - y(y<=-1) = -1; -end -hdl = acos(y); -dtsi = (hdl.* sin(ones(365,1).*latr).*sin(sd))+(cos(ones(365,1).*latr).*cos(sd).*sin(hdl)); -ndl=dtsi./max(dtsi); % normalized day length - -% calculate mean monthly daylength (used for evapotranspiration in soil moisture calcs) -jday = cdays(1:12) +.5*ndays(2:13); -m_star = 1-tand(phi)*tand(23.439*cos(jday*pi/182.625)); -mmm = NaN*ones(1,12); -for mo = 1:12 - if m_star(mo) < 0 - mmm(mo) = 0; - elseif m_star(mo) >0 && m_star(mo) < 2 - mmm(mo) = m_star(mo); - elseif m_star(mo) > 2 - mmm(mo) = 2; - end -end -nhrs = 24*acosd(1-mmm)/180; % the number of hours in the day in the middle of the month -L = (ndays(2:13)/30).*(nhrs/12); % mean daylength in each month. - -% Pre-calculation of istar and I, using input T to compute the climatology: -Tm = nanmean( T', 1 ); -if length(Tm) ~=12 - error(['problem with creating T climatology for computing istar ' ... - 'and I']) -elseif length(Tm) ==12 - istar = (Tm./5).^1.514; istar(Tm<0)=0; - I=sum(istar); -end -% precalculation of the exponent alpha in the Thornwaite (1948) equation: -a = (6.75e-7)*I^3 - (7.71e-5)*I^2 + (1.79e-2)*I + .49; - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%% -- year cycle -- %%%% -% syear = start (first) year of simulation -% eyear = end (last) year of simulation -% cyear = year the model is currently working on -% iyear = index of simulation year - -for cyear=1:nEns % begin cycling over years - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - for t = 1:12 % begin cycling over months in a year - %%%%% Compute potential evapotranspiration for current month after Thornthwaite: - if T(t,cyear) < 0 - Ep = 0; - elseif T(t,cyear)>=0 && T(t,cyear) < 26.5 - Ep = 16*L(t)*(10*T(t,cyear)/I)^a; - elseif T(t,cyear) >= 26.5 - Ep = -415.85 + 32.25*T(t,cyear) - .43* T(t,cyear)^2; - end - potEv(t,cyear) = Ep; - %%%%% Now calculate soil moisture according to the CPC Leaky Bucket model - %%%%% (see J. Huang et al, 1996). 
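        % Editor's note (added comment; not part of the original file): each of
        % the three branches below applies the same monthly water-balance update,
        %     M(t) = M_prev + (P - Etrans - R - G)/rootd,
        % and they differ only in what stands in for the previous month's
        % moisture M_prev: M(t-1,cyear) within a year, M(12,cyear-1) in January
        % of later years, or the initial condition M0 in the very first month.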
- if t > 1 - % evapotranspiration: - Etrans = Ep*M(t-1,cyear)*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alph/(1+mu_th)*M(t-1,cyear)*rootd; - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = P(t,cyear)*(M(t-1,cyear)*rootd/(Mmax*rootd))^m_th +... - (alph/(1+mu_th))*M(t-1,cyear)*rootd; - dWdt = P(t,cyear) - Etrans - R - G; - M(t,cyear) = M(t-1,cyear) + dWdt/rootd; - elseif t == 1 && cyear > 1 - % evapotranspiration: - Etrans = Ep*M(12,cyear-1)*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alph/(1+mu_th)*M(12,cyear-1)*rootd; - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = P(t,cyear)*(M(12,cyear-1)*rootd/(Mmax*rootd))^m_th +... - (alph/(1+mu_th))*M(12,cyear-1)*rootd; - dWdt = P(t,cyear) - Etrans - R - G; - M(t,cyear) = M(12,cyear-1) + dWdt/rootd; - elseif t == 1 && cyear == 1 - if M0 < 0; M0 = .20; end - % evapotranspiration (take initial soil moisture value to be 200 mm) - Etrans = Ep*M0*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alph/(1+mu_th)*(M0*rootd); - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = P(t,cyear)*(M0*rootd/(Mmax*rootd))^m_th + (alph/(1+mu_th))*M0*rootd; - dWdt = P(t,cyear) - Etrans - R - G; - M(t,cyear) = M0 + dWdt/rootd; - end - % error-catching: - if M(t,cyear) <= Mmin; M(t,cyear) = Mmin; end - if M(t,cyear) >= Mmax; M(t,cyear) = Mmax; end - if isnan(M(t,cyear))==1; M(t,cyear) = Mmin; end - end % end month (t) cycle - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -end % end year cycle -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%% SCALED DAYLENGTH %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [gE] = Compute_gE(phi) -% Just what it sounds like... computes just gE from latitude a la VS-Lite, -% but without all the other stuff. -% -% Usage: gE = Compute_gE(phi) -% -% SETW 3/8/13 - -% -gE = NaN(12,1); -% -% Compute normalized daylength (neglecting small difference in calculation for leap-years) -latr = phi*pi/180; % change to radians -ndays = [0 31 28 31 30 31 30 31 31 30 31 30 31]; -cdays = cumsum(ndays); -sd = asin(sin(pi*23.5/180) * sin(pi * (((1:365) - 80)/180)))'; % solar declination -y = -tan(ones(365,1).* latr) .* tan(sd); -if ~isempty(find(y>=1,1)) - y(y>=1) = 1; -end -if ~isempty(find(y<=-1,1)) - y(y<=-1) = -1; -end -hdl = acos(y); -dtsi = (hdl.* sin(ones(365,1).*latr).*sin(sd))+(cos(ones(365,1).*latr).*cos(sd).*sin(hdl)); -ndl=dtsi./max(dtsi); % normalized day length - -% calculate mean monthly daylength (used for evapotranspiration in soil moisture calcs) -jday = cdays(1:12) +.5*ndays(2:13); -m_star = 1-tand(phi)*tand(23.439*cos(jday*pi/182.625)); -mmm = NaN*ones(1,12); -for mo = 1:12 - if m_star(mo) < 0 - mmm(mo) = 0; - elseif m_star(mo) >0 && m_star(mo) < 2 - mmm(mo) = m_star(mo); - elseif m_star(mo) > 2 - mmm(mo) = 2; - end -end -%nhrs = 24*acosd(1-mmm)/180; % the number of hours in the day in the middle of the month -%L = (ndays(2:13)/30).*(nhrs/12); -% -for t = 1:12 - gE(t) = mean(ndl(cdays(t)+1:cdays(t+1),1)); -end -%%%%%%%%%%%%%%% -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% diff --git a/3. PSMs/Specific Forward Models/VSlite (defunct)/estimate_vslite_params_v2_3.m b/3. PSMs/Specific Forward Models/VSlite (defunct)/estimate_vslite_params_v2_3.m deleted file mode 100644 index d7a30668..00000000 --- a/3. 
PSMs/Specific Forward Models/VSlite (defunct)/estimate_vslite_params_v2_3.m +++ /dev/null @@ -1,1730 +0,0 @@ -function [T1,T2,M1,M2,varargout] = estimate_vslite_params_v2_3(T,P,phi,RW,varargin) -% REVISED as per reviews in CPD... April 2013 -% -% Given calibration-interval temperature, precipitation, and ring-width data, -% and site latitude, vslite_bayes_param_cal.m performes a Bayesian parameter -% estimation of the growth response parameters T1, T2, M1 and M2. The -% default prior distributions are based on a survey of current literature -% pertaining to biological growth thresholds in trees; however uniform or -% user-defined four-parameter-beta distributions may also optionally be -% used as priors. The scheme also assumes independent, Gaussian model errors, -% and the amplitude of the error variance may also be estimated. -% -% Basic Usage: -% Basic: [T1,T2,M1,M2] = vslite_bayes_param_cal(T,P,phi,RW) -% -% Basic Inputs: -% T: monthly calibration-interval temperature, dimension 12 x number of calibration years. -% P: monthly calibration-interval precipitation, dimension 12 x number of cal. years. -% phi: site latitude in degrees N. -% RW: standardized annual calibration-interval ring-width. -% -% Basic Ouptuts: -% T1, T2, M1, M2: point estimates given by the median of their respective -% posterior distributions. -% -% Advanced Usage: [T1,T2,M1,M2,varargout] = vslite_bayes_param_cal(T,P,phi,RW,varargin) -% -% Advanced Optional Inputs: -% Must be specified as property/value pairs. Valid property/value pairs are: -% 'errormod' Error model. Options are [0], [1], and [2] for white Gaussian -% noise, AR(1) model, or AR(2) model. Default is [0]. -% 'gparscalint' Index of years to use to estimate the growth response -% parameters T1, T2, M1, M2. Default is all years. -% 'eparscalint' Index of years to use to estimate the parameters of the -% error model if these parameters are to be estimated. -% Must be contiguous if using AR(1) error model. Default -% is all years (may underestimate error if not disjoint -% from interval used to fit growth response parameters -% as specified in 'gparscalint') -% 'errorpars' vector holding values of error model parameters is user -% wishes to fix their values rather than estimate them. -% For errormod == 0 (white noise model), values is a scalar -% with fixed value for sigma2w; for errormod == 1 (AR(1)), -% errorpars = [phi1 tau^2]; for errormod == 2 (AR(2)), -% errorpars = [phi1 phi2 tau^2]. No default (since default is -% to estimate parameters of a white noise error model). -% 'pt_ests' Choices are ['mle'] or ['med'] to return either the -% posterior draw with the maximum data likelihood or -% the posterior median as the point estimate for the -% growth reseponse parameter estimates and the error -% model parameter estimates. Default is 'mle'. -% 'hydroclim' Is the hydroclimate input variable (2nd input) precipitation ['P'] -% or soil moisture ['M']? Default is ['P']; CPC Leaky -% Bucket model is then used to estimate M from input T -% and input P. -% 'substep' If hydroclim == 'P', then 'substep' is logical 0/1 -% depending on whether leaky bucket model without/with -% substepping is preferred. Default is [0]. -% 'intwindow' VS-Lite integration window, specified as vector [I_0 I_f] -% Default is [0 12]. -% 'nsamp' 200<=integer<=10,000 fixing number of MCMC iterations. -% Default is [1000]. -% 'nbi' Number of burn-in samples. Default is round(200). -% 'nchain' 3<=integer<=number of comp threads for the computation -% of Rhat. Default is 3. 
-% 'gparpriors' growth parameter priors. Either can be 'fourbet' for a -% set of informative four-parameter beta-distributed priors, -% or can be 'uniform'. In either case parameters can be -% specified by user, or else a default set will be used -% (see the following 5 property/value pairs for more -% information). -% see Tolwinski-Ward et al 2013, 'Bayesian parameter -% estimation and interpretation for an intermediate -% model of tree-ring width' for details on literature-based -% parameters for the four-parameter beta distributed -% priors. -% 'T1priorsupp' 2x1 vector with elements giving lower and upper bounds -% for support of uniform T1 prior. If not included in input -% argument list, default used is [0.0 8.5] -% 'T2priorsupp' " T2 prior. Default is [9.0 20.0] -% 'M1priorsupp' " M1 prior. Default is [0.01 0.03] -% 'M2priorsupp' " M2 prior. Default is [0.1 0.5] -% 'fourbetparams' is a 4x4 matrix specifying parameters of the -% four-parameter beta distributed priors. First row -% gives parameters of T1, second row gives T2, third -% row gives parameters for M1, and 4th row params of -% M2. Columns 1 and 2 give the two shape parameters, -% while columns 3 and 4 give the lower and upper bounds -% on the interval containing the transformed beta -% distribution. If not included in input arguent list, -% default parameter set based on current literature is -% [9 5 0 9 -% 3.5 3.5 10 24 -% 1.5 2.8 0.0 0.1 -% 1.5 2.5 0.1 0.5] -% (See Tolwinski-Ward et al., 'Bayesian parameter estimation -% and interpretation for an intermediate model of tree-ring -% width', for justification of these choices.) -% 'convthresh' scalar value greater than 0. Threshold for MCMC -% convergence; warning is displayed if abs(Rhat-1)>convthresh. -% Default value is [1.1]. -% 'ploutput' logical 0/1; plot priors, posteriors, and estimates? -% Default is [0]. -% 'verbose' logical 0/1; print progress to screen? Default is [1]. -% -% Advanced Optional Ouptuts (must be specified in the following order): -% T1dist, T2dist, M1dist, M2dist: returns the entire numerical posterior distributions -% of the growth response parameters if the user wants to check for -% convergence, autocorrelation, multi-modality, or point estimates -% of parameters other than the median. -% Rhats: Returns the convergence statistics associated with T1, T2, M1, M2, -% and sigma2rw if it was estimated. -% convwarning: logical 0/1 depending on whether all Rhat values were outside of -% the threshold distance of 1. -% -- Next outputs for white noise error model (errormod==0): -- -% sig2rw: point estimate of model error variance -% sigma2rwdist: returns the entire numerical posterior distribution -% -- Next outputs for AR(1) error model (errormod==1): -- -% phi1: Point estimate of AR(1) coefficient -% phi1dist: Numerical distribution of AR(1) coefficient samples -% tau2: Point estimate of error model innovations variance -% tau2dist: Nunderical distribution of error model innovation -% variance samples -% -% -% SETW 9/20/2011: version 1.0. Estimates T1, T2, M1, M2 for fixed sigma2w, assuming -% normally- and independent and identically distributed model residuals. -% SETW 4/15/2012: version 2.0. Added simultaneous estimation of sigma2w under assumption -% of inverse-gamma prior, literature-based priors, Rhat convergence -% metric, and output plots. -% SETW 12/15/2012: version 2.1 for publication; added options for user-defined -% prior distributions. 
-% SETW 5/10/2013: version 2.2: Revised data-level model and added options for white or -% AR(1) error models following reviewer comments in -% Climate of the Past Discussions; options for flexible -% calibration-intervals for growth parameters and error -% model parameters also added; MLE added as option for point estimates; -% version 2.3: additional commenting added; option to -% condition on user-supplied input soil moisture data included -% as opposed to necessarily estimating M from T & P via -% Leaky Bucket. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -if nargin > 4 % read in advanced options if user-specified: - % first fill values in with defaults: - errormod = 0; - gparscalint = 1:length(RW); - eparscalint = 1:length(RW); - pt_ests = 'mle'; - hydroclim = 'P'; - substep = 0; - intwindow = [0 12]; - nsamp = 1000; - nbi = 200; - nchain = 3; - gparpriors = 'fourbet'; - aT1 = 9; bT1 = (aT1+1)/2; - slpT1 = 9; intT1 = 0; - aT2 = 3.5; bT2 = aT2; - slpT2 = 14; intT2 = 10; - slpM1 = 0.1; intM1 = 0.0; - aM1 = 1.5; bM1 = (1-.035/slpM1)*aM1/(.035/slpM1); % .035 is target mean - slpM2 = 0.4; intM2 = 0.1; - aM2 = 1.5; bM2 = (1-(.25-intM2)/slpM2)*aM2/((.25-intM2)/slpM2); % .25 is target mean - convthresh = .1; - ploutput = 0; - verbose = 1; - % then over-write defaults if user-specified: - Nvararg = length(varargin); - for i = 1:Nvararg/2 - namein = varargin{2*(i-1)+1}; - valin = varargin{2*i}; - switch namein - case 'errormod' - errormod = valin; - case 'gparscalint' - gparscalint = valin; - case 'eparscalint' - eparscalint = valin; - case 'errorpars' - errorpars = valin; - case 'pt_ests' - pt_ests = valin; - case 'hydroclim' - hydroclim = valin; - case 'substep' - substep = valin; - case 'intwindow' - intwindow = valin; - case 'nsamp' - nsamp = valin; - case 'nbi' - nbi = valin; - case 'nchain' - nchain = valin; - case 'gparpriors' - gparpriors = valin; - if strcmp(gparpriors,'uniform') - % read in default supports for uniform priors: - aT1 = 0; bT1 = 9; - aT2 = 10; bT2 = 24; - aM1 = 0; bM1 = .1; - aM2 = .1; bM2 = .5; - end - case 'T1priorsupp' - aT1 = valin(1); bT1 = valin(2); - case 'T2priorsupp' - aT2 = valin(1); bT2 = valin(2); - case 'M1priorsupp' - aM1 = valin(1); bM1 = valin(2); - case 'M2priorsupp' - aM2 = valin(1); bM2 = valin(2); - case 'fourbetparams' - aT1 = valin(1,1); bT1 = valin(1,2); - intT1 = valin(1,3); slpT1 = valin(1,4)-valin(1,3); - aT2 = valin(2,1); bT2 = valin(2,2); - intT2 = valin(2,3); slpT2 = valin(2,4)-valin(2,3); - aM1 = valin(3,1); bM1 = valin(3,2); - intM1 = valin(3,3); slpM1 = valin(3,4)-valin(3,3); - aM2 = valin(4,1); bM2 = valin(4,2); - intM2 = valin(4,3); slpM2 = valin(4,4)-valin(4,3); - case 'convthresh' - convthresh = valin; - case 'ploutput' - ploutput = valin; - case 'verbose' - verbose = valin; - end - end -else % otherwise, read in defaults: - errormod = 0; - gparscalint = 1:length(RW); - eparscalint = 1:length(RW); - pt_ests = 'mle'; - hydroclim = 'P'; - substep = 0; - intwindow = [0 12]; - nsamp = 1000; - nbi = 200; - nchain = 3; - gparpriors = 'fourbet'; - aT1 = 9; bT1 = (aT1+1)/2; - slpT1 = 9; intT1 = 0; - aT2 = 3.5; bT2 = aT2; - slpT2 = 14; intT2 = 10; - slpM1 = 0.1; intM1 = 0.0; - aM1 = 1.5; bM1 = (1-.035/slpM1)*aM1/(.035/slpM1); % .035 is target mean - slpM2 = 0.4; intM2 = 0.1; - aM2 = 1.5; bM2 = (1-(.25-intM2)/slpM2)*aM2/((.25-intM2)/slpM2); % .25 is target mean - convthresh = .1; - ploutput = 0; - verbose = 1; -end -% -% take zscore of RW data over interval used for fitting to fulfill assumptions of 
model error/noise -% structure -RW = zscore(RW); -% -% Compute soil moisture: -Mmax =.76; % maximum soil moisture; v/v -Mmin =.01; % minimum soil moisture; v/v -muth = 5.8; % mu from thornthwaite's Ep scheme -mth = 4.886; % m from thornthwaite's Ep scheme -alpha = .093; -Minit = 200; % initial value for soil moisture; v/v -dr = 1000; % root depth -% -Nyrs = size(T,2); -% -if strcmp(hydroclim,'P') % if second input variable is precip, - % then estimate soil moisture from T and P inputs via Leaky Bucket: - if substep == 1; - [M,potEv,ndl,cdays] =... - leakybucket_submonthly(1,Nyrs,phi,T,P,Mmax,Mmin,alpha,mth,muth,dr,Minit/dr); - elseif substep == 0 - [M,potEv,ndl,cdays] =... - leakybucket_monthly(1,Nyrs,phi,T,P,Mmax,Mmin,alpha,mth,muth,dr,Minit/dr); - end -elseif strcmp(hydroclim,'M') % if user supplies soil moisture estimate from elsewhere, - % read in the soil moisture from the second input variable: - M = P; -end -% Compute monthly growth response to insolation, gE: -gE = NaN(12,1); -for t = 1:12 - gE(t) = mean(ndl(cdays(t)+1:cdays(t+1),1)); -end -% -% SAMPLE: -for chain = 1:nchain - % Storage space for realizations of parameters - Tt = NaN(1,nsamp+nbi); - To = NaN(1,nsamp+nbi); - Mt = NaN(1,nsamp+nbi); - Mo = NaN(1,nsamp+nbi); - logLdata = NaN(1,nsamp+nbi); - % - if verbose; disp(['Working on chain ' num2str(chain)... - ' out of ' num2str(nchain) '...']); end - % - % Initialize the MCMC: - gT = NaN(size(T)); - gM = NaN(size(M)); - sim = 1; - % - % Initialize growth response parameters: - if strcmp(gparpriors,'uniform') - % Initialize Tt and To with draws from priors: - Tt(sim) = unifrnd(aT1,bT1); - To(sim) = unifrnd(aT2,bT2); - % Initialize Mt and Mo with draws from priors: - Mt(sim) = unifrnd(aM1,bM1); - Mo(sim) = unifrnd(aM2,bM2); - elseif strcmp(gparpriors,'fourbet') - % Initialize Tt and To with draws from priors: - Tt(sim) = slpT1*betarnd(aT1,bT1)+intT1; - To(sim) = slpT2*betarnd(aT2,bT2)+intT2; - % Initialize Mt and Mo with draws from priors: - Mt(sim) = slpM1*betarnd(aM1,bM1)+intM1; - Mo(sim) = slpM2*betarnd(aM2,bM2)+intM2; - end - % - gT(TTo(sim)) = 1; - gT(T>Tt(sim)&TTt(sim)&TMo(sim)) = 1; - gM(M>Mt(sim)&MMt(sim)&M 1-phi1(1)^2 - phi1(1) = unifrnd(0,1); - tau2(1) = unifrnd(0,1); - end - % hold current values of error model parameters: - errorpars(1) = phi1(1); errorpars(2) = tau2(1); - end - % - while sim < nsamp+nbi+1 - % - switch gparpriors - case 'uniform' - Tt(sim) = Tt_U_aux(Tt(sim-1),T,To(sim-1),gM,RW',errorpars,... - gE,Gterms,aT1,bT1,intwindow,gparscalint); - case 'fourbet' - Tt(sim) = Tt_lit_aux(Tt(sim-1),T,To(sim-1),gM,RW',errorpars,... 
- gE,Gterms,aT1,bT1,slpT1,intT1,intwindow,gparscalint); - end - gT(TTo(sim-1)) = 1; - gT(T>Tt(sim)&TTt(sim)&TTo(sim)) = 1; - gT(T>Tt(sim)&TTt(sim)&TMo(sim-1)) = 1; - gM(M>Mt(sim)&MMt(sim)&MMo(sim)) = 1; - gM(M>Mt(sim)&MMt(sim)&M1 mle_ind = mle_ind(1); end - T1 = Ttensemb(mle_ind); T2 = Toensemb(mle_ind); - M1 = Mtensemb(mle_ind); M2 = Moensemb(mle_ind); - if errormod == 0 - sig2rw = sig2rwensemb(mle_ind); - elseif errormod == 1 - phi1hat = phi1ensemb(mle_ind); - tau2hat = tau2ensemb(mle_ind); - end -end -% -eval(['RhatT1 = gelmanrubin92(nsamp,nbi,' Ttchains ');']); -eval(['RhatT2 = gelmanrubin92(nsamp,nbi,' Tochains ');']); -eval(['RhatM1 = gelmanrubin92(nsamp,nbi,' Mtchains ');']); -eval(['RhatM2 = gelmanrubin92(nsamp,nbi,' Mochains ');']); -if errormod == 0 - eval(['Rhatsig2rw = gelmanrubin92(nsamp,nbi,' sig2rwchains ');']); -elseif errormod == 1 - eval(['Rhatphi1 = gelmanrubin92(nsamp,nbi,' phi1chains ');']); - eval(['Rhattau2 = gelmanrubin92(nsamp,nbi,' tau2chains ');']); -end -% -Rhats = [RhatT1 RhatT2 RhatM1 RhatM2]; -if verbose == 1 - if errormod == 0 - Rhats = [Rhats Rhatsig2rw]; - disp(' Rhat for T1, T2, M1, M2, sigma2rw:'); - disp([RhatT1 RhatT2 RhatM1 RhatM2 Rhatsig2rw]); - elseif errormod == 1 - Rhats = [Rhats Rhatphi1 Rhattau2]; - disp(' Rhat for T1, T2, M1, M2, phi1, tau2:'); - disp([RhatT1 RhatT2 RhatM1 RhatM2 Rhatphi1 Rhattau2]); - end -end -if any(abs(Rhats-1)>convthresh) - disp('Gelman and Rubin metric suggests MCMC has not yet converged to within desired threshold;') - disp('Parameter estimation code should be re-run using a greater number of MCMC iterations.') - disp('(See ''nsamp'' advanced input option.)') - convwarning = 1; -else - convwarning = 0; -end -% -%%%% NOTE!!! Output graphics not yet revised to support estimates of error -%%%% model parameters. 
SETW 4/4/2013 -if ploutput == 1 % if user requested plot of results: - figure; - % - if strcmp(gparpriors,'uniform') - set(gcf,'position',[680 320 560 775]); - subplot(321); - plot(aT1:((bT1-aT1)/100):bT1,(1/(bT1-aT1))*ones(1,101)); hold on; - [f,xi] = ksdensity(Ttensemb); - plot(xi,f,'r'); - % legend('prior','psterior'); - title('T1','fontsize',14); xlim([aT1 bT1]) - % - subplot(322); - plot(aT2:((bT2-aT2)/100):bT2,(1/(bT2-aT2))*ones(1,101)); hold on; - [f,xi] = ksdensity(Toensemb); - plot(xi,f,'r'); - % legend('prior','psterior'); - title('T2','fontsize',14); xlim([aT2 bT2]) - %%%%%%%%%%%%%%%%%%%%% - %figure; - % - nbins = 100; - subplot(323); - plot(aM1:((bM1-aM1)/100):bM1,(1/(bM1-aM1))*ones(1,101)); hold on; - [f,xi] = ksdensity(Mtensemb); - plot(xi,f,'r'); - % legend('prior','psterior'); - title('M1','fontsize',14); xlim([aM1 bM1]) - % - subplot(324); - plot(aM2:((bM2-aM2)/100):bM2,(1/(bM2-aM2))*ones(1,101)); hold on; - [f,xi] = ksdensity(Moensemb); - plot(xi,f,'r'); - % legend('prior','psterior'); - title('M2','fontsize',14); xlim([aM2 bM2]) - %%%%%%%%%%%%%%%%%%%%% - %figure; - subplot(313) - % - rlim = wv2/(wv1+1)+3*sqrt(wv2^2/((wv1-2)*(wv1-1)^2)); % 3 standard deviations to the right of the prior mode - xi = 0:rlim/300:rlim; - f = ((wv2^wv1)/gamma(wv1))*(xi.^(-wv1-1)).*exp(-wv2./xi); - plot(xi,f); - hold on - [f,xi] = ksdensity(sig2rwensemb); - plot(xi,f,'r'); - % legend('prior','posterior'); - title('\sigma^2_W','fontsize',14); xlim([0 rlim]) - % - suptitle('Prior (blue) and posterior (red) densities of VS-Lite parameters') - elseif strcmp(gparpriors,'fourbet') - set(gcf,'position',[680 320 560 775]); - subplot(321); - % plot prior: - xj = 0:.01:1; - for j = 1:length(xj) - T1prior(j) = betapdf(xj(j),aT1,bT1); - end - T1prior = T1prior/slpT1; - plot(xj*slpT1+intT1,T1prior); hold on - % plot smoothed histogram of posterior draws: - [f,xi] = ksdensity(Ttensemb); - plot(xi,f,'r'); - % legend('prior','psterior'); - title('T1','fontsize',14); xlim([intT1 slpT1+intT1]) - % - subplot(322); - % plot prior: - xj = 0:.01:1; - for j = 1:length(xj) - T2prior(j) = betapdf(xj(j),aT2,bT2); - end - T2prior = T2prior/slpT2; - plot(xj*slpT2+intT2,T2prior); hold on - % plot smoothed histogram of posterior draws: - [f,xi] = ksdensity(Toensemb); - plot(xi,f,'r'); - % legend('prior','psterior'); - title('T2','fontsize',14); xlim([intT2 slpT2+intT2]) - % - subplot(323); - % plot prior: - xj = 0:.01:1; - for j = 1:length(xj) - M1prior(j) = betapdf(xj(j),aM1,bM1); - end - M1prior = M1prior/slpM1; - plot(xj*slpM1+intM1,M1prior); hold on - % plot smoothed histogram of posterior draws: - [f,xi] = ksdensity(Mtensemb); - plot(xi,f,'r'); - % legend('prior','psterior'); - title('M1','fontsize',14); xlim([intM1 slpM1+intM1]) - % - subplot(324); - % plot prior: - xj = 0:.01:1; - for j = 1:length(xj) - M2prior(j) = betapdf(xj(j),aM2,bM2); - end - M2prior = M2prior/slpM2; - plot(xj*slpM2+intM2,M2prior); hold on - % plot smoothed histogram of posterior draws: - [f,xi] = ksdensity(Toensemb); - plot(xi,f,'r'); - % legend('prior','psterior'); - title('T2','fontsize',14); xlim([intT2 slpT2+intT2]) - % plot smoothed histogram of posterior draws: - [f,xi] = ksdensity(Moensemb); - plot(xi,f,'r'); - % legend('prior','psterior'); - title('M2','fontsize',14); xlim([intM2 slpM2+intM2]) - %%%%%%%%%%%%%%%%%%%%% - subplot(313) - % - rlim = wv2/(wv1+1)+3*sqrt(wv2^2/((wv1-2)*(wv1-1)^2)); % 3 standard deviations to the right of the prior mode - xi = 0:rlim/300:rlim; - f = ((wv2^wv1)/gamma(wv1))*(xi.^(-wv1-1)).*exp(-wv2./xi); - 
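        % Editor's note (added comment; not part of the original file): the curve
        % f computed just above is the inverse-gamma prior density for the
        % white-noise error variance sigma^2_W, with shape wv1 and scale wv2
        % (set earlier in this function); wv2/(wv1+1) is the prior mode, so rlim
        % extends the x-axis roughly three prior standard deviations past it.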
plot(xi,f); - hold on - [f,xi] = ksdensity(sig2rwensemb); - plot(xi,f,'r'); - % legend('prior','posterior'); - title('\sigma^2_W','fontsize',14); xlim([0 rlim]) - % - suptitle('Prior (blue) and posterior (red) densities of VS-Lite parameters') - end -end -% -if nargout > 0 - varargout{1} = Ttensemb; varargout{2} = Toensemb; - varargout{3} = Mtensemb; varargout{4} = Moensemb; - varargout{5} = Rhats; varargout{6} = convwarning; - if errormod == 0 - varargout{7} = sig2rw; varargout{8} = sig2rwensemb; - elseif errormod == 1 - varargout{7} = phi1hat; varargout{8} = tau2hat; - varargout{9} = phi1ensemb; varargout{10} = tau2ensemb; - end - -end -% -end -% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%%% SOIL MOISTURE SUBROUTINES %%%%%%%%%%%%%%%%%%%%%%%%%%% -function [M,potEv,ndl,cdays] = leakybucket_submonthly(syear,eyear,phi,T,P,... - Mmax,Mmin,alph,m_th,mu_th,rootd,M0) -% leackybucket_submonthly.m - Simulate soil moisture; substeps within monthly timesteps -% to better capture nonlinearities and improve moisture estimates. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Usage: [M,potEv,ndl,cdays] = leakybucket_submonthly(syear,eyear,phi,T,P,... -% Mmax,Mmin,alph,m_th,mu_th,rootd,M0) -% outputs simulated soil moisture and potential evapotranspiration. -% -% Inputs: -% syear = start year of simulation. -% eyear = end year of simulation. -% phi = latitude of site (in degrees N) -% T = (12 x Nyrs) matrix of ordered mean monthly temperatures (in degEes C) -% P = (12 x Nyrs) matrix of ordered accumulated monthly precipitation (in mm) -% Mmax = scalar maximum soil moisture held by the soil (in v/v) -% Mmin = scalar minimum soil moisture (for error-catching) (in v/v) -% alph = scalar runoff parameter 1 (in inverse months) -% m_th = scalar runoff parameter 3 (unitless) -% mu_th = scalar runoff parameter 2 (unitless) -% rootd = scalar root/"bucket" depth (in mm) -% M0 = initial value for previous month's soil moisture at t = 1 (in v/v) -% -% Outputs: -% M = soil moisture computed via the CPC Leaky Bucket model (in v/v, 12 x Nyrs) -% potEv = potential evapotranspiration computed via Thornthwaite's 1947 scheme (in mm) -% -% SETW+ N. Graham and K. Georgakakos 2011 - -% modified by Nick G. and K. Georgakakos - to sub-step the monthly steps. Also this version has added -% soil moisture initial conditions for restarts, or spin-up. Hands back monthly soil moisture -% and summer soil moisture as well - see varargout. Nick G. 2011/06 -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -iyear = syear:eyear; -nyrs = length(iyear); -% Storage for growth response output variables (size [12 x Nyears]): -M = NaN(12,nyrs); -potEv = NaN(12,nyrs); - -% ADDED BY NICK -if(M0 < 0.) 
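    % Editor's note (added comment; not part of the original file): a negative
    % M0 is treated as "no initial condition supplied" and replaced with a
    % default initial soil moisture of 200 mm, converted to volumetric units
    % (v/v) by dividing by the bucket depth rootd (in mm).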
- M0=200/rootd; -end - -% Compute normalized daylength (neglecting small difference in calculation for leap-years) -latr = phi*pi/180; % change to radians -ndays = [0 31 28 31 30 31 30 31 31 30 31 30 31]; -cdays = cumsum(ndays); -sd = asin(sin(pi*23.5/180) * sin(pi * (((1:365) - 80)/180)))'; % solar declination -y = -tan(ones(365,1).* latr) .* tan(sd); -if ~isempty(find(y>=1,1)) - y(y>=1) = 1; -end -if ~isempty(find(y<=-1,1)) - y(y<=-1) = -1; -end -hdl = acos(y); -dtsi = (hdl.* sin(ones(365,1).*latr).*sin(sd))+(cos(ones(365,1).*latr).*cos(sd).*sin(hdl)); -ndl=dtsi./max(dtsi); % normalized day length - -% calculate mean monthly daylength (used for evapotranspiration in soil moisture calcs) -jday = cdays(1:12) +.5*ndays(2:13); -m_star = 1-tand(phi)*tand(23.439*cos(jday*pi/182.625)); -mmm = NaN*ones(1,12); -for mo = 1:12 - if m_star(mo) < 0 - mmm(mo) = 0; - elseif m_star(mo) >0 && m_star(mo) < 2 - mmm(mo) = m_star(mo); - elseif m_star(mo) > 2 - mmm(mo) = 2; - end -end -nhrs = 24*acosd(1-mmm)/180; % the number of hours in the day in the middle of the month -L = (ndays(2:13)/30).*(nhrs/12); % mean daylength in each month. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%% -- year cycle -- %%%% -% syear = start (first) year of simulation -% eyear = end (last) year of simulation -% cyear = year the model is currently working on -% iyear = index of simulation year - -for cyear=1:length(iyear) % begin cycling over years - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - for t = 1:12 % begin cycling over months in a year - %%%%% Compute potential evapotranspiration for current month after Thornthwaite: - if T(t,cyear) < 0 - Ep = 0; - elseif T(t,cyear)>=0 && T(t,cyear) < 26.5; - istar = (T(:,cyear)/5); istar(istar<0) = 0; - I = sum(istar.^1.514); - a = (6.75e-7)*I^3 - (7.71e-5)*I^2 + (1.79e-2)*I + .49; - Ep = 16*L(t)*(10*T(t,cyear)/I)^a; - elseif T(t,cyear) >= 26.5; - Ep = -415.85 + 32.25*T(t,cyear) - .43* T(t,cyear)^2; - end - potEv(t,cyear) = Ep; - %%%%% Now calculate soil moisture according to the CPC Leaky Bucket model - %%%%% (see J. Huang et al, 1996). Set n-steps according to 2 mm increments - %%%%% have to update alpha and Ep as well - 2 mm increments came from - %%%%% testing by K. Georgakakos, but one could use 5 or more, with less "accurate" results. - %%%%% Stepping is necessary because the parametization is linearized around init condition. - %%%%%%%%%%%%%%%%% - dp = 2.0; % mm of precip per increment - nstep = floor(P(t,cyear)/dp)+1; % number of sub-monthly substeps - Pinc = P(t,cyear)/nstep; % precip per substep - alphinc = alph/nstep; % runoff rate per substep time interval - Epinc = Ep/nstep; % potential evapotrans per substep. 
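        % Editor's note (added comment; not part of the original file): a worked
        % example of the substep bookkeeping above: for P(t,cyear) = 37 mm and
        % dp = 2 mm, nstep = floor(37/2)+1 = 19, so each substep receives
        % Pinc = 37/19 (about 1.95 mm) and uses the scaled rates alph/19 and Ep/19.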
- %%%%%%%%%%%%%%%%% - % handling for sm_init - if (t > 1) - M0=M(t-1,cyear); - elseif (t == 1 && cyear > 1) - M0=M(12,cyear-1); - end - sm0=M0; - - for istep=1:nstep - % evapotranspiration: - Etrans = Epinc*sm0*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alphinc/(1+mu_th)*sm0*rootd; - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = Pinc*(sm0*rootd/(Mmax*rootd))^m_th + (alphinc/(1+mu_th))*sm0*rootd; - dWdt = Pinc - Etrans - R - G; - sm1 = sm0 + dWdt/rootd; - % - sm0=max(sm1,Mmin); - sm0=min(sm0,Mmax); - end - M(t,cyear) = sm0; - % error-catching: - if M(t,cyear) <= Mmin; M(t,cyear) = Mmin; end; - if M(t,cyear) >= Mmax; M(t,cyear) = Mmax; end; - if isnan(M(t,cyear))==1; M(t,cyear) = Mmin; end; - end % end month (t) cycle -end % end year cycle - -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [M,potEv,ndl,cdays] =... - leakybucket_monthly(syear,eyear,phi,T,P,Mmax,Mmin,alph,m_th,mu_th,rootd,M0) -% leackybucket_monthly.m - Simulate soil moisture with coarse monthly time step. -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -% Usage: [M,potEv,ndl,cdays] = leakybucket_monthly(syear,eyear,phi,T,P,Mmax,Mmin,alph,m_th,mu_th,rootd,M0) -% outputs simulated soil moisture and potential evapotranspiration. -% -% Inputs: -% syear = start year of simulation. -% eyear = end year of simulation. -% phi = latitude of site (in degrees N) -% T = (12 x Nyrs) matrix of ordered mean monthly temperatures (in degEes C) -% P = (12 x Nyrs) matrix of ordered accumulated monthly precipitation (in mm) -% Mmax = scalar maximum soil moisture held by the soil (in v/v) -% Mmin = scalar minimum soil moisture (for error-catching) (in v/v) -% alph = scalar runoff parameter 1 (in inverse months) -% m_th = scalar runoff parameter 3 (unitless) -% mu_th = scalar runoff parameter 2 (unitless) -% rootd = scalar root/"bucket" depth (in mm) -% M0 = initial value for previous month's soil moisture at t = 1 (in v/v) -% -% Outputs: -% M = soil moisture computed via the CPC Leaky Bucket model (in v/v, 12 x Nyrs) -% potEv = potential evapotranspiration computed via Thornthwaite's 1947 scheme (in mm) -% -% SETW 2011 -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -iyear = syear:eyear; -nyrs = length(iyear); -% Storage for output variables (size [12 x Nyears]): -M = NaN(12,nyrs); -potEv = NaN(12,nyrs); - -% Compute normalized daylength (neglecting small difference in calculation for leap-years) -latr = phi*pi/180; % change to radians -ndays = [0 31 28 31 30 31 30 31 31 30 31 30 31]; -cdays = cumsum(ndays); -sd = asin(sin(pi*23.5/180) * sin(pi * (((1:365) - 80)/180)))'; % solar declination -y = -tan(ones(365,1).* latr) .* tan(sd); -if ~isempty(find(y>=1,1)) - y(y>=1) = 1; -end -if ~isempty(find(y<=-1,1)) - y(y<=-1) = -1; -end -hdl = acos(y); -dtsi = (hdl.* sin(ones(365,1).*latr).*sin(sd))+(cos(ones(365,1).*latr).*cos(sd).*sin(hdl)); -ndl=dtsi./max(dtsi); % normalized day length - -% calculate mean monthly daylength (used for evapotranspiration in soil moisture calcs) -jday = cdays(1:12) +.5*ndays(2:13); -m_star = 1-tand(phi)*tand(23.439*cos(jday*pi/182.625)); -mmm = NaN*ones(1,12); -for mo = 1:12 - if m_star(mo) < 0 - mmm(mo) = 0; - elseif m_star(mo) >0 && m_star(mo) < 2 - mmm(mo) = m_star(mo); - elseif m_star(mo) > 2 - mmm(mo) = 2; - end -end -nhrs = 24*acosd(1-mmm)/180; % the number of hours in the day in the middle of the month -L = 
(ndays(2:13)/30).*(nhrs/12); % mean daylength in each month. - -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%% -- year cycle -- %%%% -% syear = start (first) year of simulation -% eyear = end (last) year of simulation -% cyear = year the model is currently working on -% iyear = index of simulation year - -for cyear=1:nyrs % begin cycling over years - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - for t = 1:12 % begin cycling over months in a year - %%%%% Compute potential evapotranspiration for current month after Thornthwaite: - if T(t,cyear) < 0 - Ep = 0; - elseif T(t,cyear)>=0 && T(t,cyear) < 26.5; - istar = (T(:,cyear)/5); istar(istar<0) = 0; - I = sum(istar.^1.514); - a = (6.75e-7)*I^3 - (7.71e-5)*I^2 + (1.79e-2)*I + .49; - Ep = 16*L(t)*(10*T(t,cyear)/I)^a; - elseif T(t,cyear) >= 26.5; - Ep = -415.85 + 32.25*T(t,cyear) - .43* T(t,cyear)^2; - end - potEv(t,cyear) = Ep; - %%%%% Now calculate soil moisture according to the CPC Leaky Bucket model - %%%%% (see J. Huang et al, 1996). - if t > 1 - % evapotranspiration: - Etrans = Ep*M(t-1,cyear)*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alph/(1+mu_th)*M(t-1,cyear)*rootd; - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = P(t,cyear)*(M(t-1,cyear)*rootd/(Mmax*rootd))^m_th +... - (alph/(1+mu_th))*M(t-1,cyear)*rootd; - dWdt = P(t,cyear) - Etrans - R - G; - M(t,cyear) = M(t-1,cyear) + dWdt/rootd; - elseif t == 1 && cyear > 1 - % evapotranspiration: - Etrans = Ep*M(12,cyear-1)*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alph/(1+mu_th)*M(12,cyear-1)*rootd; - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = P(t,cyear)*(M(12,cyear-1)*rootd/(Mmax*rootd))^m_th +... - (alph/(1+mu_th))*M(12,cyear-1)*rootd; - dWdt = P(t,cyear) - Etrans - R - G; - M(t,cyear) = M(12,cyear-1) + dWdt/rootd; - elseif t == 1 && cyear == 1 - if M0 < 0; M0 = .20; end - % evapotranspiration (take initial soil moisture value to be 200 mm) - Etrans = Ep*M0*rootd/(Mmax*rootd); - % groundwater loss via percolation: - G = mu_th*alph/(1+mu_th)*(M0*rootd); - % runoff; contributions from surface flow (1st term) and subsurface (2nd term) - R = P(t,cyear)*(M0*rootd/(Mmax*rootd))^m_th + (alph/(1+mu_th))*M0*rootd; - dWdt = P(t,cyear) - Etrans - R - G; - M(t,cyear) = M0 + dWdt/rootd; - end - % error-catching: - if M(t,cyear) <= Mmin; M(t,cyear) = Mmin; end; - if M(t,cyear) >= Mmax; M(t,cyear) = Mmax; end; - if isnan(M(t,cyear))==1; M(t,cyear) = Mmin; end; - end % end month (t) cycle - %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -end % end year cycle -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%% CONDITIONAL PARAMETER SAMPLING SUBROUTINES %%%%%%%%%%% -function [Tt] = Tt_U_aux(Ttcurr,T,To,gM,RW,errorpars,gE,Gterms,att,btt,intwindow,cyrs) -% gM/gT is the matrix of gM for all the years and all the months of the previous simulation. -% T is the matrix of temperature for all years and all months of the previous simulation. -% att is the lower bound on the support of the uniform prior distribution for Tt -% btt is the upper bound on the support of the uniform prior distribution for Tt -% -% SETW 6/10/2010 -Ny = size(Gterms,2); -I_0 = intwindow(1); I_f = intwindow(2); -% -if 1 % Sample from prior as proposal distribution! 
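    % Editor's note (added comment; not part of the original file): because the
    % proposal Ttprop below is drawn directly from its prior, the prior and
    % proposal densities cancel in the Metropolis-Hastings acceptance ratio, so
    % HR reduces to a ratio of data likelihoods under the chosen error model
    % (white noise or AR(1)); the commented-out "geyer default" lines in the
    % companion *_lit_aux subroutines show the form needed otherwise.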
-    Ttprop = unifrnd(att,btt);
-    gTprop = NaN*ones(12,Ny);
-    gTprop(T<Ttprop) = 0;
-    gTprop(T>To) = 1;
-    gTprop(T<To&T>Ttprop) = (T(T<To&T>Ttprop)-Ttprop)/(To-Ttprop);
-    gprop = diag(gE)*min(gM,gTprop);
-    gcurr = Gterms;
-    %
-    %%%%%%%%%% account for variable integration window:
-    if I_0<0; % if we include part of the previous year in each year's modeled growth:
-        startmo = 13+I_0;
-        endmo = I_f;
-        prevseas = [mean(gprop(startmo:12,:),2) gprop(startmo:12,1:end-1)];
-        gprop = gprop(1:endmo,:);
-        gprop = [prevseas; gprop];
-        prevseas = [mean(gcurr(startmo:12,:),2) gcurr(startmo:12,1:end-1)];
-        gcurr = gcurr(1:endmo,:);
-        gcurr = [prevseas; gcurr];
-    else % no inclusion of last year's growth conditions in estimates of this year's growth:
-        startmo = I_0+1;
-        endmo = I_f;
-        gprop = gprop(startmo:endmo,:);
-        gcurr = gcurr(startmo:endmo,:);
-    end
-    %%%%%%%%%%%%
-    %
-    if length(errorpars) == 1 % White noise error model:
-        sigma2rw = errorpars;
-        expcurr = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr))).^2);
-        expprop = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop))).^2);
-        HR = exp(-.5*(expprop-expcurr)/sigma2rw);
-    elseif length(errorpars) == 2 % AR(1) error model:
-        phi1 = errorpars(1); tau2 = errorpars(2);
-        sigma2rw = tau2/(1-phi1^2);
-        %
-        [iSig] = makeAR1covmat(phi1,tau2,length(cyrs));
-        %
-        Wcurr = ((sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr)))';
-        Wprop = ((sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop)))';
-        %
-        logLprop = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wprop)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wprop);
-        logLcurr = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wcurr)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wcurr);
-        HR = exp(logLprop-logLcurr);
-    end
-end
-% accept or reject the proposal.
-if binornd(1,min(HR,1))==1
-    Tt = Ttprop;
-else
-    Tt = Ttcurr;
-end
-end
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-function [Tt] = Tt_lit_aux(Ttcurr,T,To,gM,RW,errorpars,gE,Gterms,att,btt,slp,int,intwindow,cyrs)
-% gM/gT is the matrix of gM for all the years and all the months of the previous simulation.
-% T is the matrix of temperature for all years and all months of the previous simulation.
-% att is the lower bound on the support of the uniform prior distribution for Tt
-% btt is the upper bound on the support of the uniform prior distribution for Tt
-%
-% SETW 6/10/2010
-Ny = size(Gterms,2);
-I_0 = intwindow(1); I_f = intwindow(2);
-%
-if 1 % Sample from prior as proposal distribution!
- Ttprop = slp*betarnd(att,btt)+int; - % upperlim = normcdf(int+slp,Ttcurr,.5); - % lowerlim = normcdf(int,Ttcurr,.5); - % U = unifrnd(lowerlim,upperlim); - % Ttprop = norminv(U,Ttcurr,.5); - % - gTprop = NaN*ones(12,Ny); - gTprop(TTo) = 1; - gTprop(TTtprop) = (T(TTtprop)-Ttprop)/(To-Ttprop); - gprop = diag(gE)*min(gM,gTprop); - gcurr = Gterms; - % - %%%%%%%%%% account for variable integration window: - if I_0<0; % if we include part of the previous year in each year's modeled growth: - startmo = 13+I_0; - endmo = I_f; - prevseas = [mean(gprop(startmo:12,:),2) gprop(startmo:12,1:end-1)]; - gprop = gprop(1:endmo,:); - gprop = [prevseas; gprop]; - prevseas = [mean(gcurr(startmo:12,:),2) gcurr(startmo:12,1:end-1)]; - gcurr = gcurr(1:endmo,:); - gcurr = [prevseas; gcurr]; - else % no inclusion of last year's growth conditions in estimates of this year's growth: - startmo = I_0+1; - endmo = I_f; - gprop = gprop(startmo:endmo,:); - gcurr = gcurr(startmo:endmo,:); - end - %%%%%%%%%%%% - if length(errorpars) == 1 % White noise error model: - sigma2rw = errorpars; - expcurr = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr))).^2); - expprop = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop))).^2); - HR = exp(-.5*(expprop-expcurr)/sigma2rw); - % - % % must also account for relative prior probs if using "geyer default" sampling: - % liklicurr = betapdf((Ttcurr-int)/slp,att,btt); - % likliprop = betapdf((Ttprop-int)/slp,att,btt); - % HR = (likliprop/liklicurr)*exp(-.5*(expprop-expcurr)/sigma2rw); - elseif length(errorpars) == 2 % AR(1) error model: - phi1 = errorpars(1); tau2 = errorpars(2); - sigma2rw = tau2/(1-phi1^2); - % - [iSig] = makeAR1covmat(phi1,tau2,length(cyrs)); - % - Wcurr = ((sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr)))'; - Wprop = ((sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop)))'; - % - logLprop = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wprop)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wprop); - logLcurr = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wcurr)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wcurr); - HR = exp(logLprop-logLcurr); - % - % % must also account for relative prior probs if using "geyer default" sampling: - % liklicurr = betapdf((Ttcurr-int)/slp,att,btt); - % likliprop = betapdf((Ttprop-int)/slp,att,btt); - % HR = (likliprop/liklicurr)*exp(logLprop-logLcurr); - end -end -% accept or reject the proposal. -if binornd(1,min(HR,1))==1 - Tt = Ttprop; -else - Tt = Ttcurr; -end -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [To] = To_U_aux(Tocurr,T,Tt,gM,RW,errorpars,gE,Gterms,ato,bto,intwindow,cyrs) -% gM/gT is the matrix of gM for all the years and all the months of the previous simulation. -% T is the matrix of temperature for all years and all months of the previous simulation. -% att is the lower bound on the support of the uniform prior distribution for Tt -% btt is the upper bound on the support of the uniform prior distribution for Tt -% -% SETW 6/10/2010 -Ny = size(Gterms,2); -I_0 = intwindow(1); I_f = intwindow(2); -if 1 % Sample from prior as proposal distribution! 
- Toprop = unifrnd(ato,bto); - gTprop = NaN*ones(12,Ny); - gTprop(TToprop) = 1; - gTprop(TTt) = (T(TTt)-Tt)/(Toprop-Tt); - gprop = diag(gE)*min(gM,gTprop); - gcurr = Gterms; - % - %%%%%%%%%% account for variable integration window: - if I_0<0; % if we include part of the previous year in each year's modeled growth: - startmo = 13+I_0; - endmo = I_f; - prevseas = [mean(gprop(startmo:12,:),2) gprop(startmo:12,1:end-1)]; - gprop = gprop(1:endmo,:); - gprop = [prevseas; gprop]; - prevseas = [mean(gcurr(startmo:12,:),2) gcurr(startmo:12,1:end-1)]; - gcurr = gcurr(1:endmo,:); - gcurr = [prevseas; gcurr]; - else % no inclusion of last year's growth conditions in estimates of this year's growth: - startmo = I_0+1; - endmo = I_f; - gprop = gprop(startmo:endmo,:); - gcurr = gcurr(startmo:endmo,:); - end - %%%%%%%%%%%% - % - if length(errorpars) == 1 % White noise error model: - sigma2rw = errorpars; - expcurr = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr))).^2); - expprop = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop))).^2); - HR = exp(-.5*(expprop-expcurr)/sigma2rw); - elseif length(errorpars) == 2 % AR(1) error model: - phi1 = errorpars(1); tau2 = errorpars(2); - sigma2rw = tau2/(1-phi1^2); - % - [iSig] = makeAR1covmat(phi1,tau2,length(cyrs)); - % - Wcurr = ((sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr)))'; - Wprop = ((sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop)))'; - % - logLprop = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wprop)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wprop); - logLcurr = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wcurr)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wcurr); - HR = exp(logLprop-logLcurr); - end -end -% accept or reject the proposal. -if binornd(1,min(HR,1))==1 - To = Toprop; -else - To = Tocurr; -end -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [To] = To_lit_aux(Tocurr,T,Tt,gM,RW,errorpars,gE,Gterms,ato,bto,slp,int,intwindow,cyrs) -% gM/gT is the matrix of gM for all the years and all the months of the previous simulation. -% T is the matrix of temperature for all years and all months of the previous simulation. -% att is the lower bound on the support of the uniform prior distribution for Tt -% btt is the upper bound on the support of the uniform prior distribution for Tt -% -% SETW 6/10/2010 -Ny = size(Gterms,2); -I_0 = intwindow(1); I_f = intwindow(2); -if 1 % Sample from prior as proposal distribution! 
- Toprop = slp*betarnd(ato,bto)+int; - % upperlim = normcdf(int+slp,Tocurr,.5); - % lowerlim = normcdf(int,Tocurr,.5); - % U = unifrnd(lowerlim,upperlim); - % Toprop = norminv(U,Tocurr,.5); - % - gTprop = NaN*ones(12,Ny); - gTprop(TToprop) = 1; - gTprop(TTt) = (T(TTt)-Tt)/(Toprop-Tt); - gprop = diag(gE)*min(gM,gTprop); - gcurr = Gterms; - % - %%%%%%%%%% account for variable integration window: - if I_0<0; % if we include part of the previous year in each year's modeled growth: - startmo = 13+I_0; - endmo = I_f; - prevseas = [mean(gprop(startmo:12,:),2) gprop(startmo:12,1:end-1)]; - gprop = gprop(1:endmo,:); - gprop = [prevseas; gprop]; - prevseas = [mean(gcurr(startmo:12,:),2) gcurr(startmo:12,1:end-1)]; - gcurr = gcurr(1:endmo,:); - gcurr = [prevseas; gcurr]; - else % no inclusion of last year's growth conditions in estimates of this year's growth: - startmo = I_0+1; - endmo = I_f; - gprop = gprop(startmo:endmo,:); - gcurr = gcurr(startmo:endmo,:); - end - %%%%%%%%%%%% - % - if length(errorpars) == 1 % White noise error model: - sigma2rw = errorpars; - expcurr = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr))).^2); - expprop = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop))).^2); - HR = exp(-.5*(expprop-expcurr)/sigma2rw); - % - % % must also account for relative prior probs if using "geyer default" sampling: - % liklicurr = betapdf((Tocurr-int)/slp,ato,bto); - % likliprop = betapdf((Toprop-int)/slp,ato,bto); - % HR = (likliprop/liklicurr)*exp(-.5*(expprop-expcurr)/sigma2rw); - elseif length(errorpars) == 2 % AR(1) error model: - phi1 = errorpars(1); tau2 = errorpars(2); - sigma2rw = tau2/(1-phi1^2); - % - [iSig] = makeAR1covmat(phi1,tau2,length(cyrs)); - % - Wcurr = ((sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr)))'; - Wprop = ((sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop)))'; - % - logLprop = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wprop)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wprop); - logLcurr = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wcurr)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wcurr); - HR = exp(logLprop-logLcurr); - % - % % must also account for relative prior probs if using "geyer default" sampling: - % liklicurr = betapdf((Ttcurr-int)/slp,ato,bto); - % likliprop = betapdf((Ttprop-int)/slp,ato,bto); - % HR = (likliprop/liklicurr)*exp(logLprop-logLcurr); - end -end -% accept or reject the proposal. -if binornd(1,min(HR,1))==1 - To = Toprop; -else - To = Tocurr; -end -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [Mt] = Mt_U_aux(Mtcurr,M,Mo,gT,RW,errorpars,gE,Gterms,amt,bmt,intwindow,cyrs) -% gM/gT is the matrix of gM for all the years and all the months of the previous simulation. -% M is the matrix of soil moisture for all years and all months of the previous simulation. -% amt is the lower bound on the support of the uniform prior distribution for Mt -% bmt is the upper bound on the support of the uniform prior distribution for Mt -% -% SETW 6/10/2010 -Ny = size(Gterms,2); -I_0 = intwindow(1); I_f = intwindow(2); -if 1 % Sample from prior as proposal distribution! 
- Mtprop = unifrnd(amt,bmt); - gWprop = NaN*ones(12,Ny); - gWprop(MMo) = 1; - gWprop(MMtprop) = (M(MMtprop)-Mtprop)/(Mo-Mtprop); - gprop = diag(gE)*min(gWprop,gT); - gcurr = Gterms; - % - %%%%%%%%%% account for variable integration window: - if I_0<0; % if we include part of the previous year in each year's modeled growth: - startmo = 13+I_0; - endmo = I_f; - prevseas = [mean(gprop(startmo:12,:),2) gprop(startmo:12,1:end-1)]; - gprop = gprop(1:endmo,:); - gprop = [prevseas; gprop]; - prevseas = [mean(gcurr(startmo:12,:),2) gcurr(startmo:12,1:end-1)]; - gcurr = gcurr(1:endmo,:); - gcurr = [prevseas; gcurr]; - else % no inclusion of last year's growth conditions in estimates of this year's growth: - startmo = I_0+1; - endmo = I_f; - gprop = gprop(startmo:endmo,:); - gcurr = gcurr(startmo:endmo,:); - end - %%%%%%%%%%%% - % - if length(errorpars) == 1 % White noise error model: - sigma2rw = errorpars; - expcurr = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr))).^2); - expprop = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop))).^2); - HR = exp(-.5*(expprop-expcurr)/sigma2rw); - elseif length(errorpars) == 2 % AR(1) error model: - phi1 = errorpars(1); tau2 = errorpars(2); - sigma2rw = tau2/(1-phi1^2); - % - [iSig] = makeAR1covmat(phi1,tau2,length(cyrs)); - % - Wcurr = ((sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr)))'; - Wprop = ((sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop)))'; - % - logLprop = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wprop)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wprop); - logLcurr = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wcurr)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wcurr); - HR = exp(logLprop-logLcurr); - end -end -% accept or reject the proposal. -if binornd(1,min(HR,1))==1 - Mt = Mtprop; -else - Mt = Mtcurr; -end -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [Mt] = Mt_lit_aux(Mtcurr,M,Mo,gT,RW,errorpars,gE,Gterms,amt,bmt,slp,int,intwindow,cyrs) -% gM/gT is the matrix of gM for all the years and all the months of the previous simulation. -% M is the matrix of soil moisture for all years and all months of the previous simulation. -% amt is the lower bound on the support of the uniform prior distribution for Mt -% bmt is the upper bound on the support of the uniform prior distribution for Mt -% -% SETW 6/10/2010 -Ny = size(Gterms,2); -I_0 = intwindow(1); I_f = intwindow(2); -if 1 % Sample from prior as proposal distribution! 
- Mtprop = slp*betarnd(amt,bmt)+int; - % upperlim = normcdf(int+slp,Mtcurr,.01); - % lowerlim = normcdf(int,Mtcurr,.01); - % U = unifrnd(lowerlim,upperlim); - % Mtprop = norminv(U,Mtcurr,.01); - % - gWprop = NaN*ones(12,Ny); - gWprop(MMo) = 1; - gWprop(MMtprop) = (M(MMtprop)-Mtprop)/(Mo-Mtprop); - gprop = diag(gE)*min(gWprop,gT); - gcurr = Gterms; - % - %%%%%%%%%% account for variable integration window: - if I_0<0; % if we include part of the previous year in each year's modeled growth: - startmo = 13+I_0; - endmo = I_f; - prevseas = [mean(gprop(startmo:12,:),2) gprop(startmo:12,1:end-1)]; - gprop = gprop(1:endmo,:); - gprop = [prevseas; gprop]; - prevseas = [mean(gcurr(startmo:12,:),2) gcurr(startmo:12,1:end-1)]; - gcurr = gcurr(1:endmo,:); - gcurr = [prevseas; gcurr]; - else % no inclusion of last year's growth conditions in estimates of this year's growth: - startmo = I_0+1; - endmo = I_f; - gprop = gprop(startmo:endmo,:); - gcurr = gcurr(startmo:endmo,:); - end - %%%%%%%%%%%% - % - if length(errorpars) == 1 % White noise error model: - sigma2rw = errorpars; - expcurr = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr))).^2); - expprop = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop))).^2); - HR = exp(-.5*(expprop-expcurr)/sigma2rw); - % - % % must also account for relative prior probs if using "geyer default" sampling: - % liklicurr = betapdf((Mtcurr-int)/slp,amt,bmt); - % likliprop = betapdf((Mtprop-int)/slp,amt,bmt); - % HR = (likliprop/liklicurr)*exp(-.5*(expprop-expcurr)/sigma2rw); - elseif length(errorpars) == 2 % AR(1) error model: - phi1 = errorpars(1); tau2 = errorpars(2); - sigma2rw = tau2/(1-phi1^2); - % - [iSig] = makeAR1covmat(phi1,tau2,length(cyrs)); - % - Wcurr = ((sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr)))'; - Wprop = ((sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop)))'; - % - logLprop = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wprop)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wprop); - logLcurr = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wcurr)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wcurr); - HR = exp(logLprop-logLcurr); - % - % % must also account for relative prior probs if using "geyer default" sampling: - % liklicurr = betapdf((Ttcurr-int)/slp,amt,bmt); - % likliprop = betapdf((Ttprop-int)/slp,amt,bmt); - % HR = (likliprop/liklicurr)*exp(logLprop-logLcurr); - end -end -% accept or reject the proposal. -if binornd(1,min(HR,1))==1 - Mt = Mtprop; -else - Mt = Mtcurr; -end -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [Mo] = Mo_U_aux(Mocurr,M,Mt,gT,RW,errorpars,gE,Gterms,amo,bmo,intwindow,cyrs) -% gM/gT is the matrix of gM for all the years and all the months of the previous simulation. -% M is the matrix of soil moisture for all years and all months of the previous simulation. -% amt is the lower bound on the support of the uniform prior distribution for Mt -% bmt is the upper bound on the support of the uniform prior distribution for Mt -% -% SETW 6/10/2010 -Ny = size(Gterms,2); -I_0 = intwindow(1); I_f = intwindow(2); -if 1 % Sample from prior as proposal distribution! 
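% Sketch of the AR(1) error-model likelihood comparison used by the samplers
% above, written out on its own. makeAR1covmat (defined later in this file)
% returns the approximate precision matrix of N AR(1) observations with
% parameters phi1 and tau2; alpha = sqrt(1 - tau2/(1-phi1^2)) scales the
% standardized simulated widths. Input names are illustrative stand-ins.
function[HR] = ar1HastingsRatioSketch(RW, Wcurr, Wprop, phi1, tau2)
% RW: observed ring widths (N x 1); Wcurr/Wprop: standardized simulated widths (N x 1)
N = length(RW);
iSig = makeAR1covmat(phi1, tau2, N);        % approximate AR(1) precision matrix
alpha = sqrt(1 - tau2/(1-phi1^2));          % signal fraction, since sigma2_w = tau2/(1-phi1^2)
logLprop = -0.5*(RW - alpha*Wprop)'*iSig*(RW - alpha*Wprop);
logLcurr = -0.5*(RW - alpha*Wcurr)'*iSig*(RW - alpha*Wcurr);
HR = exp(logLprop - logLcurr);              % prior terms again cancel for a prior proposal
end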
- Moprop = unifrnd(amo,bmo); - gWprop = NaN*ones(12,Ny); - gWprop(MMoprop) = 1; - gWprop(MMt) = (M(MMt)-Mt)/(Moprop-Mt); - gprop = diag(gE)*min(gWprop,gT); - gcurr = Gterms; - % - %%%%%%%%%% account for variable integration window: - if I_0<0; % if we include part of the previous year in each year's modeled growth: - startmo = 13+I_0; - endmo = I_f; - prevseas = [mean(gprop(startmo:12,:),2) gprop(startmo:12,1:end-1)]; - gprop = gprop(1:endmo,:); - gprop = [prevseas; gprop]; - prevseas = [mean(gcurr(startmo:12,:),2) gcurr(startmo:12,1:end-1)]; - gcurr = gcurr(1:endmo,:); - gcurr = [prevseas; gcurr]; - else % no inclusion of last year's growth conditions in estimates of this year's growth: - startmo = I_0+1; - endmo = I_f; - gprop = gprop(startmo:endmo,:); - gcurr = gcurr(startmo:endmo,:); - end - %%%%%%%%%%%% - % - if length(errorpars) == 1 % White noise error model: - sigma2rw = errorpars; - expcurr = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr))).^2); - expprop = sum((RW(cyrs)'-sqrt(1-sigma2rw)*(sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop))).^2); - HR = exp(-.5*(expprop-expcurr)/sigma2rw); - elseif length(errorpars) == 2 % AR(1) error model: - phi1 = errorpars(1); tau2 = errorpars(2); - sigma2rw = tau2/(1-phi1^2); - % - [iSig] = makeAR1covmat(phi1,tau2,length(cyrs)); - % - Wcurr = ((sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr)))'; - Wprop = ((sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop)))'; - % - logLprop = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wprop)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wprop); - logLcurr = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wcurr)'*iSig*(RW(cyrs) - sqrt(1-sigma2rw)*Wcurr); - HR = exp(logLprop-logLcurr); - end -end -% accept or reject the proposal. -if binornd(1,min(HR,1))==1 - Mo = Moprop; -else - Mo = Mocurr; -end -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [Mo] = Mo_lit_aux(Mocurr,M,Mt,gT,RW,errorpars,gE,Gterms,amo,bmo,slp,int,intwindow,cyrs) -% gM/gT is the matrix of gM for all the years and all the months of the previous simulation. -% M is the matrix of soil moisture for all years and all months of the previous simulation. -% amt is the lower bound on the support of the uniform prior distribution for Mt -% bmt is the upper bound on the support of the uniform prior distribution for Mt -% -% SETW 6/10/2010 -Ny = size(Gterms,2); -I_0 = intwindow(1); I_f = intwindow(2); -if 1 % Sample from prior as proposal distribution! 
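% Worked example of the integration-window bookkeeping repeated in each sampler
% above. With intwindow = [-4 9], I_0 = -4 < 0, so startmo = 13 + (-4) = 9: each
% year's modeled growth integrates previous-year September through December plus
% current-year January through September. With intwindow = [0 12], only months
% 1:12 of the current year are integrated. Values below are illustrative.
intwindow = [-4 9];
I_0 = intwindow(1); I_f = intwindow(2);
if I_0 < 0
    startmo = 13 + I_0;          % 9: month of the previous year where integration begins
    prevMonths = startmo:12;     % months drawn from the previous year
    currMonths = 1:I_f;          % months drawn from the current year
else
    prevMonths = [];
    currMonths = (I_0+1):I_f;    % current year only
end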
- Moprop = slp*betarnd(amo,bmo)+int; - % upperlim = normcdf(int+slp,Mocurr,.03); - % lowerlim = normcdf(int,Mocurr,.03); - % U = unifrnd(lowerlim,upperlim); - % Moprop = norminv(U,Mocurr,.03); - % - gWprop = NaN*ones(12,Ny); - gWprop(MMoprop) = 1; - gWprop(MMt) = (M(MMt)-Mt)/(Moprop-Mt); - gprop = diag(gE)*min(gWprop,gT); - gcurr = Gterms; - % - %%%%%%%%%% account for variable integration window: - if I_0<0; % if we include part of the previous year in each year's modeled growth: - startmo = 13+I_0; - endmo = I_f; - prevseas = [mean(gprop(startmo:12,:),2) gprop(startmo:12,1:end-1)]; - gprop = gprop(1:endmo,:); - gprop = [prevseas; gprop]; - prevseas = [mean(gcurr(startmo:12,:),2) gcurr(startmo:12,1:end-1)]; - gcurr = gcurr(1:endmo,:); - gcurr = [prevseas; gcurr]; - else % no inclusion of last year's growth conditions in estimates of this year's growth: - startmo = I_0+1; - endmo = I_f; - gprop = gprop(startmo:endmo,:); - gcurr = gcurr(startmo:endmo,:); - end - %%%%%%%%%%%% - % - if length(errorpars) == 1 % White noise error model: - sigma2rw = errorpars; - expcurr = sum((RW(cyrs)'-sqrt(1-sigma2rw)*... - (sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr))).^2); - expprop = sum((RW(cyrs)'-sqrt(1-sigma2rw)*... - (sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop))).^2); - HR = exp(-.5*(expprop-expcurr)/sigma2rw); - % - % % must also account for relative prior probs if using "geyer default" sampling: - % liklicurr = betapdf((Mocurr-int)/slp,amo,bmo); - % likliprop = betapdf((Moprop-int)/slp,amo,bmo); - % HR = (likliprop/liklicurr)*exp(-.5*(expprop-expcurr)/sigma2rw); - elseif length(errorpars) == 2 % AR(1) error model: - phi1 = errorpars(1); tau2 = errorpars(2); - sigma2rw = tau2/(1-phi1^2); - % - [iSig] = makeAR1covmat(phi1,tau2,length(cyrs)); - % - Wcurr = ((sum(gcurr(:,cyrs))-mean(sum(gcurr)))/std(sum(gcurr)))'; - Wprop = ((sum(gprop(:,cyrs))-mean(sum(gprop)))/std(sum(gprop)))'; - % - logLprop = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wprop)'*iSig*(RW(cyrs)-sqrt(1-sigma2rw)*Wprop); - logLcurr = -.5*(RW(cyrs)-sqrt(1-sigma2rw)*Wcurr)'*iSig*(RW(cyrs)-sqrt(1-sigma2rw)*Wcurr); - HR = exp(logLprop-logLcurr); - % - % % must also account for relative prior probs if using "geyer default" sampling: - % liklicurr = betapdf((Mocurr-int)/slp,amo,bmo); - % likliprop = betapdf((Moprop-int)/slp,amo,bmo); - % HR = (likliprop/liklicurr)*exp(logLprop-logLcurr); - end -end -% accept or reject the proposal. 
-if binornd(1,min(HR,1))==1 - Mo = Moprop; -else - Mo = Mocurr; -end -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [sigma2rw,logLdata] = errormodel0_aux(sigma2rwcurr,RW,Gterms,intwindow,cyrs) -% RW = vector of observed annual ring widths -% Gterms = vector of terms that sum together to give the simulated raw ring with index for all -% months (rows) and all years (columns) -% SETW 4/5/2013 -% -%%%%%%%%%% account for variable integration window: -I_0 = intwindow(1); I_f = intwindow(2); -if I_0<0; % if we include part of the previous year in each year's modeled growth: - startmo = 13+I_0; - endmo = I_f; - prevseas = [mean(Gterms(startmo:12,:),2) Gterms(startmo:12,1:end-1)]; - Gterms = Gterms(1:endmo,:); - Gterms = [prevseas; Gterms]; -else % no inclusion of last year's growth conditions in estimates of this year's growth: - startmo = I_0+1; - endmo = I_f; - Gterms = Gterms(startmo:endmo,:); -end -%%%%%%%%%%%% -% % sample proposal from the prior: -% sigma2rwprop = unifrnd(0,1); % restricted to the unit interval since sigma^2_rw = (1)/(1+SNR^2) in this model - -% try in terms of a uniform prior on the std dev. of the model error, -% rather than a uniform prior on the variance.... -sigma2rwprop = (unifrnd(0,1))^2; - -% -% accept or reject? -Nyrs = length(cyrs); -Gamma = squeeze(sum(Gterms)); -Gammabar = mean(Gamma); -siggamma = std(Gamma); -% -logprop = -.5*sum((RW(cyrs)-sqrt(1-sigma2rwprop)*(Gamma(cyrs)-Gammabar)/siggamma).^2)/sigma2rwprop; -logcurr = -.5*sum((RW(cyrs)-sqrt(1-sigma2rwcurr)*(Gamma(cyrs)-Gammabar)/siggamma).^2)/sigma2rwcurr; -HR = ((sigma2rwcurr/sigma2rwprop)^(Nyrs/2))*exp(logprop-logcurr); -if binornd(1,min(HR,1))==1 - sigma2rw = sigma2rwprop; - logLdata = logprop-Nyrs/2*log(sigma2rwprop); -else - sigma2rw = sigma2rwcurr; - logLdata = logcurr-Nyrs/2*log(sigma2rwcurr); -end -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [pars,logLdata] = errormodel1_aux(currpars,RW,Gterms,intwindow,cyrs) -% RW = vector of observed annual ring widths -% Gterms = vector of terms that sum together to give the simulated raw ring with index for all -% months (rows) and all years (columns) -% SETW 4/5/2013 -% -%%%%%%%%%% account for variable integration window: -I_0 = intwindow(1); I_f = intwindow(2); -if I_0<0; % if we include part of the previous year in each year's modeled growth: - startmo = 13+I_0; - endmo = I_f; - prevseas = [mean(Gterms(startmo:12,:),2) Gterms(startmo:12,1:end-1)]; - Gterms = Gterms(1:endmo,:); - Gterms = [prevseas; Gterms]; -else % no inclusion of last year's growth conditions in estimates of this year's growth: - startmo = I_0+1; - endmo = I_f; - Gterms = Gterms(startmo:endmo,:); -end -%%%%%%%%%%%% -% read current values of parameters: -phi1curr = currpars(1); -tau2curr = currpars(2); -% if 0 % sample proposal from the prior: -phi1prop = unifrnd(0,1); -tau2prop = unifrnd(0,1); -while tau2prop > 1-phi1prop^2 - % satisfy conditions for stationarity, causality, and also - % sigma2_w = tau2/(1-phi1^2) <= 1 since sigma2_w = 1/(1+SNR^2) in this model - phi1prop = unifrnd(0,1); - tau2prop = unifrnd(0,1); -end - -% try in terms of a uniform prior on the std dev. of the model error, -% rather than a uniform prior on the variance.... 
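% Side note on the white-noise error-model sampler above: drawing
% sigma2rwprop = (unifrnd(0,1))^2 amounts to placing a uniform prior on the
% error standard deviation on (0,1) and proposing from it. Because sigma2 now
% differs between the current and proposed states, the Hastings ratio keeps the
% ratio of Gaussian normalizing constants, (sigma2curr/sigma2prop)^(N/2), which
% the growth-parameter samplers could drop because sigma2 is held fixed there.
% All numbers below are illustrative stand-ins.
N = 100;                                    % hypothetical number of calibration years
resid2curr = 40; resid2prop = 42;           % hypothetical sums of squared residuals
sigma2curr = 0.3;
sigma2prop = (unifrnd(0,1))^2;              % uniform prior on the std dev, squared
logcurr = -0.5*resid2curr/sigma2curr;
logprop = -0.5*resid2prop/sigma2prop;
HR = ((sigma2curr/sigma2prop)^(N/2)) * exp(logprop - logcurr);
accept = binornd(1, min(HR,1)) == 1;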
-% sig_w = unifrnd(0,1); -% phi1prop = unifrnd(0,1); -% tau2prop = (1-phi1prop^2)*sig_w^2; - - - -% else % sample proposal using random walk step from current location: -% phi1prop = phi1curr + .25*randn; -% tau2prop = tau2curr + .1*randn; -% while tau2prop > 1-phi1prop^2 || tau2prop <0 || phi1prop < 0 -% % satisfy conditions for stationarity, causality, and also -% % sigma2_w = tau2/(1-phi1^2) <= 1 since sigma2_w = 1/(1+SNR^2) in this model -% phi1prop = phi1curr + .25*randn; -% tau2prop = tau2curr + .1*randn; -% end -% end -% -% accept or reject? -Ny = length(cyrs); -Gamma = squeeze(sum(Gterms)); -Gammabar = mean(Gamma); -siggamma = std(Gamma); -% VS-lite estimate of TRW at current parameter values: -What = ((Gamma(cyrs)-Gammabar)/siggamma)'; -% -[iSigprop,detSigprop] = makeAR1covmat(phi1prop,tau2prop,Ny); -[iSigcurr,detSigcurr] = makeAR1covmat(phi1curr,tau2curr,Ny); -alphaprop = sqrt(1-tau2prop/(1-phi1prop^2)); -alphacurr = sqrt(1-tau2curr/(1-phi1curr^2)); -% -logLprop = -.5*(RW(cyrs)'-alphaprop*What)'*iSigprop*(RW(cyrs)'-alphaprop*What); -logLcurr = -.5*(RW(cyrs)'-alphacurr*What)'*iSigcurr*(RW(cyrs)'-alphacurr*What); -HR = sqrt(detSigcurr/detSigprop)*exp(logLprop-logLcurr); - -if binornd(1,min(HR,1))==1 - pars(1) = phi1prop; - pars(2) = tau2prop; - logLdata = logLprop-log(detSigprop); -else - pars(1) = phi1curr; - pars(2) = tau2curr; - logLdata = logLcurr-log(detSigcurr); -end -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [invSigma,detSigma] = makeAR1covmat(phi1,tau2,N) -%%% [Sigma,invSigma,detSigma] = makeAR1covmat(phi1,tau2,N) -% Make approximate joint covariance matrix, inverse covariace matrix, and covariance matrix -% determinant for N sequential observations that follow the AR(1) model -% X_t = phi1 X_t-1 + eps_t, where eps_t ~ N(0,tau^2) - -A = -phi1*eye(N); -superdiag = sub2ind([N N],(1:(N-1))',(2:N)'); -A(superdiag) = 1; -% Now Var(A* e) \approx tau2*I, so -% Sigma \approx tau2* inv(A)*inv(A') -% and -% invSigma \approx (1/tau2)* A'*A - -%Sigma = tau2*(A\eye(N))*(A'\eye(N)); -invSigma = (1/tau2)*(A')*A; -detSigma = (tau2/phi1^2)^N; -end -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -function [Rhat] = gelmanrubin92(Nsamp,Nbi,varargin) -% Usage: Rhat = gelmanrubin92(Nsamp,Nbi,chain1,chain2,...,chainN) -% Nsamp = number of iterations in each sample -% Nbi = number to consider "burn-in". -% chain1, ..., chainN must have dimension Nsamp x 1. -% SETW 1/26/2011 - -% Number of chains: -m = nargin-2; -for i = 1:m - eval(['chain' num2str(i) ' = varargin{i};']); -end -% number of non-burn-in iterations: -n = Nsamp-Nbi; -% -allX = []; -for i = 1:m - eval(['X = chain' num2str(i) ';']); - % within-chain means of X: - Xbar(i) = nanmean(X(Nbi+1:end)); - % within-chain variances of X: - Xs2(i) = nanvar(X(Nbi+1:end)); - allX = [allX X(Nbi+1:end)]; -end -% mean across chains of mean X in each month: -Xbarbar = mean(Xbar); -% -BX = n*(sum(((Xbar-repmat(Xbarbar,1,m)).^2),2))/(m-1); -% -WX = nanmean(Xs2,2); -% -muhatX = nanmean(allX,2); -% -sig2hatX = (n-1)*WX/n + BX/n; % G&R92 eqn. 3 -% -VhatX = sig2hatX + BX/(m*n); -varhatXs2 = var(Xs2,0,2); -% -covhatXs2Xbar2 =... - sum((Xs2-nanmean(Xs2)).*(Xbar.^2-nanmean(Xs2.^2)))/m; % ???? Check this! -covhatXs2Xbar =... - sum((Xs2-nanmean(Xs2)).*(Xbar-nanmean(Xs2)))/m; % ???? Check this! -% -covhatXs2Xbar2 = covhatXs2Xbar2';covhatXs2Xbar = covhatXs2Xbar'; -% -varhatVhatX = (((n-1)/n)^2)*varhatXs2/m + (((m+1)/(m*n))^2)*2*BX.^2/(m-1) +... 
- 2*((m+1)*(n-1)/(m*n^2))*n*(covhatXs2Xbar2-2*Xbarbar.*covhatXs2Xbar)/m; -dfX = 2*(VhatX.^2)./varhatVhatX; -% -Rhat = (VhatX./WX).*(dfX./(dfX-2)); -end diff --git a/3. PSMs/Specific Forward Models/VSlite (defunct)/vslitePSM.m b/3. PSMs/Specific Forward Models/VSlite (defunct)/vslitePSM.m deleted file mode 100644 index e2e15c8f..00000000 --- a/3. PSMs/Specific Forward Models/VSlite (defunct)/vslitePSM.m +++ /dev/null @@ -1,98 +0,0 @@ -classdef vslitePSM < PSM - - properties - lat; - lon - - T1; - T2; - M1; - M2; - - Tclim; - standard; - - intwindow; - lbparams; - hydroclim; - end - - - methods - - % Constructor - function obj = vslitePSM( coord, T1, T2, M1, M2, Tclim, varargin ) - - % Parse inputs - [intwindow, lbparams, hydroclim] = ... - parseInputs( varargin, {'lbparams','hydroclim','intwindow'}, ... - {[],[],[]}, {[],{'P','M'},[]} ); - - % Defaults - obj.intwindow = {}; - obj.lbparams = {}; - obj.hydroclim = {}; - obj.standard = []; - - % Advanced parameters - if ~isempty(intwindow) - obj.intwindow = {'intwindow', intwindow}; - end - if ~isempty(lbparams) - obj.lbparams = {'lbparams', lbparams}; - end - if ~isempty(hydroclim) - obj.hydroclim = {'hydroclim', hydroclim}; - end - - % Set other values - obj.lat = coord(1); - obj.lon = coord(2); - obj.T1 = T1; - obj.T2 = T2; - obj.M1 = M1; - obj.M2 = M2; - obj.Tclim = Tclim; - end - - % State indices - function[] = getStateIndices( obj, ensMeta, Tname, Pname, monthName, varargin ) - - % Concatenate the variable names - varNames = [string(Tname), string(Pname)]; - - % Get the time dimension - [~,~,~,~,~,~,timeID] = getDimIDs; - - - % Get the closest indices - tic - obj.H = getClosestLatLonIndex( [obj.lat, obj.lon], ensMeta, ... - varNames, timeID, monthName, varargin{:} ); - toc - end - - % Error Checking - function[] = errorCheckPSM(obj) - warning('VS-Lite PSMs have no error checking!!!'); - end - - % Run the PSM - function[Ye] = runForwardModel( obj, M, ~, ~ ) - - % Split the state vector into T and P - T = M(1:12,:); - P = M(13:24,:); - - % Run the model - Ye = VSLite4dash( obj.lat, obj.T1, obj.T2, obj.M1, obj.M2, ... - T, P, obj.standard, obj.Tclim, obj.intwindow{:}, ... - obj.lbparams{:}, obj.hydroclim{:} ); - - % Standardize - Ye = zscore(Ye); - end - - end -end - \ No newline at end of file diff --git a/3. PSMs/Specific Forward Models/trivialPSM.m b/3. PSMs/Specific Forward Models/trivialPSM.m deleted file mode 100644 index 2a49220d..00000000 --- a/3. PSMs/Specific Forward Models/trivialPSM.m +++ /dev/null @@ -1,33 +0,0 @@ -classdef trivialPSM < PSM - % trivialPSM - % A trivial PSM used that directly returns whatever value it is given. - % - % trivialPSM Methods: - % trivialPSM - Creates a new trivial PSM - % getStateIndices - Finds state vector element needed to run the PSM - % runForwardModel - Runs the trivial PSM forward model - - % ----- Written By ----- - % Jonathan King, University of Arizona, 2019 - - methods - % Constructor - function obj = trivialPSM - end - - % Set the value of H - function[] = getStateIndices(obj, H) - obj.H = H; - end - - % Placeholder error check PSM method. - function[] = errorCheckPSM(~) - end - - % Run the PSM. Simply return whatever value is input. Just a - % placeholder. - function[Ye,R] = runForwardModel( ~, Ye, ~, ~) - R = []; - end - end -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/calculateYe.m b/4. DA Analyses/@dash/calculateYe.m deleted file mode 100644 index 78e8d311..00000000 --- a/4. 
DA Analyses/@dash/calculateYe.m +++ /dev/null @@ -1,35 +0,0 @@ -function[Ye] = calculateYe( M, F ) -%% Calculates Ye for a given ensemble and set of PSMs without conducting -% any data assimilation. -% -% Ye = dash.calculateYe( M, F ) -% Calculate Ye values. -% -% ----- Inputs ----- -% -% M: A model ensemble (nState x nEns), or a scalar ensemble object. -% -% F: A set of forward models -% -% ----- Outputs ----- -% -% Ye: A set of Ye values - -% Initialize an empty Kalman Filter, just to error check M and F. Load M. -kalmanFilter( M, ones(size(F)), ones(size(F)), F ); -if isa(M, 'ensemble') - M = M.load; -end - -% Preallocate -nObs = size(F,1); -nEns = size(M,2); -Ye = NaN( nObs, nEns ); - -% Generate model estimates -for d = 1:numel(F) - Mpsm = M( F{d}.H, : ); - Ye(d,:) = dash.processYeR( F{d}, Mpsm, 1, NaN, d ); -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/checkReconH.m b/4. DA Analyses/@dash/checkReconH.m deleted file mode 100644 index 62658547..00000000 --- a/4. DA Analyses/@dash/checkReconH.m +++ /dev/null @@ -1,42 +0,0 @@ -function[reconH] = checkReconH( recon, F ) -% Checks if PSM H values will be reconstructed -% -% reconH = dash.checkReconH( recon, F ) -% -% ----- Inputs ----- -% -% recon: A vector of logical indices (nState x 1) -% -% F: A cell vector of PSMs -% -% ----- Outputs ----- -% -% reconH: A scalar logical indicating whether the reconstructed elements -% include all of the state elements needed to run the PSMs (H). - -% Error check -if ~isvector(recon) || ~islogical(recon) - error('recon must be a logical vector.'); -elseif ~isvector(F) || ~iscell(F) - error('F must be a cell vector.'); -end - -% Get the H indices from each PSM -nPSM = numel(F); -psmIndices = cell(nPSM,1); -for s = 1:nPSM - if ~isscalar(F{s}) || ~isa( F{s}, 'PSM' ) - error('Each element of F must be a scalar PSM.'); - end - psmIndices{s} = F{s}.H; -end -psmIndices = cell2mat(psmIndices); - -% Compare to the reconstruction indices -reconIndices = find( recon ); -reconH = true; -if any( ~ismember(psmIndices, reconIndices) ) - reconH = false; -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/dash.m b/4. DA Analyses/@dash/dash.m deleted file mode 100644 index b1712c1f..00000000 --- a/4. DA Analyses/@dash/dash.m +++ /dev/null @@ -1,81 +0,0 @@ -classdef (Abstract) dash < handle - % dash - % Implements data assimilation. Provides support functions for kalman - % filters, particle filters, optimal sensor tests, etc. - % - % dash Methods: - % localizationWeights - Computes distance based localization weights - % regrid - Converts a state vector or a matrix of state vectors to a gridded data array. - % regridTripolar - Converts a tripolar state vector or matrix of state vectors to a gridded data array. - % - % inflate - Inflates the covariance of an ensemble - % decompose - Breaks an ensemble into mean, deviations, and variance - % processYeR - Runs PSMs and error checks output - - % ----- Written By ----- - % Jonathan King, University of Arizona, 2019 - - % Interface for analyses - methods (Abstract) - - % Runs a data assimilation test - run( obj ); - - % Change the settings for a particular type of test - settings( obj, varargin); - - % Change the data in an existing test object - setValues( obj, values ); - - end - - % Regridding - methods (Static) - - % Regrids a variable in an ensemble from a state vector. 
- [A, meta, dimID] = regrid( A, var, ensMeta, keepSingleton ) - - % Regrids a tripolar variable - [rA, dimID] = regridTripolar( A, var, ensMeta, gridSize, notnan, keepSingleton ); - - end - - % General analysis methods - methods (Static) - - % Inflates the covariance matrix - M = inflate( M, factor ); - - % Breaks an ensemble into mean and devations. Also variance. - [Mmean, Mdev, Mvar] = decompose( M ); - - % Temporal localization weights - [weights, yloc] = temporalLocalization( siteTime, stateTime, R, scale ); - - % Spatial localization weights - [weights, yloc] = spatialLocalization( siteCoord, stateCoord, R, scale ); - - % Redirect of old method - [weights, yloc] = localizationWeights( siteCoord, stateCoord, R, scale); - - % Error check Ye and R generation on the fly without crashing the analysis - [Ye, R, use] = processYeR( F, Mpsm, R, t, d ); - - % Calculate Ye without running a data assimilation - Ye = calculateYe( M, F ); - - % Return the current version of dash - versionString = version; - - % Checks if reconstructed indices include all PSM indices - reconH = checkReconH( recon, F ); - - % Error propagation for spatial means - [E, sigma] = uncertainMean( X, Xvar, dim, weights ); - - % Restrict variables to values needed to run PSMs - restrictVarsToPSMs( vars, F, ens ); - - end - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/decompose.m b/4. DA Analyses/@dash/decompose.m deleted file mode 100644 index bb7ed302..00000000 --- a/4. DA Analyses/@dash/decompose.m +++ /dev/null @@ -1,20 +0,0 @@ -function[Xmean, Xdev] = decompose( X ) -%% Break apart an ensemble into mean, deviations, and variance. -% -% [Xmean, Xdev] = dash.decompose( X ) -% Gets the mean and deviations from the mean for an ensemble. -% -% ----- Inputs ----- -% -% X: An ensemble. (nState x nEns) -% -% ----- Outputs ----- -% -% Xmean: Mean of the ensemble (nState x 1) -% -% Xdev: Ensemble deviations (nState x nEns) - -Xmean = mean(X,2); -Xdev = X - Xmean; - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/inflate.m b/4. DA Analyses/@dash/inflate.m deleted file mode 100644 index f0a14ba4..00000000 --- a/4. DA Analyses/@dash/inflate.m +++ /dev/null @@ -1,23 +0,0 @@ -function[M] = inflate( M, factor ) -% Inflates the covariance of an ensemble. -% -% M = dash.inflate( M, factor ) -% -% ----- Inputs ----- -% -% M: An ensemble. (nState x nEns) -% -% factor: An inflation factor. A scalar. -% -% ----- Outputs ---- -% -% M: The inflated ensemble. - -% Don't both inflating if the inflation factor is 1. -if factor ~= 1 - [Mmean, Mdev] = dash.decompose( M ); - Mdev = sqrt(factor) .* Mdev; - M = Mmean + Mdev; -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/localizationWeights.m b/4. DA Analyses/@dash/localizationWeights.m deleted file mode 100644 index f4782fa6..00000000 --- a/4. DA Analyses/@dash/localizationWeights.m +++ /dev/null @@ -1,21 +0,0 @@ -function[weights, yloc] = localizationWeights( siteCoord, stateCoord, R, scale) -% Redirects to dash.spatialLocalization - -warning(['dash.localizationWeights has been renamed to dash.spatialLocalization, and ', ... - 'will be removed in a future release. Please consider updating your code.']); - -if ~exist('siteCoord','var') - siteCoord = []; -end -if ~exist('stateCoord','var') - stateCoord = []; -end -if~exist('R','var') - R = []; -end -if ~exist('scale','var') - scale = []; -end -[weights, yloc] = dash.spatialLocalization( siteCoord, stateCoord, R, scale ); - -end \ No newline at end of file diff --git a/4. 
DA Analyses/@dash/processYeR.m b/4. DA Analyses/@dash/processYeR.m deleted file mode 100644 index 738846b2..00000000 --- a/4. DA Analyses/@dash/processYeR.m +++ /dev/null @@ -1,105 +0,0 @@ -function[Ye, R, use] = processYeR( F, Mpsm, R, t, d ) -% Estimates Ye and R as a data assimilation is running. Error checks the -% PSM output and determines if it can be used to update. -% -% [Ye, R, use] = dash.processYeR( F, Mpsm, R, t, d ) -% -% ----- Inputs ----- -% -% F: A scalar PSM object -% -% Mpsm: The ensemble elements needed to run the PSM -% -% R: A row vector of R values. The uncertainty for the observations associated -% with the PSM site at each processed time step. -% -% t: The current DA time step -% -% d: The proxy order of the current PSM -% -% ----- Outputs ----- -% -% Ye: The Ye estimates -% -% R: The merged input and dynamically generated R values -% -% use: Whether or not to use the PSM to update in each processed time step. - -% Get some sizes. Preallocate -nEns = size(Mpsm,2); -nTime = size(R,2); -use = false( 1, nTime ); - -% Find the R values that need dynamic generation -nanR = isnan(R); - -getR = false; -if any( nanR ) - getR = true; -end - -% Run the PSM, check R and Ye for errors -try - goodYe = false; - [Ye, Rpsm] = F.run( Mpsm, t, d ); - - % Check Ye for errors. If Ye is good, note that any time steps with - % non-NaN R are ready. - if ~isequal( size(Ye), [1, nEns] ) - error('Ye is the incorrect size.'); - elseif ~isnumeric(Ye) || any(~isreal(Ye)) - error('Ye must be numeric, and cannot be complex numbers.'); - elseif any(isnan(Ye)) || any(isinf(Ye)) - error('Ye cannot be NaN or Inf.'); - elseif numel(unique(Ye))==1 - error('Ye values are all identical, but the filter requires estimates with non-zero variance.'); - end - goodYe = true; - use( ~nanR ) = true; - - % Check R for errors if necessary. If good, save and use to update - % time steps with unspecified R. - if getR - if ~isscalar(Rpsm) || ~isnumeric(Rpsm) - error('R must be a numeric, scalar value.'); - elseif ~isreal(Rpsm) || isnan(Rpsm) || isinf(Rpsm) - error('R cannot be complex, NaN, or Inf.'); - elseif Rpsm < 0 - error('R cannot be negative.'); - end - R( nanR ) = Rpsm; - use( nanR ) = true; - end - - -% If the PSM fails, send the error to the console, but don't crash the DA. -% Create NaN output if necessary. -catch ME - psmFailureMessage( ME.message, t, d, goodYe ); - if ~goodYe - Ye = NaN( 1, nEns ); - end -end - -end - -% Informative error messages. -function[] = psmFailureMessage( message, t, d, goodYe ) - -if ~isnan(t) - timestr = sprintf('in time step %.f ', t); - noupdate = timestr; - -else - timestr = ''; - noupdate = ''; - if goodYe - noupdate = ' in time steps with unspecified R values'; - end -end - -fprintf([ 'PSM %.f failed %swith error message: \n', ... - message, '\n', ... - '\tDash will not use observations from site %.f to update the analysis%s.\n\n'], ... - d, timestr, d, noupdate ); -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/regrid.m b/4. DA Analyses/@dash/regrid.m deleted file mode 100644 index 94b0b47c..00000000 --- a/4. DA Analyses/@dash/regrid.m +++ /dev/null @@ -1,71 +0,0 @@ -function[A, meta, dimID] = regrid( A, varName, ensMeta, keepSingleton ) -%% Regrids a variable from a particular time step for a DA analysis. -% -% [rA, meta, dimID] = dash.regrid( A, var, ensMeta ) -% Regrids a variable in an ensemble. -% -% [rA, meta, dimID] = dash.regrid( A, var, ensMeta, keepSingle ) -% Specify whether to preserve or remove singleton dimensions. 
Default is to -% remove singletons. -% -% ----- Inputs ----- -% -% A: An ensemble. (nState x nTime). -% -% var: The name of a variable. Must be a string. -% -% ensMeta: The ensembleMetadata object associated with A -% -% keepSingleton: A scalar logical. -% -% ----- Outputs ----- -% -% rA: A regridded analysis for one variable. -% -% meta: Metadata associated with each element of the regridded data. -% -% dimID: The order of the dimensions for the regridded variable. - -% Set defaults -if ~exist('keepSingleton','var') || isempty(keepSingleton) - keepSingleton = false; -end - -% Error check -if ~ismatrix(A) - error('"A" must be a matrix.'); -elseif ~isa( ensMeta, 'ensembleMetadata' ) || ~isscalar(ensMeta) - error('ensMeta must be a scalar ensembleMetadata object.') -elseif size(A,1) ~= ensMeta.varLimit(end) - error('The number of rows in A (%.f) must match the number of elements in the ensemble metadata (%.f).', size(A,1), ensMeta.varLimit(end) ); -elseif ~isscalar(keepSingleton) || ~islogical(keepSingleton) - error('keepSingleton must be a scalar logical.'); -end -v = ensMeta.varCheck(varName); - -if ensMeta.partialGrid(v) - error('Variable %s is not a complete grid thus cannot be regridded. (It may have been restricted to PSM indices).', varName); -end - -% Get the metadata -meta = ensMeta.design.varMetadata; -meta = meta.(varName); - -% Extract the variable from the ensemble. Regrid -H = ensMeta.varIndices( varName ); -nTime = size(A,2); -A = reshape( A(H,:), [ensMeta.varSize(v,:), nTime] ); - -% Include metadata for DA time steps -dimID = [ensMeta.design.var(v).dimID, "DA_Time_Steps"]; -meta.(dimID(end)) = (1:nTime)'; - -% Optionally remove singletons -if ~keepSingleton - singleton = size(A)==1; - meta = rmfield( meta, dimID(singleton) ); - dimID( singleton ) = []; - A = squeeze(A); -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/regridTripolar.m b/4. DA Analyses/@dash/regridTripolar.m deleted file mode 100644 index 3aab8665..00000000 --- a/4. DA Analyses/@dash/regridTripolar.m +++ /dev/null @@ -1,82 +0,0 @@ -function[rA, dimID] = regridTripolar( A, var, ensMeta, gridSize, notnan, keepSingleton ) -%% Regrids a tripolar variable from a particular time step for a DA analysis. -% -% [rA, dimID] = dash.regridTripolar( A, var, ensMeta, gridSize, notnan ) -% Regrids a variable on a tripolar grid. -% -% [rA, dimID] = dash.regridTripolar( ..., keepSingleton ) -% Specify whether to keep or remove singleton dimensions. Default is remove. -% -% ----- Inputs ----- -% -% A: A state vector. Typically the updated ensemble mean or variance. (nState x 1) -% -% var: The name of a variable. Must be a string. -% -% ensMeta: Ensemble metadata -% -% gridSize: The size of the original tripolar spatial grid. -% -% notnan: A set of logicals indices that point to non-nan indices on a -% tripolar grid. Typically, the indices of ocean grid nodes. (nTripole x 1) -% -% keepSingleton: A scalar logical indicating whether to keep or remove -% singleton dimensions. -% -% ----- Outputs ----- -% -% rA: A regridded analysis for one variable. -% -% dimID: The dimensional ordering of the regridded variable. Tri1 and tri2 -% are the first and second dimensions associated with the original -% tripolar grid. 
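% A minimal usage sketch of the regridding call documented above. The names are
% assumptions for illustration only: 'SST' is a hypothetical tripolar variable
% in the state vector, the original tripolar grid is 320 x 384, "ocean" stands
% in for the real non-NaN (ocean) mask, and Amean/ensMeta are assumed to exist
% from an earlier analysis.
gridSize = [320 384];                          % hypothetical tripolar grid size
ocean = true(prod(gridSize), 1);               % placeholder for the real ocean mask
[rA, dimID] = dash.regridTripolar( Amean, 'SST', ensMeta, gridSize, ocean );
% rA now has tri1 x tri2 spatial dimensions; dimID records the dimension order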
- -% Set defaults -if ~exist('keepSingleton','var') || isempty(keepSingleton) - keepSingleton = false; -end - -% Error check -if numel(gridSize)~=2 || any(gridSize<1) || any(mod(gridSize,1)~=0) - error('gridSize must be a 2-element vector with the size of the original tripolar spatial grid.'); -elseif ~islogical(notnan) || ~isvector(notnan) || prod(gridSize)~=numel(notnan) - error('notnan must be a logical vector with one element for each element in the original tripolar grid (%.f)', prod(gridSize)); -end - -% Intial regird -[A, ~, dimID] = dash.regrid( A, var, ensMeta, keepSingleton ); - -% Get the tripole dimension -[~,~,~,~,~,~,~,~,triDim] = getDimIDs; -tri = find( strcmp(dimID, triDim) ); - -% Preallocate the full tripolar grid -siz = size(A); -siz(tri) = prod( gridSize ); -rA = NaN(siz); - -% Fill in the non-NaN values on the full grid -dataIndices = repmat( {':'}, [numel(dimID),1] ); -dataIndices{tri} = notnan; -rA( dataIndices{:} ) = A; - -% Reshape to the original grid size -siz = siz([1:tri, tri:end]); -siz(tri) = gridSize(1); -siz(tri+1) = gridSize(2); - -rA = reshape( rA, siz ); - -% Update the dimension IDs -dimID = dimID([1:tri, tri:end]); -dimID{tri} = 'tri1'; -dimID{tri+1} = 'tri2'; - -% Optionally remove singleton dimensions -if ~keepSingleton - singleton = size(rA)==1; - dimID(singleton) = []; - rA = squeeze(rA); -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/restrictVarsToPSMs.m b/4. DA Analyses/@dash/restrictVarsToPSMs.m deleted file mode 100644 index fd636702..00000000 --- a/4. DA Analyses/@dash/restrictVarsToPSMs.m +++ /dev/null @@ -1,74 +0,0 @@ -function[] = restrictVarsToPSMs( vars, F, ens ) -%% For specified variables, only load values required to run PSMs. -% -% dash.restrictVarsToPSMs( vars, F, ens ) -% For the specified variables, only values required to run PSMs will be -% loaded from the ensemble. Updates the ensemble object and its metadata. -% Also updates the PSM array so that state indices (H) point to the correct -% locations in the reduced ensemble. -% -% ----- Inputs ----- -% -% vars: A list of variables for which state elements should be limited to -% values required to run PSMs. A string vector, cellstring vector, or -% character row vector. -% -% F: A cell array of PSMs. (nSite x 1) -% -% ens: An ensemble object. -% -% ----- Outputs ----- -% -% F: PSMs for which the state indices (H) have been adjusted to match the -% smaller, restricted ensemble. (nSite x 1) -% -% ens: An ensemble object that will only load the reduced ensemble -% (and corresponding reduced metadata). 
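% Toy illustration of the index bookkeeping performed by restrictVarsToPSMs:
% state elements of the restricted variables are kept only if some PSM needs
% them, and each PSM's H indices are then remapped into the reduced state
% vector. All numbers here are made up for the example.
Hall = (1:10)';            % full state vector indices
Hvar = (4:10)';            % elements belonging to the restricted variable(s)
Hpsm = [5; 8];             % elements required by the PSMs
useH = ~ismember(Hall, Hvar) | ismember(Hall, Hpsm);   % keep 1:3, 5, and 8
Hnew = Hall(useH);                                     % [1 2 3 5 8]'
[~, remappedH] = ismember(Hpsm, Hnew);                 % PSM indices become [4; 5]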
- -% Error check -if ~isa(ens, 'ensemble') || ~isscalar(ens) - error('ens must be a scalar ensemble object.'); -elseif ~isstrlist(vars) - error('vars must be a string vector, cellstring vector, or character row vector.'); -elseif ~isvector(F) || ~iscell(F) - error('F must be a cell vector.'); -end -v = unique( ens.metadata.varCheck(vars) ); -nVar = numel(v); -ensMeta = ens.metadata; - -% Run through the PSMs, collect H indices -nPSM = numel(F); -indices = cell(nPSM, 1); -for s = 1:nPSM - if ~isa(F{s}, 'PSM') || ~isscalar(F{s}) - error('Element %.f of F is not a scalar PSM object.', s ); - elseif isempty( F{s}.H ) - warning('PSM %.f does not have state vector indices (H)', s ); - end - indices{s} = F{s}.H(:); -end -Hpsm = cell2mat(indices); - -% Get the set of all indices and restrictable indices -indices = cell( nVar, 1 ); -for var = 1:nVar - indices{var} = ensMeta.varIndices( ensMeta.varName(v(var)) ); -end -Hvar = cell2mat(indices); -Hall = (1:ensMeta.ensSize(1))'; - -% Remove all indices not in PSMs -useH = ~ismember(Hall, Hvar) | ismember(Hall, Hpsm); -Hnew = Hall( useH ); - -% Update the H indices in the PSMs -for s = 1:nPSM - [~, psmH] = ismember( F{s}.H, Hnew ); - F{s}.setStateIndices( psmH ); -end - -% Update the ensemble -ens.useStateIndices( useH ); - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/spatialLocalization.m b/4. DA Analyses/@dash/spatialLocalization.m deleted file mode 100644 index a118712e..00000000 --- a/4. DA Analyses/@dash/spatialLocalization.m +++ /dev/null @@ -1,108 +0,0 @@ -function[weights, yloc] = spatialLocalization( siteCoord, stateCoord, R, scale) -%% Calculates the weights for covariance localization at a site. -% -% [w, yloc] = dash.spatialLocalization( siteCoord, ensMeta, R ) -% Calculates covariance localization weights for an ensemble. -% -% [...] = dash.spatialLocalization( siteCoord, stateCoord, R ) -% Calculates covariance localization weights for user-defined state -% coordinates. -% -% [...] = dash.spatialLocalization( siteCoord, stateCoord, R, scale ) -% Specifies the length scale to use for the localization weights. This -% adjusts how quickly localization weights decrease as they approach the -% cutoff radius. Must be a scalar on the interval (0, 0.5]. Default is 0.5. -% -% [...] = dash.spatialLocalization( siteCoord, stateCoord, R, 'optimal' ) -% Uses the optimal length scale of sqrt(10/3) based on Lorenc, 2003. -% -% -% ***** Explanation of length scales and R -% -% The length scale c, is used to define the behavior of the localization. -% For | distance <= c, Full covariance is retained -% | c < distance < 2*c, Covariance retention decreases from 1 to 0 with distance -% | 2c < distance, No covariance is retained -% -% Rloc = 2c is the localization radius. It is a more stringent requirement -% than R, the cutoff radius. Thus, Rloc <= R, which requires 0 < scale <= 1/2 -% -% Essentially, R is the radius at which covariance is required to be zero, -% while Rloc is the (more strict) radius at which covariance actually is 0. -% -% ***** -% -% ----- Inputs ----- -% -% siteCoord: The coordinates observation sites. A two-column matrix. First -% is latitude, second is longitude. Supports both 0-360 and -% -180 to 180 longitude coordinates. (nObs x 2) -% -% Ideally, site coordinates are the coordinates of the model grid -% nodes closest to the individual sites. However, when using -% multiple grids, the actual site coordinates are a good approximation. -% -% ensMeta: An ensemble metadata object. 
-% -% stateCoord: A set of state vector coordinates. A two column matrix. First -% column is latitude, second is longitude. Supports both 0-360 and -% 180 to -180 longitude coordinates. -% -% R: The cutoff radius. All covariance outside of this radius will be -% eliminated. -% -% scale: A scalar on the interval (0, 0.5]. Used to determine the -% localization radius. If unspecified, scale is set to 0.5 and the -% localization radius is equivalent to R. -% -% ----- Outputs ----- -% -% w: The localization weights between each site and each state vector element. (nState x nObs) -% -% yloc: The localization weights between the observations sites. (Required -% localization with a joint update scheme. (nObs x nObs) - -% ----- Sources ----- -% -% Based on the approach of Hamill et al., 2001 -% https://doi.org/10.1175/1520-0493(2001)129<2776:DDFOBE>2.0.CO;2 -% -% ----- Written By ----- -% -% Original function by R. Tardif, Dept. Atmos. Sci., Univ. of Washington -% for the Last Millennium Reanalysis. -% -% Adapted for MATLAB by Jonathan King, Dept. Geoscience, University of -% Arizona, 08 Nov 2018. -% -% Modified to included variable/optimal length scales by Jonathan King. -% -% Y localization weights by Jonathan King - -% Get defaults -if ~exist('scale','var') - scale = []; -end -if isa(stateCoord, 'ensembleMetadata') - if ~isscalar(stateCoord) - error('ensMeta must be a scalar ensembleMetadata object.'); - end - stateCoord = stateCoord.coordinates; -end - -% Error check -if ~ismatrix(siteCoord) || size(siteCoord,2) ~= 2 - error('Site coordinates must be a two column matrix.'); -elseif ~ismatrix(stateCoord) || size( stateCoord, 2 ) ~= 2 - error('State coordinates must be a matrix with 2 columns.'); -end - -% Get the distances -wdist = haversine( siteCoord, stateCoord ); -ydist = haversine( siteCoord, siteCoord ); - -% Use a Gaspari-Cohn polynomial to get the localization weights -weights = gaspariCohn( wdist, R, scale )'; -yloc = gaspariCohn( ydist, R, scale ); - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/temporalLocalization.m b/4. DA Analyses/@dash/temporalLocalization.m deleted file mode 100644 index 17411ad0..00000000 --- a/4. DA Analyses/@dash/temporalLocalization.m +++ /dev/null @@ -1,82 +0,0 @@ -function[weights, yloc] = temporalLocalization( siteTime, stateTime, R, scale ) -%% Calculates temporal localization weights. -% -% ***Note: Assumes site and state vector time metadata uses the same units. -% -% [w, yloc] = dash.temporalLocalization( siteTime, ensMeta, R ) -% Calculates temporal covariance localization weights for an ensemble. -% -% [w, yloc] = dash.temporalLocalization( siteTime, stateTime, R ) -% Calculates weights for user-defined state vector time points. -% -% [w, yloc] = dash.temporalLocalization( siteTime, stateTime, R, scale ) -% Specifies a length scale to use in the Gaspari-Cohn polynomial. -% -% ----- Inputs ----- -% -% siteTime: The time points for the observation sites. A vector. May be -% either numeric data or datetime. (nObs x 1) -% -% ensMeta: An ensemble metadata object. -% -% stateTime: User defined time points for a state vector. Must use the same -% units as siteTime. (nState x 1) -% -% R: The temporal cutoff radius. If siteTime is numeric, a numeric scalar -% with the same units as siteTime. If siteTime is a datetime, a duration -% object. -% -% scale: A scalar on the interval (0, 0.5]. Default is 0.5 -% -% ----- Outputs ----- -% -% w: The localization weights between each site and each state vector -% element. 
(nState x nObs) -% -% yloc: The localization weights between sites. (nObs x nObs) - -% Get defaults -if ~exist('scale','var') - scale = []; -end -if isa(stateTime, 'ensembleMetadata') - if ~isscalar(stateTime) - error('ensMeta must be a scalar ensembleMetadata object.'); - end - stateTime = stateTime.timepoints; -end - -% Error check -if ~isvector(siteTime) - error('siteTime must be a vector'); -elseif ~isvector(stateTime) - error('stateTime must be a vector.'); -elseif isnumeric(siteTime) && (~isnumeric(stateTime) || ~isnumeric(R)) - error('stateTime and R must be numeric when siteTime is numeric.'); -elseif isdatetime(siteTime) && (~isdatetime(stateTime) || ~isduration(R)) - error('When siteTime is a datetime array, stateTime must be a datetime array and R must be a duration.'); -end - -% Get the temporal distances for datetime metadata (no bsx for datetime) -if isdatetime(siteTime) - nState = length(stateTime); - nSite = length(siteTime); - wdist = NaN( nState, nSite ); - ydist = NaN( nSite, nSite ); - for k = 1:nSite - wdist(:,k) = abs( years(siteTime(k) - stateTime) ); - ydist(:,k) = abs( years(siteTime(k) - siteTime) ); - end - R = years( R ); - -% Distance for numeric metadata -else - wdist = abs( siteTime' - stateTime ); - ydist = abs( siteTime' - siteTime ); -end - -% Use a gaspariCohn to get localization weights -weights = gaspariCohn( wdist, R, scale ); -yloc = gaspariCohn( ydist, R, scale ); - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/uncertainMean.m b/4. DA Analyses/@dash/uncertainMean.m deleted file mode 100644 index 63e6c9c6..00000000 --- a/4. DA Analyses/@dash/uncertainMean.m +++ /dev/null @@ -1,116 +0,0 @@ -function[E, sigma] = uncertainMean( X, Xvar, dim, weights ) -%% Calculates the mean of uncertain, correlated variables and propagates error. -% -% *** THIS IS A PROTOTYPE. IT MAY CHANGE WITHOUT WARNING IN THE FUTURE. *** -% -% Uses the first order, second moment approximations to propagate error. -% Assumes observations along the mean dimension are independent and -% calculates correlation coefficients along this dimension. -% -% [E, sigma] = uncertainMean( X, Xvar ) -% Calculates the mean and associated uncertainty for data X with associated -% variance Xvar. Takes the mean along the first dimension. -% -% [E, sigma] = uncertainMean( X, Xvar, dim ) -% Specify which dimension to take the mean along. -% -% [E, sigma] = uncertainMean( X, Xvar, dim, weights ) -% Propagate error for a weighted mean. -% -% ----- Inputs ----- -% -% X: An N-dimensional data array. -% -% Xvar: An N-dimensional array specifying the variance in of each element -% in X. Must be the same size as X. -% -% dim: A scalar positive integer specifying the dimension over which to -% take the mean. -% -% weights: Weights for a weighted mean. Must be a singleton in the mean -% dimension, and match the size of all other dimensions of X. -% -% ----- Outputs ----- -% -% E: The mean. -% -% sigma: The standard deviation of each value in the mean. - -warning('This function is a prototype. It may change without warning in the future.'); - -% Set defaults. First dimension. Unweighted mean. 
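% The spatial and temporal localization functions above both delegate to a
% gaspariCohn helper that is not shown in this file. For reference, a sketch of
% the standard Gaspari & Cohn (1999, eq. 4.10) fifth-order taper consistent with
% the length-scale description above (full weight for distances <= c, tapering
% to zero by 2c, with c = scale*R); the toolbox's own helper may differ in detail.
function[w] = gaspariCohnSketch(dist, R, scale)
if nargin < 3 || isempty(scale)
    scale = 0.5;                 % default: localization radius 2c equals R
end
c = scale * R;
z = dist ./ c;
w = zeros(size(dist));           % weights for z > 2 stay at zero
in1 = (z <= 1);                  % inner region: 0 <= z <= 1
w(in1) = -0.25*z(in1).^5 + 0.5*z(in1).^4 + (5/8)*z(in1).^3 - (5/3)*z(in1).^2 + 1;
in2 = (z > 1 & z <= 2);          % outer region: 1 < z <= 2
w(in2) = (1/12)*z(in2).^5 - 0.5*z(in2).^4 + (5/8)*z(in2).^3 + (5/3)*z(in2).^2 ...
         - 5*z(in2) + 4 - 2./(3*z(in2));
end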
-if ~exist('dim','var') || isempty(dim) - dim = 1; -end -if ~exist('weights','var') || isempty(weights) - siz = size(X); - siz(dim) = 1; - weights = (1/prod(siz)) * ones( siz ); -end - -% Error check -if ~isnumeric(dim) || ~isscalar(dim) || dim<=0 || mod(dim,1)~=0 - error('dim must be a scalar, positive integer.'); -elseif ~isnumeric(X) || any(isnan(X),'all') || any(isinf(X),'all') - error('X must be a numeric array and cannot contain NaN or Inf.'); -elseif ~isnumeric(Xvar) || any(isnan(Xvar),'all') || any(isinf(Xvar),'all') || any(Xvar<0,'all') - error('Xvar must be numeric array and cannot contain NaN, Inf, or negative values.'); -elseif ~isnumeric(weights) || any(isnan(weights),'all') || any(isinf(weights),'all') || any(weights<0,'all') - error('weights must be a numeric array and cannot contain NaN, Inf, or negative values.'); -elseif ~isequal( size(X), size(Xvar) ) - error('X and Xvar must be the same size.'); -end - -sizeX = size(X); -sizeWeight = size( weights ); -maxDims = max( dim, ndims(X) ); -sizeX( end+1:maxDims ) = 1; -sizeWeight( end+1:maxDims ) = 1; -if ~isequal( size(X), size(Xvar) ) - error('X and Xvar must be the same size.'); -elseif ~isequal( sizeX([1:dim-1,dim+1:end]), sizeWeight([1:dim-1,dim+1:end]) ) || sizeWeight(dim)~=1 - error('weights must be a singleton in the mean dimension, and the same size as X in all other dimensions.'); -end - -% Weight the variance. Get the sum of the weights -Xvar = weights.^2 .* Xvar; -w = sum( weights, 'all' ); - -% Move the mean dimension to the first dimension -dimOrder = 1:ndims(X); -dimOrder(1) = dim; -dimOrder(dim) = 1; - -X = permute( X, dimOrder ); -Xvar = permute( Xvar, dimOrder ); - -% Reshape to 2D matrix, each column is a variable -siz = size(Xvar); -newSize = [siz(1), prod(siz(2:end))]; - -X = reshape( X, newSize ); -Xvar = reshape( Xvar, newSize ); - -% Compute the mean. Preallocate sigma -weights = weights(:)'; -E = sum( X.*weights, 2 ) ./ w; -sigma = NaN( siz(1), 1 ); - -% Get the upper triangular portion of the correlation matrix -rho = triu( corr(X), 1 ); - -% Optimize the progress bar -hundredth = ceil( siz(1) / 100 ); -progressbar(0); - -% Propagate the errors in each time step -for t = 1:siz(1) - Xsigma = sqrt( Xvar(t,:) ); - sigma(t) = (1/w) * sqrt( sum(Xvar(t,:),2) + 2*( Xsigma * rho * Xsigma' ) ); - - if mod(t, hundredth)==0 || t==siz(1) - progressbar(t/siz(1)); - end -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@dash/version.m b/4. DA Analyses/@dash/version.m deleted file mode 100644 index 88606826..00000000 --- a/4. DA Analyses/@dash/version.m +++ /dev/null @@ -1,3 +0,0 @@ -function[versionString] = version -versionString = "v3.6.0"; -end \ No newline at end of file diff --git a/4. DA Analyses/@dashFilter/dashFilter.m b/4. DA Analyses/@dashFilter/dashFilter.m deleted file mode 100644 index e09966c6..00000000 --- a/4. DA Analyses/@dashFilter/dashFilter.m +++ /dev/null @@ -1,22 +0,0 @@ -classdef (Abstract) dashFilter < dash - % Implements setValues error checking for particle and kalman filters - - properties - M; - D; - R; - F; - Rtype; - end - - methods - % Error check values before setting - setValues( obj, M, D, R, F ) - end - - % Set the values for a particular filter - methods (Abstract) - checkValues( obj, M, D, R, F, Rtype ); - end - -end \ No newline at end of file diff --git a/4. DA Analyses/@dashFilter/setValues.m b/4. DA Analyses/@dashFilter/setValues.m deleted file mode 100644 index ede07281..00000000 --- a/4. 
DA Analyses/@dashFilter/setValues.m +++ /dev/null @@ -1,130 +0,0 @@ -function[M, D, R, F, Rtype] = setValues( obj, M, D, R, F ) -% Sets the model prior, observations, observation uncertainty, and PSMs to -% use with a data assimilation filter. -% -% obj.setValues( M, D, R, F ) -% -% ***Note: Use an empty array to keep the current value of a variable in -% the dash object. For example: -% -% >> obj.setValues( [], D, R ) -% would set new values for D and R, but use existing values for M and F -% -% ----- Inputs ----- -% -% M: A model prior. Either an ensemble object or a matrix (nState x nEns) -% -% D: A matrix of observations (nObs x nTime) -% -% R: Observation uncertainty. NaN entries in time steps with observations -% will be calculated dynamically via the PSMs. -% -% scalar: (1 x 1) The same value will be used for all proxies in all time steps -% row vector: (1 x nTime) The same value will be used for all proxies in each time step -% column vector: (nObs x 1) The same value will be used for each proxy in all time steps. -% matrix: (nObs x nTime) Each value will be used for one proxy in one time step. -% -% F: A cell vector of PSM objects. {nObs x 1} - -% Get saved/default values -Rtype = 'new'; -if ~exist('M','var') || isempty(M) - M = obj.M; -end -if ~exist('D','var') || isempty(D) - D = obj.D; -end -if ~exist('R','var') || isempty(R) - R = obj.R; - Rtype = obj.Rtype; -end -if ~exist('F','var') || isempty(F) - F = obj.F; -end - -% Check M -if isa(M,'ensemble') - if ~isscalar(M) - error('When M is an ensemble object, it must be scalar.'); - end - v = M.metadata.varCheck( M.loadVars ); - if any( M.hasnan( v, M.loadMembers ), 'all' ) - error('Cannot load NaN values for data assimilation. Please see ensemble.useMembers to only load ensemble members without NaN elements.'); - end - meta = M.loadMetadata; - nState = meta.ensSize(1); -else - if ~ismatrix(M) || ~isreal(M) || ~isnumeric(M) || any(isinf(M(:))) || any(isnan(M(:))) - error('M must be a matrix of real, numeric, finite values and may not contain NaN.'); - end - nState = size(M,1); -end - -% Check the observations -if ~ismatrix(D) || ~isreal(D) || ~isnumeric(D) || any(isinf(D(:))) - error('D must be a matrix of real, numeric, finite values.'); -end -[nObs, nTime] = size(D); - -% Get R. Error check. Replicate -if strcmp(Rtype, 'scalar') - R = R(1); -elseif strcmp(Rtype, 'row') - R = R(1,:); -elseif strcmp(Rtype, 'column') - R = R(:,1); -end - -if isscalar(R) - R = R * ones( size(D) ); - Rtype = 'scalar'; -elseif isrow(R) - R = repmat( R, [nObs, 1] ); - Rtype = 'row'; -elseif iscolumn(R) - R = repmat( R, [1, nTime] ); - Rtype = 'column'; -else - Rtype = 'matrix'; -end - -if ~isnumeric(R) || ~isreal(R) || any(R(:)<0) || ~ismatrix(R) - error('R must be a set of real, numeric, positive values and cannot have more than 2 dimensions.'); -elseif isrow(R) - if length(R)~=nTime - error('The number of elements in R (%.f) does not match the number of time steps (%.f).', length(R), nTime ); - end -elseif iscolumn(R) - if length(R)~=nObs - error('The number of elements in R (%.f) does not match the number of observation sites.', length(R), nObs ); - end -elseif ismatrix(R) && ~isequal( size(R), [nObs, nTime]) - error('R must be a (%.f x %.f) matrix.', nObs, nTime ); -end - -% Check the PSMs. 
Have them do an internal review -if ~isvector(F) || ~iscell(F) || length(F)~=nObs - error('F must be a cell vector with %.f elements.', nObs ); -end -for d = 1:nObs - if ~isa( F{d}, 'PSM' ) || ~isscalar( F{d} ) - error('Element %.f of F must be a scalar "PSM" object', d ); - end - try - F{d}.review( nState ); - catch ME - error( [sprintf('PSM %.f failed with the following error message:\n',d), ME.message] ); - end -end - -% Have the filter do any internal error checking -obj.checkValues( M, D, R, F, Rtype ); - -% Set the values -obj.M = M; -obj.D = D; -obj.R = R; -obj.F = F; -obj.Rtype = Rtype; - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/adjustH.m b/4. DA Analyses/@kalmanFilter/adjustH.m deleted file mode 100644 index a1915ab0..00000000 --- a/4. DA Analyses/@kalmanFilter/adjustH.m +++ /dev/null @@ -1,9 +0,0 @@ -function[F] = adjustH( F, reconstruct ) -% Adjust H indices in PSMs to account for partially reconstructed ensemble - -reconIndex = find( reconstruct ); -for s = 1:numel(F) - [~, F{s}.H] = ismember( F{s}.H, reconIndex ); -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/appendYe.m b/4. DA Analyses/@kalmanFilter/appendYe.m deleted file mode 100644 index 2f0b7321..00000000 --- a/4. DA Analyses/@kalmanFilter/appendYe.m +++ /dev/null @@ -1,47 +0,0 @@ -function[M, F, Yi, reconstruct] = appendYe( M, F, reconstruct ) -%% Precalculates Ye and appends them to the end of the prior. -% -% [Ma, Fa, Yi] = dash.appendYe( M, F ); -% Calculates Ye values, appends them to the end of a prior. Converts PSMs -% to trivial "appendPSMs" that sample the appropriate appended value -% -% ----- Inputs ----- -% -% M: A prior model ensemble. (nState x nEns) -% -% F: A cell vector of PSMs {nObs x 1} -% -% Yi: The initial, appended Ye values. (nObs x 1) -% -% reconstruct: Logical vector indicating which state vector elements to -% reconstruct. (nState x 1) -% -% ----- Outputs ----- -% -% Ma: The appended model. (nState + nObs x nEns) -% -% F: A set of trivial PSMs. {nObs x 1} -% -% Yi: The initial Ye values. (nObs x nEns) -% -% reconstruct: Updated reconstruction indices (nState + nObs x nEns) - -% Get sizes, preallocate -nObs = numel(F); -[nState, nEns] = size(M); -M = [M; NaN(nObs, nEns)]; - -% Generate Ye for each observation. Replace PSM with trivialPSM -for d = 1:nObs - M( nState+d, : ) = F{d}.run( M(F{d}.H, :), NaN, d ); - F{d} = trivialPSM; - F{d}.getStateIndices( nState + d ); -end - -% Get the initial Ye values -Yi = M( nState+(1:nObs), : ); - -% Update the reconstruction indices -reconstruct = [reconstruct; true(nObs,1)]; - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/checkValues.m b/4. DA Analyses/@kalmanFilter/checkValues.m deleted file mode 100644 index fe86e6fb..00000000 --- a/4. DA Analyses/@kalmanFilter/checkValues.m +++ /dev/null @@ -1,44 +0,0 @@ -function[] = checkValues( obj, M, D, ~, F, ~ ) -% Check values against kalman filter settings - -% Get some sizes -if isa(M, 'ensemble') - meta = M.loadMetadata; - nState = meta.ensSize(1); -else - nState = size(M,1); -end -nObs = size(D,1); - -% Check that localization still works -if ~isempty( obj.localize ) - if strcmpi(obj.type, 'serial') - w = obj.localize; - else - w = obj.localize{1}; - end - if ~isequal( size(w), [nState,nObs] ) - error('The previous w localization weights are (%.f x %.f), which would no longer be the correct size (%.f x %.f). 
You can reset them with the command:\n\t>> obj.settings(''localize'', [])', size(w,1), size(w,2), nState, nObs ); - end - % Note that we don't need to reset yloc, because w already scales to nObs -end - -% Check that reconstruction indices are still allowed -if ~isempty( obj.reconstruct ) - if length(obj.reconstruct)~=nState - error('The size of the prior would change, so the previously specified reconstruction indices would not be valid. You can reset them with the command:\n\t>> obj.reconstructVars%s',''); - end - - % Check if PSM H indices are reconstructed. Throw error if serial. - reconH = dash.checkReconH( obj.reconstruct, F ); - if ~reconH && strcmpi(type,'serial') && ~obj.append - error('The previously specified reconstruction indices would no longer include the PSM state indices (H). Consider switching to joint updates, using the appended Ye method, or resetting the reconstruction indices with the command:\n\t>> obj.reconstructVars%s',''); - end -end - -% Set internal values -if ~isempty( obj.reconstruct ) - obj.reconH = reconH; -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/jointENSRF.m b/4. DA Analyses/@kalmanFilter/jointENSRF.m deleted file mode 100644 index 99ebc50d..00000000 --- a/4. DA Analyses/@kalmanFilter/jointENSRF.m +++ /dev/null @@ -1,143 +0,0 @@ -function[output] = jointENSRF( M, D, R, F, w, yloc, meanOnly, fullDevs, percentiles, reconstruct ) -%% Implements an ensemble square root kalman filter updating observations jointly -% -% [output] = dash.jointENSRF( M, D, R, F, w, yloc, meanOnly, fullDevs, percentiles, reconstruct ) -% -% ----- Inputs ----- -% -% M: The model ensemble. (nState x nEns) -% -% D: The observations. (nObs x nTime) -% -% R: Observation uncertainty. NaN values will be determined via dynamic R -% generation by the PSM. (nObs x nTime) -% -% F: A cell array of proxy system models of the "PSM" class. (nObs x 1) -% -% w: State vector - observation localization weights. (nState x nObs) -% -% yloc: Observation - observation localization weights (nObs x nObs) -% -% meanOnly: Whether to only update the ensemble mean. Scalar logical. -% -% fullDevs: Whether to return full ensemble deviations. Scalar logical -% -% percentiles: Which percentiles to return. A vector of values between 0 -% and 100 (nPerc x 1) -% -% reconstruct: Logical vector indicating which state vector elements to -% reconstruct. (nState x 1) -% -% ----- Outputs ----- -% -% output: A structure with the following fields -% -% settings - The settings used to run the filter -% -% Amean - The updated ensemble mean (nState x nTime) -% -% Avar - Updated ensemble variance (nState x nTime) -% -% Adev - Updated ensemble deviations (nState x nEns x nTime) -% -% Aperc - Percentiles of the updated ensemble. (nState x nPercentile x nTime) -% -% Ye - Proxy estimates (nObs x nEns) -% -% calibRatio - The calibration ratio. (nObs x nTime) -% -% R - The observation uncertainty used to run the filter. Includes -% dynamically generated R values. (nObs x nTime). -% -% sites - A logical array indicating which sites were used to update each -% time step. 
(nObs x nTime) - -% Get sizes -[nObs, nTime] = size(D); -nPerc = numel( percentiles ); -nEns = size(M,2); - -% Preallocate PSM outputs and calibration ratio -Ye = NaN( nObs, nEns ); -sites = false(nObs, nTime); -calibRatio = NaN( nObs, nTime ); - -% Generate model estimates -for d = 1:numel(F) - Mpsm = M(F{d}.H, :); - hasObs = ~isnan( D(d,:) ); - [Ye(d,:), R(d,hasObs), sites(d,hasObs)] = dash.processYeR( F{d}, Mpsm, R(d,hasObs), NaN, d ); -end - -% Reduce M to reconstructed elements. Get state vector length -M = M( reconstruct, : ); -nState = size(M, 1); - -% Preallocate updated ensemble -Amean = NaN(nState, nTime); -if fullDevs - Adev = NaN( nState, nEns, nTime ); -elseif ~meanOnly - Avar = NaN( nState, nTime ); -end -Aperc = NaN( nState, nPerc, nTime ); - -% Get (static) Kalman numerator. Clear M for space -[Mmean, Mdev] = dash.decompose( M ); -clearvars M; -[Ymean, Ydev] = dash.decompose( Ye ); -Knum = kalmanFilter.jointKalman( 'Knum', Mdev, Ydev, w ); - -% Use the obs in each time step to compute the full kalman gain -% progressbar(0); -for t = 1:nTime - sites(:,t) = sites(:,t) & ~isnan( D(:,t) ); - obs = sites(:,t); - [K, Kdenom] = kalmanFilter.jointKalman( 'K', Knum(:,obs), Ydev(obs,:), yloc(obs,obs), R(obs,t) ); - - % Update the mean and get the calibration ratio - Amean(:,t) = Mmean + K * ( D(obs,t) - Ymean(obs) ); - calibRatio( obs, t ) = abs( D(obs,t) - Ymean(obs) ).^2 ./ ( diag(Kdenom) ); - - % Optionally update the deviations / variance / percentiles - if ~meanOnly - Ka = kalmanFilter.jointKalman( 'Ka', Knum(:,obs), Kdenom, R(obs,t) ); - if fullDevs - Adev(:,:,t) = Mdev - Ka * Ydev(obs,:); - if nPerc > 0 - Aperc(:,:,t) = Amean(:,t) + prctile( Adev(:,:,t), percentiles, 2 ); - end - elseif nPerc == 0 - Avar(:,t) = sum( (Mdev - Ka * Ydev(obs,:)).^2, 2) ./ (nEns-1); - else - Adev = Mdev - Ka * Ydev(obs,:); - Avar(:,t) = sum( Adev.^2, 2 ) ./ (nEns-1); - Aperc(:,:,t) = Amean(:,t) + prctile( Adev, percentiles, 2 ); - end - end - -% progressbar(t/nTime); -end - -% Create the output structure -output.settings = struct('Updates', 'Joint', 'Mean_Only', meanOnly, 'version', dash.version, 'Time_Completed', datetime(clock)); -if ~all(w==1, 'all') || ~all(yloc==1, 'all') - output.settings.Localize = {w, yloc}; -end -output.Amean = Amean; -if fullDevs - output.Adev = Adev; - output.Amean = permute( Amean, [1 3 2] ); -elseif ~meanOnly - output.Avar = Avar; -end -if nPerc > 0 - output.settings.percentiles = percentiles(:)'; - output.Aperc = Aperc; -end -output.Ye = Ye; -output.calibRatio = calibRatio; -output.R = R; -output.sites = sites; - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/jointKalman.m b/4. DA Analyses/@kalmanFilter/jointKalman.m deleted file mode 100644 index 9f233cab..00000000 --- a/4. DA Analyses/@kalmanFilter/jointKalman.m +++ /dev/null @@ -1,45 +0,0 @@ -function[varargout] = jointKalman(type, varargin) -%% Splits apart the kalman calculations for the joint updates. This allows -% more efficient calculations and helps conserve memory. 
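% An illustrative sketch of how the three calls documented below combine for a
% single time step of the joint update. All variables here are assumed toy
% values rather than inputs defined in this file, and it assumes the
% kalmanFilter static methods from this class are on the path.
Mdev = randn(5, 20);   Ydev = randn(3, 20);      % state and Ye deviations
Mmean = zeros(5, 1);   Ymean = zeros(3, 1);      % state and Ye means
D = randn(3, 1);       R = 0.1 * ones(3, 1);     % obs for one time step and their uncertainty
w = ones(5, 3);        yloc = ones(3, 3);        % no localization
Knum = kalmanFilter.jointKalman( 'Knum', Mdev, Ydev, w );            % static numerator
[K, Kdenom] = kalmanFilter.jointKalman( 'K', Knum, Ydev, yloc, R );  % full gain and denominator
Ka = kalmanFilter.jointKalman( 'Ka', Knum, Kdenom, R );              % adjusted gain
Amean = Mmean + K * (D - Ymean);                 % updated ensemble mean
Adev  = Mdev - Ka * Ydev;                        % updated ensemble deviations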
-% -% [Knum] = dash.jointKalman( 'Knum', Mdev, Ydev, w ) -% -% [K, Kdenom] = dash.jointKalman( 'K', Knum, Ydev, yloc, R ) -% -% [Ka] = dash.jointKalman( 'Ka', Knum, Kdenom, R ) - -% Get the (static) numerator -if strcmp(type, 'Knum') - [Mdev, Ydev, w] = parseKalman( varargin ); - unbias = 1 / (size(Ydev,2) - 1); - - Knum = unbias .* (Mdev * Ydev'); - Knum = Knum .* w; - varargout = {Knum}; - -% Denominator and full gain -elseif strcmp(type, 'K') - [Knum, Ydev, yloc, R] = parseKalman( varargin ); - unbias = 1 / (size(Ydev,2) - 1); - R = diag(R); - - Kdenom = unbias .* yloc .* (Ydev * Ydev') + R; - K = Knum / Kdenom; - varargout = {K, Kdenom}; - -% Adjusted gain -elseif strcmp(type, 'Ka') - [Knum, Kdenom, R] = parseKalman( varargin ); - R = diag(R); - - Ka = Knum * (sqrtm(Kdenom)^(-1))' * (sqrtm(Kdenom) + sqrtm(R))^(-1); - varargout = {Ka}; -end - -end - -% Convenience function, lets you rename the inputs based on the specific -% calculation. -function[varargout] = parseKalman( input ) -varargout = input; -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/kalmanFilter.m b/4. DA Analyses/@kalmanFilter/kalmanFilter.m deleted file mode 100644 index edd819f3..00000000 --- a/4. DA Analyses/@kalmanFilter/kalmanFilter.m +++ /dev/null @@ -1,117 +0,0 @@ -classdef kalmanFilter < dashFilter - % Implements an offline, ensemble square root kalman filter. - % - % kalmanFilter Methods: - % kalmanFilter - Creates a new kalmanFilter - % settings - Adjusts the settings for the kalman filter - % run - Runs the kalman filter - % setValues - Changes the data used in an existing kalman filter - % reconstructVars - Specify which variables to reconstruct - - properties - % Settings - type; % Serial or joint updates - localize; % Localization weights - inflate; % The inflation factor - append; % Whether to use the appended Ye method - meanOnly; % Whether to only calculate the ensemble mean - fullDevs; % Whether to return full ensemble deviations - percentiles; % Which percentiles of the ensemble to return - reconstruct; % Which state vector elements to reconstruct - reconH; % Whether all H indices are reconstructed - end - - % Constructor - methods - function obj = kalmanFilter( M, D, R, F ) - % Creates a new kalmanFilter object - % - % obj = kalmanFilter( M, D, R, F ) - % - % ----- Inputs ----- - % - % M: A model prior. Either an ensemble object or a matrix (nState x nEns) - % - % D: A matrix of observations (nObs x nTime) - % - % R: Observation uncertainty. NaN entries in time steps with observations - % will be calculated dynamically via the PSMs. - % - % scalar: (1 x 1) The same value will be used for all proxies in all time steps - % row vector: (1 x nTime) The same value will be used for all proxies in each time step - % column vector: (nObs x 1) The same value will be used for each proxy in all time steps. - % matrix: (nObs x nTime) Each value will be used for one proxy in one time step. - % - % F: A cell vector of PSM objects. 
{nObs x 1} - % - % ----- Outputs ----- - % - % obj: A new kalmanFilter object - - % Default settings - obj.type = 'joint'; - obj.localize = []; - obj.inflate = 1; - obj.append = false; - obj.meanOnly = false; - obj.fullDevs = false; - obj.percentiles = []; - - % Block empty constructor, set values - if isempty(M) || isempty(D) || isempty(R) || isempty(F) - error('M, D, R, and F cannot be empty.'); - end - obj.setValues( M, D, R, F ); - - % Defaults for reconstructed variables - obj.reconstruct = []; - obj.reconH = []; - end - end - - % User methods - methods - - % Run the filter - output = run( obj ); - - % Change settings - settings( obj, varargin ); - - % Specify variables to reconstruct - reconstructVars( obj, vars, ensMeta ) - - end - - % Utilities - methods - checkValues( obj, M, D, ~, F, ~ ); - end - - % Static analysis methods - methods (Static) - - % Serial updating scheme - [output] = serialENSRF( M, D, R, F, w, fullDevs, percentiles ); - - % Full inversion - [output] = jointENSRF( M, D, R, F, w, yloc, meanOnly, fullDevs, percentiles, reconstruct ); - - % Serial kalman gain - [K, a] = serialKalman( Mdev, Ydev, w, R ); - - % Joint kalman gain - [varargout] = jointKalman(type, varargin); - - % Append Ye - [M, F, Yi, reconstruct] = appendYe( M, F, reconstruct ); - - % Unappend Ye - [Amean, Avar] = unappendYe( Amean, Avar, nObs ) - - % Adjust PSM H indices for partial reconstruction - F = adjustH( F, reconstruct ); - - end - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/reconstructVars.m b/4. DA Analyses/@kalmanFilter/reconstructVars.m deleted file mode 100644 index 37781f64..00000000 --- a/4. DA Analyses/@kalmanFilter/reconstructVars.m +++ /dev/null @@ -1,77 +0,0 @@ -function[] = reconstructVars( obj, vars, ensMeta ) -% Specifies to only reconstruct certain variables. -% -% obj.reconstructVars( vars, ensMeta ) -% Reconstructs specific variables given the ensemble metadata for the -% prior. -% -% obj.reconstructVars -% Reset to the default of reconstructing all variables. -% -% ----- Inputs ----- -% -% vars: The names of the variables to reconstruct. String, cellstring, or -% character row vector. -% -% ensMeta: The ensemble metadata for the prior. - -% Reset to default if no inputs -if (~exist('vars','var') || isempty(vars)) && (~exist('ensMeta','var') || isempty(ensMeta)) - obj.reconstruct = []; - obj.reconH = []; - return; -end - -% Error check -if ~isscalar(ensMeta) || ~isa(ensMeta, 'ensembleMetadata') - error('ensMeta must be a scalar ensembleMetadata object.'); -end -ensMeta.varCheck( vars ); -vars = string(vars); - -% Check that this ensemble metadata matches the size of M -if isa(obj.M, 'ensemble') - Mmeta = obj.M.loadMetadata; - nState = Mmeta.ensSize(1); -else - nState = size(obj.M,1); -end -if ensMeta.ensSize(1)~=nState - error('The ensemble metadata does not match the number of state elements (%.f) in the prior.', nState ); -end - -% Get the indices to reconstruct -nVars = numel(vars); -indices = cell( nVars, 1 ); -for v = 1:nVars - indices{v} = ensMeta.varIndices( vars(v) ); -end -indices = cell2mat( indices ); - -% Convert to logical -reconstruct = false( nState, 1 ); -reconstruct( indices ) = true; - -% Check if PSM H indices are reconstructed. Throw error for serial -reconH = dash.checkReconH( reconstruct, obj.F ); -if ~reconH && strcmpi(obj.type,'serial') && ~obj.append - error('When using serial updates without appended Ye, you must reconstruct all state elements used to run the PSMs.'); -end - -% Check if localization exists. 
Require reset -if ~isempty(obj.localize) - if iscell(obj.localize) - w = obj.localize{1}; - else - w = obj.localize; - end - if size(w,1)~=sum(reconstruct) - error('The previously specified localization weights would no longer match the size of the reconstructed prior. You can reset them with the command:\n\t>> obj.settings(''localize'',[])%s',''); - end -end - -% Set the values -obj.reconstruct = reconstruct; -obj.reconH = reconH; - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/run.m b/4. DA Analyses/@kalmanFilter/run.m deleted file mode 100644 index 89d4dbc9..00000000 --- a/4. DA Analyses/@kalmanFilter/run.m +++ /dev/null @@ -1,101 +0,0 @@ -function[output] = run( obj ) -% Runs an ensemble square root Kalman filter for a specific object -% -% output = obj.run -% -% ---- Outputs ----- -% -% output: A structure that may contain the following fields -% -% settings - The settings used to run the filter -% -% Amean - The updated ensemble mean (nState x nTime) -% -% Avar - Updated ensemble variance (nState x nTime) -% -% Adev - Updated ensemble deviations (nState x nEns x nTime) -% -% Aperc - Percentiles of the updated ensemble (nState x nPercentile x nTime) -% (See output.settings for the percentiles calculated) -% -% Ye - Proxy estimates -% Joint Updates: (nObs x nEns) -% Serial Updates: (nObs x nEns x nTime) -% -% Yi - Initial proxy estimates when using the appended Ye method. (nObs x nEns) -% -% calibRatio - The calibration ratio. (nObs x nTime) -% -% R - The observation uncertainty used to run the filter. Includes -% dynamically generated R values. (nObs x nTime). -% -% sites - A logical array indicating which sites were used to update each -% time step. (nObs x nTime) - -% Load the ensemble if necessary -M = obj.M; -if isa(M,'ensemble') - M = M.load; -end - -% Default reconstruction indices -if isempty( obj.reconstruct ) - reconstruct = true( size(M,1), 1 ); -else - reconstruct = obj.reconstruct; -end - -% Inflate ensemble -M = dash.inflate( M, obj.inflate ); - -% Sizes -nState = size(M,1); -nObs = size(obj.D,1); - -% Serial updates -if strcmp(obj.type, 'serial') - - % Default localization - w = obj.localize; - if isempty(w) - w = ones( nState, nObs ); - end - - % Optionally append Ye - F = obj.F; - if obj.append - [M, F, Yi, reconstruct] = obj.appendYe( M, F, reconstruct ); - end - - % Reduce prior to reconstructed variables. Adjust H indices - M = M( reconstruct, : ); - F = obj.adjustH( F, reconstruct ); - - % Do the updates - output = obj.serialENSRF( M, obj.D, obj.R, F, w, obj.fullDevs, obj.percentiles ); - - % Unappend if necessary - output.Append = false; - if obj.append - obj.unappendYe; - output.Yi = Yi; - output.Append = true; - end - -% Joint updates -else - - % Default localization - if isempty(obj.localize) - w = ones( sum(reconstruct), nObs); - yloc = ones( nObs, nObs); - else - w = obj.localize{1}; - yloc = obj.localize{2}; - end - - % Do the updates - output = obj.jointENSRF( M, obj.D, obj.R, obj.F, w, yloc, obj.meanOnly, obj.fullDevs, obj.percentiles, reconstruct ); -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/serialENSRF.m b/4. DA Analyses/@kalmanFilter/serialENSRF.m deleted file mode 100644 index 7f014514..00000000 --- a/4. DA Analyses/@kalmanFilter/serialENSRF.m +++ /dev/null @@ -1,132 +0,0 @@ -function[output] = serialENSRF( M, D, R, F, w, fullDevs, percentiles ) -%% Implements an ensemble square root kalman filter with serial updates. 
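% A rough sketch of the per-observation update performed in the loop further
% below, using the serialKalman method of this class. The variables are
% assumed toy values rather than inputs to this function.
Ad = randn(5, 20);     Ydev = randn(1, 20);      % state and Ye deviations
Am = zeros(5, 1);      Ymean = 0;                % state and Ye means
d = 1.5;  r = 0.2;     w1 = ones(5, 1);          % one observation, its R, and localization weights
[K, a] = kalmanFilter.serialKalman( Ad, Ydev, w1, r );
Am = Am + K * (d - Ymean);                       % update the ensemble mean
Ad = Ad - a * (K * Ydev);                        % update the deviations with the adjusted gain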
-% -% [output] = dash.serialENSRF( M, D, R, F, w ) -% -% ----- Inputs ----- -% -% M: The model ensemble. (nState x nEns) -% -% D: The observations. (nObs x nTime) -% -% R: Observation uncertainty. NaN values will be determined via dynamic R -% generation by the PSM. (nObs x nTime) -% -% F: A cell array of proxy system models of the "PSM" class. (nObs x 1) -% -% w: Covariance localization weights. (nState x nObs) -% -% percentiles: A vector of values between 0 and 100 specifying which -% percentiles to return. (nPerc x 1) -% -% fullDevs: Scalar logical indicating whether to return full ensemble -% deviations. -% -% ----- Outputs ----- -% -% output: A structure with the following fields -% -% settings - The settings used to run the filter -% -% Amean - The updated ensemble mean (nState x nTime) -% -% Adev - Updated ensemble deviations (nState x nEns x nTime) -% -% Avar - Updated ensemble variance (nState x nTime) -% -% Aperc - Percentiles of the updated ensemble. (nState x nPerc x nTime) -% -% Ye - Proxy estimates (nObs x nEns x nTime) -% -% calibRatio - The calibration ratio. (nObs x nTime) -% -% R - The observation uncertainty used to run the filter. Includes -% dynamically generated R values. (nObs x nTime). -% -% sites - A logical array indicating which sites were used to update each -% time step. (nObs x nTime) - -% Get some sizes -[nObs, nTime] = size(D); -[nState, nEns] = size(M); -nPerc = numel( percentiles ); - -% Decompose the initial ensemble. Clear the ensemble to free memory. -[Mmean, Mdev] = dash.decompose(M); -clearvars M; - -% Preallocate -Amean = NaN( nState, nTime ); -if fullDevs - Adev = NaN( nState, nEns, nTime ); -else - Avar = NaN( nState, nTime ); -end -Aperc = NaN( nState, nPerc, nTime ); -Ye = NaN( nObs, nEns, nTime ); -sites = false( nObs, nTime ); -calibRatio = NaN( nObs, nTime ); - -% Initialize each time step with the prior -progressbar(0); -for t = 1:nTime - Am = Mmean; - Ad = Mdev; - - % Estimate Ye for each observation in this time step - for d = 1:nObs - if ~isnan( D(d,t) ) - Mpsm = Am(F{d}.H) + Ad(F{d}.H,:); - [Ye(d,:,t), R(d,t), sites(d,t)] = dash.processYeR( F{d}, Mpsm, R(d,t), t, d ); - - % If Ye and R were successful, use to update - if sites(d,t) - - % Decompose the estimates, get the Kalman gain, and - % calibration ratio - [Ymean, Ydev] = dash.decompose( Ye(d,:,t) ); - [K, a] = kalmanFilter.serialKalman( Ad, Ydev, w(:,d), R(d,t) ); - calibRatio(d,t) = ( D(d,t) - Ymean ).^2 ./ ( var(Ye(d,:,t)) + R(d,t) ); - - % Update - Am = Am + K*( D(d,t) - Ymean ); - Ad = Ad - (a * K * Ydev); - end - end - end - - % Record the updated mean and variance/deviations for the time step - Amean(:,t) = Am; - if fullDevs - Adev(:,:,t) = Ad; - else - Avar(:,t) = sum( Ad.^2, 2 ) ./ (nEns - 1); - end - if nPerc > 0 - Aperc(:,:,t) = Am + prctile( Ad, percentiles, 2 ); - end - progressbar(t/nTime); -end - -% Create the output structure -output.settings = struct('Analysis', 'EnSRF', 'Type', 'Serial','version', dash.version, 'Time_Completed', datetime(clock)); -if ~all( w==1, 'all' ) - output.settings.Localize = w; -end -output.Amean = Amean; -if fullDevs - output.Adev = Adev; - output.Amean = permute( Amean, [1 3 2] ); -else - output.Avar = Avar; -end -if nPerc > 0 - output.settings.percentiles = percentiles(:)'; - output.Aperc = Aperc; -end -output.Ye = Ye; -output.calibRatio = calibRatio; -output.R = R; -output.sites = sites; - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/serialKalman.m b/4. 
DA Analyses/@kalmanFilter/serialKalman.m deleted file mode 100644 index b733e58a..00000000 --- a/4. DA Analyses/@kalmanFilter/serialKalman.m +++ /dev/null @@ -1,36 +0,0 @@ -function[K, a] = serialKalman( Mdev, Ydev, w, R ) -%% Gets the kalman gain for serial updates -% -% [K, a] = dash.serialKalman( Mdev, ydev, w, R ) -% Computes the kalman gain and alpha scaling factor for a serial update. -% -% ----- Inputs ----- -% -% Mdev: Model deviations. (nState x nEns) -% -% Ydev: Ye deviations for a single observation. (1 x nEns) -% -% w: A covariance localization for a single observation. (nState x 1) -% -% R: Uncertainty for a single observation. (1 x 1) -% -% ----- Outputs ----- -% -% K: The kalman gain -% -% a: The alpha weight for the adjusted kalman gain for a serial update. - -% Get the coefficient for an unbiased estimator -nEns = size(Ydev,2); -unbias = 1 / (nEns-1); - -% Get the numerator, denominator (localized covariance of M with Ye) -Knum = unbias .* (Mdev * Ydev'); -Knum = Knum .* w; -Kdenom = unbias .* (Ydev * Ydev') + R; - -% Get the full Kalman gain and adjusted gain scaling factor -K = Knum / Kdenom; -a = 1 / ( 1 + sqrt(R/Kdenom) ); - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/settings.m b/4. DA Analyses/@kalmanFilter/settings.m deleted file mode 100644 index 786672bb..00000000 --- a/4. DA Analyses/@kalmanFilter/settings.m +++ /dev/null @@ -1,157 +0,0 @@ -function[] = settings( obj, varargin ) -% Specifies settings for an Ensemble Square Root Kalman Filter analysis. -% -% obj.ensrfSettings( ..., 'type', type ) -% Whether to process updates jointly or in serial. Default is jointly. -% -% obj.ensrfSettings( ..., 'type', 'serial', 'localize', w ) -% ensrfSettings( ..., 'type', 'joint', 'localize', {w, yloc} ) -% Applies localization weights. See dash.covLocalization for w and yloc. -% -% obj.ensrfSettings( ..., 'inflate', inflate ) -% Specify an inflation factor. Default is 1 (no inflation). Note that -% inflation is applied BEFORE generating Ye values. -% -% obj.ensrfSettings( ..., 'type', 'serial', 'append', append ) -% Indicate whether to pre-calculate Y estimates, append them to the state -% vector, and update via the Kalman Gain. Default is false. -% -% obj.ensrfSettings( ..., 'type', 'joint', 'meanOnly', meanOnly ) -% Specify whether to only update the ensemble mean. (This is typically much -% faster than calculating the ensemble mean and variance.) -% -% obj.settings( ..., 'returnDevs', fullDevs ) -% Specify whether to return full ensembles deviations, or just the -% variance. Default is just the variance. -% -% obj.settings( ..., 'percentiles', percentiles ) -% Specify which percentiles of the ensemble to return -% -% obj.settings( ..., 'reconstruct', reconstruct ) -% Specify which state vector elements to reconstruct. Not recommended. See -% "kalmanFilter.reconstructVars" instead. -% -% ---- Inputs ----- -% -% type: A string indicating the type of updating scheme to use. Default is -% 'serial' - Observations are processed in serial -% 'joint' (Default) - All observations are processed at once. -% -% w: Localization weights between each observation and state vector element. -% The first output of dash.covLocalization. -% -% yloc: Localization weights betwen observations. The second output of -% dash.covLocalization. -% -% inflate: An inflation factor. A positive scalar value. -% -% append: A scalar logical indicating whether to use the append method. 
-% -% meanOnly: A scalar logical indicating whether to only calculate the -% ensemble mean for joint updating schemes. -% -% fullDevs: A scalar logical indicating whether to return full ensemble -% deviations. Default is false. -% -% percentiles: A vector of values between 0 and 100, specifying which -% ensemble percentiles to return. -% -% reconstruct: A logical vector specifying which state vector elements to -% reconstruct. (nState x 1) - -% Parse inputs -[type, weights, inflate, append, meanOnly, fullDevs, percentiles, recon] = parseInputs( varargin, ... - {'type','localize','inflate','append','meanOnly','returnDevs','percentiles','reconstruct'}, ... - {obj.type, obj.localize, obj.inflate, obj.append, obj.meanOnly, obj.fullDevs, obj.percentiles, obj.reconstruct}, ... - {[],[],[],[],[],[],[],[]} ); - -% Error checking -if ~isstrflag(type) - error('type must be a string scalar or character row vector.'); -elseif ~strcmpi(type,'joint') && ~strcmpi(type,'serial') - error('Unrecognized type'); -elseif ~isnumeric(inflate) || ~isreal(inflate) || ~isscalar(inflate) || inflate<=0 - error('inflate must be a positive scalar value.'); -end - -if ~isscalar(fullDevs) || ~islogical(fullDevs) - error('fullDevs must be a scalar logical.'); -end - -if strcmpi(type,'joint') - append = false; -elseif ~isscalar(append) || ~islogical(append) - error('append must be a scalar logical.'); -end - -if strcmpi(type, 'serial') - meanOnly = false; -elseif ~isscalar(meanOnly) || ~islogical(meanOnly) - error('meanOnly must be a scalar logical.'); -end -if fullDevs && meanOnly - error('Cannot compute only the ensemble mean when returning full ensemble deviations.'); -end - -% Reconstruction indices -if isa(obj.M,'ensemble') - ensMeta = obj.M.loadMetadata; - nState = ensMeta.ensSize(1); -else - nState = size(obj.M,1); -end -reconH = []; -reconstruct = []; -if ~isempty(recon) - if ~isvector(recon) || ~islogical(recon) || length(recon)~=nState - error('reconstruct must be a logical vector with nState (%.f) indices.', nState); - end - reconH = dash.checkReconH( recon, obj.F ); - if ~reconH && strcmpi(type,'serial') && ~append - error('When using serial updates without appended Ye, you must reconstruct all state elements used to run the PSMs.'); - end - reconstruct = recon; -end - -% Localization Weights -nRecon = nState; -if ~isempty(obj.reconstruct) - nRecon = sum( obj.reconstruct ); -end -nObs = size(obj.D,1); -if ~isempty(weights) - if strcmpi(type,'joint') - if ( ~iscell(weights) || numel(weights)~=2 ) - error(['Localization weights for joint updates must be provided as the 2-element cell: {w, yloc}\n',... 
- 'Please see dash.localizationWeights for details.'] ); - elseif ~isnumeric(weights{2}) || ~isreal(weights{2}) || ~ismatrix(weights{2}) || ~isequal(size(weights{2}), [nObs, nObs]) - error('The second element of joint localization weights must be a %.f x %.f numeric matrix', nObs, nObs ); - elseif ~isnumeric(weights{1}) || ~isreal(weights{1}) || ~ismatrix(weights{1}) || ~isequal(size(weights{1}), [nRecon, nObs]) - error('The first element of joint localization weights must be a %.f x %.f numeric matrix.', nRecon, nObs ); - end - elseif strcmpi(type, 'serial') && ( ~isnumeric(weights) || ~isreal(weights) || ~ismatrix(weights) || ~isequal(size(weights), [nRecon, nObs]) ) - error('serial localization weights must be a %.f x %.f numeric matrix.', nRecon, nObs ); - end -end - -% Percentiles -if ~isempty( percentiles ) - if meanOnly - error('Cannot calculate ensemble percentiles when only updating the ensemble mean.'); - elseif ~isnumeric(percentiles) || ~isvector(percentiles) || any(percentiles<0) || any(percentiles>100) || any(isnan(percentiles)) - error('percentiles must be a vector of numeric values between 0 and 100 that do not contain NaN values.'); - end -end - -% Save values -obj.type = type; -obj.localize = weights; -obj.inflate = inflate; -obj.append = append; -obj.meanOnly = meanOnly; -obj.fullDevs = fullDevs; -obj.reconstruct = reconstruct; -obj.percentiles = percentiles; -obj.reconH = reconH; - -end \ No newline at end of file diff --git a/4. DA Analyses/@kalmanFilter/unappendYe.m b/4. DA Analyses/@kalmanFilter/unappendYe.m deleted file mode 100644 index 9a900144..00000000 --- a/4. DA Analyses/@kalmanFilter/unappendYe.m +++ /dev/null @@ -1,32 +0,0 @@ -function[Amean, Avar] = unappendYe( Amean, Avar, nObs ) -%% Unappends Ye values from a model posterior. -% -% [Amean, Avar] = dash.unappendYe( Amean, Avar, nObs ) -% -% ----- Inputs ----- -% -% Amean: The ensemble mean posterior from an appended assimilation. -% (nState + nObs x nTime) -% -% Avar: The ensemble variance posterior from an appended assimilation. -% (nState + nObs x nTime) -% -% nObs: The number of observations / proxies in an appended assimilation. -% (1 x 1). -% -% ----- Outputs ----- -% -% Amean: The ensemble mean posterior from an appended assimilation. -% (nState x nTime) -% -% Avar: The ensemble variance posterior from an appended assimilation. -% (nState x nTime) - -% Get the indices of the appended estimates -ye = size(Amean,1) - nObs + (1:nObs)'; - -% Remove the Ye from the end of the ensemble -Amean( ye, : ) = []; -Avar( ye, : ) = []; - -end \ No newline at end of file diff --git a/4. DA Analyses/@particleFilter/bigpf.m b/4. DA Analyses/@particleFilter/bigpf.m deleted file mode 100644 index 42770f0b..00000000 --- a/4. DA Analyses/@particleFilter/bigpf.m +++ /dev/null @@ -1,84 +0,0 @@ -function[output] = bigpf( ens, D, R, F, N, batchSize ) -%% Implements a particle filter. -% -% [output] = dash.pf( M, D, R, F, N ) -% -% ----- Inputs ----- -% -% M: An ensemble (nState x nEns) -% -% D: Observations (nObs x nTime) -% -% R: Observation Uncertainty (nObs x nTime) -% -% F: A cell vector of PSM objects (nObs x 1) -% -% N: The number of particles to use when calculating the ensemble mean. A -% positive scalar. Use NaN for probabilistic weights. -% -% batchSize: The number of ensemble members to load per step. 
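% For example, with an assumed ensemble of 1050 members and batchSize = 500,
% the batch loop below would load members 1:500, 501:1000, and 1001:1050 in turn:
nEns = 1050;  batchSize = 500;                   % assumed toy sizes
nBatch = ceil( nEns / batchSize );
for b = 1:nBatch
    batchIndices = (b-1)*batchSize+1 : min( b*batchSize, nEns );
    % ... call ens.load( batchIndices ) and process this batch here
end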
-% -% ----- Outputs ----- -% -% output: A structure with the following fields -% -% settings - Settings used to run the analysis -% -% A - The updated ensemble mean (nState x nTime) -% -% sse - The sum of squared errors for each particle (nEns x nTime) -% -% weights - The weights used to compute the updated ensemble mean - -% Preallocate -[nObs, nTime] = size(D); -nEns = ens.ensSize(2); -nState = ens.ensSize(1); - -Ye = NaN( nObs, nEns ); -A = zeros( nState, nTime ); -sse = NaN( nEns, nTime ); - -% Get the number of batches -nBatch = floor( ens.ensSize(2) / batchSize ); -if mod( ens.ensSize(2), batchSize ) ~= 0 - nBatch = nBatch + 1; -end - -% Permute for singleton expansion -D = permute(D, [1 3 2]); -R = permute(R, [1 3 2]); - -% Load the portion of the ensemble for each batch -for b = 1:nBatch - batchIndices = (b-1)*batchSize+1 : min( b*batchSize, ens.ensSize(2) ); - M = ens.load( batchIndices ); - - % Generate Ye and compute sse - for d = 1:nObs - Ye(d, batchIndices) = dash.processYeR( F{d}, M(F{d}.H, :), R(d,:), NaN, d ); - end - sse(batchIndices,:) = squeeze( sum( (1./R) .* (D - Ye(:,batchIndices)).^2, 1 ) ); -end - -% Get the weights -weights = particleFilter.pfWeights( sse, N ); - -% Use the weights to combine each batch -for b = 1:nBatch - batchIndices = (b-1)*batchSize+1 : min( b*batchSize, ens.ensSize(2) ); - M = ens.load( batchIndices ); - A = A + M * weights( batchIndices, : ); -end - -% Build the output structure -output.settings = struct('Analysis','Particle Filter','Weights', 'Probabilistic'); -if ~isnan(N) - output.settings.Weights = 'Best N'; - output.settings.N = N; -end -output.A = A; -output.sse = sse; -output.weights = weights; - -end \ No newline at end of file diff --git a/4. DA Analyses/@particleFilter/normexp.m b/4. DA Analyses/@particleFilter/normexp.m deleted file mode 100644 index 6c201945..00000000 --- a/4. DA Analyses/@particleFilter/normexp.m +++ /dev/null @@ -1,68 +0,0 @@ -function[Y] = normexp( X, dim, nanflag ) -%% Normalizes a set of exponentials by the sum of all exponentials. -% Uses an alternative formulation to avoid directly computing exponentials -% of large magnitude numbers, thereby increasing numerical stability. -% -% Y = normexp( X ) -% Given the set of exponents, returns the normalized values for each e^Xi. -% Uses the formulation: exp( Xi - log(sum(exp(Xi))) ) -% -% Y = normexp( X, dim ) -% Specifies a dimension over which to normalize. Default is dimension 1. -% -% Y = normexp( X, 'all' ) -% Normalizes over the entire array. -% -% Y = normexp( X, dim, nanflag ) -% Specifies how to treat NaN values. Default is to include NaN in -% normalizations. -% -% ----- Inputs ----- -% -% X: A vector or array with the exponents of e^X1, e^X2, ... e^Xn -% -% dim: A scalar integer indicating the dimension along which to sum. -% Default is the first dimension. -% -% nanflag: A string indicating how to treat NaN values in sums -% "includenan": (default) Sums containing NaN will evaluate to NaN. -% "omitnan": Remove all terms with NaN exponents from each sum. -% -% ----- Outputs ----- -% -% Y: The normalized exponentials. - -% ----- Written By ----- -% Jonathan King - -% Set defaults. Error check inputs.
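% A small numeric illustration of the stabilized normalization computed below.
% The exponents are assumed toy values; a naive exp() would overflow for them.
Xtoy = [1000; 1001; 1002];
mtoy = max(Xtoy);
lse  = mtoy + log( sum( exp(Xtoy - mtoy) ) );    % log of the sum of exponentials
Ytoy = exp( Xtoy - lse );                        % normalized weights that sum to 1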
-if ~exist('dim','var') || isempty(dim) - dim = 1; -end -if ~exist('nanflag','var') || isempty(nanflag) - nanflag = "includenan"; -end -errorCheck( X, dim, nanflag ); - -% Compute the normalized values using the log sum of exponentials -m = max(X, [], dim); -lse = m + log( sum( exp(X - m), dim, nanflag ) ); -Y = exp( X - lse ); - -end - -% Error checker -function[] = errorCheck( X, dim, nanflag ) - -if ~isnumeric(X) || ~isreal(X) - error('X must be a real, numeric array.'); -elseif ((ischar(dim) && isrow(dim)) || (isstring(dim) && isscalar(dim))) && ~strcmp(dim,'all') - error('Unrecognized string in second input. Perhaps you misspelled "all"?'); -elseif (~isnumeric(dim) || ~isreal(dim) || ~isscalar(dim) || dim<1 || mod(dim,1)~=0) && ~( (ischar(dim) && isrow(dim)) || (isstring(dim) && isscalar(dim)) ) - error('dim must be a positive, scalar integer.'); -elseif ~( (ischar(nanflag) && isrow(nanflag)) || (isstring(nanflag) && isscalar(nanflag)) ) - error('nanflag must be a string scalar or character row vector.'); -elseif ~strcmp(nanflag, "includenan") && ~strcmp(nanflag, "omitnan") - error('Unrecognized nanflag. It may be misspelled. Valid options are "omitnan" and "includenan".'); -end -end diff --git a/4. DA Analyses/@particleFilter/particleFilter.m b/4. DA Analyses/@particleFilter/particleFilter.m deleted file mode 100644 index 9253e792..00000000 --- a/4. DA Analyses/@particleFilter/particleFilter.m +++ /dev/null @@ -1,87 +0,0 @@ -classdef particleFilter < dashFilter - % Implements a particle filter. - % - % particleFilter Methods: - % particleFilter - Creates a new particleFilter - % settings - Changes the settings for a particle filter - % run - Runs the particle filter - % setValues - Change the data used in an existing particle filter - - properties - % Settings - type; % Weighting scheme, N best or probabilistic - N; % Number of best particles for N best weights - big; % Whether the ensemble is too large to fit into memory - nEns; % How many ensemble members to process per batch for large ensembles. - end - - % Constructor - methods - function obj = particleFilter( M, D, R, F ) - % Creates a new particleFilter object - % - % obj = particleFilter( M, D, R, F ) - % - % ----- Inputs ----- - % - % M: A model prior. Either an ensemble object or a matrix (nState x nEns) - % - % D: A matrix of observations (nObs x nTime) - % - % R: Observation uncertainty. NaN entries in time steps with observations - % will be calculated dynamically via the PSMs. - % - % scalar: (1 x 1) The same value will be used for all proxies in all time steps - % row vector: (1 x nTime) The same value will be used for all proxies in each time step - % column vector: (nObs x 1) The same value will be used for each proxy in all time steps. - % matrix: (nObs x nTime) Each value will be used for one proxy in one time step. - % - % F: A cell vector of PSM objects. {nObs x 1} - % - % ----- Outputs ----- - % - % obj: A new particleFilter object. 
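% An illustrative comparison of the two weighting schemes used by this class,
% applied to an assumed toy matrix of sum-of-squared errors:
sse = [2 10; 5 1; 9 4];                          % (nEns x nTime) toy values
wProb  = particleFilter.pfWeights( sse, NaN );   % probabilistic weighting of all particles
wBest2 = particleFilter.pfWeights( sse, 2 );     % equal weights on the 2 best particles per time step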
- - % Default settings - obj.type = 'weight'; - obj.N = NaN; - obj.big = false; - obj.nEns = NaN; - - % Block empty constructor, set values - if isempty(M) || isempty(D) || isempty(R) || isempty(F) - error('M, D, R, and F cannot be empty.'); - end - obj.setValues( M, D, R, F ); - end - end - - % User methods - methods - - % Run the filter - output = run( obj ); - - % Change the settings - settings( obj, varargin ); - - end - - % Static analysis utilities - methods (Static) - - % Particle filter - output = pf( M, D, R, F, N ); - - % Particle filter for big ensemble - output = bigpf( ens, D, R, F, N, batchSize ); - - % Compute particle weights - weights = pfWeights( sse, N ); - - % Normalize exponentials by their combined sum (efficiently) - [Y] = normexp( X, dim, nanflag ) - - end - -end \ No newline at end of file diff --git a/4. DA Analyses/@particleFilter/pf.m b/4. DA Analyses/@particleFilter/pf.m deleted file mode 100644 index 61af539e..00000000 --- a/4. DA Analyses/@particleFilter/pf.m +++ /dev/null @@ -1,61 +0,0 @@ -function[output] = pf( M, D, R, F, N ) -%% Implements a particle filter. -% -% [output] = dash.pf( M, D, R, F, N ) -% -% ----- Inputs ----- -% -% M: An ensemble (nState x nEns) -% -% D: Observations (nObs x nTime) -% -% R: Observation Uncertainty (nObs x nTime) -% -% F: A cell vector of PSM objects (nObs x 1) -% -% N: The number of particles to use when calculating the ensemble mean. A -% positive scalar. Use NaN for probabilistic weights. -% -% ----- Outputs ----- -% -% output: A structure with the following fields -% -% settings - Settings used to run the analysis -% -% A - The updated ensemble mean (nState x nTime) -% -% sse - The sum of squared errors for each particle (nEns x nTime) -% -% weights - The weights used to compute the updated ensemble mean - -% Preallocate -[nObs, nTime] = size(D); -nEns = size(M,2); -Ye = NaN( nObs, nEns ); - -% Run the forward models. Get Ye and R -for d = 1:nObs - hasObs = ~isnan( D(d,:) ); - [Ye(d,:), R(d,hasObs)] = dash.processYeR( F{d}, M(F{d}.H,:), R(d,hasObs), NaN, d ); -end - -% Permute for singleton expansions -D = permute(D, [1 3 2]); -R = permute(R, [1 3 2]); - -% Sum of uncertainty weighted squared errors. Use to compute weights and update. -sse = squeeze( nansum( (1./R) .* (D - Ye).^2, 1 ) ); -weights = particleFilter.pfWeights( sse, N ); -A = M * weights; - -% Build the output structure -output.settings = struct('Analysis','Particle Filter','Weights', 'Probabilistic'); -if ~isnan(N) - output.settings.Weights = 'Best N'; - output.settings.N = N; -end -output.A = A; -output.sse = sse; -output.weights = weights; - -end \ No newline at end of file diff --git a/4. DA Analyses/@particleFilter/pfWeights.m b/4. DA Analyses/@particleFilter/pfWeights.m deleted file mode 100644 index 9da59978..00000000 --- a/4. DA Analyses/@particleFilter/pfWeights.m +++ /dev/null @@ -1,35 +0,0 @@ -function[weights] = pfWeights( sse, N ) -% Compute particle filter weights -% -% weights = dash.pfWeights( sse, NaN ) -% Uses a probabilistic weighting -% -% weights = dash.pfWeights( sse, N ) -% Does equal weighting of the best N particles -% -% ----- Inputs ----- -% -% sse: A matrix of particle sum of squared errors (nEns x nTime) -% -% N: A scalar integer. 
-% -% ----- Outputs ----- -% -% weights: The weights for each particle (nEns x nTime) - -% Probabilistic weights -if isnan(N) - weights = normexp( (-1/2) * sse ); - -% Or best N particles -else - [nEns, nTime] = size(sse); - weights = zeros( nEns, nTime ); - for t = 1:nTime - [~,rank] = sort( sse(:,t) ); - best = rank( 1:N ); - weights( best, t ) = 1/N; - end -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@particleFilter/run.m b/4. DA Analyses/@particleFilter/run.m deleted file mode 100644 index dc91067d..00000000 --- a/4. DA Analyses/@particleFilter/run.m +++ /dev/null @@ -1,31 +0,0 @@ -function[output] = run( obj ) -% Runs a particle filter for a specific particleFilter object. -% -% output = obj.run -% -% ----- Outputs ----- -% -% output: A structure with the following fields -% -% settings - Settings used to run the analysis -% -% A - The updated ensemble mean (nState x nTime) -% -% sse - The sum of squared errors for each particle (nEns x nTime) -% -% weights - The weights used to compute the updated ensemble mean - -% Switch to the alternative algorithm if using a large ensemble -if obj.big - output = obj.bigpf( obj.M, obj.D, obj.R, obj.F, obj.N, obj.nEns ); - -% Otherwise, load the ensemble if necessary, then run the normal algorithm -else - M = obj.M; - if isa( obj.M, 'ensemble') - M = obj.M.load; - end - output = obj.pf( M, obj.D, obj.R, obj.F, obj.N ); -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@particleFilter/settings.m b/4. DA Analyses/@particleFilter/settings.m deleted file mode 100644 index 37ac8f05..00000000 --- a/4. DA Analyses/@particleFilter/settings.m +++ /dev/null @@ -1,76 +0,0 @@ -function[] = settings( obj, varargin ) -% Specifies settings for a particle filter analysis -% -% obj.settings( ..., 'type', type ) -% Specify whether to use a probability weighting or N best particles to -% create the analysis. Default is probability weighting. -% -% obj.settings( ..., 'type', 'bestN', 'N', N ) -% Set the number of best particles to use to create the analysis. Default -% of N = 1. -% -% obj.settings( ..., 'big', big ) -% Indicate whether to use an alternative algorithm to process ensemble -% too large to fit into active memory by processing particles in batches. -% -% obj.settings( ..., 'big', true, 'batchSize', nEns ) -% Specify the number of particles to use per batch. Must be specified by -% the user when using the alternative algorithm for large ensembles. -% -% ----- Inputs ----- -% -% type: The method used to create the analysis. A string. -% 'weight' - Uses a probability weighted mean of the particles -% 'bestN' - Take the mean of the N best particles -% -% N: The number of particles to use in the "bestN" method. A scalar, -% positive integer. -% -% big: A scalar logical indicating whether the ensemble is too large to fit -% into active memory. -% -% nEns: The number of ensemble members to use for batch processing. A -% scalar positive integer indicating a number of ensemble members that -% can fit in active memory simultaneously. Large numbers are typicaly -% faster. - -% Parse inputs -[type, N, big, nEns] = parseInputs( varargin, {'type','N','big','nEns'}, ... 
- {obj.type, obj.N, obj.big, obj.nEns}, {[],[],[],[]} ); - -% Error check -if ~isstrflag(type) - error('type must be a string scalar or character row vector.'); -elseif ~strcmpi(type,'weight') && ~strcmpi(type,'bestN') - error('Unrecognized type'); -elseif ~isscalar(big) || ~islogical(big) - error('big must be a scalar logical'); -end - -if strcmpi(type, 'bestN') - if isnan(N) || isempty(N) - N = 1; - elseif ~isnumeric(N) || ~isscalar(N) || ~isreal(N) || N<=0 || mod(N,1)~=0 || N>obj.nEns - error('N must be a positive scalar integer, and cannot exceed the size of the ensemble (%.f)', obj.nEns ); - end -else - N = NaN; -end - -if big - if isnan(nEns) || isempty(nEns) - error('You must specify the number of ensemble members to process per batch.'); - elseif ~isnumeric(nEns) || ~isscalar(nEns) || ~isreal(nEns) || nEns<=0 || mod(nEns,1)~=0 || nEns>obj.nEns - error('nEns must be a positive scalar integer, and cannot exceed the size of the ensemble (%.f)', obj.nEns ); - end -else - nEns = NaN; -end - -% Save the settings -obj.type = type; -obj.N = N; -obj.big = big; -obj.nEns = nEns; - -end \ No newline at end of file diff --git a/4. DA Analyses/@sensorSites/sensorSites.m b/4. DA Analyses/@sensorSites/sensorSites.m deleted file mode 100644 index 1f8acbeb..00000000 --- a/4. DA Analyses/@sensorSites/sensorSites.m +++ /dev/null @@ -1,73 +0,0 @@ -classdef sensorSites - % sensorSites - % Holds information on possible sensor placements for optimal sensor - % tests. - % - % sensorSites Methods: - % sensorSites - Creates a new sensorSites object. - - properties (SetAccess = private) - H; % State vector indices - R; % Site observation uncertainty - coordinates; % lat-lon coordinates of each site - useSite; % Logical indicating whether a site is still available for consideration - end - - % Constructor - methods - function obj = sensorSites( H, R, coordinates ) - % Creates a new sensorSites object - % - % obj = sensorSites( H, R, coordinates ) - % - % ----- Inputs ----- - % - % H: State vector indices for each site (nSite x 1) - % - % R: Observation uncertainty for each site (nSite x 1) - % - % coordinates: Lat-lon coordinates for each site. A two column - % matrix. First column is lat, second is lon. (nSite x 2) - % - % ----- Outputs ----- - % - % obj: The new sensorSites object - - % Error check - if ~isvector(H) || ~isnumeric(H) || ~isreal(H) || any(mod(H,1)~=0) || any(H<1) - error('H must be a vector of positive integers.'); - elseif ~isvector(R) || ~isnumeric(R) || ~isreal(R) || any(R<0) || length(R)~=length(H) - error('R must be a vector of positive values and must have as many elements as H.'); - elseif ~ismatrix(coordinates) || ~isnumeric(coordinates) || ~isreal(coordinates) || size(coordinates,1)~=length(H) || size(coordinates,2)~=2 - error('coordinates must be a 2 column matrix of real values with a row for each element in H (%.f).', length(H) ); - end - - % Record values - obj.H = H(:); - obj.R = R(:); - obj.coordinates = coordinates; - obj.useSite = true( length(obj.H), 1 ); - end - end - - % Remove sites - methods - - % Remove single site - function[obj] = removeSite( obj, remove ) - % Removes site from a sensor array - obj.useSite(remove) = false; - end - - % Remove sites in radius - function[obj] = removeRadius( obj, best, radius ) - if ~isnan(radius) - dist = haversine( obj.coordinates(best,:), obj.coordinates ); - remove = ( dist <= radius ); - obj.useSite( remove ) = false; - end - end - - end - -end \ No newline at end of file diff --git a/4.
DA Analyses/@sensorTest/assessPlacement.m b/4. DA Analyses/@sensorTest/assessPlacement.m deleted file mode 100644 index a66fc9ed..00000000 --- a/4. DA Analyses/@sensorTest/assessPlacement.m +++ /dev/null @@ -1,33 +0,0 @@ -function[skill] = assessPlacement( Jdev, HMdev, R ) -% Assesses the utility of sensor placements relative to a metric. -% -% skill = dash.assessPlacement( Jdev, Mdev, R ) -% -% ----- Inputs ----- -% -% Jdev: The ensemble deviations of the metric -% -% Mdev: The ensemble deviations of the prior at the sensor sites -% -% R: The uncertainty associated with a measurement at each site -% -% ----- Outputs ----- -% -% skill: The relative change in variance in J explained by the sensor - -% Preallocate -nSite = size(HMdev,1); -skill = NaN( nSite, 1 ); - -% Variance of the metric -Jvar = var(Jdev,[],2); - -% Get the model deviations for each site. Use to compute the change in the -% variance of J -for s = 1:nSite - covJH = cov( Jdev', HMdev(s,:)' ); - dStdJ = covJH(2).^2 / ( var(HMdev(s,:),[],2) + R(s) ); - skill(s) = dStdJ ./ Jvar; -end - -end \ No newline at end of file diff --git a/4. DA Analyses/@sensorTest/optimalSensor.m b/4. DA Analyses/@sensorTest/optimalSensor.m deleted file mode 100644 index e094056f..00000000 --- a/4. DA Analyses/@sensorTest/optimalSensor.m +++ /dev/null @@ -1,102 +0,0 @@ -function[output] = optimalSensor( M, Fj, sites, N, replace, radius ) -% Conducts an optimal sensor test -% -% output = dash.sensorTest( J, M, sites, N ) -% Finds the N best sensors. -% -% output = dash.sensorTest( J, M, sites, N, replace ) -% Specify whether to replace or remove sites from consideration after being -% selected. Default is with replacement -% -% output = dash.sensorTest( J, M, sites, N, replace, radius ) -% Specify to remove sites within a given radius after a nearby site is -% selected. Default is no radius. -% -% ----- Inputs ----- -% -% J: A metric vector. (1 x nEns) -% -% M: A model ensemble (nState x nEns) -% -% sites: A sensorSites object -% -% N: The number of sensors to test. A positive integer. -% -% replace: Whether to replace sensors after selection. A scalar logical. -% Default is true. -% -% radius: The radius in which to remove sites after a nearby site is -% selected. Default is no radius. -% -% ----- Outputs ----- -% -% output: A structure with the following fields -% -% settings - Settings used to run the analysis -% -% bestH - The state vector indices of the best sites -% -% bestSite - The index of the best site in the sensorSites object. -% -% skill - The relative reduction in J variance of each placement. - -% Preallocate -bestH = NaN( N, 1 ); -bestSite = NaN( N, 1 ); -skill = NaN( N, 1 ); - -% Decompose -[Mmean, Mdev] = dash.decompose(M); -clearvars M; - -% For each new sensor, get the metric J -progressbar(0) -for s = 1:N - Mj = Mmean(Fj.H) + Mdev(Fj.H,:); - J = dash.processYeR( Fj, Mj, 1, NaN, NaN ); - [~, Jdev] = dash.decompose(J); - - % Test the skill of each site remaining for consideration - checkSites = find( sites.useSite ); - expVar = sensorTest.assessPlacement( Jdev, Mdev(sites.H(checkSites), :), sites.R(checkSites) ); - - % Find the best site. - currBest = find( expVar == max(expVar), 1 ); - bestSite(s) = checkSites( currBest ); - bestH(s) = sites.H( bestSite(s) ); - skill(s) = expVar( currBest ); - - % Optionally remove the site and sites within the radius. 
Stop the loop - % if no sites are left - sites = sites.removeRadius( bestSite(s), radius ); - if ~replace - sites = sites.removeSite( bestSite(s) ); - end - if isempty(sites.H) - progressbar(1); - break; - end - - % Update the ensemble - H = sites.H( bestSite(s) ); - [K, a] = kalmanFilter.serialKalman( Mdev, Mdev(H,:), ones(size(Mmean)), sites.R( bestSite(s) ) ); - Mdev = Mdev - a * K * Mdev(H,:); - - progressbar(s/N); -end - -% Remove any extra entries if quitting early -bestSite(s+1:N) = []; -bestH(s+1:N) = []; -skill(s+1:N) = []; - -% Get the output structure -output.settings = struct('Analysis', 'Optimal Sensor', 'N', N, 'Replacement', replace ); -if ~isnan(radius) - output.settings.Radius = radius; -end -output.bestH = bestH; -output.bestSite = bestSite; -output.skill = skill; - -end \ No newline at end of file diff --git a/4. DA Analyses/@sensorTest/run.m b/4. DA Analyses/@sensorTest/run.m deleted file mode 100644 index 69835185..00000000 --- a/4. DA Analyses/@sensorTest/run.m +++ /dev/null @@ -1,21 +0,0 @@ -function[output] = run( obj ) -% Does an optimal sensor test for a specific sensorTest object. -% -% output = obj.run -% -% ----- Outputs ----- -% -% output: A structure with the following fields -% -% settings - Settings used to run the analysis -% -% bestH - The state vector indices of the best sites -% -% bestSites - The index of the best sites in the sensorSites object -% -% skill - The relative reduction in J variance of each placement. - -% Run -output = obj.optimalSensor( obj.M, obj.Fj, obj.S, obj.nSensor, obj.replace, obj.radius ); - -end \ No newline at end of file diff --git a/4. DA Analyses/@sensorTest/sensorTest.m b/4. DA Analyses/@sensorTest/sensorTest.m deleted file mode 100644 index 2b5a84df..00000000 --- a/4. DA Analyses/@sensorTest/sensorTest.m +++ /dev/null @@ -1,81 +0,0 @@ -classdef sensorTest < dash - % sensorTest - % Implements tests for optimal sensor placement. - % - % sensorTest Methods: - % sensorTest - Creates a new sensor test object - % settings - Changes the settings for the sensor test - % run - Runs the test - % setValues - Changes the data used in an existing sensor test object - - properties (SetAccess = private) - % Settings - nSensor; % The number of sensors to select - replace; % Whether to select sensors with or without replacement - radius; % Limits selection of new sensors outside of a distance radius - - % Analysis values - M; % A model prior - Fj; % A forward model used to estimate J from M - S; % A sensorSites object. - end - - % Constructor - methods - function obj = sensorTest( M, Fj, S ) - % Creates a new optimal sensor test. - % - % obj = sensorTest( M, Fj, S ) - % - % ----- Inputs ----- - % - % M: A model prior. A matrix (nState x nEns) - % - % Fj: A forward model used to estimate J, the sensor metric. - % - % S: A sensor sites object. - - % Use the default settings - obj.replace = true; - obj.nSensor = 1; - obj.radius = NaN; - - % Set the values. Don't allow empty values for initial - % constructor - if nargin < 3 - error('Insufficient inputs.'); - end - obj.setValues( M, Fj, S ); - end - end - - % User methods - methods - - % Run the sensor test - output = run( obj ); - - % Change the settings for the sensor test - settings( obj, varargin ); - - % Change the values in the sensor test - setValues( obj, M, Fj, S ); - - end - - % Static analysis utilities - methods (Static) - - % Static call for optimal sensor test - output = optimalSensor( M, Fj, sites, N, replace, radius ); - - % Test how J variance is reduced for a sensor. 
- skill = assessPlacement( Jdev, HMdev, R ); - - end - - -end - - \ No newline at end of file diff --git a/4. DA Analyses/@sensorTest/setValues.m b/4. DA Analyses/@sensorTest/setValues.m deleted file mode 100644 index 0d7ac683..00000000 --- a/4. DA Analyses/@sensorTest/setValues.m +++ /dev/null @@ -1,52 +0,0 @@ -function[] = setValues( obj, M, Fj, S ) -% Specify a model prior, forward model for J metric, and sensor sites to -% use in an optimal sensor test. Error checks everything. -% -% obj.setValues( M, Fj, S ) -% -% **Note: Use an empty array to keep the current value of a variable in the -% sensorTest object. -% -% ----- Inputs ----- -% -% M: A model prior. Either an ensemble object or a matrix (nState x nEns) -% -% Fj: A scalar PSM used to generate the J metric. -% -% S: A sensorSites object. - -% Get any saved variables -if ~exist('M','var') || isempty(M) - M = obj.M; -end -if ~exist('Fj','var') || isempty(Fj) - Fj = obj.Fj; -end -if ~exist('S','var') || isempty(S) - S = obj.S; -end - -% Check M -if ~ismatrix(M) || ~isreal(M) || ~isnumeric(M) || any(isinf(M(:))) || any(isnan(M(:))) - error('M must be a matrix of real, numeric, finite values and may not contain NaN.'); -end -[nState] = size(M,1); - -% Check Fj -if ~isa(Fj, 'PSM') || ~isscalar( Fj ) - error('Fj must be a scalar PSM object.'); -end -Fj.review( nState ); - -% Check the H values in the sites -if any(S.H > nState) - badH = find( S.H > nState, 1 ); - error('The state vector index of site %.f (%.f) exceeds the number of state vector elements (%.f)', badH, S.H(badH), nState ); -end - -% Save the values -obj.M = M; -obj.Fj = Fj; -obj.S = S; - -end \ No newline at end of file diff --git a/4. DA Analyses/@sensorTest/settings.m b/4. DA Analyses/@sensorTest/settings.m deleted file mode 100644 index c561eca7..00000000 --- a/4. DA Analyses/@sensorTest/settings.m +++ /dev/null @@ -1,41 +0,0 @@ -function[] = settings( obj, varargin ) -% Specifies settings for an optimal sensor analysis -% -% obj.settings( ..., 'replace', replace ) -% Specifies whether to select sensors with or without replacement. Default -% is with replacement. -% -% obj.settings( ..., 'nSensor', N ) -% Set the number of sensors to locate. Default is 1. -% -% obj.settings( ..., 'radius', R ) -% Limits the selection of new sensors outside of a distance radius of -% selected sensors. -% -% ----- Inputs ----- -% -% replace: Scalar logical. True: select sensors with replacement. -% -% N: The number of sensors. A scalar, positive integer. -% -% R: The radius used to limit sensor placement. Units are km. - -% Parse the inputs -[replace, N, radius] = parseInputs( varargin, {'replace', 'nSensor', 'radius'},... - {obj.replace, obj.nSensor, obj.radius}, {[],[],[]} ); - -% Error check -if ~isscalar(replace) || ~islogical(replace) - error('replace must be a scalar logical.'); -elseif ~isnumeric(N) || ~isscalar(N) || N<=0 || mod(N,1)~=0 - error('N must be a positive scalar integer.'); -elseif ~isnumeric(radius) || ~isscalar(radius) || radius<0 - error('radius must be a scalar, non-negative number.'); -end - -% Save the settings -obj.replace = replace; -obj.nSensor = N; -obj.radius = radius; - -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/blendCovariance.m b/4. DA Analyses/jointOfflineKalmanFilter/blendCovariance.m deleted file mode 100644 index fefca24c..00000000 --- a/4.
DA Analyses/jointOfflineKalmanFilter/blendCovariance.m +++ /dev/null @@ -1,4 +0,0 @@ -function[Knum, Ycov] = blendCovariance( Knum, Ycov, Knum_clim, Ycov_clim, b ) -Knum = b(1).*Knum + b(2).*Knum_clim; -Ycov = b(1).*Ycov + b(2).*Ycov_clim; -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/calibrationRatio.m b/4. DA Analyses/jointOfflineKalmanFilter/calibrationRatio.m deleted file mode 100644 index 112f9edf..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/calibrationRatio.m +++ /dev/null @@ -1,3 +0,0 @@ -function[calibRatio] = calibrationRatio( D, Ymean, Kdenom ) -calibRatio = (D - Ymean).^2 ./ diag(Kdenom); -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/ensembleCovariances.m b/4. DA Analyses/jointOfflineKalmanFilter/ensembleCovariances.m deleted file mode 100644 index e81d63ba..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/ensembleCovariances.m +++ /dev/null @@ -1,8 +0,0 @@ -function[Knum, Ycov] = ensembleCovariances( Mdev, Ydev ) -% Mdev: Prior -% -% Ydev: -unbias = 1 / (size(Mdev,2)-1); -Knum = unbias .* (Mdev * Ydev'); -Ycov = unbias .* (Ydev * Ydev'); -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/ensrf.m b/4. DA Analyses/jointOfflineKalmanFilter/ensrf.m deleted file mode 100644 index 83919563..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/ensrf.m +++ /dev/null @@ -1,78 +0,0 @@ -function[output] = ensrf( M, D, R, Y, ... % Basic inputs - inflate, w, yloc, Knum_clim, Ycov_clim, b,... % Covariance calculations - Q, percentiles, ... % Posterior calculations - returnMean, returnVar, returnDevs, ... % Returned output - showProgress ) % Batch options -%% Runs an offline ensemble square root Kalman filter that processes all -% observations jointly. -% -% output = ensrf( M, D, R, Y, inflate, w, yloc, Knum_clim, Ycov_clim, Q, -% percentiles, returnMean, returnVar, returnDevs, showProgress ) -% -% ----- Inputs ----- -% -% M: Prior model ensemble (nState x nEns) -% -% D: Observations (nObs x nTime) -% -% R: Observation uncertainty (nObs x nTime) -% -% Y: Observation estimates (nObs x nEns) -% -% inflate: An inflation factor. A positive scalar. Use [] for no inflation. -% -% w: Observation-ensemble covariance localization weights (nState x nObs). -% Use [] for no localization. -% -% yloc: Observation estimate covariance localization weights (nObs x nObs). -% Use [] for no localization. -% -% Knum_clim: Climatological model-observation estimate covariance used for -% blending covariance. (nState x nObs) Use [] for no blending. -% -% Ycov_clim: Climatological estimate-estimate covariance. (nObs x nObs) -% Use [] for no blending. -% -% Q: Calculators for the posterior ensemble. (nCalcs x 1) -% Use [] for no calculations. -% -% percentiles: The ensemble percentiles to return. A vector of values -% between 0 and 100. (nPercs x 1) -% -% returnMean: Whether to return the updated ensemble mean as output. Scalar -% logical. -% -% returnVar: Whether to return the variance of the updated ensemble as -% output. A scalar logical. -% -% returnDevs: Whether to return the updated ensemble deviations as output. -% A scalar logical. -% -% showProgress: Whether to display a progress bar. Scalar logical. 
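% A hypothetical call with assumed toy inputs, disabling the optional
% covariance adjustments by passing empty arrays as described above. It
% assumes the helper functions in this folder are on the path.
M = randn(10, 50);   Y = randn(3, 50);           % toy prior and estimates
D = randn(3, 4);     R = 0.1 * ones(3, 4);       % toy observations and uncertainty
output = ensrf( M, D, R, Y, [], [], [], [], [], [], ...   % no inflation, localization, or blending
    [], [], true, true, false, false );                   % no Q or percentiles; return mean and variance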
-% -% ----- Output ----- -% -% output: A structure that may contain the following fields -% -% calibRatio: Calibration ratios for the observations (nObs x nTime) -% -% Amean: The updated ensemble mean (nState x nTime) -% -% Avar: The variance of the updated ensemble (nState x nTime) -% -% Aperc: The percentiles of the updated ensemble (nState x nPercentile x nTime) -% -% Adev: The updated ensemble deviations (nState x nEns x nTime) - -% Decompose the ensembles -[Mmean, Mdev] = dash.decompose(M); -[Ymean, Ydev] = dash.decompose(Y); - -% Get covariance matrices for the Kalman Gain -[Knum, Ycov] = ensrfCovariance( Mdev, Ydev, inflate, w, yloc, Knum_clim, Ycov_clim, b ); - -% Do the updates -output = ensrfUpdates( Mmean, Mdev, D, R, Ymean, Ydev, Knum, Ycov, Q,... - percentiles, returnMean, returnVar, returnDevs, showProgress ); - -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/ensrfCovariance.m b/4. DA Analyses/jointOfflineKalmanFilter/ensrfCovariance.m deleted file mode 100644 index 41695c32..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/ensrfCovariance.m +++ /dev/null @@ -1,21 +0,0 @@ -function[Knum, Ycov] = ensrfCovariance( Mdev, Ydev, inflate, w, yloc, Knum_clim, Ycov_clim, b ) - -% Inflate -if ~isempty(inflate) - Mdev = inflateCovariance( Mdev, inflate ); -end - -% Determine covariances -[Knum, Ycov] = ensembleCovariances( Mdev, Ydev ); - -% Localize -if ~isempty(w) - [Knum, Ycov] = localizeCovariance( Knum, Ycov, w, yloc ); -end - -% Blend -if ~isempty( Knum_clim ) - [Knum, Ycov] = blendCovariance( Knum, Ycov, Knum_clim, Ycov_clim, b ); -end - -end diff --git a/4. DA Analyses/jointOfflineKalmanFilter/ensrfUpdates.m b/4. DA Analyses/jointOfflineKalmanFilter/ensrfUpdates.m deleted file mode 100644 index 8991a227..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/ensrfUpdates.m +++ /dev/null @@ -1,86 +0,0 @@ -function[output] = ensrfUpdates( Mmean, Mdev, D, R, Ymean, Ydev, ... - Knum, Ycov, ... - Q, percentiles, ... - returnMean, returnVar, returnDevs, ... - showProgress ) - -% Preallocate output. Determine whether to calculate mean and deviations. -[nObs, nTime] = size(D); -[nState, nEns] = size(Mdev); -nPercs = numel(percentiles); -nCalcs = numel(Q); -[output, calculateMean, calculateDevs] = preallocateENSRF( nObs, nTime, ... 
- nState, nEns, nPercs, nCalcs, returnMean, returnVar, returnDevs ); - -% Get the unique sets of obs + R for the various time steps (each set has -% a unique Kalman Gain) -hasobs = ~isnan(D); -obsR = [hasobs;R]'; -[~, iA, iC] = unique( obsR, 'rows' ); -nSet = numel(iA); - -% Progress bar -if showProgress - progressbar(0); -end - -% Get the time steps, obs, and R for each set -for k = 1:nSet - t = find(iC==k); - nt = numel(t); % Numer of time steps in the set - obs = hasobs(:, t(1)); - Rset = R(obs, t(1)); - - % Get the Kalman Gain - Kdenom = kalmanDenominator( Ycov(obs,obs), Rset ); - K = kalmanGain( Knum(:,obs), Kdenom ); - - % Update the mean - if calculateMean - Amean = updateMean( Mmean, K, D(obs,t), Ymean(obs) ); - end - - % Calibration ratio - output.calibRatio(obs,t) = calibrationRatio( D(obs,t), Ymean(obs), Kdenom ); - - % Update the deviations - if calculateDevs - Ka = kalmanAdjusted( Knum(:,obs), Kdenom, Rset ); - Adev = updateDeviations( Mdev, Ka, Ydev(obs,:) ); - end - - % Save mean - if returnMean - output.Amean(:,t) = Amean; - end - - % Save variance - if returnVar - Avar = sum(Adev.^2, 2) ./ (nEns-1); - output.Avar(:,t) = repmat( Avar, [1,nt] ); - end - - % Save deviations - if returnDevs - output.Adev(:,:,t) = repmat( Adev, [1,1,nt]); - end - - % Posterior calculations - if ~isempty(Q) - output.calcs(:,:,t) = posteriorCalculations( Amean, Adev, Q ); - end - - % Ensemble percentiles - if ~isempty(percentiles) - Amean = permute(Amean, [1 3 2]); - Aperc = prctile( Adev, percentiles, 2 ); - output.Aperc(:,:,t) = Amean + Aperc; - end - - % Progress bar - if showProgress - progressbar(k/nSet); - end -end - -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/ensrf_outline.m b/4. DA Analyses/jointOfflineKalmanFilter/ensrf_outline.m deleted file mode 100644 index 3597c41c..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/ensrf_outline.m +++ /dev/null @@ -1,139 +0,0 @@ -function[output] = ensrf_outline( M, D, R, Y, w, yloc, Q, percentiles, ... - returnMean, returnVar, returnDevs, showProgress ) -% Runs the ensemble square root kalman filter -% -% output = ensrf( M, D, R, Y, w, yloc, Q, percentiles, returnMean, returnVar, -% returnDevs, showProgress ) -% -% ----- Inputs ----- -% -% M: Prior model ensemble (nState x nEns) -% -% D: Observations (nObs x nTime) -% -% R: Observation uncertainty (nObs x nTime) -% -% Y: Observation estimates (nObs x nEns) -% -% w: Observation-ensemble covariance localization weights (nState x nObs) -% -% yloc: Observation estimate covariance localization weights (nObs x nObs) -% -% Q: Calculators that act on the posterior. (nCalcs x 1) -% -% percentiles: The ensemble percentiles to return. A vector of values -% between 0 and 100. (nPercs x 1) -% -% returnMean: Whether to return the updated ensemble mean as output. Scalar -% logical. -% -% returnVar: Whether to return the variance of the updated ensemble as -% output. A scalar logical. -% -% returnDevs: Whether to return the updated ensemble deviations as output. -% A scalar logical. -% -% showProgress: Whether to display a progress bar. Scalar logical. 
-% -% ----- Output ----- -% -% output: A structure that may contain the following fields -% -% calibRatio: Calibration ratios for the observations (nObs x nTime) -% -% Amean: The updated ensemble mean (nState x nTime) -% -% Avar: The variance of the updated ensemble (nState x nTime) -% -% Aperc: The percentiles of the updated ensemble (nState x nPercentile x nTime) -% -% Adev: The updated ensemble deviations (nState x nEns x nTime) - -% Preallocate output. Determine whether to calculate mean and deviations. -[nObs, nTime] = size(D); -[nState, nEns] = size(M); -nPercs = numel(percentiles); -nCalcs = numel(Q); -[output, calculateMean, calculateDevs] = obj.preallocateENSRF( nObs, nTime, ... - nState, nEns, nPercs, nCalcs, returnMean, returnVar, returnDevs ); - -% Decompose ensembles -[Mmean, Mdev] = dash.decompose(M); -[Ymean, Ydev] = dash.decompose(Y); - -% Get the static Kalman numerator and the Y covariance -Knum = obj.kalmanNumerator( Mdev, Ydev, w ); -Ycov = obj.Ycovariance( yloc, Ydev, Ydev ); - -% Get the unique sets of obs + R for the various time steps (each set has -% a unique Kalman Gain) -hasobs = ~isnan(D); -obsR = [hasobs;R]'; -[~, iA, iC] = unique( obsR, 'rows' ); -nSet = numel(iA); - -% Progress bar -if showProgress - progressbar(0); -end - -% Get the time steps, obs, and R for each set -for k = 1:nSet - t = (iC==k); - nt = numel(t); % Numer of time steps in the set - obs = hasobs(:, t(1)); - Rset = R(obs, t(1)); - - % Get the Kalman Gain - Kdenom = obj.kalmanDenominator( Ycov(obs,obs), Rset ); - K = Knum(:,obs) / Kdenom; - - % Update the mean - if calculateMean - Amean = obj.updateMean( Mmean, K, D(obs,t), Ymean(obs) ); - end - - % Calibration ratio - output.calibRatio(obs,t) = (D(obs,t)-Ymean(obs)).^2 ./ diag(Kdenom); - - % Update the deviations - if calculateDevs - Ka = obj.kalmanAdjusted( Knum(:,obs), Kdenom, Rset ); - Adev = obj.updateDeviations( Mdev, Ka, Ydev ); - end - - % Save mean - if returnMean - output.Amean(:,t) = Amean; - end - - % Save deviations - if returnDevs - output.Adev(:,:,t) = repmat( Adev, [1,1,nt]); - end - - % Ensemble variance - if returnVar - Avar = sum(Adev.^2, 2) ./ (nEns-1); - output.Avar(:,t) = repmat( Avar, [1,nt] ); - end - - % Ensemble percentiles - if returnPercs - Amean = permute(Amean, [1 3 2]); - Aperc = prctile( Adev, percentiles, 2 ); - output.Aperc(:,:,t) = Amean + Aperc; - end - - % Posterior calculations - if posteriorCalcs - output.calcs(:,:,t) = posteriorCalculations( Amean, Adev, Q ); - end - - % Progress bar - if showProgress - progressbar(k/nSet); - end -end - -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/inflateCovariance.m b/4. DA Analyses/jointOfflineKalmanFilter/inflateCovariance.m deleted file mode 100644 index 02e78b28..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/inflateCovariance.m +++ /dev/null @@ -1,6 +0,0 @@ -function[Mdev] = inflateCovariance( Mdev, inflate ) -%% Inflates the covariance of an ensemble. -% -% Mdev = inflateCovariance( Mdev, inflate ); -Mdev = Mdev .* sqrt(inflate); -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/jointOfflineKalmanFilter.m b/4. DA Analyses/jointOfflineKalmanFilter/jointOfflineKalmanFilter.m deleted file mode 100644 index 620e9773..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/jointOfflineKalmanFilter.m +++ /dev/null @@ -1,33 +0,0 @@ -classdef jointOfflineKalmanFilter - % Implements an offline, EnSRF Kalman Filter that updates all - % observations simultaneously. 
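    %
    % A sketch of the EnSRF update implemented by the helper functions in
    % this folder (the empty-array options for inflation, localization,
    % and blending are omitted here):
    %
    %   [Knum, Ycov] = ensembleCovariances( Mdev, Ydev );  % gain numerator and Ye covariance
    %   Kdenom = kalmanDenominator( Ycov, R );             % Ycov + diag(R)
    %   K      = kalmanGain( Knum, Kdenom );               % Knum / Kdenom
    %   Amean  = updateMean( Mmean, K, D, Ymean );         % Mmean + K * (D - Ymean)
    %   Ka     = kalmanAdjusted( Knum, Kdenom, R );        % adjusted gain for the deviations
    %   Adev   = updateDeviations( Mdev, Ka, Ydev );       % Mdev - Ka * Ydev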
- - - properties - % Kalman filter essentials - M; % Prior ensemble - D; % Observations - R; % Observation uncertainty - Ye; % Proxy estimates - - % Adjustments to covariance - w; % State vector localization weights - yloc; % Y localization weights - inflate; % Inflation factor - P; % Pre-specified covariance matrix - - % Values calculated on the fly - posteriorCalcs; % Forward models that calculate values from the posterior. - dynamicLocalizer; % Generates localization weights. - - % Types of output - returnMean; - returnVariance; - returnDeviations; - returnPercentiles; - end - - methods - end - -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/kalmanAdjusted.m b/4. DA Analyses/jointOfflineKalmanFilter/kalmanAdjusted.m deleted file mode 100644 index 95c69a14..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/kalmanAdjusted.m +++ /dev/null @@ -1,5 +0,0 @@ -function[Ka] = kalmanAdjusted( Knum, Kdenom, R ) -R = diag(R); -Ksqrt = sqrtm(Kdenom); -Ka = Knum * (Ksqrt^(-1))' * (Ksqrt + sqrtm(R))^(-1); -end diff --git a/4. DA Analyses/jointOfflineKalmanFilter/kalmanDenominator.m b/4. DA Analyses/jointOfflineKalmanFilter/kalmanDenominator.m deleted file mode 100644 index c6e911d0..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/kalmanDenominator.m +++ /dev/null @@ -1,4 +0,0 @@ -function[Kdenom] = kalmanDenominator( Ycov, R ) -R = diag(R); -Kdenom = Ycov + R; -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/kalmanGain.m b/4. DA Analyses/jointOfflineKalmanFilter/kalmanGain.m deleted file mode 100644 index 8d65d38b..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/kalmanGain.m +++ /dev/null @@ -1,3 +0,0 @@ -function[K] = kalmanGain( Knum, Kdenom ) -K = Knum / Kdenom; -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/localizeCovariance.m b/4. DA Analyses/jointOfflineKalmanFilter/localizeCovariance.m deleted file mode 100644 index dfcab823..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/localizeCovariance.m +++ /dev/null @@ -1,4 +0,0 @@ -function[Knum, Ycov] = localizeCovariance(Knum, Ycov, w, yloc) -Knum = w .* Knum; -Ycov = yloc .* Ycov; -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/posteriorCalculations.m b/4. DA Analyses/jointOfflineKalmanFilter/posteriorCalculations.m deleted file mode 100644 index edaa2da4..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/posteriorCalculations.m +++ /dev/null @@ -1,19 +0,0 @@ -function[calcs] = posteriorCalculations( Amean, Adev, Q ) -% Amean: nState x nTime -% -% Adev: nState x nEns -% -% Q: nCalcs x 1 - -% Preallocate the output -nTime = size(Amean,2); -nCalc = numel(Q); -calcs = NaN( nCalc, 2, nTime ); - -% Get the indices for each calculator -for s = 1:numel(Q) - H = Q{s}.H; - calcs(s,:,:) = Q{s}.run( Amean(H,:), Adev(H,:) ); -end - -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/preallocateENSRF.m b/4. DA Analyses/jointOfflineKalmanFilter/preallocateENSRF.m deleted file mode 100644 index 79777e01..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/preallocateENSRF.m +++ /dev/null @@ -1,52 +0,0 @@ -function[output, calculateMean, calculateDevs] = preallocateENSRF( nObs, ... - nTime, nState, nEns, nPercs, nCalcs, returnMean, returnVar, returnDevs ) -%% Preallocates output for the filter. Determines whether to update -% ensemble means and deviations based on required output. 
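%
% For example (hypothetical sizes), requesting only the mean and variance:
%
% >> [output, calcMean, calcDevs] = preallocateENSRF( 20, 100, 1000, 50, ...
%                                                     0, 0, true, true, false );
%
% preallocates output.calibRatio (20 x 100), output.Amean (1000 x 100), and
% output.Avar (1000 x 100) as NaN, and sets calcMean and calcDevs to true.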
- -% Initialize structure -output = struct; - -% Record which calculations are necessary -calculateMean = false; -calculateDevs = false; - -% Calibration ratio -output.calibRatio = NaN(nObs, nTime); - -% Ensemble mean -if returnMean - output.Amean = NaN(nState, nTime); - calculateMean = true; -end - -% Ensemble variance -if returnVar - output.Avar = NaN(nState, nTime); - calculateDevs = true; -end - -% Ensemble deviations -if returnDevs - output.Adev = NaN( nState, nEns, nTime ); - calculateDevs = true; -end - -% Ensemble percentiles -if nPercs > 0 - output.Aperc = NaN( nState, nPercs, nTime ); - calculateMean = true; - calculateDevs = true; -end - -% Posterior calculations -if nCalcs > 0 - output.calcs = NaN( nCalcs, 2, nTime ); - calculateMean = true; - calculateDevs = true; -end - -end - - - - \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/updateDeviations.m b/4. DA Analyses/jointOfflineKalmanFilter/updateDeviations.m deleted file mode 100644 index 373a35b4..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/updateDeviations.m +++ /dev/null @@ -1,4 +0,0 @@ -function[Adev] = updateDeviations( Mdev, Ka, Ydev ) -% Updates the ensemble deviations -Adev = Mdev - Ka * Ydev; -end \ No newline at end of file diff --git a/4. DA Analyses/jointOfflineKalmanFilter/updateMean.m b/4. DA Analyses/jointOfflineKalmanFilter/updateMean.m deleted file mode 100644 index 5c999e53..00000000 --- a/4. DA Analyses/jointOfflineKalmanFilter/updateMean.m +++ /dev/null @@ -1,14 +0,0 @@ -function[Amean] = updateMean( Mmean, K, D, Ymean ) -% Updates the ensemble mean -% -% Mmean: nState x nTime -% -% K: nState x nSite -% -% innov: nSite x nTime -% -% Amean: nState x nTime - -Amean = Mmean + K * (D - Ymean); - -end \ No newline at end of file diff --git a/4. DA Analyses/posteriorMean.m b/4. DA Analyses/posteriorMean.m deleted file mode 100644 index 59f5372f..00000000 --- a/4. DA Analyses/posteriorMean.m +++ /dev/null @@ -1,35 +0,0 @@ -classdef posteriorMean - - properties - H; - w; - end - - methods - function[postCalc] = run(obj, Amean, Adev) - % Amean: nState x nTime - % Adev: nState x nEns - - % Vectorizes the computation of weighted mean and variance for - % ensembles with different means and the same deviations. - - % The mean of an (ensemble of state vector weighted means) is - % the weighted mean of the (mean of the ensemble of state vectors) - % (Try the math, it checks out) - denom = sum(obj.w,1); - postMean = sum(obj.w.*Amean,1) ./ denom; - - % The variance is derived from the weighted means of the - % state vector ensemble deviations. - sAdev = sum(obj.w.*Adev,1) ./ denom; - unbias = 1 / (size(sAdev,2)-1); - postVar = unbias * sum( sAdev.^2, 2) ; - postVar = postVar * ones(size(postMean)); - - % Output - postCalc = [postMean', postVar']; - postCalc = permute( postCalc, [3 2 1]); - - end - end -end \ No newline at end of file diff --git a/1. Grid files/@gridfile/add.m b/@gridfile/add.m similarity index 100% rename from 1. Grid files/@gridfile/add.m rename to @gridfile/add.m diff --git a/1. Grid files/@gridfile/buildSources.m b/@gridfile/buildSources.m similarity index 100% rename from 1. Grid files/@gridfile/buildSources.m rename to @gridfile/buildSources.m diff --git a/1. Grid files/@gridfile/buildSourcesForFiles.m b/@gridfile/buildSourcesForFiles.m similarity index 100% rename from 1. Grid files/@gridfile/buildSourcesForFiles.m rename to @gridfile/buildSourcesForFiles.m diff --git a/1. 
Grid files/@gridfile/checkAllowedDims.m b/@gridfile/checkAllowedDims.m similarity index 100% rename from 1. Grid files/@gridfile/checkAllowedDims.m rename to @gridfile/checkAllowedDims.m diff --git a/1. Grid files/@gridfile/checkMetadataField.m b/@gridfile/checkMetadataField.m similarity index 100% rename from 1. Grid files/@gridfile/checkMetadataField.m rename to @gridfile/checkMetadataField.m diff --git a/1. Grid files/@gridfile/checkMetadataStructure.m b/@gridfile/checkMetadataStructure.m similarity index 100% rename from 1. Grid files/@gridfile/checkMetadataStructure.m rename to @gridfile/checkMetadataStructure.m diff --git a/1. Grid files/@gridfile/checkSourcesMatchGrid.m b/@gridfile/checkSourcesMatchGrid.m similarity index 100% rename from 1. Grid files/@gridfile/checkSourcesMatchGrid.m rename to @gridfile/checkSourcesMatchGrid.m diff --git a/1. Grid files/@gridfile/collectFullPaths.m b/@gridfile/collectFullPaths.m similarity index 100% rename from 1. Grid files/@gridfile/collectFullPaths.m rename to @gridfile/collectFullPaths.m diff --git a/1. Grid files/@gridfile/collectPrimitives.m b/@gridfile/collectPrimitives.m similarity index 100% rename from 1. Grid files/@gridfile/collectPrimitives.m rename to @gridfile/collectPrimitives.m diff --git a/1. Grid files/@gridfile/commaDelimitedDims.m b/@gridfile/commaDelimitedDims.m similarity index 100% rename from 1. Grid files/@gridfile/commaDelimitedDims.m rename to @gridfile/commaDelimitedDims.m diff --git a/1. Grid files/@gridfile/convertSourceToPrimitives.m b/@gridfile/convertSourceToPrimitives.m similarity index 100% rename from 1. Grid files/@gridfile/convertSourceToPrimitives.m rename to @gridfile/convertSourceToPrimitives.m diff --git a/1. Grid files/@gridfile/defineMetadata.m b/@gridfile/defineMetadata.m similarity index 100% rename from 1. Grid files/@gridfile/defineMetadata.m rename to @gridfile/defineMetadata.m diff --git a/1. Grid files/@gridfile/expand.m b/@gridfile/expand.m similarity index 100% rename from 1. Grid files/@gridfile/expand.m rename to @gridfile/expand.m diff --git a/1. Grid files/@gridfile/findFileSources.m b/@gridfile/findFileSources.m similarity index 100% rename from 1. Grid files/@gridfile/findFileSources.m rename to @gridfile/findFileSources.m diff --git a/1. Grid files/@gridfile/gridfile.m b/@gridfile/gridfile.m similarity index 100% rename from 1. Grid files/@gridfile/gridfile.m rename to @gridfile/gridfile.m diff --git a/1. Grid files/@gridfile/hasDuplicateRows.m b/@gridfile/hasDuplicateRows.m similarity index 100% rename from 1. Grid files/@gridfile/hasDuplicateRows.m rename to @gridfile/hasDuplicateRows.m diff --git a/1. Grid files/@gridfile/info.m b/@gridfile/info.m similarity index 100% rename from 1. Grid files/@gridfile/info.m rename to @gridfile/info.m diff --git a/1. Grid files/@gridfile/load.m b/@gridfile/load.m similarity index 100% rename from 1. Grid files/@gridfile/load.m rename to @gridfile/load.m diff --git a/1. Grid files/@gridfile/metadata.m b/@gridfile/metadata.m similarity index 100% rename from 1. Grid files/@gridfile/metadata.m rename to @gridfile/metadata.m diff --git a/1. Grid files/@gridfile/new.m b/@gridfile/new.m similarity index 100% rename from 1. Grid files/@gridfile/new.m rename to @gridfile/new.m diff --git a/1. Grid files/@gridfile/padPrimitives.m b/@gridfile/padPrimitives.m similarity index 100% rename from 1. Grid files/@gridfile/padPrimitives.m rename to @gridfile/padPrimitives.m diff --git a/1. 
Grid files/@gridfile/processMetadata.m b/@gridfile/processMetadata.m similarity index 100% rename from 1. Grid files/@gridfile/processMetadata.m rename to @gridfile/processMetadata.m diff --git a/1. Grid files/@gridfile/remove.m b/@gridfile/remove.m similarity index 100% rename from 1. Grid files/@gridfile/remove.m rename to @gridfile/remove.m diff --git a/1. Grid files/@gridfile/renameSources.m b/@gridfile/renameSources.m similarity index 100% rename from 1. Grid files/@gridfile/renameSources.m rename to @gridfile/renameSources.m diff --git a/1. Grid files/@gridfile/repeatedLoad.m b/@gridfile/repeatedLoad.m similarity index 100% rename from 1. Grid files/@gridfile/repeatedLoad.m rename to @gridfile/repeatedLoad.m diff --git a/1. Grid files/@gridfile/review.m b/@gridfile/review.m similarity index 100% rename from 1. Grid files/@gridfile/review.m rename to @gridfile/review.m diff --git a/1. Grid files/@gridfile/rewriteMetadata.m b/@gridfile/rewriteMetadata.m similarity index 100% rename from 1. Grid files/@gridfile/rewriteMetadata.m rename to @gridfile/rewriteMetadata.m diff --git a/1. Grid files/@gridfile/save.m b/@gridfile/save.m similarity index 100% rename from 1. Grid files/@gridfile/save.m rename to @gridfile/save.m diff --git a/1. Grid files/@gridfile/sourceFilepath.m b/@gridfile/sourceFilepath.m similarity index 100% rename from 1. Grid files/@gridfile/sourceFilepath.m rename to @gridfile/sourceFilepath.m diff --git a/1. Grid files/@gridfile/update.m b/@gridfile/update.m similarity index 100% rename from 1. Grid files/@gridfile/update.m rename to @gridfile/update.m diff --git a/1. Grid files/@gridfile/updateMetadataField.m b/@gridfile/updateMetadataField.m similarity index 100% rename from 1. Grid files/@gridfile/updateMetadataField.m rename to @gridfile/updateMetadataField.m diff --git a/Global Utilities/checkFile.m b/Global Utilities/checkFile.m deleted file mode 100644 index c9b45c86..00000000 --- a/Global Utilities/checkFile.m +++ /dev/null @@ -1,44 +0,0 @@ -function[] = checkFile( filename, varargin ) -% 'extension', ext -% 'exist', tf - -if ~isstrflag(filename) - error('filename must be a string scalar or character row vector.'); -end - -% Optional inputs -[ext, checkExists] = parseInputs( varargin, {'extension', 'exist'}, {[], false}, {[],[]} ); -if ~isempty(ext) - if ~isstrflag(ext) - error('extension must be a string or character row vector.'); - end - ext = char(ext); - if ~strcmpi( ext(1), '.' ) - error('ext must begin with a "."'); - end -end -if ~islogical(checkExists) || ~isscalar(checkExists) - error('exist must be a scalar logical.'); -end - -% File name -if ~isstrflag( filename ) - error('file name must be a string or character row vector.'); -end - -% Extension -if ~isempty(ext) - filename = char(filename); - nExt = numel(ext); - if ~strcmp( filename( end-nExt + (1:nExt) ), ext ) - error('filename must end in a %s extension.', ext); - end -end - -% Existence -if checkExists && ~exist( filename, 'file' ) - error('The file %s does not exist. It may be misspelled or not on the active path.', filename ); -end - -end - \ No newline at end of file diff --git a/Global Utilities/convertToV7_3.m b/Global Utilities/convertToV7_3.m deleted file mode 100644 index 1679a863..00000000 --- a/Global Utilities/convertToV7_3.m +++ /dev/null @@ -1,79 +0,0 @@ -function[] = convertToV7_3( filename ) -% Converts a .mat file to a version 7.3 (v7.3) .mat file. 
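%
% For example, with a hypothetical file on the active path,
%
% >> convertToV7_3( 'myEnsemble.mat' )
%
% resaves myEnsemble.mat in the -v7.3 format, which supports partial
% (variable-by-variable) access via matfile.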
-% -% dash.convertToV7_3( filename ) -% Resaves a mat file as -v7.3 - -% Check the file exists, get path -checkFile( filename, 'exist', true ); -fullfile = which( filename ); - -% Get the temp file name. Ensure no pre-existing temp files are overwritten -tempfile = fullfile; -while exist( tempfile, 'file' ) - tempfile = [tempfile, '.tmp']; %#ok -end - -% Test that the file is actually a matfile and not already v7.3 -try - m = matfile( filename ); -catch - error('File %s may not actually be a .mat file.', filename); -end -if m.Properties.SupportsPartialAccess - error('File %s is already v7.3', filename); -end - -% Get data fields, check for naming conflicts -fieldNames = string( fields( m ) ); -fieldNames(1) = []; -if any( ismember( fieldNames, ["m","tempfile","f","fields","load","fullfile","error"] ) ) - error('File %s contains variables that conflict with named variables in this function. You will need to resave it manually.', filename ); -end - -% Check for matfile naming conflict. Temporarily disable warning message. -id = 'MATLAB:load:variableNotFound'; -w = warning('query', id); -warning('off', id); -try - badProp = load( fullfile, 'Properties' ); - warning(w.state, id); -catch - warning(w.state, id); -end -if ~isempty(fields(badProp)) - error('File %s contains variables that conflict with named variables in this function. You will need to resave it manually.', filename ); -end - -% Attempt to load all the data at once. -fullLoad = true; -try - load( fullfile, '-mat', fieldNames{:} ); -catch - fullLoad = false; -end - -% Save all the data at once -if fullLoad - save( tempfile, '-mat', '-v7.3', fieldNames{:} ); - -% Save variables iteratively -else - m = matfile( tempfile ); - for f = 1:numel(fieldNames) - - % Try to load each variable in full - try - load( fullfile, '-mat', fieldNames{f} ); - catch - error('Variable %s is too large to load into memory.', fieldNames{f} ); - end - m.(fieldNames{f}) = fieldNames{f}; - end -end - -% Delete the old file and rename -delete( fullfile ); -movefile( tempfile, fullfile ); - -end diff --git a/Global Utilities/gaspariCohn.m b/Global Utilities/gaspariCohn.m deleted file mode 100644 index aa0fb4ed..00000000 --- a/Global Utilities/gaspariCohn.m +++ /dev/null @@ -1,45 +0,0 @@ -function[weights] = gaspariCohn( dist, R, scale ) -% Uses a gaspariCohn 5th order polynomial to determine localization weights -% -% [weights] = gaspariCohn( dist, R, scale ) - -% Set defaults -if ~exist('scale','var') || isempty(scale) - scale = 0.5; -elseif strcmpi(scale, 'optimal') - scale = sqrt(10/3); -end - -% Error check -if ~isscalar(scale) || ~isnumeric(scale) || scale<0 || scale>0.5 - error('The length scale must be a scalar on the interval [0, 0.5].'); -elseif ~isscalar(R) || ~isnumeric(R) || R<0 - error('R must be a positive numeric scalar.'); -elseif ~isnumeric(dist) || any(dist(:)<0) - error('dist must be a numeric array with no negative values.'); -end - -% Get the length scale and covariance localization radius. -c = scale * R; -Rloc = 2*c; % Note that Rloc <= R, they are not strictly equal - -% Get points that are inside/outside the localization radius. 
Points inside -% the radius are split into inside / outside the length scale -outRloc = (dist > Rloc); -inScale = (dist <= c); -outScale = (dist > c) & (dist <= Rloc); - -% Preallocate weights -weights = ones( size(dist) ); - -% Apply the polynomial to the distances -x = dist / c; -weights(inScale) = polyval([-.25,.5,.625,-5/3,0,1], x(inScale)); -weights(outScale) = polyval([1/12,-.5,.625,5/3,-5,4], x(outScale)) - 2./(3*x(outScale)); -weights(outRloc) = 0; - -% Weights should never be negative. Remove near-zero negative weights -% resulting from round-off errors. -weights( weights<0 ) = 0; - -end \ No newline at end of file diff --git a/Global Utilities/getDimIDs.m b/Global Utilities/getDimIDs.m deleted file mode 100644 index 4b48f57d..00000000 --- a/Global Utilities/getDimIDs.m +++ /dev/null @@ -1,40 +0,0 @@ -function[dimID, atts, varName, lonDim, latDim, levDim, timeDim, runDim, triDim, varDim] = getDimIDs -%% This creates string IDs for all dimensions that can possibly occur in gridded data. -% -% dimID = getDimIDs -% Returns a list of data dimensions. -% -% [dimID, specs, varName, lonDim, latDim, levDim, timeDim, runDim, triDim] = getDimIDs -% Returns a list of data dimensions and all unique names used by Dash. - -% Variable specifications. These are non-gridded metadata. -atts = "attributes"; - -% Variable name field in ensemble metadata -varName = "varName"; - -% Longitude (x coordinate) -lonDim = "lon"; - -% Latitude (y coordinate) -latDim = "lat"; - -% Tripole (x-y coordinate) -triDim = "tri"; - -% Level (z or height coordinate) -levDim = "lev"; - -% Time -timeDim = "time"; - -% Ensemble -runDim = "run"; - -% Variable dimension -varDim = "var"; - -% All grid dimensions -dimID = [lonDim, latDim, triDim, levDim, timeDim, runDim, varDim]; - -end \ No newline at end of file diff --git a/Global Utilities/haversine.m b/Global Utilities/haversine.m deleted file mode 100644 index 05e954f0..00000000 --- a/Global Utilities/haversine.m +++ /dev/null @@ -1,35 +0,0 @@ -function[D] = haversine( coord, ensCoord ) -%% Computes the distance between lat-lon coordinates and a set of ensemble -% coordinates. -% -% D = haversine( coord, ensCoord ) -% -% ---- Inputs ----- -% -% coord: A set of coordinates. First column lat, second column lon. (nCoord1 x 2) -% -% ensCoord: A second set of coordinates. First column lat, second column -% lon. 
(nCoord2 x 2) - -% Set the radius of the Earth -R = 6378.137; - -% Convert to radians -coord = coord * pi/180; -ensCoord = ensCoord * pi/180; - -% Transpose ensCoord for binary singleton expansion -ensCoord = ensCoord'; - -% Get the change in lat and lon -dLat = coord(:,1) - ensCoord(1,:); -dLon = coord(:,2) - ensCoord(2,:); - -% Get haversine function of the central angle -a = sin(dLat/2).^2 + ( cos(coord(:,1)) .* cos(ensCoord(1,:)) .* sin(dLon/2).^2); -c = 2 * atan2( sqrt(a), sqrt(1-a) ); - -% Get the distance -D = R * c; - -end \ No newline at end of file diff --git a/Global Utilities/isstrflag.m b/Global Utilities/isstrflag.m deleted file mode 100644 index 696c8ff9..00000000 --- a/Global Utilities/isstrflag.m +++ /dev/null @@ -1,13 +0,0 @@ -function[tf] = isstrflag( str ) -%% Tests if an input is a character row vector or string scalar - -tf = false; - -if ischar(str) && isrow(str) - tf = true; - -elseif isstring(str) && isscalar(str) - tf = true; -end - -end \ No newline at end of file diff --git a/Global Utilities/isstrlist.m b/Global Utilities/isstrlist.m deleted file mode 100644 index dc9682ee..00000000 --- a/Global Utilities/isstrlist.m +++ /dev/null @@ -1,6 +0,0 @@ -function[tf] = isstrlist( list ) -tf = false; -if isvector( list ) && ( (ischar(list) && isrow(list)) || isstring(list) || iscellstr(list) ) - tf = true; -end -end \ No newline at end of file diff --git a/Global Utilities/loadKeep.m b/Global Utilities/loadKeep.m deleted file mode 100644 index a25b669d..00000000 --- a/Global Utilities/loadKeep.m +++ /dev/null @@ -1,25 +0,0 @@ -function[scs, keep] = loadKeep( index ) - -if ~issorted( index, 'strictascend') - error('Indices must be strictly ascending.'); -end -scs = NaN(3,1); - -% Check for even spacing. If not, load the entire interval -loadInterval = index; -spacing = unique( diff(index) ); -if numel(index)>1 && numel(spacing)>1 - loadInterval = index(1):index(end); - scs(3) = 1; -elseif numel(index) > 1 - scs(3) = spacing; -else - scs(3) = 1; -end - -% Get the start and the count -scs(1) = loadInterval(1); -scs(2) = numel( loadInterval ); -[~, keep] = ismember( index, loadInterval ); - -end \ No newline at end of file diff --git a/Global Utilities/progressbar/progressbar.m b/Global Utilities/progressbar/progressbar.m deleted file mode 100644 index 011b88b1..00000000 --- a/Global Utilities/progressbar/progressbar.m +++ /dev/null @@ -1,365 +0,0 @@ -function progressbar(varargin) -% Description: -% progressbar() provides an indication of the progress of some task using -% graphics and text. Calling progressbar repeatedly will update the figure and -% automatically estimate the amount of time remaining. -% This implementation of progressbar is intended to be extremely simple to use -% while providing a high quality user experience. -% -% Features: -% - Can add progressbar to existing m-files with a single line of code. -% - Supports multiple bars in one figure to show progress of nested loops. -% - Optional labels on bars. -% - Figure closes automatically when task is complete. -% - Only one figure can exist so old figures don't clutter the desktop. -% - Remaining time estimate is accurate even if the figure gets closed. -% - Minimal execution time. Won't slow down code. -% - Randomized color. When a programmer gets bored... 
-% -% Example Function Calls For Single Bar Usage: -% progressbar % Initialize/reset -% progressbar(0) % Initialize/reset -% progressbar('Label') % Initialize/reset and label the bar -% progressbar(0.5) % Update -% progressbar(1) % Close -% -% Example Function Calls For Multi Bar Usage: -% progressbar(0, 0) % Initialize/reset two bars -% progressbar('A', '') % Initialize/reset two bars with one label -% progressbar('', 'B') % Initialize/reset two bars with one label -% progressbar('A', 'B') % Initialize/reset two bars with two labels -% progressbar(0.3) % Update 1st bar -% progressbar(0.3, []) % Update 1st bar -% progressbar([], 0.3) % Update 2nd bar -% progressbar(0.7, 0.9) % Update both bars -% progressbar(1) % Close -% progressbar(1, []) % Close -% progressbar(1, 0.4) % Close -% -% Notes: -% For best results, call progressbar with all zero (or all string) inputs -% before any processing. This sets the proper starting time reference to -% calculate time remaining. -% Bar color is choosen randomly when the figure is created or reset. Clicking -% the bar will cause a random color change. -% -% Demos: -% % Single bar -% m = 500; -% progressbar % Init single bar -% for i = 1:m -% pause(0.01) % Do something important -% progressbar(i/m) % Update progress bar -% end -% -% % Simple multi bar (update one bar at a time) -% m = 4; -% n = 3; -% p = 100; -% progressbar(0,0,0) % Init 3 bars -% for i = 1:m -% progressbar([],0) % Reset 2nd bar -% for j = 1:n -% progressbar([],[],0) % Reset 3rd bar -% for k = 1:p -% pause(0.01) % Do something important -% progressbar([],[],k/p) % Update 3rd bar -% end -% progressbar([],j/n) % Update 2nd bar -% end -% progressbar(i/m) % Update 1st bar -% end -% -% % Fancy multi bar (use labels and update all bars at once) -% m = 4; -% n = 3; -% p = 100; -% progressbar('Monte Carlo Trials','Simulation','Component') % Init 3 bars -% for i = 1:m -% for j = 1:n -% for k = 1:p -% pause(0.01) % Do something important -% % Update all bars -% frac3 = k/p; -% frac2 = ((j-1) + frac3) / n; -% frac1 = ((i-1) + frac2) / m; -% progressbar(frac1, frac2, frac3) -% end -% end -% end -% -% Author: -% Steve Hoelzer -% -% Revisions: -% 2002-Feb-27 Created function -% 2002-Mar-19 Updated title text order -% 2002-Apr-11 Use floor instead of round for percentdone -% 2002-Jun-06 Updated for speed using patch (Thanks to waitbar.m) -% 2002-Jun-19 Choose random patch color when a new figure is created -% 2002-Jun-24 Click on bar or axes to choose new random color -% 2002-Jun-27 Calc time left, reset progress bar when fractiondone == 0 -% 2002-Jun-28 Remove extraText var, add position var -% 2002-Jul-18 fractiondone input is optional -% 2002-Jul-19 Allow position to specify screen coordinates -% 2002-Jul-22 Clear vars used in color change callback routine -% 2002-Jul-29 Position input is always specified in pixels -% 2002-Sep-09 Change order of title bar text -% 2003-Jun-13 Change 'min' to 'm' because of built in function 'min' -% 2003-Sep-08 Use callback for changing color instead of string -% 2003-Sep-10 Use persistent vars for speed, modify titlebarstr -% 2003-Sep-25 Correct titlebarstr for 0% case -% 2003-Nov-25 Clear all persistent vars when percentdone = 100 -% 2004-Jan-22 Cleaner reset process, don't create figure if percentdone = 100 -% 2004-Jan-27 Handle incorrect position input -% 2004-Feb-16 Minimum time interval between updates -% 2004-Apr-01 Cleaner process of enforcing minimum time interval -% 2004-Oct-08 Seperate function for timeleftstr, expand to include days -% 2004-Oct-20 
Efficient if-else structure for sec2timestr -% 2006-Sep-11 Width is a multiple of height (don't stretch on widescreens) -% 2010-Sep-21 Major overhaul to support multiple bars and add labels - -% 2019-Oct-24 (J King) Fixed to allow string inputs. Changed label -% interpreter to 'none'. - -persistent progfig progdata lastupdate - -% Get inputs -if nargin > 0 - input = varargin; - ninput = nargin; -else - % If no inputs, init with a single bar - input = {0}; - ninput = 1; -end - -% If task completed, close figure and clear vars, then exit -if isequal(input{1}, 1) - if ishandle(progfig) - delete(progfig) % Close progress bar - end - clear progfig progdata lastupdate % Clear persistent vars - drawnow - return -end - -% Init reset flag -resetflag = false; - -% Set reset flag if first input is a string -if ischar(input{1}) || isstring(input{1}) - resetflag = true; -end - -% Set reset flag if all inputs are zero -if isequal(input{1}, 0) - % If the quick check above passes, need to check all inputs - resetflag = true; - for b = 1:ninput - if ~isequal( input{b}, 0 ) - resetflag = false; - break; - end - end -end - -% Set reset flag if more inputs than bars -if ninput > length(progdata) - resetflag = true; -end - -% If reset needed, close figure and forget old data -if resetflag - if ishandle(progfig) - delete(progfig) % Close progress bar - end - progfig = []; - progdata = []; % Forget obsolete data -end - -% Create new progress bar if needed -if ishandle(progfig) -else % This strange if-else works when progfig is empty (~ishandle() does not) - - % Define figure size and axes padding for the single bar case - height = 0.03; - width = height * 8; - hpad = 0.02; - vpad = 0.25; - - % Figure out how many bars to draw - nbars = max(ninput, length(progdata)); - - % Adjust figure size and axes padding for number of bars - heightfactor = (1 - vpad) * nbars + vpad; - height = height * heightfactor; - vpad = vpad / heightfactor; - - % Initialize progress bar figure - left = (1 - width) / 2; - bottom = (1 - height) / 2; - progfig = figure(... - 'Units', 'normalized',... - 'Position', [left bottom width height],... - 'NumberTitle', 'off',... - 'Resize', 'off',... - 'MenuBar', 'none' ); - - % Initialize axes, patch, and text for each bar - left = hpad; - width = 1 - 2*hpad; - vpadtotal = vpad * (nbars + 1); - height = (1 - vpadtotal) / nbars; - for ndx = 1:nbars - % Create axes, patch, and text - bottom = vpad + (vpad + height) * (nbars - ndx); - progdata(ndx).progaxes = axes( ... - 'Position', [left bottom width height], ... - 'XLim', [0 1], ... - 'YLim', [0 1], ... - 'Box', 'on', ... - 'ytick', [], ... - 'xtick', [] ); - progdata(ndx).progpatch = patch( ... - 'XData', [0 0 0 0], ... - 'YData', [0 0 1 1] ); - progdata(ndx).progtext = text(0.99, 0.5, '', ... - 'HorizontalAlignment', 'Right', ... - 'FontUnits', 'Normalized', ... - 'FontSize', 0.7 ); - progdata(ndx).proglabel = text(0.01, 0.5, '', ... - 'HorizontalAlignment', 'Left', ... - 'FontUnits', 'Normalized', ... 
- 'FontSize', 0.7, 'Interpreter', 'none' ); - if ischar(input{ndx}) || isstring(input{ndx}) - set(progdata(ndx).proglabel, 'String', input{ndx}) - input{ndx} = 0; - end - - % Set callbacks to change color on mouse click - set(progdata(ndx).progaxes, 'ButtonDownFcn', {@changecolor, progdata(ndx).progpatch}) - set(progdata(ndx).progpatch, 'ButtonDownFcn', {@changecolor, progdata(ndx).progpatch}) - set(progdata(ndx).progtext, 'ButtonDownFcn', {@changecolor, progdata(ndx).progpatch}) - set(progdata(ndx).proglabel, 'ButtonDownFcn', {@changecolor, progdata(ndx).progpatch}) - - % Pick a random color for this patch - changecolor([], [], progdata(ndx).progpatch) - - % Set starting time reference - if ~isfield(progdata(ndx), 'starttime') || isempty(progdata(ndx).starttime) - progdata(ndx).starttime = clock; - end - end - - % Set time of last update to ensure a redraw - lastupdate = clock - 1; - -end - -% Process inputs and update state of progdata -for ndx = 1:ninput - if ~isempty(input{ndx}) - progdata(ndx).fractiondone = input{ndx}; - progdata(ndx).clock = clock; - end -end - -% Enforce a minimum time interval between graphics updates -myclock = clock; -if abs(myclock(6) - lastupdate(6)) < 0.01 % Could use etime() but this is faster - return -end - -% Update progress patch -for ndx = 1:length(progdata) - set(progdata(ndx).progpatch, 'XData', ... - [0, progdata(ndx).fractiondone, progdata(ndx).fractiondone, 0]) -end - -% Update progress text if there is more than one bar -if length(progdata) > 1 - for ndx = 1:length(progdata) - set(progdata(ndx).progtext, 'String', ... - sprintf('%1d%%', floor(100*progdata(ndx).fractiondone))) - end -end - -% Update progress figure title bar -if progdata(1).fractiondone > 0 - runtime = etime(progdata(1).clock, progdata(1).starttime); - timeleft = runtime / progdata(1).fractiondone - runtime; - timeleftstr = sec2timestr(timeleft); - titlebarstr = sprintf('%2d%% %s remaining', ... - floor(100*progdata(1).fractiondone), timeleftstr); -else - titlebarstr = ' 0%'; -end -set(progfig, 'Name', titlebarstr) - -% Force redraw to show changes -drawnow - -% Record time of this update -lastupdate = clock; - - -% ------------------------------------------------------------------------------ -function changecolor(h, e, progpatch) %#ok -% Change the color of the progress bar patch - -% Prevent color from being too dark or too light -colormin = 1.5; -colormax = 2.8; - -thiscolor = rand(1, 3); -while (sum(thiscolor) < colormin) || (sum(thiscolor) > colormax) - thiscolor = rand(1, 3); -end - -set(progpatch, 'FaceColor', thiscolor) - - -% ------------------------------------------------------------------------------ -function timestr = sec2timestr(sec) -% Convert a time measurement from seconds into a human readable string. 
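%
% For example:
%   sec2timestr(45)      returns '45 sec'
%   sec2timestr(3900)    returns '1 hr, 5 min'
%   sec2timestr(90000)   returns '1 day, 1 hr'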
- -% Convert seconds to other units -w = floor(sec/604800); % Weeks -sec = sec - w*604800; -d = floor(sec/86400); % Days -sec = sec - d*86400; -h = floor(sec/3600); % Hours -sec = sec - h*3600; -m = floor(sec/60); % Minutes -sec = sec - m*60; -s = floor(sec); % Seconds - -% Create time string -if w > 0 - if w > 9 - timestr = sprintf('%d week', w); - else - timestr = sprintf('%d week, %d day', w, d); - end -elseif d > 0 - if d > 9 - timestr = sprintf('%d day', d); - else - timestr = sprintf('%d day, %d hr', d, h); - end -elseif h > 0 - if h > 9 - timestr = sprintf('%d hr', h); - else - timestr = sprintf('%d hr, %d min', h, m); - end -elseif m > 0 - if m > 9 - timestr = sprintf('%d min', m); - else - timestr = sprintf('%d min, %d sec', m, s); - end -else - timestr = sprintf('%d sec', s); -end diff --git a/Global Utilities/progressbar/progressbar_license.txt b/Global Utilities/progressbar/progressbar_license.txt deleted file mode 100644 index 01499e52..00000000 --- a/Global Utilities/progressbar/progressbar_license.txt +++ /dev/null @@ -1,24 +0,0 @@ -Copyright (c) 2005, Steve Hoelzer -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the distribution - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE -LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -POSSIBILITY OF SUCH DAMAGE. diff --git a/Global Utilities/samplingMatrix.m b/Global Utilities/samplingMatrix.m deleted file mode 100644 index c6c4783b..00000000 --- a/Global Utilities/samplingMatrix.m +++ /dev/null @@ -1,100 +0,0 @@ -function[H] = samplingMatrix( siteCoord, stateCoord, indexType ) -%% Creates a sampling matrix or vector that maps site (lat-lon) coordinates -% to the nearest set of (lat-lon) coordinates on a grid or set of stations. -% -% H = samplingMatrix( siteCoord, stateCoord ) -% Returns a logical sampling matrix. -% -% Hvec = samplingMatrix( siteCoord, stateCoord, 'linear' ) -% Returns a vector of linear indices. -% -% Hvec = samplingMatrix( siteCoord, {gridLat, {gridLat, gridLon}, 'linear' ) -% Returns a vector of linear indices after vectorizing a gridded set of -% lat-lon coordinates. -% -% Hsub = samplingMatrix( siteCoord, {gridLat, gridLon}, 'subscript' ) -% Returns subscript indices for a gridded set of lat-lon coordinates. -% -% ----- Inputs ----- -% -% siteCoord: A two column matrix of site latitude (column 1) and longitude -% (column 2) coordinates. (nSite x 2) -% -% stateCoord: A two column matrix of grid/station/domain coordinates. 
(nState x 2) -% -% gridLat: A 2D matrix of grid latitudes. -% -% gridLon: A 2D matrix of grid longitudes. -% -% ----- Outputs ----- -% -% H: A logical sampling matrix. (nSite x nGrid) -% -% Hvec: A vector of linear sampling indices. (nSite x 1) -% -% Hsub: A two column matrix of 2D sampling subscript indices. (nSite x 2) -% -% ----- Written By ----- -% Jonathan King, University of Arizona, 2019 - -% Set a default for the index type -if nargin == 2 - indexType = 'logical'; -end - -% If 2D grid coordinates, convert to a vector -if iscell(stateCoord) - gridSize = size( stateCoord{1} ); - - lat = stateCoord{1}(:); - lon = stateCoord{2}(:); - - stateCoord = [lat, lon]; -end - -% Check that the coordinates are not all NaN -if all( isnan(stateCoord), 2 ) - error('All the state coordinates contain NaN values.'); -elseif all( isnan(siteCoord), 2 ) - error('All the site coordinates contain NaN values.'); -end - -% Preallocate -nSite = size( siteCoord, 1); -Hvec = NaN( nSite, 1); - -% For each site... -for s = 1:nSite - - % Get the distance between the site and grid coords - dist = haversine( siteCoord(s,:), stateCoord ); - - % Get the index of the minimum distance - Hvec(s) = find( dist == min(dist), 1 ); -end - -% If returning a logical sampling matrix -if strcmpi( indexType, 'logical') - - % Preallocate - nGrid = size( stateCoord, 1); - H = false( nSite, nGrid); - - % For each site... - for s = 1:nSite - - % Convert the sampling index to true - H(s, Hvec(s)) = true; - end - -% Or output linear indices. -elseif strcmpi( indexType, 'linear') - H = Hvec; - -% Or get subscript indices -elseif strcmpi( indexType, 'subscript') - [Hrow, Hcol] = ind2sub( gridSize, Hvec ); - H = [Hrow, Hcol]; -end - -end \ No newline at end of file diff --git a/Global Utilities/scsIndices.m b/Global Utilities/scsIndices.m deleted file mode 100644 index 26aeb46b..00000000 --- a/Global Utilities/scsIndices.m +++ /dev/null @@ -1,9 +0,0 @@ -function[index] = scsIndices( scs ) -% Get the indices associated with start, count, stride -% -% index = scsIndices( scs ) -if numel(scs)~=3 - error('scs must have 3 elements.'); -end -index = scs(1) : scs(3) : scs(1)+scs(3)*(scs(2)-1); -end diff --git a/Global Utilities/subdim.m b/Global Utilities/subdim.m deleted file mode 100644 index 4043cf7f..00000000 --- a/Global Utilities/subdim.m +++ /dev/null @@ -1,33 +0,0 @@ -function[subDex] = subdim( linDex, siz ) -%% Subscripts linear indices to an N-dimensional array -% -% ----- Inputs ----- -% -% siz: The size of the N-dimensional array -% -% linDex: Linear indices. -% -% ----- Outputs ----- -% -% subDex: The subscripted indices. -% -% ----- Written By ----- -% Jonathan King, University of Arizona, 2019 - -% Ensure linDex is column -if ~iscolumn(linDex) - error('linDex must be a column vector'); -end - -% Get the number of dimensions -nDim = numel(siz); - -% Preallocate output cell -subDex = cell(1,nDim); - -% Fill subscript indices -[subDex{:}] = ind2sub( siz, linDex ); - -% Convert output cell to array -subDex = cell2mat(subDex); -end \ No newline at end of file diff --git a/Tutorials/T1_fileIO.m b/Tutorials/T1_fileIO.m deleted file mode 100644 index 3cb0e12b..00000000 --- a/Tutorials/T1_fileIO.m +++ /dev/null @@ -1,381 +0,0 @@ -%% Tutorial 1: FILE IO -clearvars; -close all; -clc; - -%% Introduction - -% This demonstrates how to create a .grid file from data. -% -% The purpose of a .grid file is to store information in an organized -% format for the DASH package. 
-% -% The grid files are serve as containers for various types of stored data, -% including NetCDF files, .mat files, and workspace arrays. For netcdf and -% .mat data sources, associated grid files do not actually store data, but -% rather instructions for how to extract information from the appropriate -% file. -% -% The methods used for working with .grid files are all stored in the -% gridFile class. See -% -% >> doc gridFile -% -% for details. In the documentation page you will see a section labeled -% "gridFile Methods" up at the top. This lists the functions I think will -% be useful for normal users. -% -% There is also a "Methods Summary" section near the bottom. This is much -% more comprehensive, but only worth learning if you want to develop the -% code. - -% The basic workflow for .grid files is: -% Step 1: Define metadata for the dimensions of all the gridded data to be -% placed in the .grid file. -% Step 2: Initialize a new .grid file. -% -% Then, for each gridded data source (NetCDF file, .mat file, or -% workspace array) -% Step 3: Specify some metadata for the dimensions in the data source. -% Step 4: Add the data source to the .grid file. - -% The purpose of the .grid files is to increase workflow efficiency. -% Storing data in the .grid file limits file IO to a single instance. -% Furthermore, all data is catalogued with associated metadata for use with -% the DASH package, so can be sorted via metadata at any later point. - - - -%% Define metadata. - -% Before initializing a new .grid file, we need to specify what data it can -% eventually contain. Do this by defining metadata for each dimension of -% the data that will be added to the file. - -% I'll illustrate this with an example case using some output fields from -% the CESM Last Millennium ensemble. -% -% We're going to be using near-surface temperature (Tref) and -% sea level pressure (SLP) from runs 2 and 3. - -% Here are the netcdf files containing the near surface temperature. You -% can see that the files for each run are split into 850-1849, and 1850-2005 -trefFiles = ["b.e11.BLMTRC5CN.f19_g16.002.cam.h0.TREFHT.085001-184912.nc"; - "b.e11.BLMTRC5CN.f19_g16.003.cam.h0.TREFHT.085001-184912.nc" - "b.e11.BLMTRC5CN.f19_g16.002.cam.h0.TREFHT.185001-200512.nc" - "b.e11.BLMTRC5CN.f19_g16.003.cam.h0.TREFHT.185001-200512.nc"]; - -% Similarly here are the files for sea level pressure -pslFiles = ["b.e11.BLMTRC5CN.f19_g16.002.cam.h0.PSL.085001-184912.nc"; - "b.e11.BLMTRC5CN.f19_g16.003.cam.h0.PSL.085001-184912.nc"; - "b.e11.BLMTRC5CN.f19_g16.002.cam.h0.PSL.185001-200512.nc"; - "b.e11.BLMTRC5CN.f19_g16.003.cam.h0.PSL.185001-200512.nc"]; - -% These files are all for data on the CESM-LME model grid. We want to add -% these files to a common .grid file that we can use later for data -% assimilation. We'll want to get all the possible spatial, time, run, and -% variable metadata and use it to define the scope of the .grid file. - -% So let's do that now. The spatial metadata can be obtained directly from -% the NetCDF files -lon = ncread( pslFiles(1), 'lon' ); -lat = ncread( pslFiles(1), 'lat' ); - -% Futhermore, we know the total time scope of the files is from 850-2005, -% spaced in monthly increments. -time = ( datetime(850,1,15) : calmonths(1) : datetime(2005,12,15) )'; - -% Also, these files are for runs 2 and 3. 
And they define 2 variables -% "Tref" and "SLP" -run = [2;3]; -var = ["Tref";"PSL"]; - -% We'll use these values to create a metadata structure that will define -% the scope of the grid file -meta = gridFile.defineMetadata( 'lon', lon, 'lat', lat, 'time', time, 'run', run, 'var', var ); - - -% So, this line defines metadata for the longitude, latitude, time, -% ensemble, and variable dimensions of our 5 dimensional dataset. We can -% use it now to initialize a new gridFile. -% -% I have used metadata values that I find meaningful, but you may prefer -% completely different metadata. That's fine! You can use any numeric, -% logical, character, string, cellstring, or datetime matrix to define -% metadata for any dimension. -% -% However, it is important to note that the metadata must have 1 ROW per -% element down the appropriate dimension. - -% For example, we could provide more information about the variables in -% their metadata. If we wanted to also provide information about variable -% units, we could do -% >> var = ["Tref", "Kelvin"; "Slp", "Pa"]; -% >> meta = gridFile.defineMetadata( 'lon', lon, 'lat', lat, 'time', time, 'run', run, 'var', var ); - - -%% Make a new .grid file - -% When intializing a new .grid file, we can also provide non-dimensional -% metadata, which I will refer to as attributes. To create attributes for -% the .grid file, provide a scalar strucutre whose fields contain the -% values of interest. Again, the metadata is entirely up to you, there are -% no required fields. -% -% In fact, attributes are entirely optional. You don't need to actually -% provide them when creating a .grid file. - -% For this tutorial, let's provide some information about the model and the -% grid -attributes = struct('Model', 'CESM-LME', 'Grid', 'f19_g16'); - -% Alright, let's initialize the .grid file. We'll call it tutorial.grid -myGrid = gridFile.new( 'tutorial.grid', meta, attributes ); - -% this says: Create a new grid file named tutorial.grid, -% the scope of the grid is defined by this metadata, -% the grid also has non-dimensional metadata in attributes. - -% Note that this returns an output "myGrid", which is an object that -% contains some information about this gridFile. Since different gridFiles -% contain different data, we will interact with them individually. So, we -% will need the "myGrid" object to add data to this grid file. -% -% You can also get a gridfile object for an existing gridFile by calling -% the "gridFile" method on the file name. So, for example -clearvars myGrid; -myGrid = gridFile('tutorial.grid'); - -% returns the same object. - - -%% Add data to a grid file - -% Alright, now that we've created the grid, we can start adding data. Or -% rather data sources. Instead of taking the time and effort to load data -% from our netCDF files, merge them together, permute to some dimensional -% order, and save to file (which would duplicate our data), we will instead -% add the netcdf files directly to the data grid. This is far more -% efficient, and allows automated merging of data stored across different -% workspace array, .mat, and NetCDF files. -% -% We will also need to provide some information about the data in data -% source so that the .grid file knows how to access the data at a later -% point. -% -% For a NetCDF file, we need to provide the name of the variable in the -% file, as well as the order of the dimensions of the data. 
The .grid files -% enforce an internal dimensional order when reading from different data -% sources, but you don't need to worry about it as a user. Simply provide -% the order in the data source, and the .grid file will handle the rest. - -% We can look in one of the NetCDF files -ncdisp( pslFiles(1), 'PSL'); - -% Looking at the PSL field, we can see it is (144 x 96 x 12000), which is -% longitude x latitude x time. These dimensions have specific names in -% dash. They are lon, lat, and time. In fact, the full set of possible -% named dimensions in dash are -% -% lon: Longitude / x-axis -% lat: Latitude / y-axis -% lev: Height / z-axis -% tri: A tripolar dimension (More on this later in the tutorial.) -% time: A time dimension -% run: A model-ensemble dimension -% var: A dimension for different variables. -% -% It is the user's responsibility to know the names of dimensions in dash. -% These can all be seen in the function "getDimIDs.m". If you would like to -% rename a dimension (say, from "lat" to "latitude"), you can do so by -% changing the name in "getDimIDs.m". Similarly, if you find you need more -% than 7 data dimensions, you can add additional dimensions to this -% function. However, for now, I'd recommend leaving it alone. - -% So, the dimensional order for our files is -dimOrder = ["lon","lat","time"]; - -% *** Note: The data sources added to a .grid file DO NOT need to store -% data in the same dimensional order. In this tutorial, all the data -% sources will have the same order, but you could easily have a data source -% that is, say lon x lat x time, and a second source that is time x lev x -% lat x lon and be fine. As long as you provide the order of the dimensions -% in the data source, everything will be reordered correctly in the grid. - - -% Now, we also need to specify the metadata for this data source. That way, -% the .grid file will know which data each data source contains. For these -% files, each NetCDF encompasses a full global spatial grid (so, the same -% lat and lon), one of two time spans, one of 3 runs, and one of two -% variables. - -% Here, we'll start with the temperature files. We'll start by defining the -% two time spans -preIndustrial = ( datetime(850,1,15) : calmonths(1) : datetime(1849,12,15) )'; -postIndustrial = ( datetime(1850,1,15): calmonths(1) : datetime(2005,12,15) )'; - -% We can then process each file using a loop. We'll specify the metadata -% for each file, and then add it to the grid -for f = 1:numel( trefFiles ) - - % Specify metadata - time = preIndustrial; - if ismember(f, [3 4]) - time = postIndustrial; - end - - run = 2; - if ismember( f, [2 4] ) - run = 3; - end - - sourceMetadata = gridFile.defineMetadata('lon',lon, 'lat',lat, 'time', time, 'run', run, 'var', 'Tref' ); - - % Add the data source - myGrid.addData( 'nc', trefFiles(f), "TREFHT", dimOrder, sourceMetadata ); - - % so, this says: Add some data to the grid file name 'tutorial.grid', - % the data I am adding is from a NetCDF file, - % the name of the file is in trefFiles, - % the name of the variable in the NetCDF is "TREFHT", - % the order of the dimensions of this variable is dimOrder, - % the data contained in this file is for this metadata. 
-end - -% We can do a similar loop for the PSL files -for f = 1:2:3 - time = preIndustrial; - if ismember(f, [3 4]) - time = postIndustrial; - end - run = 2; - if ismember(f, [2 4]) - run = 3; - end - meta = gridFile.defineMetadata('lat',lat,'lon',lon,'time',time,'run',run,'var',"PSL"); - - myGrid.addData( 'nc', pslFiles(f), "PSL", dimOrder, meta ); -end - -% Also, note that we can mix the type of data source used in a .grid -% files. For example, say I had PSL data stored in a .mat file named -% "LME_SLP_run4_850-1850.mat", and that the PSL variable was named "SLP" in -% this mat file, and the dimension order is lat x time x lon. Then I could -% do -% -% >> gridFile.addData( 'tutorial.grid', 'mat', "LME_SLP_run4_850-1850.mat", "SLP", ["lat", "time", "lon"], sourceMetadata ); - -% Nice! We've successfully added data to a .grid file. You can stop here if -% you like, but the next two sections explain a few more useful methods for -% working with .grid files - -%% Extract grid metadata. - -% Later, we will see that it is useful to catalogue data in a .grid file by -% its metadata. To extract metadata for a file, use - -meta = gridFile.meta('tutorial.grid'); - -% By looking at meta, we can see that it contains the metadata for each -% dimension, and also the grid attributes. -disp(meta); - - -%% Expand a .grid file. - -% As more data becomes available over time, it may be desirable to expand -% the scope of the .grid file. For example, in tutorial.grid we added data -% from runs 2 and 3 of CESM-LME. Let's say that run 4 just completed and -% that we would like to add it to the file as well. Currently, the scope of -% the .grid file only covers runs 2 and 3, so we will need to expand its -% scope to cover run 4. -% -% We will expand the scope of the grid file using the "expand" method. -% To use this method, specify the dimension being expanded, and the new -% metadata for the expanded elements. -% -% For this tutorial, let's say we anticipate not just run 4, but also runs -% 5 - 13. Then we can do -newRuns = (4:13)'; - -% >> gridFile.expand('tutorial.grid', 'run', newRuns ); -% so, this says: expand the scope of tutorial.grid, -% expand the run dimension to include 10 new elements, -% the metadata for these 10 new runs are in newRuns. - -% (For now, let's hold off on actually expanding this .grid file -- it will -% be more useful this way for later parts of the tutorial.) - -%% Rewrite metadata - -% Sometimes, it may be necessary to change the format of the metadata. Do -% this using the "rewriteMetadata" method. Syntax is similar to "expand"; -% first provide the dimension that is receiving new metadata, then provide -% the new metadata. -% -% For example, perhaps I decided to rename the variables. I can do -newMetadata = ["T";"P"]; - -myGrid.rewriteMetadata( 'var', newMetadata ); -% this says: rewrite some of the metadata in tutorial.grid, -% specifically rewrite the metadata for the var dimension, -% the new metadata is in newMetadata. - - -%% Working with tripolar .grids - -% Dash requires unique metadata at each index along a data dimension. -% However, this is not the case for tripolar grids. Typically, a tripolar -% grid will have a lat and a lon dimension, but the metadata at each grid -% node will be unique. How to work with this? - -% To address this issue in dash, we will want to combine the lat and lon -% dimensions into a single vector. Then, each lat/lon metadata coordinate -% will correspond to a single element down this new "tripolar" dimension. 
-% -% Simply note which dimensions need to be merged, and Dash will handle the -% rest. - -% Here we will do a demo using SSTs from a run of TRACE -% Start by building a new gridFile -clearvars; - -% First, get the data file -file = "tos_sfc_Odec_CCSM3_TraCE21ka.nc"; - -% Here we can see that the data in this file is lon x lat x time. -ncdisp( file, 'tos' ); - -% However, looking at the metadata we see that lat and lon are matrices the -% size of the spatial grid. We will need to convert them to a combined -% "tripolar" dimension. -lat = ncread( file, 'lat'); -lon = ncread( file, 'lon'); -time = ncread( file, 'time'); - -% First, we will specify the dimension order in this file -dimOrder = ["tri","tri","time"]; - -% this line indicates that the data has three dimensions, but that the -% first two dimensions will be merged into a common "tri" dimension. - -% Next, we will need to merge the metadata for lat and lon into a common -% "tri dimension. -lat = lat(:); -lon = lon(:); -tri = [lat, lon]; - -% We can now define the scope of the grid file -meta = gridFile.defineMetadata('time', time, 'tri', tri ); -grid = gridFile.new( 'trace_sst.grid', meta ); - -% And add in the file -grid.addData( 'nc', file, 'tos', ["tri","tri","time"], meta ); - -% This says: -% Add a data source to trace_sst.grid, -% The data source is a netcdf file -% The variable of interest is name "tos" -% tos has three dimensions -- the first two should be merged into a common "tri" dimension. -% the array has this metadata. - diff --git a/Tutorials/T2_designStateVectors.m b/Tutorials/T2_designStateVectors.m deleted file mode 100644 index 8396cea0..00000000 --- a/Tutorials/T2_designStateVectors.m +++ /dev/null @@ -1,298 +0,0 @@ -%% Tutorial 2: Building state vectors -clearvars; - -%% Introduction -% Our model data is now organized into a grid file. This is a general -% container that we can use for multiple assimilations. -% -% For a specific assimilation, we will need to specify which portion of the -% data we want to use. This is done by creating a special object that -% contains instructions on how to build a state vector from a .grid file. -% In the dash package, these design instructions are stored in a special -% object of the "stateDesign" class. - -% As with the "gridFile" class, you can see a reference page for -% stateDesign using -% -% >> doc stateDesign -% -% Again, I have listed the essential functions at the top. To see help for -% any of these functions do -% -% >> doc stateDesign.functionName -% -% >> help stateDesign.functionName -% -% Or click on the hyperlink on the stateDesign reference page. - -% Unlike the "gridFile" class, which was just a container for a set of -% fileIO functions, the stateDesign class describes unique variables, each -% describing a unique set of instructions used to build a state vector. -% Thus, we will usually treat stateDesigns like variables. However, they -% are a bit more complex than variables, because they also have some functions -% associated with them. From this point onward, I will refer to individual -% stateDesign variables as "stateDesign objects", and their associated -% functions as "methods". -% -% If it's useful conceptually, you can think of each stateDesign object -% as an extra-fancy "struct", and the use of dot-indexing is also similar -% to struct syntax. 
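-%
-% As a rough sketch of that analogy (the "add" syntax is explained below):
-%
-% >> s.fieldName                   % struct: dot-indexing reads a field
-% >> design = design.add( ... )    % stateDesign: dot-indexing can also call
-%                                  % a method, which returns an updated object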
- -%% Create a new stateDesign object - -% So, let's start by designing a state vector that contains the summertime -% mean (JJA) of sea level pressure at each LME node in the northern -% hemisphere, and the global summertime mean sea level pressure. - -% We will start by initializing a new stateDesign object. We'll give it a -% name "My tutorial design" -design = stateDesign('My tutorial design'); - -% So, "design" is the name of a unique stateDesign object -% -% If we look at it in the console -% >> design -% -% we can see it has the name "My tutorial design", and some other fields -% which are mostly empty. - -% For demonstration purposes, we can make a second design -design2 = stateDesign('A different design'); -disp(design2); - -% Which stores a different value in the "name" field, and if we look in the -% Matlab workspace, we can see that things are distinct objects. Again, -% these are similar to extra fancy structure arrays. -clearvars design2; - -% However, one way that the stateDesign objects differ from stucts is that -% they have specific functions associated with them, known as methods. -% Use dot-indexing to apply a method to a specific stateDesign object. - -%% Add a variable - -% So, one of the essential stateDesign methods is "add". This adds a new -% variable to a state vector. Let's use it to add a new variable to the -% specific "My tutorial design" set of instructions. -% -% You can get help on this specific method via -% -% >> doc stateDesign.add -% >> help stateDesign.add -% -% OR by referencing a specific stateDesign objects, e.g. -% -% >> doc design.add -% >> help design.add - -% Here we can see that "add" accepts 2 inputs, the name of the variable, -% and the grid file in which the variable is located. -design = design.add( 'PSL', 'tutorial.grid' ); - -% So this say, add a new variable to design. The variable's name is 'PSL', -% and it is located in 'tutorial.grid'. -% -% Note that the updated design is provided as output. The command -% -% >> design.add( 'PSL', 'tutorial.grid') -% -% wouldn't actually do anything to design. It would instead return an -% updated design that Matlab would automatically name "ans". -% -% This is to allow multiple state vector designs to be branched off from -% some base set of instructions. - - -%% Edit the design - -% As a reminder, we want our state design to contain the JJA mean of PSL in -% the northern hemisphere, and the global mean JJA PSL. - -% Let's start with the JJA mean in the northern hemisphere. We need to -% provide instructions on what to select. Do this with the "edit" method. - -% We'll need to note which indices to extract data from along each -% dimension in the .grid file. Some metadata might be useful. To get .grid -% file metadata use -meta = gridFile.meta('tutorial.grid'); - -% We can see that meta has a field for each dimension. And each field -% contains the metadata for the dimension. - -% Lets get all the northern hemisphere latitudes and the starting index of -% each JJA mean (so, all of the Junes) -nh = meta.lat > 0; -june = month( meta.time ) == 6; - -% We are almost ready to edit the design, but first, an important concept: -% In this tutorial, I will refer to "state dimensions" and "ensemble -% dimensions". A state dimension is a dimension for which the metadata at -% each state vector element is fixed. An ensemble dimensions is one for -% which metadata is unique to each ensemble member. Say I have a model -% ensemble: -% -% 1 2 3 ... M -% E1 | -% E2 | -% ... 
| -% En | -% -% This is an ensemble with M ensemble members, and N state vector elements. -% Say that the state vector is some spatial grid, and the ensemble members -% are selected from different time points. -% -% Then, the spatial dimensions will be state dimensions. State element 1 -% will refer to the same lat-lon point in all ensemble members. State -% element 2 will refer to a different lat-lon point, but it will be the -% same point in all ensemble members. And so on... -% -% By contrast, time will be an ensemble dimension. Ensemble member one is -% selected from time point 1, ensemble member 2 is selected from time point -% 2, etc. Note that the time metadata at state vector element 1 is -% undefined -- it depends on which ensemble member you are looking at. - -% When editing a state design, you first note which variable you are -% editing, which dimension in the variable you want to edit, and whether -% that dimension is a state dimension or an ensemble dimension. -% -% stateDesign initializes all dimensions as state dimensions by default, so -% you will need to note all the dimensions that are ensemble dimensions for -% your data. - -% So, back to the example. Let's first do the northern hemisphere -design = design.edit( 'PSL', 'lat', 'state', 'index', nh ); - -% Next, we need to note which variable this is in the grid file -design = design.edit( 'PSL', 'var', 'state', 'index', 2 ); - -% This says: edit the PSL variable, edit the lat dimension, the lat -% dimension is a state dimension, the state vector should be built from the -% data at these lat indices. - -% Now, the jja mean -design = design.edit( 'PSL', 'time', 'ens', 'index', june, 'mean', [0 1 2] ); - -% This says: edit the PSL variable, edit the time dimension, the time -% dimension is an ensemble dimension, build the state vector using data -% that starts at june, but take a mean over june + 0, 1, and 2 subsequent -% time indices (so, june, july and august). - -% Also recall that we added in 2 runs to the .grid file. So we also need to -% note that the run dimension is an ensemble dimension -design = design.edit( 'PSL', 'run', 'ens' ); - -% Now, recall that we also want the PSL global mean in JJA. The dash -% package defines variables as elements with a unique set of instructions -% in a stateDesign. So, even though the global mean PSL is derived from the -% same climate model variable as the NH JJA mean, it is treated as a -% distinct variable in the state vector (because the instructions regarding -% spatial dimensions will be different). -% -% Let's add this variable, and then edit it in the design -design = design.add( 'PSL_globe', 'tutorial.grid' ); -design = design.edit( 'PSL_globe', 'time', 'ens', 'index', june, 'mean', [0 1 2] ); -design = design.edit( 'PSL_globe', 'lat', 'state', 'mean', true ); -design = design.edit( 'PSL_globe', 'lon', 'state', 'mean', true ); -design = design.edit( 'PSL_globe', 'run', 'ens'); -design = design.edit( 'PSL_globe', 'var', 'state', 'index', 2 ); - -% And we're done! Note that the syntax for means is different in state -% dimensions and ensemble dimensions. State dimensions have a simple -% true/false, whereas ensemble dimensions require the indices used to -% compute the mean. - -% If we wanted, we could instead use a weighted spatial mean. Say we knew -% the area in each grid node and wanted to weight by it. 
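-% (A common, simple choice on a regular lat-lon grid is cosine-of-latitude
-% weighting, which is proportional to grid cell area. Hypothetically, if
-% meta.lat and meta.lon are the coordinate vectors from the .grid file, then
-%
-% >> [~, LAT] = meshgrid( meta.lon, meta.lat );   % 96 x 144 latitude grid
-% >> gridArea = cosd( LAT );                      % relative cell areas
-%
-% would do it. For this demo, I'll just use random weights.)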
-
-% (Here, I'm just undoing the previous mean)
-design = design.edit( 'PSL_globe', 'lat', 'state', 'mean', false );
-design = design.edit( 'PSL_globe', 'lon', 'state', 'mean', false );
-
-% And here I'm specifying to use a weighted mean
-gridArea = rand( 96, 144 );
-design = design.weightedMean( 'PSL_globe', ["lat","lon"], gridArea );
-
-% Note that before providing the weights, we needed to specify the order of
-% the dimensions in the array of weights.
-
-
-%% Sequences
-% One other concept is a sequence. A sequence is a way for an ensemble
-% dimension to have some metadata structure along the state vector.
-% (Separate from the metadata for each ensemble member.)
-%
-% For example, say I wanted to use PSL in the northern hemisphere, but for
-% June, July, and August individually -- without a time mean. My ensemble
-% would look something like:
-%
-%              1    2    3   ...   M
-% June   E1    |
-% June   E2    |
-% June   E3    |
-% ...          |
-% July   E1    |
-% July   E2    |
-% July   E3    |
-% ...          |
-% Aug    E1    |
-% Aug    E2    |
-% Aug    E3    |
-% ...          |
-
-% Now, each ensemble member is still associated with a unique time point.
-% However, each state vector element is also associated with some time
-% metadata (namely, June, July or August). This is referred to as a
-% sequence. The syntax for sequences is similar to the syntax for means
-% in ensemble dimensions. Specify a reference index for the sequence
-% (in this case June), and then the number of indices to progress from that
-% reference to construct the sequence.
-%
-% So using our example
-design = design.edit( 'PSL', 'time', 'ens', 'index', june, 'seq', [0 1 2], 'meta', ["June";"July";"August"] );
-
-% Note that you MUST provide metadata for sequences.
-
-% It's also convenient to note that the reference index for a sequence does
-% not need to be the actual start of the sequence. Instead of the sequence
-% call above, we could do
-january = month( meta.time ) == 1;
-design = design.edit( 'PSL', 'time', 'ens', 'index', january, 'seq', [5 6 7], 'meta', ["June","July","August"] );
-
-% and get exactly the same result.
-
-%% A sequence of means
-
-% Finally, the most complicated formulation. You may wish to take a
-% sequence of means. Perhaps you are studying the effects of volcanic
-% eruptions and want to look at the JJA mean of PSL in year 1, year 2, and
-% year 3. This is a sequence (year 1, 2, 3) of means (JJA).
-%
-% In this syntax, the sequence indices refer to the indices from which to
-% start counting the mean indices. So:
-
-design = design.edit( 'PSL', 'time', 'ens', 'index', june, 'seq', [0, 12, 24], 'mean', [0 1 2], 'meta', ["Year 1","Year 2","Year 3"] );
-
-% Here, the reference index for each ensemble member will be a June. To
-% make the sequences, we will get sequence indices that are 0 time steps,
-% 12 monthly time steps (1 year), and 24 monthly time steps (2 years) from
-% the reference. Then, from indices 0, 12, and 24, take a mean over the
-% next +0, +1, and +2 time indices (JJA year 1, JJA year 2, JJA year 3).
-
-% As before, the reference indices are flexible. The line
-design = design.edit( 'PSL', 'time', 'ens', 'index', january, 'seq', [0 12 24], 'mean', [5 6 7], 'meta', ["Year 1","Year 2","Year 3"]);
-
-% would produce the same results.
-
-%% Copying
-
-% As you add more and more variables to a state vector, it can get tedious
-% to edit each individual dimension.
-%
-% If this is the case, check out the "copy" method, which copies and pastes
-% design instructions between variables.
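-% For example, a single call along the lines of
-%
-% >> design = design.copy( 'PSL', 'PSL_globe' );
-%
-% would copy the dimension settings from the PSL variable onto the PSL_globe
-% variable, instead of editing each dimension of PSL_globe by hand.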
- -% When using copy, note that reference indices (those following the 'index' -% flag) are matched via .grid file metadata, so don't worry if the -% dimensions have different lengths. -% -% There's an example of this at the very beginning of the next tutorial. \ No newline at end of file diff --git a/Tutorials/T3_buildEnsemble.m b/Tutorials/T3_buildEnsemble.m deleted file mode 100644 index 8219db10..00000000 --- a/Tutorials/T3_buildEnsemble.m +++ /dev/null @@ -1,199 +0,0 @@ -%% Tutorial 3: Build an ensemble -clearvars; - -%% Make the design -% -% The basic workflow for building an ensemble in dash is -% 1. Design a state vector -% 2. Write an ensemble to a .ens file -% 3. Load the ensemble - -% I'll start off by quickly making the state design we made in Tutorial 2. -% (PSL NH JJA mean, with PSL JJA global mean) -file = 'tutorial.grid'; -meta = gridFile.meta(file); -d = stateDesign('tutorial'); -d = d.add( 'PSL', file ); -d = d.add( 'PSL_globe', file); - -d = d.edit( 'PSL', 'var', 'state', 'index', 2 ); -d = d.edit( 'PSL', 'time', 'ens', 'index', month(meta.time)==6, 'mean', [0 1 2]); -d = d.edit( 'PSL', 'run', 'ens'); -d = d.copy( 'PSL', 'PSL_globe'); - -d = d.edit( 'PSL', 'lat', 'state', 'index', meta.lat>0 ); -d = d.edit( 'PSL_globe', 'lat', 'state', 'mean', true ); -d = d.edit('PSL_globe', 'lon', 'state', 'mean', true ); - - -%% Build the ensemble - -% This is a rather easy step. stateDesign objects have the method -% "buildEnsemble" which writes an ensemble to a .ens file. To use the -% method, specify the number of ensemble members, and the name of the .ens -% file. -ens = d.buildEnsemble( 50, 'tutorial.ens'); - -% You can also optionally specify whether the ensemble should be drawn at -% random or selected in order. -% -% Ordered selection can be useful if you want to maintain a time structure -% within the ensemble. I often find it useful when designing target fields -% for pseudo-proxy experiments. -ens2 = d.buildEnsemble( 50, 'tutorial_ordered.ens', false ); - -% Finally, you can note to overwrite exisiting files if desired. The line -try - ens = d.buildEnsemble( 50, 'tutorial.ens' ); -catch ME - disp( ME.message ) -end - -% will now throw an error. But -ens = d.buildEnsemble( 50, 'tutorial.ens', true, true ); - -% will not. - -%% The ensemble object. - -% Note that the output of buildEnsemble, ens, is an "ensemble" object. -% -% This stores metadata on the ensemble, and allows you to interact with the -% saved ensemble without ever bothering with the .ens file. - -% Some useful fields of the ensemble object include -ens.random -% which indicates whether the ensemble is drawn randomly or ordered - -ens.design -% The state design used to create the ensemble - -ens.metadata -% A metadata object often useful for PSMs - -ens.ensSize -% The size of the ensemble - -ens.hasnan -% Whether an ensemble member has any NaN values - -% See -% >> doc ensemble -% -% For available methods. - -% Currently, there are only two methods. "add" and "load" - -%% Add to ensemble - -% Use the add method when you want to add more ensemble members to an -% existing ensemble. - -% For example, if we do -ens.ensSize - -% we can see that the ensemble we built has 6913 state vector elements, -% and 50 ensemble members. - -% But we can do -ens.add( 5 ); - -% To add 5 more ensemble members -% Check out -ens.ensSize - -%% Load an ensemble - -% To actually load the ensemble into memory, use -M = ens.load; - -% This returns the actual ensemble data array stored in the .ens file. 
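-% (As a quick sanity check -- this is my assumption about the layout, so
-% compare with ens.ensSize -- the array should have one row per state vector
-% element and one column per ensemble member. The commands
-%
-% >> size(M)
-% >> ens.ensSize
-%
-% should report matching sizes.)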
- -% It is also possible to only load a few specific ensemble members or a few -% specific variables. Specify which ones you want, and then load. -% -% For example, the following lines cause only the first, fifth and 55th -% ensemble members of the global mean PSL variable to be loaded. -ens.useMembers( [1 5 55] ); -ens.useVars( "PSL_globe" ); -M = ens.load; - -%% Ensemble metadata -ens = ensemble('tutorial.ens'); - -% The field -ens.metadata - -% Stores comprehensive metadata for the ensemble. It is often useful for -% PSMs, and can be provided directly via line 114. -% -% If you would like to interact directly with the ensemble metadata, please -% see -% -% >> doc ensembleMetadata - -% If you would like to check the metadata of certain -% state vector indices (perhaps to make sure that the ensemble was built -% correctly, use the "lookup" method. -% -% Inputs for this method are the dimensions of interest, and the state -% vector indices at which you would like to lookup data -[meta] = ens.metadata.lookup( ["lat","lon", "time"], [5:10] ); - -disp([meta.lat, meta.lon]) - -% Here we can see the lat-lon coordinates of state vector elements 5-10. - -% Note that -meta.time - -% is all NaN, because time is an ensemble dimension. If we had used a -% sequence for time, then meta would show the sequence metadata. - -% Sometimes, you may want to use ensemble metadata without actually -% building or loading an ensemble. If this is the case, you can generate an -% ensembleMetadata object using either a stateDesign object, or the name -% of an existing .ens file as the input. For example -ensMeta2 = ensembleMetadata( d ); -ensMeta3 = ensembleMetadata( 'tutorial.ens' ); - -% If you specified specific ensemble members or variables, you can get the -% metadata for just the loaded data by calling ens.metadata -% -% For example, after calling -ens.useMembers( [1 5 55] ); -ens.useVars( "PSL_globe" ); - -% The command -ensMeta = ens.metadata; - -% returns the metdata structure for the 5 member, global PSL ensemble - - -%% Existing ensembles - -% If you previously wrote a .ens file. Do -% >> ens = ensemble( fileName ); - -% to get the associated ensemble object. For example: -clearvars; -ens = ensemble('tutorial.ens'); - - -%% Run this, but ignore this, it's for the next tutorial. - -file = 'tutorial.grid'; -meta = gridFile.meta(file); -months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]; -d = stateDesign('tutorial'); -d = d.add( 'T', file ); -d = d.add( 'T_globe', file); -d = d.edit( 'T', 'time', 'ens', 'index', month(meta.time)==1, 'seq', 0:11, 'meta', months); -d = d.edit( 'T', 'run', 'ens'); -d = d.edit( 'T', 'var', 'state', 'index', 1 ); -d = d.copy( 'T', 'T_globe'); -d = d.edit( 'T', 'lat', 'state', 'index', meta.lat>0 ); -d = d.edit( 'T_globe', 'lat', 'state', 'mean', true ); -d = d.edit('T_globe', 'lon', 'state', 'mean', true ); - -d.buildEnsemble(10,'tutorial_sequence.ens'); \ No newline at end of file diff --git a/Tutorials/T4_PSMs.m b/Tutorials/T4_PSMs.m deleted file mode 100644 index 7a6c6347..00000000 --- a/Tutorials/T4_PSMs.m +++ /dev/null @@ -1,145 +0,0 @@ -%% Tutorial 4: PSMs - -% PSMs are stored as part of the PSM class. -% -% In general, you need to know 2 methods to use a PSM -% 1. The method to create a PSM object (which is always the name of the PSM) -% 2. The "getStateIndices" method. - -%% New PSM objects -clearvars; -% It is up to the user to know the names of the PSMs they wish to use. 
-% Current PSMs include
-%
-% linearPSM: Implements multivariate linear models
-% ukPSM: Implements a uk37 forward model
-% vstempPSM: Implements temperature-only VS-Lite
-%
-% Check out PSMs/Specific Forward Models for more
-
-% Here we'll demo with some ukPSMs
-
-% Let's say we have 5 proxy sites in the northern hemisphere
-lats = [1;15;60;22;18];
-lons = rand(5,1);
-nSites = 5;
-
-% We'll initialize some new ukPSMs, and store them in a cell array
-F = cell(5,1);
-for s = 1:nSites
-    F{s} = ukPSM( lats(s), lons(s) );
-end
-
-% Great! That's the first step to using a PSM. Again, the name of the PSM
-% will vary. But you can do
-%
-% >> help psmName.psmName
-%
-% to see how to build a new PSM
-help ukPSM.ukPSM
-
-% Or
-% >> doc psmName
-%
-% to see if the PSM has any additional methods.
-
-
-%% State indices
-
-% The "getStateIndices" method is used by each PSM to determine which
-% state vector elements it requires to run. The inputs will vary by PSM, so
-% you will probably want to use
-%
-% >> help psmName.getStateIndices
-%
-% for syntax.
-
-help ukPSM.getStateIndices
-
-% We can see that the ukPSM requires the ensemble metadata, the name of
-% the SST variable, and sequence metadata for all 12 months. We'll use the
-% tutorial_sequence.ens ensemble built at the end of the last tutorial.
-% Let's pretend that the Tref variables are actually SST variables for
-% convenience.
-ens = ensemble('tutorial_sequence.ens');
-months = ["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"]';
-
-for s = 1:nSites
-    F{s}.getStateIndices( ens.metadata, "T", months );
-end
-
-% Great! The PSMs are ready for use
-
-
-%% State indices for many PSMs
-
-% If you have many PSMs, it can be faster to calculate state indices all at
-% once. To do this, determine which state vector indices are
-% appropriate, and then use the "setStateIndices" method.
-
-% For example, the ensembleMetadata method "closestLatLonIndices" can be
-% used to find the state vector indices within a given variable that are
-% closest to a set of lat-lon coordinates.
-% Let's use that to redo the previous section of code:
-
-coords = [lats, lons];
-H = ens.metadata.closestLatLonIndices( coords, "T", 'time', months );
-
-% Here, each column of H is the state vector indices for one of the PSMs.
-% (If you want to check that they are the same as in the previous section,
-% compare:
-% >> H(:,1)
-% >> F{1}.H )
-
-% Continuing our example, we can now set the state vector indices for each
-% PSM
-for s = 1:nSites
-    F{s}.setStateIndices( H(:,s) );
-end
-
-% Currently, closestLatLonIndices can only search one variable at a time.
-% So if your PSMs need information from multiple variables, then:
-% 1. Calculate H for each variable
-% 2. Concatenate all the Hs
-% 3. Call setStateIndices
-
-% The following code gives an example.
-secondVariableName = "T";   % Only one variable in the tutorial ensemble has
-                            % lat-lon data, so pretend this is a different variable
-H1 = ens.metadata.closestLatLonIndices( coords, "T", 'time', months );
-H2 = ens.metadata.closestLatLonIndices( coords, secondVariableName, 'time', months );
-Hcat = cat(1, H1, H2);
-
-for s = 1:nSites
-    F{s}.setStateIndices( Hcat(:,s) );
-end
-
-
-%% Unit correction
-
-% (Revert to the original state indices)
-for s = 1:nSites
-    F{s}.setStateIndices( H(:,s) );
-end
-
-% The ukPSM is written to process data in Celsius. But model output is
-% often in Kelvin, so it may be necessary to convert units when passing
-% data to the PSM.
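-% (Recall that Kelvin to Celsius is a purely additive conversion,
-% T[C] = T[K] - 273.15, which is why an additive correction is all we need
-% here.)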
- -% You can do this via the "setUnitConversion" method -for s = 1:nSites - F{s}.setUnitConversion( 'add', -273.15*ones(12,1) ); -end - -% here, I provided -273.15 twelve times because the ukPSM has 12 inputs -% (SST for each month). However, this will vary by PSM - -disp( F{1}.addUnit ) -% shows the additive constant being used for unit conversion. - -% Let's save these PSMs for the next tutorial -save('tutorial_psms.mat', 'F', 'lats','lons'); -%% Bias correction - -% You can also have PSMs apply a bias correction to input data. This is -% somewhat of an advanced topic, so see PSM.setBiasCorrector, and the -% biasCorrector classes for comprehensive details. \ No newline at end of file diff --git a/Tutorials/T5_Data_Assimilation.m b/Tutorials/T5_Data_Assimilation.m deleted file mode 100644 index 8e73bb85..00000000 --- a/Tutorials/T5_Data_Assimilation.m +++ /dev/null @@ -1,193 +0,0 @@ -%% Tutorial 5: Run data assimilation - -% Great, you've made all the things, let's run some analyses! - -% We will work by using specific classes to run different types of -% analyses. Currently, there are classes to implement kalman filters, -% particle filters, and optimal sensor tests. Each does heavy error - % checking on ensembles, observations, PSMs, and observation -% uncertainties to make sure everything has been prepared correctly for DA. -% -% This tutorial is only for ensemble Kalman filters, but the workflow for -% the other analyses is analogous. - -% So, start by creating a kalmanFilter object. Inputs are a model ensemble, -% PSMs, observations and uncertainty. -clearvars; -ens = ensemble('tutorial_sequence.ens'); -M = ens.load; -load('tutorial_psms.mat'); - -% We'll just make some random observations and uncertainties -D = rand(5, 100); -R = ones( 5, 100); - -kf = kalmanFilter(M, D, R, F); - -% Some notes: You can provide either an ensemble object, OR the associated -% ensemble data array to dash. The ensemble object is useful for methods -% that operate on ensembles too large to fit into memory. But the data -% array is generally faster if possible. - -% Also: You can provide R as a scalar (same uncertainty all observations -% all time steps), row vector( same uncertainty all observations but unique -% for each time step), column (unique observations same in all time steps), -% or as a matrix (unique for both observation and time step). - -%% Adjust analysis settings - -% The settings for different analyses are altered via the -% "settings" methods - -% Let's run using a serial updating scheme, an inflation factor of 3, and a -% localiation radius of 10000 km - -% We'll start with the localization radius. The "dash" class provides -% various tools for data assimilation. For most users, the most useful -% methods will be the "localizationWeights" and "regrid" methods. We'll -% come back to "regrid" later, but for now, let's use -% "dash.localizationWeights" to generate the localization scheme. -% -% Provide site coordinates, -% the ensemble metadata object, and the localization radius in km. -w = dash.spatialLocalization( [lats, lons], ens.metadata, 10000); - -% Great, let's adjust the settings -kf.settings('type', 'serial', 'inflate', 3, 'localize', w ); - -% Alright, if we want to see our settings we can do -kf -% and see that our settings have been stored. - -% Now, lets run the analysis -output = kf.run - -%% Regridding output - -% So now we have an output structure with various fields. One of the most -% interesting is often Amean -- that is, the ensemble mean of the analysis. 
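-% (A note on layout -- this is my assumption, so verify against
-% >> doc kalmanFilter.run
-% but Amean should have one row per state vector element and one column per
-% assimilated time step, i.e. one column for each column of D.)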
- -% Right now it's in state vector form, but we can quickly extract a gridded -% variable from it (or any other state vector) using the "regrid" method - -% Inputs are the state vector (or matrix of state vectors), name of the -% variable we wish to extract, and ensemble metadata associated with the state -% vector. -Am = dash.regrid( output.Amean, 'T', ens.metadata ); - -% We can do -size(Am) -% to see that the product has been regridded - -% Although, usually it's useful to also have the metadata associated with -% the grid, which is provided as the second output. -[Am, meta] = dash.regrid( output.Amean, 'T', ens.metadata ); - -% Here -meta.lat -meta.lon -% show the lat and lon metadata along each dimension - -% and -meta.time -% shows the sequence metadata associated with the 12 time dimension -% indices. - -%% Regridding tripolar - -% Note that tripolar grids use the method "regridTripolar" to return to the -% original grid. (Calling "regrid" will still keep the tripolar spatial -% dimension as a single vector.) -% -% Note that this method requires one additional input. Typically, only the -% ocean grids from a tripolar coordinate system are use for data assimilation. -% The extra input is simply the indices of these grids in the complete state -% vector. - - -%% Reconstruct only a few selected variables. - -% It is also possible to only reconstruct a few variables for the kalman -% filter. For example, you may need many variables to run some PSMs, but -% are only interested in reconstructing one output variable. -% -% If an ensemble contains many variables, only reconstructing a few of them -% can greatly speed up the analysis. Note that this is only possible for -% joint updating schemes, or serial updating schemes with appended Ye. - -% To only reconstruct specific variables, use the method "reconstructVars". -% Inputs are the desired variables, and metadata for the ensemble. -% -% From our previous examples, let's say we now wish to only reconstruct the -% global temperature variable, but need the full spatial field to run the -% PSMs. Here, we'll use a joint updating scheme with no localization. -kf.settings('type','joint','localize',[]); -kf.reconstructVars( "T_globe", ens.metadata ); - -% When we run the filter -output = kf.run; - -% we can from the size of Amean that only the monthly global mean -% temperature was reconstructed. -size( output.Amean ); - -% Note that you will need to use modified ensemble metadata when generating -% localization weights or regridding partially reconstructed ensembles. Do -% this via the ensembleMetadata method: "useVars" - -% For example, this provides localization weights for the global -% temperature reconstruction -partialMeta = ens.metadata.useVars("T_globe"); -w = dash.spatialLocalization( [lats, lons], partialMeta, 10000 ); - -% (note that the localization weights are now only 12 x 5, rather than the -% previous 82956 x 5). - -% Similarly, use the metadata for the partially reconstructed ensemble to -% regrid the output. -Tglobe = dash.regrid( output.Amean, 'T_globe', partialMeta ); - - -%% Limit variables to only the values needed to run PSMs - -% Sometimes, it is desirable to only reconstruct a few values from certain -% variables. This is most common for proxy verification analyses. -% -% For example: Say we have a large gridded ocean variable used to run -% several PSMs. We want to do a proxy validation study, reconstructing -% proxies from the posterior ensemble. 
We only need a few values from the
-% ocean field to run these PSMs, so we don't need to reconstruct the
-% *entire* ocean. However, we do need to reconstruct the values used to run
-% the PSMs.
-
-% To limit certain variables to PSMs, use the "dash.restrictVarsToPSMs"
-% method. Inputs are the variables to limit, the PSMs, and either an
-% ensemble object or its metadata. For example, let's look at an initial
-% ensemble and its PSMs
-
-ens = ensemble('tutorial_sequence.ens');
-M1 = ens.load;
-size(M1)
-F{1}.H
-Y1 = dash.calculateYe( M1, F );
-
-% We can see that M1 is fairly large, and F{1}.H points to certain state
-% vector indices. Let's say we actually only need the parts of the "T"
-% variable that are used to run PSMs. We can do
-dash.restrictVarsToPSMs( ["T"], F, ens );
-
-% Now if we look at the ensemble and PSMs, we can see that M2 is smaller.
-M2 = ens.load;
-size(M2)
-
-% And that the PSMs' state indices have been updated to reflect the reduced
-% ensemble
-F{1}.H
-
-% Furthermore, we can check that the Ye values are the same as would have
-% been calculated for the full ensemble.
-Y2 = dash.calculateYe( M2, F );
-isequal( Y1, Y2 )
-
-% To get covariance localization weights for the new ensemble, proceed as
-% normal using
-w = dash.spatialLocalization( [lats, lons], ens.metadata, 10000 );
\ No newline at end of file
diff --git a/1. Grid files/dataSource.m b/dataSource.m
similarity index 100%
rename from 1. Grid files/dataSource.m
rename to dataSource.m
diff --git a/1. Grid files/matSource.m b/matSource.m
similarity index 100%
rename from 1. Grid files/matSource.m
rename to matSource.m
diff --git a/1. Grid files/ncSource.m b/ncSource.m
similarity index 100%
rename from 1. Grid files/ncSource.m
rename to ncSource.m