From cb91cb7438c2d24e9082495e4f0b2d2d12b94a30 Mon Sep 17 00:00:00 2001
From: Jouni Helske
Date: Wed, 10 May 2017 11:16:53 +0100
Subject: [PATCH] correct approximate EKF-MCMC

---
 src/mgg_ssm.cpp            | 3 ++-
 src/nlg_amcmc.cpp          | 6 ++----
 vignettes/growth_model.Rmd | 8 ++++----
 3 files changed, 8 insertions(+), 9 deletions(-)

diff --git a/src/mgg_ssm.cpp b/src/mgg_ssm.cpp
index 093fdcf3..5a25dff1 100644
--- a/src/mgg_ssm.cpp
+++ b/src/mgg_ssm.cpp
@@ -472,6 +472,7 @@ arma::cube mgg_ssm::simulate_states() {
     um(j) = normal(engine);
   }
   asim.slice(0).col(0) = L_P1 * um;
+  arma::mat y_tmp = y;
   for (unsigned int t = 0; t < (n - 1); t++) {
     arma::uvec na_y = arma::find_nonfinite(y.col(t));
     if (na_y.n_elem < p) {
@@ -500,6 +501,6 @@
   }
 
   asim.slice(0) += fast_smoother();
-
+  y = y_tmp;
   return asim;
 }
diff --git a/src/nlg_amcmc.cpp b/src/nlg_amcmc.cpp
index 07a840f6..a0df772f 100644
--- a/src/nlg_amcmc.cpp
+++ b/src/nlg_amcmc.cpp
@@ -431,8 +431,7 @@ void nlg_amcmc::gaussian_sampling(nlg_ssm model, const unsigned int n_threads) {
     gaussian_state_sampler(model, theta_storage, mode_storage, alpha_storage);
   }
 
-  posterior_storage = prior_storage + approx_loglik_storage - scales_storage +
-    log(weight_storage);
+  posterior_storage = prior_storage + approx_loglik_storage;
 }
 
 void nlg_amcmc::gaussian_state_sampler(nlg_ssm model,
@@ -477,8 +476,7 @@ void nlg_amcmc::gaussian_state_sampler(nlg_ssm model,
     }
     approx_model.compute_HH();
     approx_model.compute_RR();
-
-    alpha.slice(i) = approx_model.simulate_states();
+    alpha.slice(i) = approx_model.simulate_states().slice(0).t();
   }
 }
 
diff --git a/vignettes/growth_model.Rmd b/vignettes/growth_model.Rmd
index 77485a53..cb0c252a 100644
--- a/vignettes/growth_model.Rmd
+++ b/vignettes/growth_model.Rmd
@@ -118,7 +118,6 @@ model <- nlg_ssm(y = y, a1=pntrs$a1, P1 = pntrs$P1,
 Let's first run Extended Kalman filter and smoother using our initial guess for $\theta$:
 
 ```{r ekf}
-library(diagis)
 out_filter <- ekf(model)
 out_smoother <- ekf_smoother(model)
 ts.plot(cbind(y, out_filter$att[, 2], out_smoother$alphahat[, 2]), col = 1:3)
@@ -127,12 +126,13 @@ ts.plot(cbind(out_filter$att[, 1], out_smoother$alphahat[, 1]), col = 1:2)
 
 ## Markov chain Monte Carlo
 
-For parameter inference, we can perform full Bayesian inference with \texttt{bssm}. There are multiple choices for the MCMC algorithm in the package, and here we will use $\psi$-PF based pseudo-marginal MCMC with delayed acceptance. Let us compare this approach with EKF-based approximate MCMC:
+For parameter inference, we can perform full Bayesian inference with \texttt{bssm}. There are multiple choices for the MCMC algorithm in the package, and here we will use $\psi$-PF based pseudo-marginal MCMC with delayed acceptance. Let us compare this approach with EKF-based approximate MCMC (note that the number of MCMC iterations is rather small):
 
 ```{r mcmc}
-out_mcmc_pm <- run_mcmc(model, n_iter = 1e4, nsim_states = 10, method = "pm",
+out_mcmc_pm <- run_mcmc(model, n_iter = 5000, nsim_states = 10, method = "pm",
   simulation_method = "psi")
-out_mcmc_ekf <- run_mcmc(model, n_iter = 1e4, method = "ekf")
+out_mcmc_ekf <- run_mcmc(model, n_iter = 5000, method = "ekf")
+library("diagis")
 alpha_ekf <- weighted_mean(out_mcmc_ekf$alpha, out_mcmc_ekf$counts)
 alpha_pm <- weighted_mean(out_mcmc_pm$alpha, out_mcmc_pm$counts)
 ts.plot(cbind(y, alpha_pm[, 2], alpha_ekf[, 2]), col = 1:3)
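
Note on the mgg_ssm.cpp hunks: simulate_states apparently reuses the stored observation matrix y as scratch space while simulating (the hunks only show the snapshot and the restore), so the patch saves y up front and puts it back before returning. Below is a minimal, self-contained sketch of that save/restore idiom under that assumption; toy_model and its members are illustrative stand-ins, not the actual bssm sources.

```cpp
#include <armadillo>

// Toy stand-in for a model whose state simulation clobbers the stored
// observation matrix and must therefore restore it before returning.
struct toy_model {
  arma::mat y;  // p x n observation matrix

  arma::mat simulate_states() {
    arma::mat y_tmp = y;  // snapshot: the loop below overwrites y
    arma::mat asim(y.n_rows, y.n_cols, arma::fill::zeros);
    for (arma::uword t = 0; t < y.n_cols; ++t) {
      asim.col(t) = arma::randn<arma::vec>(y.n_rows);
      y.col(t) = asim.col(t);  // y used as scratch during simulation
    }
    y = y_tmp;  // restore, so repeated calls still see the real data
    return asim;
  }
};

int main() {
  toy_model m{arma::randu<arma::mat>(2, 5)};
  const arma::mat before = m.y;
  m.simulate_states();
  // With the restore in place, y is unchanged by simulation:
  return arma::approx_equal(m.y, before, "absdiff", 1e-12) ? 0 : 1;
}
```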
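Note on the first nlg_amcmc.cpp hunk: the removed statement folded importance-sampling correction terms (scales_storage and log(weight_storage)) into the log-posterior, which per the subject line does not belong in the purely EKF-based approximate target; the corrected statement keeps only the prior and the approximate log-likelihood. A small Armadillo sketch restating that arithmetic, with made-up numbers:

```cpp
#include <armadillo>

int main() {
  // Hypothetical per-iteration storages, named after the members in the patch.
  arma::vec prior_storage = {-1.0, -2.0, -1.5};
  arma::vec approx_loglik_storage = {-10.0, -9.5, -11.0};
  arma::vec scales_storage = {0.1, 0.2, 0.15};
  arma::vec weight_storage = {0.9, 1.1, 1.0};

  // The removed line: approximate log-likelihood plus a weight-based
  // correction (only meaningful when importance weights are in play).
  arma::vec posterior_old = prior_storage + approx_loglik_storage
    - scales_storage + arma::log(weight_storage);

  // The corrected line: the approximate EKF target has no weights, so the
  // log-posterior is just prior + approximate log-likelihood.
  arma::vec posterior_new = prior_storage + approx_loglik_storage;

  posterior_old.print("old (incorrect for the approximate target):");
  posterior_new.print("new:");
  return 0;
}
```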