diff --git a/lectures/continuous_time/covid_sde.md b/lectures/continuous_time/covid_sde.md index 8ccedf32..e344d463 100644 --- a/lectures/continuous_time/covid_sde.md +++ b/lectures/continuous_time/covid_sde.md @@ -624,4 +624,4 @@ In this case, there are significant differences between the early and late death This bleak simulation has assumed that no individuals has long-term immunity and that there will be no medical advancements on that time horizon - both of which are unlikely to be true. -Nevertheless, it suggests that the timing of lifting lockdown has a more profound impact after 18 months if we allow stochastic shocks imperfect immunity. +Nevertheless, it suggests that the timing of lifting lockdown has a more profound impact after 18 months if we allow stochastic shocks imperfect immunity. \ No newline at end of file diff --git a/lectures/time_series_models/additive_functionals.md b/lectures/time_series_models/additive_functionals.md index fc836e92..9c54ed87 100644 --- a/lectures/time_series_models/additive_functionals.md +++ b/lectures/time_series_models/additive_functionals.md @@ -206,11 +206,6 @@ All of these objects are computed using the code below. -```{code-cell} julia -using LinearAlgebra, Statistics -``` - - ```{code-cell} julia --- tags: [remove-cell] @@ -219,13 +214,12 @@ using Test, Random ``` ```{code-cell} julia -using Distributions, LaTeXStrings, Parameters, Plots, QuantEcon - +using Distributions, LaTeXStrings, Plots, QuantEcon +using LinearAlgebra, Statistics ``` ```{code-cell} julia -function AMF_LSS_VAR(A, B, D, F = nothing; ν = nothing) - +function AMF_LSS_VAR(A, B, D, F = nothing; upsilon = nothing) if B isa AbstractVector B = reshape(B, length(B), 1) end @@ -250,33 +244,31 @@ function AMF_LSS_VAR(A, B, D, F = nothing; ν = nothing) F = reshape(F, length(F), 1) end - # set ν - if isnothing(ν) - ν = zeros(nm, 1) - elseif ndims(ν) == 1 - ν = reshape(ν, length(ν), 1) + # set upsilon + if isnothing(upsilon) + upsilon = zeros(nm, 1) + elseif ndims(upsilon) == 1 + upsilon = reshape(upsilon, length(upsilon), 1) else - throw(ArgumentError("ν must be column vector!")) + throw(ArgumentError("upsilon must be column vector!")) end - if size(ν, 1) != size(D, 1) - error("The size of ν is inconsistent with D!") + if size(upsilon, 1) != size(D, 1) + error("The size of upsilon is inconsistent with D!") end # construct BIG state space representation - lss = construct_ss(A, B, D, F, ν, nx, nk, nm) + lss = construct_ss(A, B, D, F, upsilon, nx, nk, nm) - return (A = A, B = B, D = D, F = F, ν = ν, nx = nx, nk = nk, nm = nm, lss = lss) + return (; A, B, D, F, upsilon, nx, nk, nm, lss) end -AMF_LSS_VAR(A, B, D) = - AMF_LSS_VAR(A, B, D, nothing, ν=nothing) -AMF_LSS_VAR(A, B, D, F, ν) = - AMF_LSS_VAR(A, B, D, [F], ν=[ν]) - -function construct_ss(A, B, D, F, - ν, nx, nk, nm) +AMF_LSS_VAR(A, B, D) = AMF_LSS_VAR(A, B, D, nothing, upsilon = nothing) +function AMF_LSS_VAR(A, B, D, F, upsilon) + AMF_LSS_VAR(A, B, D, [F], upsilon = [upsilon]) +end +function construct_ss(A, B, D, F, upsilon, nx, nk, nm) H, g = additive_decomp(A, B, D, F, nx) # auxiliary blocks with 0's and 1's to fill out the lss matrices @@ -295,7 +287,7 @@ function construct_ss(A, B, D, F, A1 = hcat(1, 0, nx0r, ny0r, ny0r) # transition for 1 A2 = hcat(1, 1, nx0r, ny0r, ny0r) # transition for t A3 = hcat(nx0c, nx0c, A, nyx0m', nyx0m') # transition for x_{t+1} - A4 = hcat(ν, ny0c, D, ny1m, ny0m) # transition for y_{t+1} + A4 = hcat(upsilon, ny0c, D, ny1m, ny0m) # transition for y_{t+1} A5 = hcat(ny0c, ny0c, nyx0m, ny0m, ny1m) # 
transition for m_{t+1} Abar = vcat(A1, A2, A3, A4, A5) @@ -308,13 +300,13 @@ function construct_ss(A, B, D, F, G2 = hcat(ny0c, ny0c, nyx0m, ny1m, ny0m) # selector for y_{t} G3 = hcat(ny0c, ny0c, nyx0m, ny0m, ny1m) # selector for martingale G4 = hcat(ny0c, ny0c, -g, ny0m, ny0m) # selector for stationary - G5 = hcat(ny0c, ν, nyx0m, ny0m, ny0m) # selector for trend + G5 = hcat(ny0c, upsilon, nyx0m, ny0m, ny0m) # selector for trend Gbar = vcat(G1, G2, G3, G4, G5) # build LSS type x0 = hcat(1, 0, nx0r, ny0r, ny0r) S0 = zeros(length(x0), length(x0)) - lss = LSS(Abar, Bbar, Gbar, zeros(nx+4nm, 1), x0, S0) + lss = LSS(Abar, Bbar, Gbar, zeros(nx + 4nm, 1), x0, S0) return lss end @@ -327,22 +319,22 @@ function additive_decomp(A, B, D, F, nx) return H, g end -function multiplicative_decomp(A, B, D, F, ν, nx) +function multiplicative_decomp(A, B, D, F, upsilon, nx) H, g = additive_decomp(A, B, D, F, nx) - ν_tilde = ν .+ 0.5 * diag(H * H') + upsilon_tilde = upsilon .+ 0.5 * diag(H * H') - return H, g, ν_tilde + return H, g, upsilon_tilde end function loglikelihood_path(amf, x, y) - @unpack A, B, D, F = amf + (; A, B, D, F) = amf k, T = size(y) FF = F * F' FFinv = inv(FF) - temp = y[:, 2:end]-y[:, 1:end-1] - D*x[:, 1:end-1] - obs = temp .* FFinv .* temp + temp = y[:, 2:end] - y[:, 1:(end - 1)] - D * x[:, 1:(end - 1)] + obs = temp .* FFinv .* temp obssum = cumsum(obs) - scalar = (logdet(FF) + k * log(2π)) * (1:T) + scalar = (logdet(FF) + k * log(2pi)) * (1:T) return -(obssum + scalar) / 2 end @@ -354,17 +346,16 @@ function loglikelihood(amf, x, y) end function plot_additive(amf, T; npaths = 25, show_trend = true) - # pull out right sizes so we know how to increment - @unpack nx, nk, nm = amf + (; nx, nk, nm) = amf # allocate space (nm is the number of additive functionals - we want npaths for each) - mpath = zeros(nm*npaths, T) + mpath = zeros(nm * npaths, T) mbounds = zeros(2nm, T) - spath = zeros(nm*npaths, T) + spath = zeros(nm * npaths, T) sbounds = zeros(2nm, T) - tpath = zeros(nm*npaths, T) - ypath = zeros(nm*npaths, T) + tpath = zeros(nm * npaths, T) + ypath = zeros(nm * npaths, T) # simulate for as long as we wanted moment_generator = moment_sequence(amf.lss) @@ -378,7 +369,8 @@ function plot_additive(amf, T; npaths = 25, show_trend = true) for ii in 1:nm li, ui = 2(ii - 1) + 1, 2ii if sqrt(yvar[nx + nm + ii, nx + nm + ii]) != 0.0 - madd_dist = Normal(ymeans[nx + nm + ii], sqrt(yvar[nx + nm + ii, nx + nm + ii])) + madd_dist = Normal(ymeans[nx + nm + ii], + sqrt(yvar[nx + nm + ii, nx + nm + ii])) mbounds[li, t] = quantile(madd_dist, 0.01) mbounds[ui, t] = quantile(madd_dist, 0.99) elseif sqrt(yvar[nx + nm + ii, nx + nm + ii]) == 0.0 @@ -389,7 +381,8 @@ function plot_additive(amf, T; npaths = 25, show_trend = true) end if sqrt(yvar[nx + 2nm + ii, nx + 2nm + ii]) != 0.0 - sadd_dist = Normal(ymeans[nx + 2nm + ii], sqrt(yvar[nx + 2nm + ii, nx + 2nm + ii])) + sadd_dist = Normal(ymeans[nx + 2nm + ii], + sqrt(yvar[nx + 2nm + ii, nx + 2nm + ii])) sbounds[li, t] = quantile(sadd_dist, 0.01) sbounds[ui, t] = quantile(sadd_dist, 0.99) elseif sqrt(yvar[nx + 2nm + ii, nx + 2nm + ii]) == 0.0 @@ -404,8 +397,8 @@ function plot_additive(amf, T; npaths = 25, show_trend = true) # pull out paths for n in 1:npaths - x, y = simulate(amf.lss,T) - for ii in 0:nm - 1 + x, y = simulate(amf.lss, T) + for ii in 0:(nm - 1) ypath[npaths * ii + n, :] = y[nx + ii + 1, :] mpath[npaths * ii + n, :] = y[nx + nm + ii + 1, :] spath[npaths * ii + n, :] = y[nx + 2nm + ii + 1, :] @@ -415,22 +408,24 @@ function plot_additive(amf, T; 
npaths = 25, show_trend = true) add_figs = [] - for ii in 0:nm-1 - li, ui = npaths*(ii), npaths*(ii + 1) + for ii in 0:(nm - 1) + li, ui = npaths * (ii), npaths * (ii + 1) LI, UI = 2ii, 2(ii + 1) push!(add_figs, - plot_given_paths(T, ypath[li + 1:ui, :], mpath[li + 1:ui, :], spath[li + 1:ui, :], - tpath[li + 1:ui, :], mbounds[LI + 1:UI, :], sbounds[LI + 1:UI, :], - show_trend = show_trend)) + plot_given_paths(T, ypath[(li + 1):ui, :], mpath[(li + 1):ui, :], + spath[(li + 1):ui, :], + tpath[(li + 1):ui, :], mbounds[(LI + 1):UI, :], + sbounds[(LI + 1):UI, :], + show_trend = show_trend)) end return add_figs end function plot_multiplicative(amf, T, npaths = 25, show_trend = true) # pull out right sizes so we know how to increment - @unpack nx, nk, nm = amf + (; nx, nk, nm) = amf # matrices for the multiplicative decomposition - H, g, ν_tilde = multiplicative_decomp(A, B, D, F, ν, nx) + H, g, upsilon_tilde = multiplicative_decomp(A, B, D, F, upsilon, nx) # allocate space (nm is the number of functionals - we want npaths for each) mpath_mult = zeros(nm * npaths, T) @@ -450,21 +445,24 @@ function plot_multiplicative(amf, T, npaths = 25, show_trend = true) # lower and upper bounds - for each multiplicative functional for ii in 1:nm - li, ui = 2(ii - 1)+1, 2ii + li, ui = 2(ii - 1) + 1, 2ii if yvar[nx + nm + ii, nx + nm + ii] != 0.0 - Mdist = LogNormal(ymeans[nx + nm + ii]- 0.5t * diag(H * H')[ii], - sqrt(yvar[nx + nm + ii, nx + nm + ii])) + Mdist = LogNormal(ymeans[nx + nm + ii] - + 0.5t * diag(H * H')[ii], + sqrt(yvar[nx + nm + ii, nx + nm + ii])) mbounds_mult[li, t] = quantile(Mdist, 0.01) mbounds_mult[ui, t] = quantile(Mdist, 0.99) elseif yvar[nx + nm + ii, nx + nm + ii] == 0.0 - mbounds_mult[li, t] = exp.(ymeans[nx + nm + ii] - 0.5t * diag(H * H')[ii]) - mbounds_mult[ui, t] = exp.(ymeans[nx + nm + ii] - 0.5t * diag(H * H')[ii]) + mbounds_mult[li, t] = exp.(ymeans[nx + nm + ii] - + 0.5t * diag(H * H')[ii]) + mbounds_mult[ui, t] = exp.(ymeans[nx + nm + ii] - + 0.5t * diag(H * H')[ii]) else error("standard error is negative") end if yvar[nx + 2nm + ii, nx + 2nm + ii] != 0.0 Sdist = LogNormal(-ymeans[nx + 2nm + ii], - sqrt(yvar[nx + 2nm + ii, nx + 2nm + ii])) + sqrt(yvar[nx + 2nm + ii, nx + 2nm + ii])) sbounds_mult[li, t] = quantile(Sdist, 0.01) sbounds_mult[ui, t] = quantile(Sdist, 0.99) elseif yvar[nx + 2nm + ii, nx + 2nm + ii] == 0.0 @@ -479,38 +477,43 @@ function plot_multiplicative(amf, T, npaths = 25, show_trend = true) # pull out paths for n in 1:npaths - x, y = simulate(amf.lss,T) - for ii in 0:nm-1 - ypath_mult[npaths * ii + n, :] = exp.(y[nx+ii+1, :]) - mpath_mult[npaths * ii + n, :] = - exp.(y[nx+nm + ii+1, :] - collect(1:T)*0.5*diag(H * H')[ii+1]) - spath_mult[npaths * ii + n, :] = 1 ./exp.(-y[nx+2*nm + ii+1, :]) - tpath_mult[npaths * ii + n, :] = - exp.(y[nx + 3nm + ii+1, :] + (1:T) * 0.5 * diag(H * H')[ii + 1]) + x, y = simulate(amf.lss, T) + for ii in 0:(nm - 1) + ypath_mult[npaths * ii + n, :] = exp.(y[nx + ii + 1, :]) + mpath_mult[npaths * ii + n, :] = exp.(y[nx + nm + ii + 1, :] - + collect(1:T) * 0.5 * + diag(H * H')[ii + 1]) + spath_mult[npaths * ii + n, :] = 1 ./ + exp.(-y[nx + 2 * nm + ii + 1, :]) + tpath_mult[npaths * ii + n, :] = exp.(y[nx + 3nm + ii + 1, :] + + (1:T) * 0.5 * + diag(H * H')[ii + 1]) end end mult_figs = [] - for ii in 0:nm-1 + for ii in 0:(nm - 1) li, ui = npaths * ii, npaths * (ii + 1) LI, UI = 2ii, 2(ii + 1) push!(mult_figs, - plot_given_paths(T, ypath_mult[li+1:ui, :], mpath_mult[li+1:ui, :], - spath_mult[li+1:ui, :], tpath_mult[li+1:ui, :], - 
mbounds_mult[LI+1:UI, :], sbounds_mult[LI+1:UI, :], - horline = 1.0, show_trend=show_trend)) + plot_given_paths(T, ypath_mult[(li + 1):ui, :], + mpath_mult[(li + 1):ui, :], + spath_mult[(li + 1):ui, :], + tpath_mult[(li + 1):ui, :], + mbounds_mult[(LI + 1):UI, :], + sbounds_mult[(LI + 1):UI, :], + horline = 1.0, show_trend = show_trend)) end return mult_figs end function plot_martingales(amf, T, npaths = 25) - # pull out right sizes so we know how to increment - @unpack A, B, D, F, ν, nx, nk, nm = amf + (; A, B, D, F, upsilon, nx, nk, nm) = amf # matrices for the multiplicative decomposition - H, g, ν_tilde = multiplicative_decomp(A, B, D, F, ν, nx) + H, g, upsilon_tilde = multiplicative_decomp(A, B, D, F, upsilon, nx) # allocate space (nm is the number of functionals - we want npaths for each) mpath_mult = zeros(nm * npaths, T) @@ -527,13 +530,16 @@ function plot_martingales(amf, T, npaths = 25) for ii in 1:nm li, ui = 2(ii - 1) + 1, 2ii if yvar[nx + nm + ii, nx + nm + ii] != 0.0 - Mdist = LogNormal(ymeans[nx + nm + ii] - 0.5^2 * t * diag(H * H')[ii], - sqrt(yvar[nx + nm + ii, nx + nm + ii])) + Mdist = LogNormal(ymeans[nx + nm + ii] - + 0.5^2 * t * diag(H * H')[ii], + sqrt(yvar[nx + nm + ii, nx + nm + ii])) mbounds_mult[li, t] = quantile(Mdist, 0.01) mbounds_mult[ui, t] = quantile(Mdist, 0.99) elseif yvar[nx + nm + ii, nx + nm + ii] == 0.0 - mbounds_mult[li, t] = ymeans[nx + nm + ii] - 0.5^2 * t * diag(H * H')[ii] - mbounds_mult[ui, t] = ymeans[nx + nm + ii]- 0.5t * diag(H * H')[ii] + mbounds_mult[li, t] = ymeans[nx + nm + ii] - + 0.5^2 * t * diag(H * H')[ii] + mbounds_mult[ui, t] = ymeans[nx + nm + ii] - + 0.5t * diag(H * H')[ii] else error("standard error is negative") end @@ -544,21 +550,23 @@ function plot_martingales(amf, T, npaths = 25) # pull out paths for n in 1:npaths x, y = simulate(amf.lss, T) - for ii in 0:nm-1 - mpath_mult[npaths * ii + n, :] = - exp.(y[nx+nm + ii+1, :] - (1:T) * 0.5 * diag(H * H')[ii+1]) + for ii in 0:(nm - 1) + mpath_mult[npaths * ii + n, :] = exp.(y[nx + nm + ii + 1, :] - + (1:T) * 0.5 * + diag(H * H')[ii + 1]) end end mart_figs = [] - for ii in 0:nm-1 - li, ui = npaths*(ii), npaths*(ii + 1) + for ii in 0:(nm - 1) + li, ui = npaths * (ii), npaths * (ii + 1) LI, UI = 2ii, 2(ii + 1) push!(mart_figs, - plot_martingale_paths(T, mpath_mult[li + 1:ui, :], - mbounds_mult[LI + 1:UI, :], horline = 1)) - plot!(mart_figs[ii + 1], title = L"Martingale components for many paths of $y_{ii + 1}$") + plot_martingale_paths(T, mpath_mult[(li + 1):ui, :], + mbounds_mult[(LI + 1):UI, :], horline = 1)) + plot!(mart_figs[ii + 1], + title = L"Martingale components for many paths of $y_{ii + 1}$") end return mart_figs @@ -571,10 +579,10 @@ function plot_given_paths(T, ypath, mpath, spath, tpath, mbounds, sbounds; trange = 1:T # allocate transpose - mpathᵀ = Matrix(mpath') + mpath_T = Matrix(mpath') # create figure - plots=plot(layout = (2, 2), size = (800, 800)) + plots = plot(layout = (2, 2), size = (800, 800)) # plot all paths together @@ -584,16 +592,19 @@ function plot_given_paths(T, ypath, mpath, spath, tpath, mbounds, sbounds; if show_trend plot!(plots[1], trange, tpath[1, :], label = L"t_t", color = :red) end - plot!(plots[1], seriestype = :hline, [horline], color = :black, linestyle=:dash, label = "") - plot!(plots[1], title = "One Path of All Variables", legend=:topleft) + plot!(plots[1], seriestype = :hline, [horline], color = :black, + linestyle = :dash, label = "") + plot!(plots[1], title = "One Path of All Variables", legend = :topleft) # plot martingale component 
plot!(plots[2], trange, mpath[1, :], color = :magenta, label = "") - plot!(plots[2], trange, mpathᵀ, alpha = 0.45, color = :magenta, label = "") + plot!(plots[2], trange, mpath_T, alpha = 0.45, color = :magenta, label = "") ub = mbounds[2, :] lb = mbounds[1, :] - plot!(plots[2], ub, fillrange = lb, alpha = 0.25, color = :magenta, label = "") - plot!(plots[2], seriestype = :hline, [horline], color = :black, linestyle =:dash, label = "") + plot!(plots[2], ub, fillrange = lb, alpha = 0.25, color = :magenta, + label = "") + plot!(plots[2], seriestype = :hline, [horline], color = :black, + linestyle = :dash, label = "") plot!(plots[2], title = "Martingale Components for Many Paths") # plot stationary component @@ -601,15 +612,18 @@ function plot_given_paths(T, ypath, mpath, spath, tpath, mbounds, sbounds; plot!(plots[3], Matrix(spath'), alpha = 0.25, color = :green, label = "") ub = sbounds[2, :] lb = sbounds[1, :] - plot!(plots[3], ub, fillrange = lb, alpha = 0.25, color = :green, label = "") - plot!(plots[3], seriestype = :hline, [horline], color = :black, linestyle=:dash, label = "") + plot!(plots[3], ub, fillrange = lb, alpha = 0.25, color = :green, + label = "") + plot!(plots[3], seriestype = :hline, [horline], color = :black, + linestyle = :dash, label = "") plot!(plots[3], title = "Stationary Components for Many Paths") # plot trend component if show_trend == true plot!(plots[4], Matrix(tpath'), color = :red, label = "") end - plot!(plots[4], seriestype = :hline, [horline], color = :black, linestyle =:dash, label = "") + plot!(plots[4], seriestype = :hline, [horline], color = :black, + linestyle = :dash, label = "") plot!(plots[4], title = "Trend Components for Many Paths") return plots @@ -627,8 +641,10 @@ function plot_martingale_paths(T, mpath, mbounds; ub = mbounds[2, :] lb = mbounds[1, :] #plot!(plt, lb, fillrange = ub, alpha = 0.25, color = :magenta, label = "") - plot!(plt, seriestype = :hline, [horline], color = :black, linestyle =:dash, label = "") - plot!(plt, trange, Matrix(mpath'), linewidth=0.25, color = :black, label = "") + plot!(plt, seriestype = :hline, [horline], color = :black, + linestyle = :dash, label = "") + plot!(plt, trange, Matrix(mpath'), linewidth = 0.25, color = :black, + label = "") return plt end @@ -645,40 +661,40 @@ Random.seed!(42); ``` ```{code-cell} julia -ϕ_1, ϕ_2, ϕ_3, ϕ_4 = 0.5, -0.2, 0, 0.5 -σ = 0.01 -ν = 0.01 # growth rate +phi_1, phi_2, phi_3, phi_4 = 0.5, -0.2, 0, 0.5 +sigma = 0.01 +upsilon = 0.01 # growth rate ## A matrix should be n x n -A = [ϕ_1 ϕ_2 ϕ_3 ϕ_4; - 1 0 0 0; - 0 1 0 0; - 0 0 1 0] +A = [phi_1 phi_2 phi_3 phi_4; + 1 0 0 0; + 0 1 0 0; + 0 0 1 0] # B matrix should be n x k -B = [σ, 0, 0, 0] +B = [sigma, 0, 0, 0] D = [1 0 0 0] * A F = [1, 0, 0, 0] ⋅ vec(B) -amf = AMF_LSS_VAR(A, B, D, F, ν) +amf = AMF_LSS_VAR(A, B, D, F, upsilon) T = 150 x, y = simulate(amf.lss, T) -plt_1=plot() -plt_2=plot() +plt_1 = plot() +plt_2 = plot() plots = [plt_1, plt_2] # plots = plot(layout = (2, 1)) plot!(plots[1], 1:T, y[amf.nx + 1, :], color = :black, lw = 2, label = "") -plot!(plots[1], title = L"A particular path of $y_t$") +plot!(plots[1], title = L"A particular path of $y_t$") plot!(plots[2], 1:T, y[1, :], color = :green, lw = 2, label = "") -plot!(plots[2], seriestype = :hline, [0], color = :black, lw = 2, linestyle=:dashdot, - label = "") +plot!(plots[2], seriestype = :hline, [0], color = :black, lw = 2, + linestyle = :dashdot, label = "") plot!(plots[2], title = L"Associated path of $x_t$") # plot(plots) -plot(plots[1], plots[2], layout=(2,1), 
size=(700,500)) +plot(plots[1], plots[2], layout = (2, 1), size = (700, 500)) ``` ```{code-cell} julia diff --git a/lectures/time_series_models/arma.md b/lectures/time_series_models/arma.md index 35498a51..8f2bde8f 100644 --- a/lectures/time_series_models/arma.md +++ b/lectures/time_series_models/arma.md @@ -80,12 +80,6 @@ For supplementary reading, see. * {cite}`CryerChan2008`, all ``` - - -```{code-cell} julia -using LinearAlgebra, Statistics -``` - ## Introduction Consider a sequence of random variables $\{ X_t \}$ indexed by $t \in \mathbb Z$ and taking values in $\mathbb R$. @@ -231,22 +225,24 @@ using Test ```{code-cell} julia using LaTeXStrings, Plots +using LinearAlgebra, Statistics - -plt_1=plot() -plt_2=plot() +plt_1 = plot() +plt_2 = plot() plots = [plt_1, plt_2] -for (i, ϕ) in enumerate((0.8, -0.8)) +for (i, phi) in enumerate((0.8, -0.8)) times = 0:16 - acov = [ϕ.^k ./ (1 - ϕ.^2) for k in times] - label = L"autocovariance, $\phi = %$ϕ$" - plot!(plots[i], times, acov, color=:blue, lw=2, marker=:circle, markersize=3, - alpha=0.6, label=label) - plot!(plots[i], legend=:topright, xlabel="time", xlim=(0,15)) - plot!(plots[i], seriestype=:hline, [0], linestyle=:dash, alpha=0.5, lw=2, label="") + acov = [phi .^ k ./ (1 - phi .^ 2) for k in times] + label = L"autocovariance, $\phi = %$phi$" + plot!(plots[i], times, acov, color = :blue, lw = 2, marker = :circle, + markersize = 3, + alpha = 0.6, label = label) + plot!(plots[i], legend = :topright, xlabel = "time", xlim = (0, 15)) + plot!(plots[i], seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, + lw = 2, label = "") end -plot(plots[1], plots[2], layout=(2,1), size=(700,500)) +plot(plots[1], plots[2], layout = (2, 1), size = (700, 500)) ``` ```{code-cell} julia @@ -254,9 +250,9 @@ plot(plots[1], plots[2], layout=(2,1), size=(700,500)) tags: [remove-cell] --- @testset begin - ϕ = 0.8 + phi = 0.8 times = 0:16 - acov = [ϕ.^k ./ (1 - ϕ.^2) for k in times] + acov = [phi.^k ./ (1 - phi.^2) for k in times] @test acov[4] ≈ 1.422222222222223 # Can't access acov directly because of scoping. 
@test acov[1] ≈ 2.7777777777777786 end @@ -480,21 +476,22 @@ It is a nice exercise to verify that {eq}`ma1_sd_ed` and {eq}`ar1_sd_ed` are ind Plotting {eq}`ar1_sd_ed` reveals the shape of the spectral density for the AR(1) model when $\phi$ takes the values 0.8 and -0.8 respectively ```{code-cell} julia -ar1_sd(ϕ, ω) = 1 ./ (1 .- 2 * ϕ * cos.(ω) .+ ϕ.^2) +ar1_sd(phi, omega) = 1 ./ (1 .- 2 * phi * cos.(omega) .+ phi .^ 2) -ω_s = range(0, π, length = 180) +omega_s = range(0, pi, length = 180) -plt_1=plot() -plt_2=plot() -plots=[plt_1, plt_2] +plt_1 = plot() +plt_2 = plot() +plots = [plt_1, plt_2] -for (i, ϕ) in enumerate((0.8, -0.8)) - sd = ar1_sd(ϕ, ω_s) - label = L"spectral density, $\phi = %$ϕ$" - plot!(plots[i], ω_s, sd, color=:blue, alpha=0.6, lw=2, label=label) - plot!(plots[i], legend=:top, xlabel="frequency", xlim=(0,π)) +for (i, phi) in enumerate((0.8, -0.8)) + sd = ar1_sd(phi, omega_s) + label = L"spectral density, $\phi = %$phi$" + plot!(plots[i], omega_s, sd, color = :blue, alpha = 0.6, lw = 2, + label = label) + plot!(plots[i], legend = :top, xlabel = "frequency", xlim = (0, pi)) end -plot(plots[1], plots[2], layout=(2,1), size=(700,500)) +plot(plots[1], plots[2], layout = (2, 1), size = (700, 500)) ``` ```{code-cell} julia @@ -502,9 +499,9 @@ plot(plots[1], plots[2], layout=(2,1), size=(700,500)) tags: [remove-cell] --- @testset begin - @test ar1_sd(0.8, ω_s)[18] ≈ 9.034248169239635 - @test ar1_sd(-0.8, ω_s)[18] ≈ 0.3155260821833043 - @test ω_s[1] == 0.0 && ω_s[end] ≈ π && length(ω_s) == 180 # Grid invariant. + @test ar1_sd(0.8, omega_s)[18] ≈ 9.034248169239635 + @test ar1_sd(-0.8, omega_s)[18] ≈ 0.3155260821833043 + @test omega_s[1] == 0.0 && omega_s[end] ≈ pi && length(omega_s) == 180 # Grid invariant. end ``` @@ -536,34 +533,35 @@ products on the right-hand side of {eq}`sumpr` is large. 
These ideas are illustrated in the next figure, which has $k$ on the horizontal axis ```{code-cell} julia -ϕ = -0.8 +phi = -0.8 times = 0:16 -y1 = [ϕ.^k ./ (1 - ϕ.^2) for k in times] -y2 = [cos.(π * k) for k in times] +y1 = [phi .^ k ./ (1 - phi .^ 2) for k in times] +y2 = [cos.(pi * k) for k in times] y3 = [a * b for (a, b) in zip(y1, y2)] -# Autocovariance when ϕ = -0.8 -plt_1 = plot(times, y1, color=:blue, lw=2, marker=:circle, markersize=3, - alpha=0.6, label=L"\gamma(k)") -plot!(plt_1, seriestype=:hline, [0], linestyle=:dash, alpha=0.5, - lw=2, label="") -plot!(plt_1, legend=:topright, xlim=(0,15), yticks=[-2, 0, 2]) +# Autocovariance when phi = -0.8 +plt_1 = plot(times, y1, color = :blue, lw = 2, marker = :circle, markersize = 3, + alpha = 0.6, label = L"\gamma(k)") +plot!(plt_1, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, + lw = 2, label = "") +plot!(plt_1, legend = :topright, xlim = (0, 15), yticks = [-2, 0, 2]) -# Cycles at frequence π -plt_2 = plot(times, y2, color=:blue, lw=2, marker=:circle, markersize=3, - alpha=0.6, label=L"cos(\pi k)") -plot!(plt_2, seriestype=:hline, [0], linestyle=:dash, alpha=0.5, - lw=2, label="") -plot!(plt_2, legend=:topright, xlim=(0,15), yticks=[-1, 0, 1]) +# Cycles at frequence pi +plt_2 = plot(times, y2, color = :blue, lw = 2, marker = :circle, markersize = 3, + alpha = 0.6, label = L"cos(\pi k)") +plot!(plt_2, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, + lw = 2, label = "") +plot!(plt_2, legend = :topright, xlim = (0, 15), yticks = [-1, 0, 1]) # Product -plt_3 = plot(times, y3, seriestype=:sticks, marker=:circle, markersize=3, - lw=2, label=L"\gamma(k) cos(\pi k)") -plot!(plt_3, seriestype=:hline, [0], linestyle=:dash, alpha=0.5, - lw=2, label="") -plot!(plt_3, legend=:topright, xlim=(0,15), ylim=(-3,3), yticks=[-1, 0, 1, 2, 3]) +plt_3 = plot(times, y3, seriestype = :sticks, marker = :circle, markersize = 3, + lw = 2, label = L"\gamma(k) cos(\pi k)") +plot!(plt_3, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, + lw = 2, label = "") +plot!(plt_3, legend = :topright, xlim = (0, 15), ylim = (-3, 3), + yticks = [-1, 0, 1, 2, 3]) -plot(plt_1, plt_2, plt_3, layout=(3,1), size=(800,600)) +plot(plt_1, plt_2, plt_3, layout = (3, 1), size = (800, 600)) ``` ```{code-cell} julia @@ -583,34 +581,35 @@ not matched, the sequence $\gamma(k) \cos(\omega k)$ contains both positive and negative terms, and hence the sum of these terms is much smaller ```{code-cell} julia -ϕ = -0.8 +phi = -0.8 times = 0:16 -y1 = [ϕ.^k ./ (1 - ϕ.^2) for k in times] -y2 = [cos.(π * k/3) for k in times] +y1 = [phi .^ k ./ (1 - phi .^ 2) for k in times] +y2 = [cos.(pi * k / 3) for k in times] y3 = [a * b for (a, b) in zip(y1, y2)] -# Autocovariance when ϕ = -0.8 -plt_1 = plot(times, y1, color=:blue, lw=2, marker=:circle, markersize=3, - alpha=0.6, label=L"\gamma(k)") -plot!(plt_1, seriestype=:hline, [0], linestyle=:dash, alpha=0.5, - lw=2, label="") -plot!(plt_1, legend=:topright, xlim=(0,15), yticks=[-2, 0, 2]) +# Autocovariance when phi = -0.8 +plt_1 = plot(times, y1, color = :blue, lw = 2, marker = :circle, markersize = 3, + alpha = 0.6, label = L"\gamma(k)") +plot!(plt_1, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, + lw = 2, label = "") +plot!(plt_1, legend = :topright, xlim = (0, 15), yticks = [-2, 0, 2]) -# Cycles at frequence π -plt_2 = plot(times, y2, color=:blue, lw=2, marker=:circle, markersize=3, - alpha=0.6, label=L"cos(\pi k/3)") -plot!(plt_2, seriestype=:hline, [0], linestyle=:dash, alpha=0.5, - lw=2, label="") 
-plot!(plt_2, legend=:topright, xlim=(0,15), yticks=[-1, 0, 1]) +# Cycles at frequence pi +plt_2 = plot(times, y2, color = :blue, lw = 2, marker = :circle, markersize = 3, + alpha = 0.6, label = L"cos(\pi k/3)") +plot!(plt_2, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, + lw = 2, label = "") +plot!(plt_2, legend = :topright, xlim = (0, 15), yticks = [-1, 0, 1]) # Product -plt_3 = plot(times, y3, seriestype=:sticks, marker=:circle, markersize=3, - lw=2, label=L"\gamma(k) cos(\pi k/3)") -plot!(plt_3, seriestype=:hline, [0], linestyle=:dash, alpha=0.5, - lw=2, label="") -plot!(plt_3, legend=:topright, xlim=(0,15), ylim=(-3,3), yticks=[-1, 0, 1, 2, 3]) +plt_3 = plot(times, y3, seriestype = :sticks, marker = :circle, markersize = 3, + lw = 2, label = L"\gamma(k) cos(\pi k/3)") +plot!(plt_3, seriestype = :hline, [0], linestyle = :dash, alpha = 0.5, + lw = 2, label = "") +plot!(plt_3, legend = :topright, xlim = (0, 15), ylim = (-3, 3), + yticks = [-1, 0, 1, 2, 3]) -plot(plt_1, plt_2, plt_3, layout=(3,1), size=(600,600)) +plot(plt_1, plt_2, plt_3, layout = (3, 1), size = (600, 600)) ``` ```{code-cell} julia @@ -753,64 +752,65 @@ using QuantEcon, Random # plot functions function plot_spectral_density(arma, plt) - (w, spect) = spectral_density(arma, two_pi=false) - plot!(plt, w, spect, lw=2, alpha=0.7,label="") - plot!(plt, title="Spectral density", xlim=(0,π), - xlabel="frequency", ylabel="spectrum", yscale=:log) + (w, spect) = spectral_density(arma, two_pi = false) + plot!(plt, w, spect, lw = 2, alpha = 0.7, label = "") + plot!(plt, title = "Spectral density", xlim = (0, pi), + xlabel = "frequency", ylabel = "spectrum", yscale = :log) return plt end function plot_spectral_density(arma) plt = plot() - plot_spectral_density(arma, plt=plt) + plot_spectral_density(arma, plt = plt) return plt end function plot_autocovariance(arma, plt) acov = autocovariance(arma) n = length(acov) - plot!(plt, 0:(n-1), acov, seriestype=:sticks, marker=:circle, - markersize=2,label="") - plot!(plt, seriestype=:hline, [0], color=:red, label="") - plot!(plt, title="Autocovariance", xlim=(-0.5, n-0.5), - xlabel="time", ylabel="autocovariance") + plot!(plt, 0:(n - 1), acov, seriestype = :sticks, marker = :circle, + markersize = 2, label = "") + plot!(plt, seriestype = :hline, [0], color = :red, label = "") + plot!(plt, title = "Autocovariance", xlim = (-0.5, n - 0.5), + xlabel = "time", ylabel = "autocovariance") return plt end function plot_autocovariance(arma) plt = plot() - plot_spectral_density(arma, plt=plt) + plot_spectral_density(arma, plt = plt) return plt end function plot_impulse_response(arma, plt) psi = impulse_response(arma) n = length(psi) - plot!(plt, 0:(n-1), psi, seriestype=:sticks, marker=:circle, - markersize=2, label="") - plot!(plt, seriestype=:hline, [0], color=:red, label="") - plot!(plt, title="Impluse response", xlim=(-0.5,n-0.5), - xlabel="time", ylabel="response") + plot!(plt, 0:(n - 1), psi, seriestype = :sticks, marker = :circle, + markersize = 2, label = "") + plot!(plt, seriestype = :hline, [0], color = :red, label = "") + plot!(plt, title = "Impluse response", xlim = (-0.5, n - 0.5), + xlabel = "time", ylabel = "response") return plt end function plot_impulse_response(arma) plt = plot() - plot_spectral_density(arma, plt=plt) + plot_spectral_density(arma, plt = plt) return plt end function plot_simulation(arma, plt) X = simulation(arma) n = length(X) - plot!(plt, 0:(n-1), X, lw=2, alpha=0.7, label="") - plot!(plt, title="Sample path", xlim=(0,0,n), xlabel="time", ylabel="state 
space") + plot!(plt, 0:(n - 1), X, lw = 2, alpha = 0.7, label = "") + plot!(plt, title = "Sample path", xlim = (0, 0, n), xlabel = "time", + ylabel = "state space") return plt end function plot_simulation(arma) plt = plot() - plot_spectral_density(arma, plt=plt) + plot_spectral_density(arma, plt = plt) return plt end @@ -822,14 +822,14 @@ function quad_plot(arma) plots = [plt_1, plt_2, plt_3, plt_4] plot_functions = [plot_spectral_density, - plot_impulse_response, - plot_autocovariance, - plot_simulation] + plot_impulse_response, + plot_autocovariance, + plot_simulation] for (i, plt, plot_func) in zip(1:1:4, plots, plot_functions) plots[i] = plot_func(arma, plt) end - return plot(plots[1], plots[2], plots[3], plots[4], layout=(2,2), size=(800,800)) - + return plot(plots[1], plots[2], plots[3], plots[4], layout = (2, 2), + size = (800, 800)) end ``` @@ -839,9 +839,9 @@ We'll use the model $X_t = 0.5 X_{t-1} + \epsilon_t - 0.8 \epsilon_{t-2}$ ```{code-cell} julia Random.seed!(42) # For reproducible results. -ϕ = 0.5; -θ = [0, -0.8]; -arma = ARMA(ϕ, θ, 1.0) +phi = 0.5; +theta = [0, -0.8]; +arma = ARMA(phi, theta, 1.0) quad_plot(arma) ``` @@ -864,7 +864,7 @@ end The call ```{code-block} julia -arma = ARMA(ϕ, θ, σ) +arma = ARMA(phi, theta, sigma) ``` creates an instance `arma` that represents the ARMA($p, q$) model @@ -874,15 +874,15 @@ X_t = \phi_1 X_{t-1} + ... + \phi_p X_{t-p} + \epsilon_t + \theta_1 \epsilon_{t-1} + ... + \theta_q \epsilon_{t-q} $$ -If `ϕ` and `θ` are arrays or sequences, then the interpretation will +If `phi` and `theta` are arrays or sequences, then the interpretation will be -* `ϕ` holds the vector of parameters $(\phi_1, \phi_2,..., \phi_p)$ -* `θ` holds the vector of parameters $(\theta_1, \theta_2,..., \theta_q)$ +* `phi` holds the vector of parameters $(\phi_1, \phi_2,..., \phi_p)$ +* `theta` holds the vector of parameters $(\theta_1, \theta_2,..., \theta_q)$ -The parameter `σ` is always a scalar, the standard deviation of the white noise. +The parameter `sigma` is always a scalar, the standard deviation of the white noise. -We also permit `ϕ` and `θ` to be scalars, in which case the model will be interpreted as +We also permit `phi` and `theta` to be scalars, in which case the model will be interpreted as $$ X_t = \phi X_{t-1} + \epsilon_t + \theta \epsilon_{t-1} diff --git a/lectures/time_series_models/classical_filtering.md b/lectures/time_series_models/classical_filtering.md index 87df5026..970ddfb2 100644 --- a/lectures/time_series_models/classical_filtering.md +++ b/lectures/time_series_models/classical_filtering.md @@ -61,11 +61,6 @@ In this lecture we investigate these ideas using mostly elementary linear algebr Useful references include {cite}`Whittle1963`, {cite}`HanSar1980`, {cite}`Orfanidisoptimum1988`, {cite}`Athanasios1991`, and {cite}`Muth1960`. - -```{code-cell} julia -using LinearAlgebra, Statistics -``` - ## Infinite Horizon Prediction and Filtering Problems We pose two related prediction and filtering problems. 
@@ -587,57 +582,55 @@ using Test ``` ```{code-cell} julia +using LinearAlgebra, Statistics using Polynomials.PolyCompat, LinearAlgebra import Polynomials.PolyCompat: roots, coeffs function LQFilter(d, h, y_m; r = nothing, - β = nothing, + beta = nothing, h_eps = nothing) - m = length(d) - 1 m == length(y_m) || throw(ArgumentError("y_m and d must be of same length = $m")) - # define the coefficients of ϕ up front - ϕ = zeros(2m + 1) - for i in -m:m - ϕ[m-i+1] = sum(diag(d*d', -i)) + # define the coefficients of phi up front + phi = zeros(2m + 1) + for i in (-m):m + phi[m - i + 1] = sum(diag(d * d', -i)) end - ϕ[m+1] = ϕ[m+1] + h + phi[m + 1] = phi[m + 1] + h - # if r is given calculate the vector ϕ_r + # if r is given calculate the vector phi_r if isnothing(r) k = nothing - ϕ_r = nothing + phi_r = nothing else k = size(r, 1) - 1 - ϕ_r = zeros(2k + 1) + phi_r = zeros(2k + 1) - for i = -k:k - ϕ_r[k-i+1] = sum(diag(r*r', -i)) + for i in (-k):k + phi_r[k - i + 1] = sum(diag(r * r', -i)) end if isnothing(h_eps) == false - ϕ_r[k+1] = ϕ_r[k+1] + h_eps + phi_r[k + 1] = phi_r[k + 1] + h_eps end end - # if β is given, define the transformed variables - if isnothing(β) - β = 1.0 + # if beta is given, define the transformed variables + if isnothing(beta) + beta = 1.0 else - d = β.^(collect(0:m)/2) * d - y_m = y_m * β.^(- collect(1:m)/2) + d = beta .^ (collect(0:m) / 2) * d + y_m = y_m * beta .^ (-collect(1:m) / 2) end - return (d = d, h = h, y_m = y_m, m = m, ϕ = ϕ, β = β, - ϕ_r = ϕ_r, k = k) + return (; d, h, y_m, m, phi, beta, phi_r, k) end function construct_W_and_Wm(lqf, N) - - d, m = lqf.d, lqf.m + (; d, m) = lqf W = zeros(N + 1, N + 1) W_m = zeros(N + 1, m) @@ -648,9 +641,9 @@ function construct_W_and_Wm(lqf, N) # (1) constuct the D_{m+1} matrix using the formula - for j in 1:(m+1) - for k in j:(m+1) - D_m1[j, k] = dot(d[1:j, 1], d[k-j+1:k, 1]) + for j in 1:(m + 1) + for k in j:(m + 1) + D_m1[j, k] = dot(d[1:j, 1], d[(k - j + 1):k, 1]) end end @@ -661,63 +654,63 @@ function construct_W_and_Wm(lqf, N) for j in 1:m for i in (j + 1):(m + 1) - M[i, j] = D_m1[i-j, m+1] + M[i, j] = D_m1[i - j, m + 1] end end M # Euler equations for t = 0, 1, ..., N-(m+1) - ϕ, h = lqf.ϕ, lqf.h + phi, h = lqf.phi, lqf.h W[1:(m + 1), 1:(m + 1)] = D_m1 + h * I W[1:(m + 1), (m + 2):(2m + 1)] = M for (i, row) in enumerate((m + 2):(N + 1 - m)) - W[row, (i + 1):(2m + 1 + i)] = ϕ' + W[row, (i + 1):(2m + 1 + i)] = phi' end for i in 1:m - W[N - m + i + 1 , end-(2m + 1 - i)+1:end] = ϕ[1:end-i] + W[N - m + i + 1, (end - (2m + 1 - i) + 1):end] = phi[1:(end - i)] end for i in 1:m - W_m[N - i + 2, 1:(m - i)+1] = ϕ[(m + 1 + i):end] + W_m[N - i + 2, 1:((m - i) + 1)] = phi[(m + 1 + i):end] end return W, W_m end function roots_of_characteristic(lqf) - m, ϕ = lqf.m, lqf.ϕ + (; m, phi) = lqf # Calculate the roots of the 2m-polynomial - ϕ_poly = Poly(ϕ[end:-1:1]) - proots = roots(ϕ_poly) + phi_poly = Poly(phi[end:-1:1]) + proots = roots(phi_poly) # sort the roots according to their length (in descending order) - roots_sorted = sort(proots, by=abs)[end:-1:1] - z_0 = sum(ϕ) / polyval(poly(proots), 1.0) + roots_sorted = sort(proots, by = abs)[end:-1:1] + z_0 = sum(phi) / polyval(poly(proots), 1.0) z_1_to_m = roots_sorted[1:m] # we need only those outside the unit circle - λ = 1 ./ z_1_to_m - return z_1_to_m, z_0, λ + lambda = 1 ./ z_1_to_m + return z_1_to_m, z_0, lambda end function coeffs_of_c(lqf) - m = lqf.m - z_1_to_m, z_0, λ = roots_of_characteristic(lqf) + (; m) = lqf + z_1_to_m, z_0, lambda = roots_of_characteristic(lqf) c_0 = (z_0 * 
prod(z_1_to_m) * (-1.0)^m)^(0.5) c_coeffs = coeffs(poly(z_1_to_m)) * z_0 / c_0 return c_coeffs end function solution(lqf) - z_1_to_m, z_0, λ = roots_of_characteristic(lqf) + z_1_to_m, z_0, lambda = roots_of_characteristic(lqf) c_0 = coeffs_of_c(lqf)[end] - A = zeros(lqf.m) + A = zeros(m) for j in 1:m - denom = 1 - λ/λ[j] + denom = 1 - lambda / lambda[j] A[j] = c_0^(-2) / prod(denom[1:m .!= j]) end - return λ, A + return lambda, A end function construct_V(lqf; N = nothing) @@ -728,12 +721,12 @@ function construct_V(lqf; N = nothing) throw(ArgumentError("N must be Integer!")) end - ϕ_r, k = lqf.ϕ_r, lqf.k + (; phi_r, k) = lqf V = zeros(N, N) for i in 1:N for j in 1:N - if abs(i-j) ≤ k - V[i, j] = ϕ_r[k + abs(i-j)+1] + if abs(i - j) <= k + V[i, j] = phi_r[k + abs(i - j) + 1] end end end @@ -751,7 +744,7 @@ function predict(lqf, a_hist, t) V = construct_V(N + 1) aux_matrix = zeros(N + 1, N + 1) - aux_matrix[1:t+1 , 1:t+1 ] = Matrix(I, t + 1, t + 1) + aux_matrix[1:(t + 1), 1:(t + 1)] = Matrix(I, t + 1, t + 1) L = cholesky(V).U' Ea_hist = inv(L) * aux_matrix * L * a_hist @@ -759,7 +752,7 @@ function predict(lqf, a_hist, t) end function optimal_y(lqf, a_hist, t = nothing) - β, y_m, m = lqf.β, lqf.y_m, lqf.m + (; beta, y_m, m) = lqf N = length(a_hist) - 1 W, W_m = construct_W_and_Wm(lqf, N) @@ -776,35 +769,35 @@ function optimal_y(lqf, a_hist, t = nothing) if isnothing(t) # if the problem is deterministic a_hist = J * a_hist - # transform the a sequence if β is given - if β != 1 - a_hist = reshape(a_hist * (β^(collect(N:0)/ 2)), N + 1, 1) + # transform the a sequence if beta is given + if beta != 1 + a_hist = reshape(a_hist * (beta^(collect(N:0) / 2)), N + 1, 1) end - ā = a_hist - W_m * y_m # ā from the lecutre - Uy = \(L, ā) # U @ ȳ = L^{-1}ā from the lecture - ȳ = \(U, Uy) # ȳ = U^{-1}L^{-1}ā - # Reverse the order of ȳ with the matrix J + a_bar = a_hist - W_m * y_m # a_bar from the lecutre + Uy = \(L, a_bar) # U @ y_bar = L^{-1}a_bar from the lecture + y_bar = \(U, Uy) # y_bar = U^{-1}L^{-1}a_bar + # Reverse the order of y_bar with the matrix J J = reverse(Matrix(I, N + m + 1, N + m + 1), dims = 2) - y_hist = J * vcat(ȳ, y_m) # y_hist : concatenated y_m and ȳ - # transform the optimal sequence back if β is given - if β != 1 - y_hist = y_hist .* β.^(- collect(-m:N)/2) + y_hist = J * vcat(y_bar, y_m) # y_hist : concatenated y_m and y_bar + # transform the optimal sequence back if beta is given + if beta != 1 + y_hist = y_hist .* beta .^ (-collect((-m):N) / 2) end else # if the problem is stochastic and we look at it Ea_hist = reshape(predict(a_hist, t), N + 1, 1) Ea_hist = J * Ea_hist - ā = Ea_hist - W_m * y_m # ā from the lecutre - Uy = \(L, ā) # U @ ȳ = L^{-1}ā from the lecture - ȳ = \(U, Uy) # ȳ = U^{-1}L^{-1}ā + a_bar = Ea_hist - W_m * y_m # a_bar from the lecutre + Uy = \(L, a_bar) # U @ y_bar = L^{-1}a_bar from the lecture + y_bar = \(U, Uy) # y_bar = U^{-1}L^{-1}a_bar - # Reverse the order of ȳ with the matrix J + # Reverse the order of y_bar with the matrix J J = reverse(Matrix(I, N + m + 1, N + m + 1), dims = 2) - y_hist = J * vcat(ȳ, y_m) # y_hist : concatenated y_m and ȳ + y_hist = J * vcat(y_bar, y_m) # y_hist : concatenated y_m and y_bar end - return y_hist, L, U, ȳ + return y_hist, L, U, y_bar end ``` @@ -830,7 +823,7 @@ y_m = zeros(m) d = [1.0, -2.0] r = [1.0, -2.0] h = 0.0 -example = LQFilter(d, h, y_m, r=d) +example = LQFilter(d, h, y_m, r = d) ``` The Wold representation is computed by example.coefficients_of_c(). @@ -861,7 +854,7 @@ and put it in $V$. 
Then we'll take a Cholesky decomposition of $V = L^{-1} L^{-1} = Li Li'$ and use it to form the vector of "moving average representations" $x = Li \varepsilon$ and the vector of "autoregressive representations" $L x = \varepsilon$ ```{code-cell} julia -V = construct_V(example,N=5) +V = construct_V(example, N = 5) ``` Notice how the lower rows of the "moving average representations" are converging to the appropriate infinite history Wold representation @@ -951,7 +944,7 @@ V = construct_V(example, N = 8) ```{code-cell} julia F = cholesky(V) Li = F.L -Li[end-2:end, :] +Li[(end - 2):end, :] ``` ```{code-cell} julia diff --git a/lectures/time_series_models/estspec.md b/lectures/time_series_models/estspec.md index 9f468e9f..a616292b 100644 --- a/lectures/time_series_models/estspec.md +++ b/lectures/time_series_models/estspec.md @@ -45,12 +45,6 @@ Once the basic technique has been explained, we will apply it to the analysis of For supplementary reading, see {cite}`Sargent1987` or {cite}`CryerChan2008`. - - -```{code-cell} julia -using LinearAlgebra, Statistics -``` - (periodograms)= ## {index}`Periodograms ` @@ -223,21 +217,24 @@ using Test ```{code-cell} julia using QuantEcon, Plots, Random +using LinearAlgebra, Statistics Random.seed!(42) # For reproducible results. n = 40 # Data size -ϕ = 0.5 # AR parameter -θ = [0, -0.8] # MA parameter -σ = 1.0 -lp = ARMA(ϕ, θ, σ) +phi = 0.5 # AR parameter +theta = [0, -0.8] # MA parameter +sigma = 1.0 +lp = ARMA(phi, theta, sigma) X = simulation(lp, ts_length = n) x, y = periodogram(X) -x_sd, y_sd = spectral_density(lp, two_pi=false, res=120) +x_sd, y_sd = spectral_density(lp, two_pi = false, res = 120) -plot(x, y,linecolor="blue", linewidth=2, linealpha=0.5, lab="periodogram") -plot!(x_sd, y_sd, linecolor="red", linewidth=2, linealpha=0.8, lab="spectral density") +plot(x, y, linecolor = "blue", linewidth = 2, linealpha = 0.5, + lab = "periodogram") +plot!(x_sd, y_sd, linecolor = "red", linewidth = 2, linealpha = 0.8, + lab = "spectral density") ``` ```{code-cell} julia @@ -314,14 +311,15 @@ Note the smaller weights towards the edges and larger weights in the center, so ```{code-cell} julia function hanning_window(M) - w = [0.5 - 0.5 * cos(2 * pi * n / (M - 1)) for n = 0:(M-1)] + w = [0.5 - 0.5 * cos(2 * pi * n / (M - 1)) for n in 0:(M - 1)] return w end window = hanning_window(25) / sum(hanning_window(25)) x = range(-12, 12, length = 25) -plot(x, window, color="darkblue", title="Hanning window", ylabel="Weights", - xlabel="Position in sequence of weights", legend=false, grid=false) +plot(x, window, color = "darkblue", title = "Hanning window", + ylabel = "Weights", + xlabel = "Position in sequence of weights", legend = false, grid = false) ``` ```{code-cell} julia @@ -522,10 +520,10 @@ Random.seed!(42); # reproducible results ```{code-cell} julia n = 400 -ϕ = 0.5 -θ = [0, -0.8] -σ = 1.0 -lp = ARMA(ϕ, θ, 1.0) +phi = 0.5 +theta = [0, -0.8] +sigma = 1.0 +lp = ARMA(phi, theta, 1.0) X = simulation(lp, ts_length = n) xs = [] @@ -541,7 +539,7 @@ for (i, wl) in enumerate([15, 55, 175]) # window lengths push!(xs, x) push!(ys, y) - x_sd, y_sd = spectral_density(lp, two_pi=false, res=120) + x_sd, y_sd = spectral_density(lp, two_pi = false, res = 120) push!(x_sds, x_sd) push!(y_sds, y_sd) @@ -566,13 +564,13 @@ end ``` ```{code-cell} julia -plot(xs, ys, layout=(3,1), color=:blue, alpha=0.5, - linewidth=2, label=["periodogram" "" ""]) -plot!(x_sds, y_sds, layout=(3,1), color=:red, alpha=0.8, - linewidth=2, label=["spectral density" "" ""]) -plot!(x_sms, y_sms, 
layout=(3,1), color=:black, - linewidth=2, label=["smoothed periodogram" "" ""]) -plot!(title=reshape(titles,1,length(titles))) +plot(xs, ys, layout = (3, 1), color = :blue, alpha = 0.5, + linewidth = 2, label = ["periodogram" "" ""]) +plot!(x_sds, y_sds, layout = (3, 1), color = :red, alpha = 0.8, + linewidth = 2, label = ["spectral density" "" ""]) +plot!(x_sms, y_sms, layout = (3, 1), color = :black, + linewidth = 2, label = ["smoothed periodogram" "" ""]) +plot!(title = reshape(titles, 1, length(titles))) ``` ### Exercise 2 @@ -587,26 +585,26 @@ Random.seed!(42); # reproducible results ```{code-cell} julia lp2 = ARMA(-0.9, 0.0, 1.0) wl = 65 -p = plot(layout=(3,1)) +p = plot(layout = (3, 1)) for i in 1:3 - X = simulation(lp2,ts_length=150) - plot!(p[i],xlims = (0,pi)) + X = simulation(lp2, ts_length = 150) + plot!(p[i], xlims = (0, pi)) - x_sd, y_sd = spectral_density(lp2,two_pi=false, res=180) - plot!(p[i],x_sd, y_sd, linecolor=:red, linestyle=:solid, - yscale=:log10, linewidth=2, linealpha=0.75, - label="spectral density",legend=:topleft) + x_sd, y_sd = spectral_density(lp2, two_pi = false, res = 180) + plot!(p[i], x_sd, y_sd, linecolor = :red, linestyle = :solid, + yscale = :log10, linewidth = 2, linealpha = 0.75, + label = "spectral density", legend = :topleft) x, y_smoothed = periodogram(X, "hamming", wl) - plot!(p[i],x, y_smoothed, linecolor=:black, linestyle=:solid, - yscale=:log10, linewidth=2, linealpha=0.75, - label="standard smoothed periodogram",legend=:topleft) + plot!(p[i], x, y_smoothed, linecolor = :black, linestyle = :solid, + yscale = :log10, linewidth = 2, linealpha = 0.75, + label = "standard smoothed periodogram", legend = :topleft) x, y_ar = ar_periodogram(X, "hamming", wl) - plot!(p[i],x, y_ar, linecolor=:blue, linestyle=:solid, - yscale=:log10, linewidth=2, linealpha=0.75, - label="AR smoothed periodogram",legend=:topleft) + plot!(p[i], x, y_ar, linecolor = :blue, linestyle = :solid, + yscale = :log10, linewidth = 2, linealpha = 0.75, + label = "AR smoothed periodogram", legend = :topleft) end p ``` diff --git a/lectures/time_series_models/lu_tricks.md b/lectures/time_series_models/lu_tricks.md index c517f5eb..06b34847 100644 --- a/lectures/time_series_models/lu_tricks.md +++ b/lectures/time_series_models/lu_tricks.md @@ -64,7 +64,7 @@ Useful references include {cite}`Whittle1963`, {cite}`HanSar1980`, {cite}`Orfani ```{code-cell} julia -using LaTeXStrings, Polynomials, Plots, Random, Parameters +using LaTeXStrings, Polynomials, Plots, Random using LinearAlgebra, Statistics ``` @@ -902,50 +902,50 @@ tags: [output_scroll] --- function LQFilter(d, h, y_m; r = nothing, - β = nothing, + beta = nothing, h_eps = nothing) m = length(d) - 1 m == length(y_m) || throw(ArgumentError("y_m and d must be of same length = $m")) - # define the coefficients of ϕ up front - ϕ = zeros(2m + 1) + # define the coefficients of phi up front + phi = zeros(2m + 1) for i in -m:m - ϕ[m-i+1] = sum(diag(d*d', -i)) + phi[m-i+1] = sum(diag(d*d', -i)) end - ϕ[m+1] = ϕ[m+1] + h + phi[m+1] = phi[m+1] + h - # if r is given calculate the vector ϕ_r + # if r is given calculate the vector phi_r if isnothing(r) k = nothing - ϕ_r = nothing + phi_r = nothing else k = size(r, 1) - 1 - ϕ_r = zeros(2k + 1) + phi_r = zeros(2k + 1) for i = -k:k - ϕ_r[k-i+1] = sum(diag(r*r', -i)) + phi_r[k-i+1] = sum(diag(r*r', -i)) end if h_eps != nothing - ϕ_r[k+1] = ϕ_r[k+1] + h_eps + phi_r[k+1] = phi_r[k+1] + h_eps end end - # if β is given, define the transformed variables - if isnothing(β) - β = 1.0 + # if beta is given, 
define the transformed variables + if isnothing(beta) + beta = 1.0 else - d = β.^(collect(0:m)/2) * d - y_m = y_m * β.^(- collect(1:m)/2) + d = beta.^(collect(0:m)/2) * d + y_m = y_m * beta.^(- collect(1:m)/2) end - return (d = d, h = h, y_m = y_m, m = m, ϕ = ϕ, β = β, ϕ_r = ϕ_r, k = k) + return (;d, h, y_m, m, phi, beta, phi_r, k) end function construct_W_and_Wm(lqf, N) - @unpack d, m = lqf + (;d, m) = lqf W = zeros(N + 1, N + 1) W_m = zeros(N + 1, m) @@ -974,67 +974,67 @@ function construct_W_and_Wm(lqf, N) M # Euler equations for t = 0, 1, ..., N-(m+1) - @unpack ϕ, h = lqf + (;phi, h) = lqf W[1:(m + 1), 1:(m + 1)] = D_m1 + h * I W[1:(m + 1), (m + 2):(2m + 1)] = M for (i, row) in enumerate((m + 2):(N + 1 - m)) - W[row, (i + 1):(2m + 1 + i)] = ϕ' + W[row, (i + 1):(2m + 1 + i)] = phi' end for i in 1:m - W[N - m + i + 1 , end-(2m + 1 - i)+1:end] = ϕ[1:end-i] + W[N - m + i + 1 , end-(2m + 1 - i)+1:end] = phi[1:end-i] end for i in 1:m - W_m[N - i + 2, 1:(m - i)+1] = ϕ[(m + 1 + i):end] + W_m[N - i + 2, 1:(m - i)+1] = phi[(m + 1 + i):end] end return W, W_m end function roots_of_characteristic(lqf) - @unpack m, ϕ = lqf + (;m, phi) = lqf # Calculate the roots of the 2m-polynomial - ϕ_poly=Polynomial(ϕ[end:-1:1]) - proots = roots(ϕ_poly) + phi_poly=Polynomial(phi[end:-1:1]) + proots = roots(phi_poly) # sort the roots according to their length (in descending order) roots_sorted = sort(proots, by=abs)[end:-1:1] - z_0 = sum(ϕ) / (fromroots(proots))(1.0) + z_0 = sum(phi) / (fromroots(proots))(1.0) z_1_to_m = roots_sorted[1:m] # we need only those outside the unit circle - λ = 1 ./ z_1_to_m - return z_1_to_m, z_0, λ + lambda = 1 ./ z_1_to_m + return z_1_to_m, z_0, lambda end function coeffs_of_c(lqf) - m = lqf.m - z_1_to_m, z_0, λ = roots_of_characteristic(lqf) + (;m) = lqf + z_1_to_m, z_0, lambda = roots_of_characteristic(lqf) c_0 = (z_0 * prod(z_1_to_m) * (-1.0)^m)^(0.5) c_coeffs = coeffs(Polynomial(z_1_to_m)) * z_0 / c_0 return c_coeffs end function solution(lqf) - z_1_to_m, z_0, λ = roots_of_characteristic(lqf) + z_1_to_m, z_0, lambda = roots_of_characteristic(lqf) c_0 = coeffs_of_c(lqf)[end] - A = zeros(lqf.m) + A = zeros(m) for j in 1:m - denom = 1 - λ/λ[j] + denom = 1 - lambda/lambda[j] A[j] = c_0^(-2) / prod(denom[1:m .!= j]) end - return λ, A + return lambda, A end function construct_V(lqf; N=nothing) - @unpack ϕ_r, k = lqf + (;phi_r, k) = lqf V = zeros(N, N) for i in 1:N for j in 1:N if abs(i-j) <= k - V[i, j] = ϕ_r[k + abs(i-j)+1] + V[i, j] = phi_r[k + abs(i-j)+1] end end end @@ -1060,7 +1060,7 @@ function predict(lqf, a_hist, t) end function optimal_y(lqf, a_hist, t = nothing) - @unpack β, y_m, m = lqf + (;beta, y_m, m) = lqf N = length(a_hist) - 1 W, W_m = construct_W_and_Wm(lqf, N) @@ -1077,35 +1077,35 @@ function optimal_y(lqf, a_hist, t = nothing) if isnothing(t) # if the problem is deterministic a_hist = J * a_hist - # transform the a sequence if β is given - if β != 1 - a_hist = reshape(a_hist * (β^(collect(N:0)/ 2)), N + 1, 1) + # transform the a sequence if beta is given + if beta != 1 + a_hist = reshape(a_hist * (beta^(collect(N:0)/ 2)), N + 1, 1) end - ā = a_hist - W_m * y_m # ā from the lecutre - Uy = \(L, ā) # U @ ȳ = L^{-1}ā from the lecture - ȳ = \(U, Uy) # ȳ = U^{-1}L^{-1}ā - # Reverse the order of ȳ with the matrix J + a_bar = a_hist - W_m * y_m # a_bar from the lecutre + Uy = \(L, a_bar) # U @ y_bar = L^{-1}a_bar from the lecture + y_bar = \(U, Uy) # y_bar = U^{-1}L^{-1}a_bar + # Reverse the order of y_bar with the matrix J J = reverse(I + zeros(N+m+1, N + m + 1), dims = 2) 
- y_hist = J * vcat(ȳ, y_m) # y_hist : concatenated y_m and ȳ - # transform the optimal sequence back if β is given - if β != 1 - y_hist = y_hist .* β.^(- collect(-m:N)/2) + y_hist = J * vcat(y_bar, y_m) # y_hist : concatenated y_m and y_bar + # transform the optimal sequence back if beta is given + if beta != 1 + y_hist = y_hist .* beta.^(- collect(-m:N)/2) end else # if the problem is stochastic and we look at it Ea_hist = reshape(predict(lqf, a_hist, t), N + 1, 1) Ea_hist = J * Ea_hist - ā = Ea_hist - W_m * y_m # ā from the lecutre - Uy = \(L, ā) # U @ ȳ = L^{-1}ā from the lecture - ȳ = \(U, Uy) # ȳ = U^{-1}L^{-1}ā + a_bar = Ea_hist - W_m * y_m # a_bar from the lecutre + Uy = \(L, a_bar) # U @ y_bar = L^{-1}a_bar from the lecture + y_bar = \(U, Uy) # y_bar = U^{-1}L^{-1}a_bar - # Reverse the order of ȳ with the matrix J + # Reverse the order of y_bar with the matrix J J = reverse(I + zeros(N + m + 1, N + m + 1), dims = 2) - y_hist = J * vcat(ȳ, y_m) # y_hist : concatenated y_m and ȳ + y_hist = J * vcat(y_bar, y_m) # y_hist : concatenated y_m and y_bar end - return y_hist, L, U, ȳ + return y_hist, L, U, y_bar end ``` @@ -1144,27 +1144,29 @@ using Test ```{code-cell} julia - # set seed and generate a_t sequence Random.seed!(123) n = 100 -a_seq = sin.(range(0, 5 * pi, length = n)) .+ 2 + 0.1 * randn(n) - -function plot_simulation(;γ=0.8, m=1, h=1., y_m=2.) - d = γ * [1, -1] - y_m = [y_m] - - testlq = LQFilter(d, h, y_m) - y_hist, L, U, y = optimal_y(testlq, a_seq) - y = y[end:-1:1] # reverse y - - # plot simulation results - time = 1:length(y) - plt = plot(time, a_seq / h, lw=2, color=:black, alpha=0.8, marker = :circle, - markersize = 2, label=L"a_t") - plot!(plt, time, y, lw=2, color=:blue, marker = :circle, markersize = 2, alpha=0.8, - label=L"y_t") - plot!(plt, xlabel="Time", grid=true, xlim=(0,maximum(time)), legend=:bottomleft) +a_seq = sin.(range(0, 5 * pi, length = n)) .+ 2 + 0.1 * randn(n) + +function plot_simulation(; gamma = 0.8, m = 1, h = 1.0, y_m = 2.0) + d = gamma * [1, -1] + y_m = [y_m] + + testlq = LQFilter(d, h, y_m) + y_hist, L, U, y = optimal_y(testlq, a_seq) + y = y[end:-1:1] # reverse y + + # plot simulation results + time = 1:length(y) + plt = plot(time, a_seq / h, lw = 2, color = :black, alpha = 0.8, + marker = :circle, + markersize = 2, label = L"a_t") + plot!(plt, time, y, lw = 2, color = :blue, marker = :circle, markersize = 2, + alpha = 0.8, + label = L"y_t") + plot!(plt, xlabel = "Time", grid = true, xlim = (0, maximum(time)), + legend = :bottomleft) end plot_simulation() @@ -1173,21 +1175,21 @@ plot_simulation() Here's what happens when we change $\gamma$ to 5.0 ```{code-cell} julia -plot_simulation(γ=5.0) +plot_simulation(gamma = 5.0) ``` And here's $\gamma = 10$ ```{code-cell} julia -plot_simulation(γ=10.0) +plot_simulation(gamma = 10.0) ``` ```{code-cell} julia --- tags: [remove-cell] --- -γ = 10.0 -d = γ*[-1, 1] +gamma = 10.0 +d = gamma*[-1, 1] y_hist, L, U, y = optimal_y(LQFilter(d , 1., [2.]), a_seq) @testset begin diff --git a/lectures/time_series_models/multiplicative_functionals.md b/lectures/time_series_models/multiplicative_functionals.md index 6f114691..c867628e 100644 --- a/lectures/time_series_models/multiplicative_functionals.md +++ b/lectures/time_series_models/multiplicative_functionals.md @@ -113,15 +113,18 @@ using Test, Random ```{code-cell} julia using LinearAlgebra, Statistics -using Distributions, LaTeXStrings, Parameters, Plots, QuantEcon +using Distributions, LaTeXStrings, Plots, QuantEcon import Distributions: loglikelihood ``` 
```{code-cell} julia -AMF_LSS_VAR = @with_kw (A, B, D, F = 0.0, ν = 0.0, lss = construct_ss(A, B, D, F, ν)) +function AMF_LSS_VAR(; A, B, D, F = 0.0, nu = 0.0, + lss = construct_ss(A, B, D, F, nu)) + return (; A, B, D, F, nu, lss) +end -function construct_ss(A, B, D, F, ν) +function construct_ss(A, B, D, F, nu) H, g = additive_decomp(A, B, D, F) # Build A matrix for LSS @@ -129,7 +132,7 @@ function construct_ss(A, B, D, F, ν) A1 = [1 0 0 0 0] # Transition for 1 A2 = [1 1 0 0 0] # Transition for t A3 = [0 0 A 0 0] # Transition for x_{t+1} - A4 = [ν 0 D 1 0] # Transition for y_{t+1} + A4 = [nu 0 D 1 0] # Transition for y_{t+1} A5 = [0 0 0 0 1] # Transition for m_{t+1} Abar = vcat(A1, A2, A3, A4, A5) @@ -142,7 +145,7 @@ function construct_ss(A, B, D, F, ν) G2 = [0 0 0 1 0] # Selector for y_{t} G3 = [0 0 0 0 1] # Selector for martingale G4 = [0 0 -g 0 0] # Selector for stationary - G5 = [0 ν 0 0 0] # Selector for trend + G5 = [0 nu 0 0 0] # Selector for trend Gbar = vcat(G1, G2, G3, G4, G5) # Build LSS struct @@ -159,22 +162,22 @@ function additive_decomp(A, B, D, F) return H, g end -function multiplicative_decomp(A, B, D, F, ν) +function multiplicative_decomp(A, B, D, F, nu) H, g = additive_decomp(A, B, D, F) - ν_tilde = ν + 0.5 * H^2 + nu_tilde = nu + 0.5 * H^2 - return ν_tilde, H, g + return nu_tilde, H, g end function loglikelihood_path(amf, x, y) - @unpack A, B, D, F = amf + (; A, B, D, F) = amf T = length(y) FF = F^2 FFinv = inv(FF) - temp = y[2:end] - y[1:end-1] - D*x[1:end-1] - obs = temp .* FFinv .* temp + temp = y[2:end] - y[1:(end - 1)] - D * x[1:(end - 1)] + obs = temp .* FFinv .* temp obssum = cumsum(obs) - scalar = (log(FF) + log(2pi)) * (1:T-1) + scalar = (log(FF) + log(2pi)) * (1:(T - 1)) return -0.5 * (obssum + scalar) end @@ -220,7 +223,7 @@ function population_means(amf, T = 150) # Pull out moment generator moment_generator = moment_sequence(amf.lss) - for (tt, x) = enumerate(moment_generator) + for (tt, x) in enumerate(moment_generator) ymeans = x[2] xmean[tt] = ymeans[1] ymean[tt] = ymeans[2] @@ -266,13 +269,13 @@ Xmean_pop, Ymean_pop = population_means(amf, T) # Plot sample means vs population means plt_1 = plot(Xmean_t', color = :blue, label = L"(1/I) \sum_i x_t^i") plot!(plt_1, Xmean_pop, color = :black, label = L"E x_t") -plot!(plt_1, title = L"x_t", xlim = (0, T), legend = :bottomleft) +plot!(plt_1, title = L"x_t", xlim = (0, T), legend = :outertopright) plt_2 = plot(Ymean_t', color = :blue, label = L"(1/I) \sum_i x_t^i") plot!(plt_2, Ymean_pop, color = :black, label = L"E y_t") -plot!(plt_2, title = L"y_t", xlim = (0, T), legend = :bottomleft) +plot!(plt_2, title = L"y_t", xlim = (0, T), legend = :outertopright) -plot(plt_1, plt_2, layout = (2, 1), size = (800,500)) +plot(plt_1, plt_2, layout = (2, 1), size = (800, 500)) ``` ### Simulating log-likelihoods @@ -303,7 +306,7 @@ function simulate_likelihood(amf, Xit, Yit) I, T = size(Xit) # Allocate space - LLit = zeros(I, T-1) + LLit = zeros(I, T - 1) for i in 1:I LLit[i, :] = loglikelihood_path(amf, Xit[i, :], Yit[i, :]) @@ -320,7 +323,8 @@ LLmean_t = mean(LLT) plot(seriestype = :histogram, LLT, label = "") plot!(title = L"Distribution of $(I/T)log(L_T)|\theta_0$") -vline!([LLmean_t], linestyle = :dash, color = :black, lw = 2, alpha = 0.6, label = "") +vline!([LLmean_t], linestyle = :dash, color = :black, lw = 2, alpha = 0.6, + label = "") ``` ```{code-cell} julia @@ -347,13 +351,13 @@ Let's see this in a simulation normdist = Normal(0, F) mult = 1.175 println("The pdf at +/- $mult sigma takes the value: 
$(pdf(normdist,mult*F))") -println("Probability of dL being larger than 1 is approx: "* +println("Probability of dL being larger than 1 is approx: " * "$(cdf(normdist,mult*F)-cdf(normdist,-mult*F))") # Compare this to the sample analogue: -L_increment = LLit[:,2:end] - LLit[:,1:end-1] -r,c = size(L_increment) -frac_nonegative = sum(L_increment.>=0)/(c*r) +L_increment = LLit[:, 2:end] - LLit[:, 1:(end - 1)] +r, c = size(L_increment) +frac_nonegative = sum(L_increment .>= 0) / (c * r) print("Fraction of dlogL being nonnegative in the sample is: $(frac_nonegative)") ``` @@ -370,7 +374,7 @@ end Let's also plot the conditional pdf of $\Delta y_{t+1}$ ```{code-cell} julia -xgrid = range(-1, 1, length = 100) +xgrid = range(-1, 1, length = 100) println("The pdf at +/- one sigma takes the value: $(pdf(normdist, F)) ") plot(xgrid, pdf.(normdist, xgrid), label = "") plot!(title = L"Conditional pdf $f(\Delta y_{t+1} | x_t)$") @@ -418,16 +422,17 @@ Random.seed!(42); ```{code-cell} julia # Create the second (wrong) alternative model -amf2 = AMF_LSS_VAR(A = 0.9, B = 1.0, D = 0.55, F = 0.25) # parameters for θ_1 closer to θ_0 +amf2 = AMF_LSS_VAR(A = 0.9, B = 1.0, D = 0.55, F = 0.25) # parameters for theta_1 closer to theta_0 # Get likelihood from each path x^{i}, y^{i} LLit2 = simulate_likelihood(amf2, Xit, Yit) -LLT2 = 1/(T-1) * LLit2[:, end] +LLT2 = 1 / (T - 1) * LLit2[:, end] LLmean_t2 = mean(LLT2) plot(seriestype = :histogram, LLT2, label = "") -vline!([LLmean_t2], color = :black, lw = 2, linestyle = :dash, alpha = 0.6, label = "") +vline!([LLmean_t2], color = :black, lw = 2, linestyle = :dash, alpha = 0.6, + label = "") plot!(title = L"Distribution of $(1/T)log(L_T) | \theta_1)$") ``` @@ -444,9 +449,10 @@ end Let's see a histogram of the log-likelihoods under the true and the alternative model (same sample paths) ```{code-cell} julia -plot(seriestype = :histogram, LLT, bin = 50, alpha = 0.5, label = "True", normed = true) -plot!(seriestype = :histogram, LLT2, bin = 50, alpha = 0.5, label = "Alternative", - normed = true) +plot(seriestype = :histogram, LLT, bin = 50, alpha = 0.5, label = "True", + normed = true) +plot!(seriestype = :histogram, LLT2, bin = 50, alpha = 0.5, + label = "Alternative", normed = true) vline!([mean(LLT)], color = :black, lw = 2, linestyle = :dash, label = "") vline!([mean(LLT2)], color = :black, lw = 2, linestyle = :dash, label = "") ``` @@ -572,8 +578,8 @@ Random.seed!(42); ```{code-cell} julia function simulate_martingale_components(amf, T = 1_000, I = 5_000) # Get the multiplicative decomposition - @unpack A, B, D, F, ν, lss = amf - ν, H, g = multiplicative_decomp(A, B, D, F, ν) + (; A, B, D, F, nu, lss) = amf + nu, H, g = multiplicative_decomp(A, B, D, F, nu) # Allocate space add_mart_comp = zeros(I, T) @@ -585,13 +591,13 @@ function simulate_martingale_components(amf, T = 1_000, I = 5_000) add_mart_comp[i, :] = bar[3, :] end - mul_mart_comp = exp.(add_mart_comp' .- (0:T-1) * H^2 / 2)' + mul_mart_comp = exp.(add_mart_comp' .- (0:(T - 1)) * H^2 / 2)' return add_mart_comp, mul_mart_comp end # Build model -amf_2 = AMF_LSS_VAR(A = 0.8, B = 0.001, D = 1.0, F = 0.01, ν = 0.005) +amf_2 = AMF_LSS_VAR(A = 0.8, B = 0.001, D = 1.0, F = 0.01, nu = 0.005) amc, mmc = simulate_martingale_components(amf_2, 1_000, 5_000) @@ -666,16 +672,16 @@ We will plot the densities of $\log {\widetilde M}_t$ for different values of $t Here is some code that tackles these tasks ```{code-cell} julia -function Mtilde_t_density(amf, t; xmin = 1e-8, xmax = 5.0, npts = 5000) +function Mtilde_t_density(amf, 
t; xmin = 1e-8, xmax = 5.0, npts = 50)
+    (; A, B, D, F, nu) = amf
 
     # Pull out the multiplicative decomposition
-    νtilde, H, g =
-        multiplicative_decomp(amf.A, amf.B, amf.D, amf.F, amf.ν)
-    H2 = H*H
+    nutilde, H, g = multiplicative_decomp(A, B, D, F, nu)
+    H2 = H * H
 
     # The distribution
     mdist = LogNormal(-t * H2 / 2, sqrt(t * H2))
 
-    x = range(xmin, xmax, length = npts)
+    x = range(xmin, xmax, length = npts)
     p = pdf.(mdist, x)
 
     return x, p
@@ -684,29 +690,32 @@ end
 
 function logMtilde_t_density(amf, t; xmin = -15.0, xmax = 15.0, npts = 5000)
 
     # Pull out the multiplicative decomposition
-    @unpack A, B, D, F, ν = amf
-    νtilde, H, g = multiplicative_decomp(A, B, D, F, ν)
+    (; A, B, D, F, nu) = amf
+    nutilde, H, g = multiplicative_decomp(A, B, D, F, nu)
     H2 = H * H
 
     # The distribution
     lmdist = Normal(-t * H2 / 2, sqrt(t * H2))
 
-    x = range(xmin, xmax, length = npts)
+    x = range(xmin, xmax, length = npts)
     p = pdf.(lmdist, x)
 
     return x, p
 end
 
 times_to_plot = [10, 100, 500, 1000, 2500, 5000]
-dens_to_plot = [Mtilde_t_density(amf_2, t, xmin=1e-8, xmax=6.0) for t in times_to_plot]
-ldens_to_plot = [logMtilde_t_density(amf_2, t, xmin=-10.0, xmax=10.0) for t in times_to_plot]
+dens_to_plot = [Mtilde_t_density(amf_2, t, xmin = 1e-8, xmax = 6.0)
+                for t in times_to_plot]
+ldens_to_plot = [logMtilde_t_density(amf_2, t, xmin = -10.0, xmax = 10.0)
+                 for t in times_to_plot]
 
 # plot_title = "Densities of M_t^tilda" is required, however, plot_title is not yet
 # supported in Plots
-plots = plot(layout = (3,2), size = (600,800))
+plots = plot(layout = (3, 2), size = (600, 800))
 
 for (it, dens_t) in enumerate(dens_to_plot)
     x, pdf = dens_t
-    plot!(plots[it], title = "Density for time (time_to_plot[it])")
+    plot!(plots[it], title = "Density for time $(times_to_plot[it])",
+          titlefontsize = 10)
     plot!(plots[it], pdf, fillrange = 0, label = "")
 end
 plot(plots)
 ```
@@ -739,28 +748,29 @@ $\nu = \tilde{\nu}$.
 
 Here's our code
 
 ```{code-cell} julia
-function Uu(amf, δ, γ)
-    @unpack A, B, D, F, ν = amf
-    ν_tilde, H, g = multiplicative_decomp(A, B, D, F, ν)
+function Uu(amf, delta, gamma)
+    (; A, B, D, F, nu) = amf
+    nu_tilde, H, g = multiplicative_decomp(A, B, D, F, nu)
 
-    resolv = 1 / (1 - exp(-δ) * A)
+    resolv = 1 / (1 - exp(-delta) * A)
     vect = F + D * resolv * B
 
-    U_risky = exp(-δ) * resolv * D
-    u_risky = exp(-δ) / (1 - exp(-δ)) * (ν + 0.5 * (1 - γ) * (vect^2))
+    U_risky = exp(-delta) * resolv * D
+    u_risky = exp(-delta) / (1 - exp(-delta)) *
+              (nu + 0.5 * (1 - gamma) * (vect^2))
 
     U_det = 0
-    u_det = exp(-δ) / (1 - exp(-δ)) * ν_tilde
+    u_det = exp(-delta) / (1 - exp(-delta)) * nu_tilde
 
     return U_risky, u_risky, U_det, u_det
 end
 
 # Set remaining parameters
-δ = 0.02
-γ = 2.0
+delta = 0.02
+gamma = 2.0
 
 # Get coeffs
-U_r, u_r, U_d, u_d = Uu(amf_2, δ, γ)
+U_r, u_r, U_d, u_d = Uu(amf_2, delta, gamma)
 ```
 
 The values of the two processes are