diff --git a/.github/workflows/collab.yml b/.github/workflows/collab.yml index f2ede9bf..7a3284d4 100644 --- a/.github/workflows/collab.yml +++ b/.github/workflows/collab.yml @@ -30,7 +30,7 @@ jobs: - name: Install Build Software shell: bash -l {0} run: | - pip install jupyter-book==0.15.1 docutils==0.17.1 quantecon-book-theme==0.7.1 sphinx-tojupyter==0.3.0 sphinxext-rediraffe==0.2.7 sphinx-exercise==0.4.1 sphinxcontrib-youtube==1.1.0 sphinx-togglebutton==0.3.1 arviz==0.13.0 sphinx_proof==0.1.3 + pip install jupyter-book==0.15.1 docutils==0.17.1 quantecon-book-theme==0.7.2 sphinx-tojupyter==0.3.0 sphinxext-rediraffe==0.2.7 sphinx-exercise==0.4.1 sphinxcontrib-youtube==1.1.0 sphinx-togglebutton==0.3.1 arviz==0.13.0 sphinx_proof==0.2.0 sphinx_reredirects==0.1.3 # Build of HTML (Execution Testing) - name: Build HTML shell: bash -l {0} diff --git a/environment.yml b/environment.yml index 52036b80..f60099b5 100644 --- a/environment.yml +++ b/environment.yml @@ -4,19 +4,20 @@ channels: - conda-forge dependencies: - python=3.11 - - anaconda=2024.02 + - anaconda=2024.06 - pip - pip: - jupyter-book==0.15.1 - docutils==0.17.1 - - quantecon-book-theme==0.7.1 + - quantecon-book-theme==0.7.2 - sphinx-tojupyter==0.3.0 - sphinxext-rediraffe==0.2.7 - sphinx-exercise==0.4.1 - - sphinx-proof==0.1.3 + - sphinx-proof==0.2.0 - ghp-import==1.1.0 - sphinxcontrib-youtube==1.1.0 - sphinx-togglebutton==0.3.1 + - sphinx_reredirects==0.1.3 # Sandpit Requirements # - PuLP # - cvxpy diff --git a/lectures/_config.yml b/lectures/_config.yml index 59ada928..ecd808f0 100644 --- a/lectures/_config.yml +++ b/lectures/_config.yml @@ -35,7 +35,7 @@ latex: targetname: quantecon-python-intro.tex sphinx: - extra_extensions: [sphinx_multitoc_numbering, sphinxext.rediraffe, sphinx_exercise, sphinx_togglebutton, sphinx.ext.intersphinx, sphinx_proof, sphinx_tojupyter] + extra_extensions: [sphinx_multitoc_numbering, sphinxext.rediraffe, sphinx_exercise, sphinx_togglebutton, sphinx.ext.intersphinx, 
sphinx_proof, sphinx_tojupyter, sphinx_reredirects] config: bibtex_reference_style: author_year # false-positive links @@ -45,6 +45,7 @@ sphinx: width: 80% nb_code_prompt_show: "Show {type}" suppress_warnings: [mystnb.unknown_mime_type, myst.domains] + proof_minimal_theme: true # ------------- html_js_files: - https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js @@ -70,8 +71,6 @@ sphinx: analytics: google_analytics_id: G-QDS1YRJNGM launch_buttons: - notebook_interface : classic # The interface interactive links will activate ["classic", "jupyterlab"] - binderhub_url : https://mybinder.org # The URL of the BinderHub (e.g., https://mybinder.org) colab_url : https://colab.research.google.com thebe : false # Add a thebe button to pages (requires the repository to run on Binder) intersphinx_mapping: @@ -110,6 +109,9 @@ sphinx: mathjax_path: https://cdn.jsdelivr.net/npm/mathjax@3/es5/tex-mml-chtml.js rediraffe_redirects: index_toc.md: intro.md + # Remote Redirects + redirects: + ak2: https://python.quantecon.org/ak2.html tojupyter_static_file_path: ["_static"] tojupyter_target_html: true tojupyter_urlpath: "https://intro.quantecon.org/" diff --git a/lectures/_static/lecture_specific/long_run_growth/tooze_ch1_graph.png b/lectures/_static/lecture_specific/long_run_growth/tooze_ch1_graph.png index a3833f10..3ae6891e 100644 Binary files a/lectures/_static/lecture_specific/long_run_growth/tooze_ch1_graph.png and b/lectures/_static/lecture_specific/long_run_growth/tooze_ch1_graph.png differ diff --git a/lectures/_static/quant-econ.bib b/lectures/_static/quant-econ.bib index 34cad8c4..61f867e2 100644 --- a/lectures/_static/quant-econ.bib +++ b/lectures/_static/quant-econ.bib @@ -3,6 +3,25 @@ Note: Extended Information (like abstracts, doi, url's etc.) 
can be found in quant-econ-extendedinfo.bib file in _static/ ### + +@book{russell2004history, + title={History of western philosophy}, + author={Russell, Bertrand}, + year={2004}, + publisher={Routledge} +} + +@article{north1989, + title={Constitutions and commitment: the evolution of institutions governing public choice in seventeenth-century England}, + author={North, Douglass C and Weingast, Barry R}, + journal={The journal of economic history}, + volume={49}, + number={4}, + pages={803--832}, + year={1989}, + publisher={Cambridge University Press} +} + @incollection{keynes1940pay, title={How to Pay for the War}, author={Keynes, John Maynard}, diff --git a/lectures/_toc.yml b/lectures/_toc.yml index 220ba830..8e4de355 100644 --- a/lectures/_toc.yml +++ b/lectures/_toc.yml @@ -56,7 +56,6 @@ parts: - file: unpleasant - file: money_inflation_nonlinear - file: laffer_adaptive - - file: ak2 - caption: Stochastic Dynamics numbered: true chapters: diff --git a/lectures/ak2.md b/lectures/ak2.md deleted file mode 100644 index 67726502..00000000 --- a/lectures/ak2.md +++ /dev/null @@ -1,1280 +0,0 @@ ---- -jupytext: - text_representation: - extension: .md - format_name: myst - format_version: 0.13 - jupytext_version: 1.14.1 -kernelspec: - display_name: Python 3 (ipykernel) - language: python - name: python3 ---- - -# Transitions in an Overlapping Generations Model - -In addition to what’s in Anaconda, this lecture will need the following libraries: - -```{code-cell} ipython3 -:tags: [hide-output] -!pip install --upgrade quantecon -``` - -## Introduction - - -This lecture presents a life-cycle model consisting of overlapping generations of two-period lived people proposed by Peter Diamond -{cite}`diamond1965national`. - -We'll present the version that was analyzed in chapter 2 of Auerbach and -Kotlikoff (1987) {cite}`auerbach1987dynamic`. 
- -Auerbach and Kotlikoff (1987) used their two period model as a warm-up for their analysis of overlapping generation models of long-lived people that is the main topic of their book. - -Their model of two-period lived overlapping generations is a useful starting point because - -* it sets forth the structure of interactions between generations of different agents who are alive at a given date -* it activates forces and tradeoffs confronting the government and successive generations of people -* it is good laboratory for studying connections between government tax and subsidy programs and for policies for issuing and servicing government debt -* some interesting experiments involving transitions from one steady state to another can be computed by hand -* it is a good setting for illustrating a **shooting method** for solving a system of non-linear difference equations with initial and terminal condition - - ```{note} -Auerbach and Kotlikoff use computer code to calculate transition paths for their models with long-lived people. -``` - -We take the liberty of extending Auerbach and Kotlikoff's chapter 2 model to study some arrangements for redistributing resources across generations - - * these take the form of a sequence of age-specific lump sum taxes and transfers - -We study how these arrangements affect capital accumulation and government debt - -## Setting - -Time is discrete and is indexed by $t=0, 1, 2, \ldots$. - -The economy lives forever, but the people inside it do not. - -At each time $ t \geq 0$ a representative old person and a representative young person are alive. - -At time $t$ a representative old person coexists with a representative young person who will become an old person at time $t+1$. - -We assume that the population size is constant over time. - -A young person works, saves, and consumes. - -An old person dissaves and consumes, but does not work, - -A government lives forever, i.e., at $t=0, 1, 2, \ldots $. 
- -Each period $t \geq 0$, the government taxes, spends, transfers, and borrows. - - - - -Initial conditions set outside the model at time $t=0$ are - -* $K_0$ -- initial capital stock brought into time $t=0$ by a representative initial old person -* $D_0$ -- government debt falling due at $t=0$ and owned by a representative old person at time $t=0$ - -$K_0$ and $D_0$ are both measured in units of time $0$ goods. - -A government **policy** consists of five sequences $\{G_t, D_t, \tau_t, \delta_{ot}, \delta_{yt}\}_{t=0}^\infty $ whose components are - - * $\tau_t$ -- flat rate tax at time $t$ on wages and earnings from capital and government bonds - * $D_t$ -- one-period government bond principal due at time $t$, per capita - * $G_t$ -- government purchases of goods at time $t$, per capita - * $\delta_{yt}$ -- a lump sum tax on each young person at time $t$ - * $\delta_{ot}$ -- a lump sum tax on each old person at time $t$ - - - -An **allocation** is a collection of sequences $\{C_{yt}, C_{ot}, K_{t+1}, L_t, Y_t, G_t\}_{t=0}^\infty $; constituents of the sequences include - - * $K_t$ -- physical capital per capita - * $L_t$ -- labor per capita - * $Y_t$ -- output per capita - -and also - -* $C_{yt}$ -- consumption of young person at time $t \geq 0$ -* $C_{ot}$ -- consumption of old person at time $t \geq 0$ -* $K_{t+1} - K_t \equiv I_t $ -- investment in physical capital at time $t \geq 0$ -* $G_t$ -- government purchases - -National income and product accounts consist of a sequence of equalities - -* $Y_t = C_{yt} + C_{ot} + (K_{t+1} - K_t) + G_t, \quad t \geq 0$ - -A **price system** is a pair of sequences $\{W_t, r_t\}_{t=0}^\infty$; constituents of a price sequence include rental rates for the factors of production - -* $W_t$ -- rental rate for labor at time $t \geq 0$ -* $r_t$ -- rental rate for capital at time $t \geq 0$ - - -## Production - -There are two factors of production, physical capital $K_t$ and labor $L_t$. - -Capital does not depreciate. 
- -The initial capital stock $K_0$ is owned by the representative initial old person, who rents it to the firm at time $0$. - -Net investment rate $I_t$ at time $t$ is - -$$ -I_t = K_{t+1} - K_t -$$ - -The capital stock at time $t$ emerges from cumulating past rates of investment: - -$$ -K_t = K_0 + \sum_{s=0}^{t-1} I_s -$$ - -A Cobb-Douglas technology converts physical capital $K_t$ and labor services $L_t$ into -output $Y_t$ - -$$ -Y_t = K_t^\alpha L_t^{1-\alpha}, \quad \alpha \in (0,1) -$$ (eq:prodfn) - - -## Government - -At time $t-1$, the government issues one-period risk-free debt that promises to pay $D_t$ time $t$ goods per capita at time $t$. - -Young people at time $t$ purchase government debt $D_{t+1}$ that matures at time $t+1$. - -Government debt issued at $t$ bears a before-tax net rate of interest rate of $r_{t}$ at time $t+1$. - -The government budget constraint at time $t \geq 0$ is - -$$ -D_{t+1} - D_t = r_t D_t + G_t - T_t -$$ - -or - - - - -$$ -D_{t+1} = (1 + r_t) D_t + G_t - T_t . -$$ (eq:govbudgetsequence) - -Total tax collections net of transfers equal $T_t$ and satisfy - - -$$ -T_t = \tau_t W_t L_t + \tau_t r_t (D_t + K_t) + \delta_{yt} + \delta_{ot} -$$ - - - - -## Activities in Factor Markets - -**Old people:** At each $t \geq 0$, a representative old person - - * brings $K_t$ and $D_t$ into the period, - * rents capital to a representative firm for $r_{t} K_t$, - * pays taxes $\tau_t r_t (K_t+ D_t)$ on its rental and interest earnings, - * pays a lump sum tax $\delta_{ot}$ to the government, - * sells $K_t$ to a young person. 
- - - **Young people:** At each $t \geq 0$, a representative young person - * sells one unit of labor services to a representative firm for $W_t$ in wages, - * pays taxes $\tau_t W_t$ on its labor earnings - * pays a lump sum tax $\delta_{yt}$ to the goverment, - * spends $C_{yt}$ on consumption, - * acquires non-negative assets $A_{t+1}$ consisting of a sum of physical capital $K_{t+1}$ and one-period government bonds $D_{t+1}$ that mature at $t+1$. - -```{note} -If a lump-sum tax is negative, it means that the government pays the person a subsidy. -``` - - -## Representative firm's problem - -The representative firm hires labor services from young people at competitive wage rate $W_t$ and hires capital from old people at competitive rental rate -$r_t$. - -The rental rate on capital $r_t$ equals the interest rate on government one-period bonds. - -Units of the rental rates are: - -* for $W_t$, output at time $t$ per unit of labor at time $t$ -* for $r_t$, output at time $t$ per unit of capital at time $t$ - - -We take output at time $t$ as *numeraire*, so the price of output at time $t$ is one. - -The firm's profits at time $t$ are - -$$ -K_t^\alpha L_t^{1-\alpha} - r_t K_t - W_t L_t . -$$ - -To maximize profits a firm equates marginal products to rental rates: - -$$ -\begin{aligned} -W_t & = (1-\alpha) K_t^\alpha L_t^{-\alpha} \\ -r_t & = \alpha K_t^\alpha L_t^{1-\alpha} -\end{aligned} -$$ (eq:firmfonc) - -Output can be consumed either by old people or young people; or sold to young people who use it to augment the capital stock; or sold to the government for uses that do not generate utility for the people in the model (i.e., ``it is thrown into the ocean''). - - -The firm thus sells output to old people, young people, and the government. - - - - - - - - - -## Individuals' problems - -### Initial old person - -At time $t=0$, a representative initial old person is endowed with $(1 + r_0(1 - \tau_0)) A_0$ in initial assets. 
- -It must pay a lump sum tax to (if positive) or receive a subsidy from (if negative) -$\delta_{ot}$ the government. - -An old person's budget constraint is - - - -$$ -C_{o0} = (1 + r_0 (1 - \tau_0)) A_0 - \delta_{ot} . -$$ (eq:hbudgetold) - -An initial old person's utility function is $C_{o0}$, so the person's optimal consumption plan -is provided by equation {eq}`eq:hbudgetold`. - -### Young person - -At each $t \geq 0$, a young person inelastically supplies one unit of labor and in return -receives pre-tax labor earnings of $W_t$ units of output. - -A young person's post-tax-and-transfer earnings are $W_t (1 - \tau_t) - \delta_{yt}$. - -At each $t \geq 0$, a young person chooses a consumption plan $C_{yt}, C_{ot+1}$ -to maximize the Cobb-Douglas utility function - -$$ -U_t = C_{yt}^\beta C_{o,t+1}^{1-\beta}, \quad \beta \in (0,1) -$$ (eq:utilfn) - -subject to the following budget constraints at times $t$ and $t+1$: - -$$ -\begin{aligned} -C_{yt} + A_{t+1} & = W_t (1 - \tau_t) - \delta_{yt} \\ -C_{ot+1} & = (1+ r_{t+1} (1 - \tau_{t+1}))A_{t+1} - \delta_{ot} -\end{aligned} -$$ (eq:twobudgetc) - - -Solving the second equation of {eq}`eq:twobudgetc` for savings $A_{t+1}$ and substituting it into the first equation implies the present value budget constraint - -$$ -C_{yt} + \frac{C_{ot+1}}{1 + r_{t+1}(1 - \tau_{t+1})} = W_t (1 - \tau_t) - \delta_{yt} - \frac{\delta_{ot}}{1 + r_{t+1}(1 - \tau_{t+1})} -$$ (eq:onebudgetc) - -To solve the young person's choice problem, form a Lagrangian - -$$ -\begin{aligned} -{\mathcal L} & = C_{yt}^\beta C_{o,t+1}^{1-\beta} \\ & + \lambda \Bigl[ C_{yt} + \frac{C_{ot+1}}{1 + r_{t+1}(1 - \tau_{t+1})} - W_t (1 - \tau_t) + \delta_{yt} + \frac{\delta_{ot}}{1 + r_{t+1}(1 - \tau_{t+1})}\Bigr], -\end{aligned} -$$ (eq:lagC) - -where $\lambda$ is a Lagrange multiplier on the intertemporal budget constraint {eq}`eq:onebudgetc`. 
- - -After several lines of algebra, the intertemporal budget constraint {eq}`eq:onebudgetc` and the first-order conditions for maximizing ${\mathcal L}$ with respect to $C_{yt}, C_{ot+1}$ -imply that an optimal consumption plan satisfies - -$$ -\begin{aligned} -C_{yt} & = \beta \Bigl[ W_t (1 - \tau_t) - \delta_{yt} - \frac{\delta_{ot}}{1 + r_{t+1}(1 - \tau_{t+1})}\Bigr] \\ -\frac{C_{0t+1}}{1 + r_{t+1}(1-\tau_{t+1}) } & = (1-\beta) \Bigl[ W_t (1 - \tau_t) - \delta_{yt} - \frac{\delta_{ot}}{1 + r_{t+1}(1 - \tau_{t+1})}\Bigr] -\end{aligned} -$$ (eq:optconsplan) - -The first-order condition for minimizing Lagrangian {eq}`eq:lagC` with respect to the Lagrange multipler $\lambda$ recovers the budget constraint {eq}`eq:onebudgetc`, -which, using {eq}`eq:optconsplan` gives the optimal savings plan - -$$ -A_{t+1} = (1-\beta) [ (1- \tau_t) W_t - \delta_{yt}] + \beta \frac{\delta_{ot}}{1 + r_{t+1}(1 - \tau_{t+1})} -$$ (eq:optsavingsplan) - - -(sec-equilibrium)= -## Equilbrium - -**Definition:** An equilibrium is an allocation, a government policy, and a price system with the properties that -* given the price system and the government policy, the allocation solves - * representative firms' problems for $t \geq 0$ - * individual persons' problems for $t \geq 0$ -* given the price system and the allocation, the government budget constraint is satisfied for all $t \geq 0$. - - -## Next steps - - -To begin our analysis of equilibrium outcomes, we'll study the special case of the model with which Auerbach and -Kotlikoff (1987) {cite}`auerbach1987dynamic` began their analysis in chapter 2. - -It can be solved by hand. - -We shall do that next. - -After we derive a closed form solution, we'll pretend that we don't know and will compute equilibrium outcome paths. - -We'll do that by first formulating an equilibrium as a fixed point of a mapping from sequences of factor prices and tax rates to sequences of factor prices and tax rates. 
- -We'll compute an equilibrium by iterating to convergence on that mapping. - - -## Closed form solution - -To get the special chapter 2 case of Auerbach and Kotlikoff (1987) {cite}`auerbach1987dynamic`, we set both $\delta_{ot}$ and $\delta_{yt}$ to zero. - -As our special case of {eq}`eq:optconsplan`, we compute the following consumption-savings plan for a representative young person: - - -$$ -\begin{aligned} -C_{yt} & = \beta (1 - \tau_t) W_t \\ -A_{t+1} &= (1-\beta) (1- \tau_t) W_t -\end{aligned} -$$ - -Using {eq}`eq:firmfonc` and $A_t = K_t + D_t$, we obtain the following closed form transition law for capital: - -$$ -K_{t+1}=K_{t}^{\alpha}\left(1-\tau_{t}\right)\left(1-\alpha\right)\left(1-\beta\right) - D_{t}\\ -$$ (eq:Klawclosed) - -### Steady states - -From {eq}`eq:Klawclosed` and the government budget constraint {eq}`eq:govbudgetsequence`, we compute **time-invariant** or **steady state values** $\hat K, \hat D, \hat T$: - -$$ -\begin{aligned} -\hat{K} &=\hat{K}\left(1-\hat{\tau}\right)\left(1-\alpha\right)\left(1-\beta\right) - \hat{D} \\ -\hat{D} &= (1 + \hat{r}) \hat{D} + \hat{G} - \hat{T} \\ -\hat{T} &= \hat{\tau} \hat{Y} + \hat{\tau} \hat{r} \hat{D} . -\end{aligned} -$$ (eq:steadystates) - -These imply - -$$ -\begin{aligned} -\hat{K} &= \left[\left(1-\hat{\tau}\right)\left(1-\alpha\right)\left(1-\beta\right)\right]^{\frac{1}{1-\alpha}} \\ -\hat{\tau} &= \frac{\hat{G} + \hat{r} \hat{D}}{\hat{Y} + \hat{r} \hat{D}} -\end{aligned} -$$ - -Let's take an example in which - -1. there is no initial government debt, $D_t=0$, -2. 
government consumption $G_t$ equals $15\%$ of output $Y_t$ - -Our formulas for steady-state values tell us that - -$$ -\begin{aligned} -\hat{D} &= 0 \\ -\hat{G} &= 0.15 \hat{Y} \\ -\hat{\tau} &= 0.15 \\ -\end{aligned} -$$ - - - -### Implementation - -```{code-cell} ipython3 -import numpy as np -import matplotlib.pyplot as plt -from numba import njit -from quantecon.optimize import brent_max -``` - - -For parameters $\alpha = 0.3$ and $\beta = 0.5$, let's compute $\hat{K}$: - -```{code-cell} ipython3 -# parameters -α = 0.3 -β = 0.5 - -# steady states of τ and D -τ_hat = 0.15 -D_hat = 0. - -# solve for steady state of K -K_hat = ((1 - τ_hat) * (1 - α) * (1 - β)) ** (1 / (1 - α)) -K_hat -``` -Knowing $\hat K$, we can calculate other equilibrium objects. - -Let's first define some Python helper functions. - -```{code-cell} ipython3 -@njit -def K_to_Y(K, α): - - return K ** α - -@njit -def K_to_r(K, α): - - return α * K ** (α - 1) - -@njit -def K_to_W(K, α): - - return (1 - α) * K ** α - -@njit -def K_to_C(K, D, τ, r, α, β): - - # optimal consumption for the old when δ=0 - A = K + D - Co = A * (1 + r * (1 - τ)) - - # optimal consumption for the young when δ=0 - W = K_to_W(K, α) - Cy = β * W * (1 - τ) - - return Cy, Co -``` - -We can use these helper functions to obtain steady state values $\hat{Y}$, $\hat{r}$, and $\hat{W}$ associated with steady state values $\hat{K}$ and $\hat{r}$. 
- -```{code-cell} ipython3 -Y_hat, r_hat, W_hat = K_to_Y(K_hat, α), K_to_r(K_hat, α), K_to_W(K_hat, α) -Y_hat, r_hat, W_hat -``` - -Since steady state government debt $\hat{D}$ is $0$, all taxes are used to pay for government expenditures - -```{code-cell} ipython3 -G_hat = τ_hat * Y_hat -G_hat -``` - -We use the optimal consumption plans to find steady state consumptions for young and old - -```{code-cell} ipython3 -Cy_hat, Co_hat = K_to_C(K_hat, D_hat, τ_hat, r_hat, α, β) -Cy_hat, Co_hat -``` - -Let's store the steady state quantities and prices using an array called `init_ss` - -```{code-cell} ipython3 -init_ss = np.array([K_hat, Y_hat, Cy_hat, Co_hat, # quantities - W_hat, r_hat, # prices - τ_hat, D_hat, G_hat # policies - ]) -``` - - -### Transitions - - - -We have computed a steady state in which the government policy sequences are each constant over time. - - -We'll use this steady state as an initial condition at time $t=0$ for another economy in which government policy sequences are with time-varying sequences. - -To make sense of our calculation, we'll treat $t=0$ as time when a huge unanticipated shock occurs in the form of - - * a time-varying government policy sequences that disrupts an original steady state - * new government policy sequences are eventually time-invariant in the sense that after some date $T >0$, each sequence is constant over time. - * sudden revelation of a new government policy in the form of sequences starting at time $t=0$ - -We assume that everyone, including old people at time $t=0$, knows the new government policy sequence and chooses accordingly. - - - - -As the capital stock and other aggregates adjust to the fiscal policy change over time, the economy will approach a new steady state. - -We can find a transition path from an old steady state to a new steady state by employing a fixed-point algorithm in a space of sequences. 
- -But in our special case with its closed form solution, we have available a simpler and faster -approach. - -Here we define a Python class `ClosedFormTrans` that computes length $T$ transition path in response to a particular fiscal policy change. - -We choose $T$ large enough so that we have gotten very close to a new steady state after $T$ periods. - -The class takes three keyword arguments, `τ_pol`, `D_pol`, and `G_pol`. - -These are sequences of tax rate, government debt level, and government purchases, respectively. - -In each policy experiment below, we will pass two out of three as inputs required to depict a fiscal policy. - -We'll then compute the single remaining undetermined policy variable from the government budget constraint. - -When we simulate transition paths, it is useful to distinguish **state variables** at time $t$ such as $K_t, Y_t, D_t, W_t, r_t$ from **control variables** that include $C_{yt}, C_{ot}, \tau_{t}, G_t$. - -```{code-cell} ipython3 -class ClosedFormTrans: - """ - This class simulates length T transitional path of a economy - in response to a fiscal policy change given its initial steady - state. The simulation is based on the closed form solution when - the lump sum taxations are absent. 
- - """ - - def __init__(self, α, β): - - self.α, self.β = α, β - - def simulate(self, - T, # length of transitional path to simulate - init_ss, # initial steady state - τ_pol=None, # sequence of tax rates - D_pol=None, # sequence of government debt levels - G_pol=None): # sequence of government purchases - - α, β = self.α, self.β - - # unpack the steady state variables - K_hat, Y_hat, Cy_hat, Co_hat = init_ss[:4] - W_hat, r_hat = init_ss[4:6] - τ_hat, D_hat, G_hat = init_ss[6:9] - - # initialize array containers - # K, Y, Cy, Co - quant_seq = np.empty((T+1, 4)) - - # W, r - price_seq = np.empty((T+1, 2)) - - # τ, D, G - policy_seq = np.empty((T+2, 3)) - - # t=0, starting from steady state - K0, Y0 = K_hat, Y_hat - W0, r0 = W_hat, r_hat - D0 = D_hat - - # fiscal policy - if τ_pol is None: - D1 = D_pol[1] - G0 = G_pol[0] - τ0 = (G0 + (1 + r0) * D0 - D1) / (Y0 + r0 * D0) - elif D_pol is None: - τ0 = τ_pol[0] - G0 = G_pol[0] - D1 = (1 + r0) * D0 + G0 - τ0 * (Y0 + r0 * D0) - elif G_pol is None: - D1 = D_pol[1] - τ0 = τ_pol[0] - G0 = τ0 * (Y0 + r0 * D0) + D1 - (1 + r0) * D0 - - # optimal consumption plans - Cy0, Co0 = K_to_C(K0, D0, τ0, r0, α, β) - - # t=0 economy - quant_seq[0, :] = K0, Y0, Cy0, Co0 - price_seq[0, :] = W0, r0 - policy_seq[0, :] = τ0, D0, G0 - policy_seq[1, 1] = D1 - - # starting from t=1 to T - for t in range(1, T+1): - - # transition of K - K_old, τ_old = quant_seq[t-1, 0], policy_seq[t-1, 0] - D = policy_seq[t, 1] - K = K_old ** α * (1 - τ_old) * (1 - α) * (1 - β) - D - - # output, capital return, wage - Y, r, W = K_to_Y(K, α), K_to_r(K, α), K_to_W(K, α) - - # to satisfy the government budget constraint - if τ_pol is None: - D = D_pol[t] - D_next = D_pol[t+1] - G = G_pol[t] - τ = (G + (1 + r) * D - D_next) / (Y + r * D) - elif D_pol is None: - τ = τ_pol[t] - G = G_pol[t] - D = policy_seq[t, 1] - D_next = (1 + r) * D + G - τ * (Y + r * D) - elif G_pol is None: - D = D_pol[t] - D_next = D_pol[t+1] - τ = τ_pol[t] - G = τ * (Y + r * D) + D_next - (1 + r) 
* D - - # optimal consumption plans - Cy, Co = K_to_C(K, D, τ, r, α, β) - - # store time t economy aggregates - quant_seq[t, :] = K, Y, Cy, Co - price_seq[t, :] = W, r - policy_seq[t, 0] = τ - policy_seq[t+1, 1] = D_next - policy_seq[t, 2] = G - - self.quant_seq = quant_seq - self.price_seq = price_seq - self.policy_seq = policy_seq - - return quant_seq, price_seq, policy_seq - - def plot(self): - - quant_seq = self.quant_seq - price_seq = self.price_seq - policy_seq = self.policy_seq - - fig, axs = plt.subplots(3, 3, figsize=(14, 10)) - - # quantities - for i, name in enumerate(['K', 'Y', 'Cy', 'Co']): - ax = axs[i//3, i%3] - ax.plot(range(T+1), quant_seq[:T+1, i], label=name) - ax.hlines(init_ss[i], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - - # prices - for i, name in enumerate(['W', 'r']): - ax = axs[(i+4)//3, (i+4)%3] - ax.plot(range(T+1), price_seq[:T+1, i], label=name) - ax.hlines(init_ss[i+4], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - - # policies - for i, name in enumerate(['τ', 'D', 'G']): - ax = axs[(i+6)//3, (i+6)%3] - ax.plot(range(T+1), policy_seq[:T+1, i], label=name) - ax.hlines(init_ss[i+6], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') -``` - -We can create an instance `closed` for model parameters $\{\alpha, \beta\}$ and use it for various fiscal policy experiments. - - -```{code-cell} ipython3 -closed = ClosedFormTrans(α, β) -``` - -(exp-tax-cut)= -### Experiment 1: Tax cut - -To illustrate the power of `ClosedFormTrans`, let's first experiment with the following fiscal policy change: - -1. at $t=0$, the government unexpectedly announces a one-period tax cut, $\tau_0 =(1-\frac{1}{3}) \hat{\tau}$, by issuing government debt $\bar{D}$ -2. from $t=1$, the government will keep $D_t=\bar{D}$ and adjust $\tau_{t}$ to collect taxation to pay for the government consumption and interest payments on the debt -3. 
government consumption $G_t$ will be fixed at $0.15 \hat{Y}$ - -The following equations completely characterize the equilibrium transition path originating from the initial steady state - -$$ -\begin{aligned} -K_{t+1} &= K_{t}^{\alpha}\left(1-\tau_{t}\right)\left(1-\alpha\right)\left(1-\beta\right) - \bar{D} \\ -\tau_{0} &= (1-\frac{1}{3}) \hat{\tau} \\ -\bar{D} &= \hat{G} - \tau_0\hat{Y} \\ -\quad\tau_{t} & =\frac{\hat{G}+r_{t} \bar{D}}{\hat{Y}+r_{t} \bar{D}} -\end{aligned} -$$ - -We can simulate the transition for $20$ periods, after which the economy will be close to a new steady state. - -The first step is to prepare sequences of policy variables that describe fiscal policy. - -We must define sequences of government expenditure $\{G_t\}_{t=0}^{T}$ and debt level $\{D_t\}_{t=0}^{T+1}$ in advance, then pass them to the solver. - -```{code-cell} ipython3 -T = 20 - -# tax cut -τ0 = τ_hat * (1 - 1/3) - -# sequence of government purchase -G_seq = τ_hat * Y_hat * np.ones(T+1) - -# sequence of government debt -D_bar = G_hat - τ0 * Y_hat -D_seq = np.ones(T+2) * D_bar -D_seq[0] = D_hat -``` - -Let's use the `simulate` method of `closed` to compute dynamic transitions. - -Note that we leave `τ_pol` as `None`, since the tax rates need to be determined to satisfy the government budget constraint. - -```{code-cell} ipython3 -quant_seq1, price_seq1, policy_seq1 = closed.simulate(T, init_ss, - D_pol=D_seq, - G_pol=G_seq) -closed.plot() -``` - -We can also experiment with a lower tax cut rate, such as $0.2$. 
- -```{code-cell} ipython3 -# lower tax cut rate -τ0 = 0.15 * (1 - 0.2) - -# the corresponding debt sequence -D_bar = G_hat - τ0 * Y_hat -D_seq = np.ones(T+2) * D_bar -D_seq[0] = D_hat - -quant_seq2, price_seq2, policy_seq2 = closed.simulate(T, init_ss, - D_pol=D_seq, - G_pol=G_seq) -``` - -```{code-cell} ipython3 -fig, axs = plt.subplots(3, 3, figsize=(14, 10)) - -# quantities -for i, name in enumerate(['K', 'Y', 'Cy', 'Co']): - ax = axs[i//3, i%3] - ax.plot(range(T+1), quant_seq1[:T+1, i], label=name+', 1/3') - ax.plot(range(T+1), quant_seq2[:T+1, i], label=name+', 0.2') - ax.hlines(init_ss[i], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# prices -for i, name in enumerate(['W', 'r']): - ax = axs[(i+4)//3, (i+4)%3] - ax.plot(range(T+1), price_seq1[:T+1, i], label=name+', 1/3') - ax.plot(range(T+1), price_seq2[:T+1, i], label=name+', 0.2') - ax.hlines(init_ss[i+4], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# policies -for i, name in enumerate(['τ', 'D', 'G']): - ax = axs[(i+6)//3, (i+6)%3] - ax.plot(range(T+1), policy_seq1[:T+1, i], label=name+', 1/3') - ax.plot(range(T+1), policy_seq2[:T+1, i], label=name+', 0.2') - ax.hlines(init_ss[i+6], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') -``` - -The economy with lower tax cut rate at $t=0$ has the same transitional pattern, but is less distorted, and it converges to a new steady state with higher physical capital stock. - -(exp-expen-cut)= -### Experiment 2: Government asset accumulation - -Assume that the economy is initially in the same steady state. - -Now the government promises to cut its spending on services and goods by half $\forall t \geq 0$. - -The government targets the same tax rate $\tau_t=\hat{\tau}$ and to accumulate assets $-D_t$ over time. - -To conduct this experiment, we pass `τ_seq` and `G_seq` as inputs and let `D_pol` be determined along the path by satisfying the government budget constraint. 
- -```{code-cell} ipython3 -# government expenditure cut by a half -G_seq = τ_hat * 0.5 * Y_hat * np.ones(T+1) - -# targeted tax rate -τ_seq = τ_hat * np.ones(T+1) - -closed.simulate(T, init_ss, τ_pol=τ_seq, G_pol=G_seq); -closed.plot() -``` - -As the government accumulates the asset and uses it in production, the rental rate on capital falls and private investment falls. - -As a result, the ratio $-\frac{D_t}{K_t}$ of the government asset to physical capital used in production will increase over time - -```{code-cell} ipython3 -plt.plot(range(T+1), -closed.policy_seq[:-1, 1] / closed.quant_seq[:, 0]) -plt.xlabel('t') -plt.title('-D/K'); -``` - -We want to know how this policy experiment affects individuals. - -In the long run, future cohorts will enjoy higher consumption throughout their lives because they will earn higher labor income when they work. - -However, in the short run, old people suffer because increases in their labor income are not big enough to offset their losses of capital income. - -Such distinct long run and short run effects motivate us to study transition paths. - -```{note} -Although the consumptions in the new steady state are strictly higher, it is at a cost of fewer public services and goods. -``` - - -### Experiment 3: Temporary expenditure cut - -Let's now investigate a scenario in which the government also cuts its spending by half and accumulates the asset. - -But now let the government cut its expenditures only at $t=0$. - -From $t \geq 1$, the government expeditures return to $\hat{G}$ and $\tau_t$ adjusts to maintain the asset level $-D_t = -D_1$. 
- -```{code-cell} ipython3 -# sequence of government purchase -G_seq = τ_hat * Y_hat * np.ones(T+1) -G_seq[0] = 0 - -# sequence of government debt -D_bar = G_seq[0] - τ_hat * Y_hat -D_seq = D_bar * np.ones(T+2) -D_seq[0] = D_hat - -closed.simulate(T, init_ss, D_pol=D_seq, G_pol=G_seq); -closed.plot() -``` - -The economy quickly converges to a new steady state with higher physical capital stock, lower interest rate, higher wage rate, and higher consumptions for both the young and the old. - -Even though government expenditure $G_t$ returns to its high initial level from $t \geq 1$, the government can balance the budget at a lower tax rate because it gathers additional revenue $-r_t D_t$ from the asset accumulated during the temporary cut in the spendings. - -As in {ref}`exp-expen-cut`, old perople early in the transition periods suffer from this policy shock. - - -## A computational strategy - -With the preceding caluations, we studied dynamic transitions instigated by alternative fiscal policies. - -In all these experiments, we maintained the assumption that lump sum taxes were absent so that $\delta_{yt}=0, \delta_{ot}=0$. - -In this section, we investigate the transition dynamics when the lump sum taxes are present. - -The government will use lump sum taxes and transfers to redistribute resources across successive -generations. - -Including lump sum taxes disrupts closed form solution because of how they make optimal consumption and saving plans depend on future prices and tax rates. - -Therefore, we compute equilibrium transitional paths by finding a fixed point of a mapping from sequences to sequences. - - * that fixed point pins down an equilibrium - -To set the stage for the entry of the mapping whose fixed point we seek, we return to concepts introduced in - section {ref}`sec-equilibrium`. 
- - -**Definition:** Given parameters $\{\alpha$, $\beta\}$, a competitive equilibrium consists of - -* sequences of optimal consumptions $\{C_{yt}, C_{ot}\}$ -* sequences of prices $\{W_t, r_t\}$ -* sequences of capital stock and output $\{K_t, Y_t\}$ -* sequences of tax rates, government assets (debt), government purchases $\{\tau_t, D_t, G_t\, \delta_{yt}, \delta_{ot}\}$ - -with the properties that - -* given the price system and government fiscal policy, consumption plans are optimal -* the government budget constraints are satisfied for all $t$ - -An equilibrium transition path can be computed by "guessing and verifying" some endogenous sequences. - -In our {ref}`exp-tax-cut` example, sequences $\{D_t\}_{t=0}^{T}$ and $\{G_t\}_{t=0}^{T}$ are exogenous. - -In addition, we assume that the lump sum taxes $\{\delta_{yt}, \delta_{ot}\}_{t=0}^{T}$ are given and known to everybody inside the model. - -We can solve for sequences of other equilibrium sequences following the steps below - -1. guess prices $\{W_t, r_t\}_{t=0}^{T}$ and tax rates $\{\tau_t\}_{t=0}^{T}$ -2. solve for optimal consumption and saving plans $\{C_{yt}, C_{ot}\}_{t=0}^{T}$, treating the guesses of future prices and taxes as true -3. solve for transition of the capital stock $\{K_t\}_{t=0}^{T}$ -4. update the guesses for prices and tax rates with the values implied by the equilibrium conditions -5. iterate until convergence - -Let's implement this "guess and verify" approach - -We start by defining the Cobb-Douglas utility function - -```{code-cell} ipython3 -@njit -def U(Cy, Co, β): - - return (Cy ** β) * (Co ** (1-β)) -``` - -We use `Cy_val` to compute the lifetime value of an arbitrary consumption plan, $C_y$, given the intertemporal budget constraint. - -Note that it requires knowing future prices $r_{t+1}$ and tax rate $\tau_{t+1}$. 
- -```{code-cell} ipython3 -@njit -def Cy_val(Cy, W, r_next, τ, τ_next, δy, δo_next, β): - - # Co given by the budget constraint - Co = (W * (1 - τ) - δy - Cy) * (1 + r_next * (1 - τ_next)) - δo_next - - return U(Cy, Co, β) -``` - -An optimal consumption plan $C_y^*$ can be found by maximizing `Cy_val`. - -Here is an example that computes optimal consumption $C_y^*=\hat{C}_y$ in the steady state with $\delta_{yt}=\delta_{ot}=0,$ like one that we studied earlier - -```{code-cell} ipython3 -W, r_next, τ, τ_next = W_hat, r_hat, τ_hat, τ_hat -δy, δo_next = 0, 0 - -Cy_opt, U_opt, _ = brent_max(Cy_val, # maximand - 1e-6, # lower bound - W*(1-τ)-δy-1e-6, # upper bound - args=(W, r_next, τ, τ_next, δy, δo_next, β)) - -Cy_opt, U_opt -``` - -Let's define a Python class `AK2` that computes the transition paths with the fixed-point algorithm. - -It can handle nonzero lump sum taxes - -```{code-cell} ipython3 -class AK2(): - """ - This class simulates length T transitional path of a economy - in response to a fiscal policy change given its initial steady - state. The transitional path is found by employing a fixed point - algorithm to satisfy the equilibrium conditions. 
- - """ - - def __init__(self, α, β): - - self.α, self.β = α, β - - def simulate(self, - T, # length of transitional path to simulate - init_ss, # initial steady state - δy_seq, # sequence of lump sum tax for the young - δo_seq, # sequence of lump sum tax for the old - τ_pol=None, # sequence of tax rates - D_pol=None, # sequence of government debt levels - G_pol=None, # sequence of government purchases - verbose=False, - max_iter=500, - tol=1e-5): - - α, β = self.α, self.β - - # unpack the steady state variables - K_hat, Y_hat, Cy_hat, Co_hat = init_ss[:4] - W_hat, r_hat = init_ss[4:6] - τ_hat, D_hat, G_hat = init_ss[6:9] - - # K, Y, Cy, Co - quant_seq = np.empty((T+2, 4)) - - # W, r - price_seq = np.empty((T+2, 2)) - - # τ, D, G - policy_seq = np.empty((T+2, 3)) - policy_seq[:, 1] = D_pol - policy_seq[:, 2] = G_pol - - # initial guesses of prices - price_seq[:, 0] = np.ones(T+2) * W_hat - price_seq[:, 1] = np.ones(T+2) * r_hat - - # initial guesses of policies - policy_seq[:, 0] = np.ones(T+2) * τ_hat - - # t=0, starting from steady state - quant_seq[0, :2] = K_hat, Y_hat - - if verbose: - # prepare to plot iterations until convergence - fig, axs = plt.subplots(1, 3, figsize=(14, 4)) - - # containers for checking convergence - price_seq_old = np.empty_like(price_seq) - policy_seq_old = np.empty_like(policy_seq) - - # start iteration - i_iter = 0 - while True: - - if verbose: - # plot current prices at ith iteration - for i, name in enumerate(['W', 'r']): - axs[i].plot(range(T+1), price_seq[:T+1, i]) - axs[i].set_title(name) - axs[i].set_xlabel('t') - axs[2].plot(range(T+1), policy_seq[:T+1, 0], - label=f'{i_iter}th iteration') - axs[2].legend(bbox_to_anchor=(1.05, 1), loc='upper left') - axs[2].set_title('τ') - axs[2].set_xlabel('t') - - # store old prices from last iteration - price_seq_old[:] = price_seq - policy_seq_old[:] = policy_seq - - # start updating quantities and prices - for t in range(T+1): - K, Y = quant_seq[t, :2] - W, r = price_seq[t, :] - r_next = 
price_seq[t+1, 1] - τ, D, G = policy_seq[t, :] - τ_next, D_next, G_next = policy_seq[t+1, :] - δy, δo = δy_seq[t], δo_seq[t] - δy_next, δo_next = δy_seq[t+1], δo_seq[t+1] - - # consumption for the old - Co = (1 + r * (1 - τ)) * (K + D) - δo - - # optimal consumption for the young - out = brent_max(Cy_val, 1e-6, W*(1-τ)-δy-1e-6, - args=(W, r_next, τ, τ_next, - δy, δo_next, β)) - Cy = out[0] - - quant_seq[t, 2:] = Cy, Co - τ_num = ((1 + r) * D + G - D_next - δy - δo) - τ_denom = (Y + r * D) - policy_seq[t, 0] = τ_num / τ_denom - - # saving of the young - A_next = W * (1 - τ) - δy - Cy - - # transition of K - K_next = A_next - D_next - Y_next = K_to_Y(K_next, α) - W_next, r_next = K_to_W(K_next, α), K_to_r(K_next, α) - - quant_seq[t+1, :2] = K_next, Y_next - price_seq[t+1, :] = W_next, r_next - - i_iter += 1 - - if (np.max(np.abs(price_seq_old - price_seq)) < tol) & \ - (np.max(np.abs(policy_seq_old - policy_seq)) < tol): - if verbose: - print(f"Converge using {i_iter} iterations") - break - - if i_iter > max_iter: - if verbose: - print(f"Fail to converge using {i_iter} iterations") - break - - self.quant_seq = quant_seq - self.price_seq = price_seq - self.policy_seq = policy_seq - - return quant_seq, price_seq, policy_seq - - def plot(self): - - quant_seq = self.quant_seq - price_seq = self.price_seq - policy_seq = self.policy_seq - - fig, axs = plt.subplots(3, 3, figsize=(14, 10)) - - # quantities - for i, name in enumerate(['K', 'Y', 'Cy', 'Co']): - ax = axs[i//3, i%3] - ax.plot(range(T+1), quant_seq[:T+1, i], label=name) - ax.hlines(init_ss[i], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - - # prices - for i, name in enumerate(['W', 'r']): - ax = axs[(i+4)//3, (i+4)%3] - ax.plot(range(T+1), price_seq[:T+1, i], label=name) - ax.hlines(init_ss[i+4], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - - # policies - for i, name in enumerate(['τ', 'D', 'G']): - ax = axs[(i+6)//3, (i+6)%3] - ax.plot(range(T+1), 
policy_seq[:T+1, i], label=name) - ax.hlines(init_ss[i+6], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') -``` - -We can initialize an instance of class `AK2` with model parameters $\{\alpha, \beta\}$ and then use it to conduct fiscal policy experiments. - -```{code-cell} ipython3 -ak2 = AK2(α, β) -``` - -We first examine that the "guess and verify" method leads to the same numerical results as we obtain with the closed form solution when lump sum taxes are muted - -```{code-cell} ipython3 -δy_seq = np.ones(T+2) * 0. -δo_seq = np.ones(T+2) * 0. - -D_pol = np.zeros(T+2) -G_pol = np.ones(T+2) * G_hat - -# tax cut -τ0 = τ_hat * (1 - 1/3) -D1 = D_hat * (1 + r_hat * (1 - τ0)) + G_hat - τ0 * Y_hat - δy_seq[0] - δo_seq[0] -D_pol[0] = D_hat -D_pol[1:] = D1 -``` - -```{code-cell} ipython3 -quant_seq3, price_seq3, policy_seq3 = ak2.simulate(T, init_ss, - δy_seq, δo_seq, - D_pol=D_pol, G_pol=G_pol, - verbose=True) -``` - -```{code-cell} ipython3 -ak2.plot() -``` - -Next, we activate lump sum taxes. - -Let's alter our {ref}`exp-tax-cut` fiscal policy experiment by assuming that the government also increases lump sum taxes for both young and old people $\delta_{yt}=\delta_{ot}=0.005, t\geq0$. - -```{code-cell} ipython3 -δy_seq = np.ones(T+2) * 0.005 -δo_seq = np.ones(T+2) * 0.005 - -D1 = D_hat * (1 + r_hat * (1 - τ0)) + G_hat - τ0 * Y_hat - δy_seq[0] - δo_seq[0] -D_pol[1:] = D1 - -quant_seq4, price_seq4, policy_seq4 = ak2.simulate(T, init_ss, - δy_seq, δo_seq, - D_pol=D_pol, G_pol=G_pol) -``` - -Note how "crowding out" has been mitigated. 
- -```{code-cell} ipython3 -fig, axs = plt.subplots(3, 3, figsize=(14, 10)) - -# quantities -for i, name in enumerate(['K', 'Y', 'Cy', 'Co']): - ax = axs[i//3, i%3] - ax.plot(range(T+1), quant_seq3[:T+1, i], label=name+', $\delta$s=0') - ax.plot(range(T+1), quant_seq4[:T+1, i], label=name+', $\delta$s=0.005') - ax.hlines(init_ss[i], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# prices -for i, name in enumerate(['W', 'r']): - ax = axs[(i+4)//3, (i+4)%3] - ax.plot(range(T+1), price_seq3[:T+1, i], label=name+', $\delta$s=0') - ax.plot(range(T+1), price_seq4[:T+1, i], label=name+', $\delta$s=0.005') - ax.hlines(init_ss[i+4], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# policies -for i, name in enumerate(['τ', 'D', 'G']): - ax = axs[(i+6)//3, (i+6)%3] - ax.plot(range(T+1), policy_seq3[:T+1, i], label=name+', $\delta$s=0') - ax.plot(range(T+1), policy_seq4[:T+1, i], label=name+', $\delta$s=0.005') - ax.hlines(init_ss[i+6], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') -``` - -Comparing to {ref}`exp-tax-cut`, the government raises lump-sum taxes to finance the increasing debt interest payment, which is less distortionary comparing to raising the capital income tax rate. - - -### Experiment 4: Unfunded Social Security System - -In this experiment, lump-sum taxes are of equal magnitudes for old and the young, but of opposite signs. - -A negative lump-sum tax is a subsidy. - -Thus, in this experiment we tax the young and subsidize the old. - -We start the economy at the same initial steady state that we assumed in several earlier experiments. - -The government sets the lump sum taxes $\delta_{y,t}=-\delta_{o,t}=10\% \hat{C}_{y}$ starting from $t=0$. - -It keeps debt levels and expenditures at their steady state levels $\hat{D}$ and $\hat{G}$. - -In effect, this experiment amounts to launching an unfunded social security system. 
- -We can use our code to compute the transition ignited by launching this system. - -Let's compare the results to the {ref}`exp-tax-cut`. - -```{code-cell} ipython3 -δy_seq = np.ones(T+2) * Cy_hat * 0.1 -δo_seq = np.ones(T+2) * -Cy_hat * 0.1 - -D_pol[:] = D_hat - -quant_seq5, price_seq5, policy_seq5 = ak2.simulate(T, init_ss, - δy_seq, δo_seq, - D_pol=D_pol, G_pol=G_pol) -``` - -```{code-cell} ipython3 -fig, axs = plt.subplots(3, 3, figsize=(14, 10)) - -# quantities -for i, name in enumerate(['K', 'Y', 'Cy', 'Co']): - ax = axs[i//3, i%3] - ax.plot(range(T+1), quant_seq3[:T+1, i], label=name+', tax cut') - ax.plot(range(T+1), quant_seq5[:T+1, i], label=name+', transfer') - ax.hlines(init_ss[i], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# prices -for i, name in enumerate(['W', 'r']): - ax = axs[(i+4)//3, (i+4)%3] - ax.plot(range(T+1), price_seq3[:T+1, i], label=name+', tax cut') - ax.plot(range(T+1), price_seq5[:T+1, i], label=name+', transfer') - ax.hlines(init_ss[i+4], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') - -# policies -for i, name in enumerate(['τ', 'D', 'G']): - ax = axs[(i+6)//3, (i+6)%3] - ax.plot(range(T+1), policy_seq3[:T+1, i], label=name+', tax cut') - ax.plot(range(T+1), policy_seq5[:T+1, i], label=name+', transfer') - ax.hlines(init_ss[i+6], 0, T+1, color='r', linestyle='--') - ax.legend() - ax.set_xlabel('t') -``` - -An initial old person benefits especially when the social security system is launched because he receives a transfer but pays nothing for it. - -But in the long run, consumption rates of both young and old people decrease because the the social security system decreases incentives to save. - -That lowers the stock of physical capital and consequently lowers output. - -The government must then raise tax rate in order to pay for its expenditures. - -The higher rate on capital income further distorts incentives to save. 
diff --git a/lectures/ar1_processes.md b/lectures/ar1_processes.md index 90cc0514..fe54d3d1 100644 --- a/lectures/ar1_processes.md +++ b/lectures/ar1_processes.md @@ -60,6 +60,9 @@ where $a, b, c$ are scalar-valued parameters (Equation {eq}`can_ar1` is sometimes called a **stochastic difference equation**.) +```{prf:example} +:label: ar1_ex_ar + For example, $X_t$ might be * the log of labor income for a given household, or @@ -70,6 +73,7 @@ of the previous value and an IID shock $W_{t+1}$. (We use $t+1$ for the subscript of $W_{t+1}$ because this random variable is not observed at time $t$.) +``` The specification {eq}`can_ar1` generates a time series $\{ X_t\}$ as soon as we specify an initial condition $X_0$. @@ -330,7 +334,10 @@ Notes: * In {eq}`ar1_ergo`, convergence holds with probability one. * The textbook by {cite}`MeynTweedie2009` is a classic reference on ergodicity. -For example, if we consider the identity function $h(x) = x$, we get +```{prf:example} +:label: ar1_ex_id + +If we consider the identity function $h(x) = x$, we get $$ \frac{1}{m} \sum_{t = 1}^m X_t \to @@ -339,7 +346,7 @@ $$ $$ In other words, the time series sample mean converges to the mean of the stationary distribution. - +``` Ergodicity is important for a range of reasons. diff --git a/lectures/cagan_ree.md b/lectures/cagan_ree.md index fab16d01..f0274b56 100644 --- a/lectures/cagan_ree.md +++ b/lectures/cagan_ree.md @@ -18,7 +18,7 @@ kernelspec: We'll use linear algebra first to explain and then do some experiments with a "monetarist theory of price levels". -Economists call it a "monetary" or "monetarist" theory of price levels because effects on price levels occur via a central banks's decisions to print money supply. +Economists call it a "monetary" or "monetarist" theory of price levels because effects on price levels occur via a central bank's decisions to print money supply. 
* a goverment's fiscal policies determine whether its _expenditures_ exceed its _tax collections_ * if its expenditures exceed its tax collections, the government can instruct the central bank to cover the difference by _printing money_ @@ -27,7 +27,7 @@ Economists call it a "monetary" or "monetarist" theory of price levels because e Such a theory of price levels was described by Thomas Sargent and Neil Wallace in chapter 5 of {cite}`sargent2013rational`, which reprints a 1981 Federal Reserve Bank of Minneapolis article entitled "Unpleasant Monetarist Arithmetic". -Sometimes this theory is also called a "fiscal theory of price levels" to emphasize the importance of fisal deficits in shaping changes in the money supply. +Sometimes this theory is also called a "fiscal theory of price levels" to emphasize the importance of fiscal deficits in shaping changes in the money supply. The theory has been extended, criticized, and applied by John Cochrane {cite}`cochrane2023fiscal`. @@ -41,7 +41,7 @@ persistent inflation. The "monetarist" or "fiscal theory of price levels" asserts that -* to _start_ a persistent inflation the government beings persistently to run a money-financed government deficit +* to _start_ a persistent inflation the government begins persistently to run a money-financed government deficit * to _stop_ a persistent inflation the government stops persistently running a money-financed government deficit diff --git a/lectures/complex_and_trig.md b/lectures/complex_and_trig.md index 8fe53202..7f40497c 100644 --- a/lectures/complex_and_trig.md +++ b/lectures/complex_and_trig.md @@ -103,12 +103,16 @@ from sympy import (Symbol, symbols, Eq, nsolve, sqrt, cos, sin, simplify, ### An Example +```{prf:example} +:label: ct_ex_com + Consider the complex number $z = 1 + \sqrt{3} i$. For $z = 1 + \sqrt{3} i$, $x = 1$, $y = \sqrt{3}$. It follows that $r = 2$ and $\theta = \tan^{-1}(\sqrt{3}) = \frac{\pi}{3} = 60^o$. 
+``` Let's use Python to plot the trigonometric form of the complex number $z = 1 + \sqrt{3} i$. diff --git a/lectures/cons_smooth.md b/lectures/cons_smooth.md index b2bace94..2719baa8 100644 --- a/lectures/cons_smooth.md +++ b/lectures/cons_smooth.md @@ -21,7 +21,7 @@ In this lecture, we'll study a famous model of the "consumption function" that M In this lecture, we'll study what is often called the "consumption-smoothing model" using matrix multiplication and matrix inversion, the same tools that we used in this QuantEcon lecture {doc}`present values `. -Formulas presented in {doc}`present value formulas` are at the core of the consumption smoothing model because we shall use them to define a consumer's "human wealth". +Formulas presented in {doc}`present value formulas` are at the core of the consumption-smoothing model because we shall use them to define a consumer's "human wealth". The key idea that inspired Milton Friedman was that a person's non-financial income, i.e., his or her wages from working, could be viewed as a dividend stream from that person's ''human capital'' @@ -39,7 +39,7 @@ It will take a while for a "present value" or asset price explicilty to appear i ## Analysis -As usual, we'll start with by importing some Python modules. +As usual, we'll start by importing some Python modules. ```{code-cell} ipython3 import numpy as np @@ -128,7 +128,7 @@ Indeed, we shall see that when $\beta R = 1$ (a condition assumed by Milton Frie By **smoother** we mean as close as possible to being constant over time. -The preference for smooth consumption paths that is built into the model gives it the name "consumption smoothing model". +The preference for smooth consumption paths that is built into the model gives it the name "consumption-smoothing model". Let's dive in and do some calculations that will help us understand how the model works. @@ -176,7 +176,7 @@ $$ \sum_{t=0}^T R^{-t} c_t = a_0 + h_0. 
$$ (eq:budget_intertemp) -Equation {eq}`eq:budget_intertemp` says that the present value of the consumption stream equals the sum of finanical and non-financial (or human) wealth. +Equation {eq}`eq:budget_intertemp` says that the present value of the consumption stream equals the sum of financial and non-financial (or human) wealth. Robert Hall {cite}`Hall1978` showed that when $\beta R = 1$, a condition Milton Friedman had also assumed, it is "optimal" for a consumer to smooth consumption by setting @@ -196,7 +196,7 @@ $$ (eq:conssmoothing) Equation {eq}`eq:conssmoothing` is the consumption-smoothing model in a nutshell. -## Mechanics of Consumption smoothing model +## Mechanics of consumption-smoothing model As promised, we'll provide step-by-step instructions on how to use linear algebra, readily implemented in Python, to compute all objects in play in the consumption-smoothing model. @@ -338,14 +338,14 @@ print('Welfare:', welfare(cs_model, c_seq)) ### Experiments -In this section we decribe how a consumption sequence would optimally respond to different sequences sequences of non-financial income. +In this section we describe how a consumption sequence would optimally respond to different sequences sequences of non-financial income. -First we create a function `plot_cs` that generate graphs for different instances of the consumption smoothing model `cs_model`. +First we create a function `plot_cs` that generates graphs for different instances of the consumption-smoothing model `cs_model`. This will help us avoid rewriting code to plot outcomes for different non-financial income sequences. 
```{code-cell} ipython3 -def plot_cs(model, # consumption smoothing model +def plot_cs(model, # consumption-smoothing model a0, # initial financial wealth y_seq # non-financial income process ): @@ -368,7 +368,7 @@ def plot_cs(model, # consumption smoothing model plt.show() ``` -In the experiments below, please study how consumption and financial asset sequences vary accross different sequences for non-financial income. +In the experiments below, please study how consumption and financial asset sequences vary across different sequences for non-financial income. #### Experiment 1: one-time gain/loss @@ -602,7 +602,7 @@ First, we define the welfare with respect to $\xi_1$ and $\phi$ def welfare_rel(ξ1, ϕ): """ Compute welfare of variation sequence - for given ϕ, ξ1 with a consumption smoothing model + for given ϕ, ξ1 with a consumption-smoothing model """ cvar_seq = compute_variation(cs_model, ξ1=ξ1, @@ -661,13 +661,13 @@ QuantEcon lecture {doc}`geometric series `. In particular, it **lowers** the government expenditure multiplier relative to one implied by the original Keynesian consumption function presented in {doc}`geometric series `. -Friedman's work opened the door to an enlighening literature on the aggregate consumption function and associated government expenditure multipliers that +Friedman's work opened the door to an enlightening literature on the aggregate consumption function and associated government expenditure multipliers that remains active today. ## Appendix: solving difference equations with linear algebra -In the preceding sections we have used linear algebra to solve a consumption smoothing model. +In the preceding sections we have used linear algebra to solve a consumption-smoothing model. The same tools from linear algebra -- matrix multiplication and matrix inversion -- can be used to study many other dynamic models. 
@@ -749,7 +749,7 @@ is the inverse of $A$ and check that $A A^{-1} = I$ ``` -### Second order difference equation +### Second-order difference equation A second-order linear difference equation for $\{y_t\}_{t=0}^T$ is @@ -783,6 +783,6 @@ Multiplying both sides by inverse of the matrix on the left again provides the ```{exercise} :label: consmooth_ex2 -As an exercise, we ask you to represent and solve a **third order linear difference equation**. +As an exercise, we ask you to represent and solve a **third-order linear difference equation**. How many initial conditions must you specify? ``` diff --git a/lectures/eigen_I.md b/lectures/eigen_I.md index 46dc221f..948b2f05 100644 --- a/lectures/eigen_I.md +++ b/lectures/eigen_I.md @@ -88,7 +88,8 @@ itself. This means $A$ is an $n \times n$ matrix that maps (or "transforms") a vector $x$ in $\mathbb{R}^n$ to a new vector $y=Ax$ also in $\mathbb{R}^n$. -Here's one example: +```{prf:example} +:label: eigen1_ex_sq $$ \begin{bmatrix} @@ -116,6 +117,7 @@ $$ transforms the vector $x = \begin{bmatrix} 1 \\ 3 \end{bmatrix}$ to the vector $y = \begin{bmatrix} 5 \\ 2 \end{bmatrix}$. +``` Let's visualize this using Python: diff --git a/lectures/eigen_II.md b/lectures/eigen_II.md index 52fd505e..5ccd9ebb 100644 --- a/lectures/eigen_II.md +++ b/lectures/eigen_II.md @@ -26,7 +26,7 @@ In addition to what's in Anaconda, this lecture will need the following librarie In this lecture we will begin with the foundational concepts in spectral theory. -Then we will explore the Perron-Frobenius Theorem and connect it to applications in Markov chains and networks. +Then we will explore the Perron-Frobenius theorem and connect it to applications in Markov chains and networks. We will use the following imports: @@ -64,6 +64,9 @@ An $n \times n$ nonnegative matrix $A$ is called irreducible if $A + A^2 + A^3 + In other words, for each $i,j$ with $1 \leq i, j \leq n$, there exists a $k \geq 0$ such that $a^{k}_{ij} > 0$. 
+```{prf:example} +:label: eigen2_ex_irr + Here are some examples to illustrate this further: $$ @@ -94,6 +97,7 @@ $$ $C$ is not irreducible since $C^k = C$ for all $k \geq 0$ and thus $c^{k}_{12},c^{k}_{21} = 0$ for all $k \geq 0$. +``` ### Left eigenvectors @@ -159,7 +163,7 @@ This is a more common expression and where the name left eigenvectors originates For a square nonnegative matrix $A$, the behavior of $A^k$ as $k \to \infty$ is controlled by the eigenvalue with the largest absolute value, often called the **dominant eigenvalue**. -For any such matrix $A$, the Perron-Frobenius Theorem characterizes certain +For any such matrix $A$, the Perron-Frobenius theorem characterizes certain properties of the dominant eigenvalue and its corresponding eigenvector. ```{prf:Theorem} Perron-Frobenius Theorem @@ -188,7 +192,7 @@ Let's build our intuition for the theorem using a simple example we have seen [b Now let's consider examples for each case. -#### Example: Irreducible matrix +#### Example: irreducible matrix Consider the following irreducible matrix $A$: @@ -204,7 +208,7 @@ We can compute the dominant eigenvalue and the corresponding eigenvector eig(A) ``` -Now we can see the claims of the Perron-Frobenius Theorem holds for the irreducible matrix $A$: +Now we can see the claims of the Perron-Frobenius theorem holds for the irreducible matrix $A$: 1. The dominant eigenvalue is real-valued and non-negative. 2. All other eigenvalues have absolute values less than or equal to the dominant eigenvalue. @@ -223,6 +227,9 @@ Let $A$ be a square nonnegative matrix and let $A^k$ be the $k^{th}$ power of $A A matrix is called **primitive** if there exists a $k \in \mathbb{N}$ such that $A^k$ is everywhere positive. 
+```{prf:example} +:label: eigen2_ex_prim + Recall the examples given in irreducible matrices: $$ @@ -244,10 +251,11 @@ B^2 = \begin{bmatrix} 1 & 0 \\ $$ $B$ is irreducible but not primitive since there are always zeros in either principal diagonal or secondary diagonal. +``` We can see that if a matrix is primitive, then it implies the matrix is irreducible but not vice versa. -Now let's step back to the primitive matrices part of the Perron-Frobenius Theorem +Now let's step back to the primitive matrices part of the Perron-Frobenius theorem ```{prf:Theorem} Continous of Perron-Frobenius Theorem :label: con-perron-frobenius @@ -259,7 +267,7 @@ If $A$ is primitive then, $ r(A)^{-m} A^m$ converges to $v w^{\top}$ when $m \rightarrow \infty$. The matrix $v w^{\top}$ is called the **Perron projection** of $A$. ``` -#### Example 1: Primitive matrix +#### Example 1: primitive matrix Consider the following primitive matrix $B$: @@ -277,7 +285,7 @@ We compute the dominant eigenvalue and the corresponding eigenvector eig(B) ``` -Now let's give some examples to see if the claims of the Perron-Frobenius Theorem hold for the primitive matrix $B$: +Now let's give some examples to see if the claims of the Perron-Frobenius theorem hold for the primitive matrix $B$: 1. The dominant eigenvalue is real-valued and non-negative. 2. All other eigenvalues have absolute values strictly less than the dominant eigenvalue. @@ -373,18 +381,18 @@ check_convergence(B) The result shows that the matrix is not primitive as it is not everywhere positive. -These examples show how the Perron-Frobenius Theorem relates to the eigenvalues and eigenvectors of positive matrices and the convergence of the power of matrices. +These examples show how the Perron-Frobenius theorem relates to the eigenvalues and eigenvectors of positive matrices and the convergence of the power of matrices. In fact we have already seen the theorem in action before in {ref}`the Markov chain lecture `. 
(spec_markov)= -#### Example 2: Connection to Markov chains +#### Example 2: connection to Markov chains We are now prepared to bridge the languages spoken in the two lectures. A primitive matrix is both irreducible and aperiodic. -So Perron-Frobenius Theorem explains why both {ref}`Imam and Temple matrix ` and [Hamilton matrix](https://en.wikipedia.org/wiki/Hamiltonian_matrix) converge to a stationary distribution, which is the Perron projection of the two matrices +So Perron-Frobenius theorem explains why both {ref}`Imam and Temple matrix ` and [Hamilton matrix](https://en.wikipedia.org/wiki/Hamiltonian_matrix) converge to a stationary distribution, which is the Perron projection of the two matrices ```{code-cell} ipython3 P = np.array([[0.68, 0.12, 0.20], @@ -449,7 +457,7 @@ As we have seen, the largest eigenvalue for a primitive stochastic matrix is one This can be proven using [Gershgorin Circle Theorem](https://en.wikipedia.org/wiki/Gershgorin_circle_theorem), but it is out of the scope of this lecture. -So by the statement (6) of Perron-Frobenius Theorem, $\lambda_i<1$ for all $i` -of the Law of Large Numbers, which considered IID $X_1, \ldots, X_n$ with common distribution $F$ +of the law of large numbers, which considered IID $X_1, \ldots, X_n$ with common distribution $F$ If $\mathbb E |X_i|$ is finite, then the sample mean $\bar X_n := \frac{1}{n} \sum_{i=1}^n X_i$ satisfies @@ -957,7 +965,7 @@ the sample mean $\bar X_n := \frac{1}{n} \sum_{i=1}^n X_i$ satisfies where $\mu := \mathbb E X_i = \int x F(dx)$ is the common mean of the sample. The condition $\mathbb E | X_i | = \int |x| F(dx) < \infty$ holds -in most cases but can fail if the distribution $F$ is very heavy tailed. +in most cases but can fail if the distribution $F$ is very heavy-tailed. For example, it fails for the Cauchy distribution. @@ -1006,7 +1014,7 @@ We return to this point in the exercises. We have now seen that 1. heavy tails are frequent in economics and -2. 
the Law of Large Numbers fails when tails are very heavy. +2. the law of large numbers fails when tails are very heavy. But what about in the real world? Do heavy tails matter? @@ -1261,7 +1269,7 @@ Present discounted value of tax revenue will be estimated by The Pareto distribution is assumed to take the form {eq}`pareto` with $\bar x = 1$ and $\alpha = 1.05$. -(The value the tail index $\alpha$ is plausible given the data {cite}`gabaix2016power`.) +(The value of the tail index $\alpha$ is plausible given the data {cite}`gabaix2016power`.) To make the lognormal option as similar as possible to the Pareto option, choose its parameters such that the mean and median of both distributions are the same. @@ -1315,7 +1323,7 @@ $$ which we solve for $\mu$ and $\sigma$ given $\alpha = 1.05$. -Here is code that generates the two samples, produces the violin plot and +Here is the code that generates the two samples, produces the violin plot and prints the mean and standard deviation of the two samples. ```{code-cell} ipython3 diff --git a/lectures/inequality.md b/lectures/inequality.md index cd6dfcf0..b1ec0e11 100644 --- a/lectures/inequality.md +++ b/lectures/inequality.md @@ -18,19 +18,23 @@ kernelspec: In the lecture {doc}`long_run_growth` we studied how GDP per capita has changed for certain countries and regions. -Per capital GDP is important because it gives us an idea of average income for +Per capita GDP is important because it gives us an idea of average income for households in a given country. However, when we study income and wealth, averages are only part of the story. 
+```{prf:example} +:label: ie_ex_av + For example, imagine two societies, each with one million people, where * in the first society, the yearly income of one man is $100,000,000 and the income of the - others is zero + others are zero * in the second society, the yearly income of everyone is $100 These countries have the same income per capita (average income is $100) but the lives of the people will be very different (e.g., almost everyone in the first society is starving, even though one person is fabulously rich). +``` The example above suggests that we should go beyond simple averages when we study income and wealth. @@ -532,7 +536,7 @@ Let's look at the Gini coefficient for the distribution of income in the US. We will get pre-computed Gini coefficients (based on income) from the World Bank using the [wbgapi](https://blogs.worldbank.org/opendata/introducing-wbgapi-new-python-package-accessing-world-bank-data). -Let's use the `wbgapi` package we imported earlier to search the world bank data for Gini to find the Series ID. +Let's use the `wbgapi` package we imported earlier to search the World Bank data for Gini to find the Series ID. ```{code-cell} ipython3 wb.search("gini") @@ -755,8 +759,9 @@ min_year = plot_data.year.min() max_year = plot_data.year.max() ``` -The time series for all three countries start and stop in different years. We will add a year mask to the data to -improve clarity in the chart including the different end years associated with each countries time series. +The time series for all three countries start and stop in different years. + +We will add a year mask to the data to improve clarity in the chart including the different end years associated with each country's time series. 
```{code-cell} ipython3 labels = [1979, 1986, 1991, 1995, 2000, 2020, 2021, 2022] + \ @@ -783,7 +788,7 @@ fig.show() This figure is built using `plotly` and is {ref}` available on the website ` ``` -This plot shows that all three Western economies GDP per capita has grown over +This plot shows that all three Western economies' GDP per capita has grown over time with some fluctuations in the Gini coefficient. From the early 80's the United Kingdom and the US economies both saw increases diff --git a/lectures/inflation_history.md b/lectures/inflation_history.md index 013954d4..5c5d0740 100644 --- a/lectures/inflation_history.md +++ b/lectures/inflation_history.md @@ -131,6 +131,10 @@ By staring at {numref}`lrpl` carefully, you might be able to guess when these te During these episodes, the gold/silver standard was temporarily abandoned when a government printed paper money to pay for war expenditures. +```{note} +This quantecon lecture {doc}`french_rev` describes circumstances leading up to and during the big inflation that occurred during the French Revolution. +``` + Despite these temporary lapses, a striking thing about the figure is that price levels were roughly constant over three centuries. In the early century, two other features of this data attracted the attention of [Irving Fisher](https://en.wikipedia.org/wiki/Irving_Fisher) of Yale University and [John Maynard Keynes](https://en.wikipedia.org/wiki/John_Maynard_Keynes) of Cambridge University. @@ -649,7 +653,7 @@ The US government stood ready to convert a dollar into a specified amount of gol Immediately after World War I, Hungary, Austria, Poland, and Germany were not on the gold standard. -Their currencies were “fiat” or "unbacked", meaning that they were not backed by credible government promises to convert them into gold or silver coins on demand. 
+Their currencies were "fiat" or "unbacked", meaning that they were not backed by credible government promises to convert them into gold or silver coins on demand. The governments printed new paper notes to pay for goods and services. @@ -665,6 +669,6 @@ Chapter 3 of {cite}`sargent2002big` described deliberate changes in policy that Each government stopped printing money to pay for goods and services once again and made its currency convertible to the US dollar or the UK pound. -The story told in {cite}`sargent2002big` is grounded in a "monetarist theory of the price level" described in {doc}`cagan_ree` and {doc}`cagan_adaptive`. +The story told in {cite}`sargent2002big` is grounded in a *monetarist theory of the price level* described in {doc}`cagan_ree` and {doc}`cagan_adaptive`. Those lectures discuss theories about what owners of those rapidly depreciating currencies were thinking and how their beliefs shaped responses of inflation to government monetary and fiscal policies. diff --git a/lectures/input_output.md b/lectures/input_output.md index 8d6d649b..7c8170c2 100644 --- a/lectures/input_output.md +++ b/lectures/input_output.md @@ -120,7 +120,7 @@ A basic framework for their analysis is After introducing the input-output model, we describe some of its connections to {doc}`linear programming lecture `. 
-## Input output analysis +## Input-output analysis Let @@ -184,7 +184,7 @@ plt.text(1.6, -0.5, r'$d_{2}$') plt.show() ``` -**Feasible allocations must satisfy** +*Feasible allocations must satisfy* $$ \begin{aligned} @@ -263,8 +263,10 @@ $$ $$ +```{prf:example} +:label: io_ex_tg -For example a two good economy described by +For example a two-good economy described by $$ A = @@ -279,6 +281,7 @@ d = 2 \end{bmatrix} $$ (eq:inout_ex) +``` ```{code-cell} ipython3 A = np.array([[0.1, 40], @@ -336,6 +339,9 @@ $$ Equation {eq}`eq:inout_frontier` sweeps out a **production possibility frontier** of final consumption bundles $d$ that can be produced with exogenous labor input $x_0$. +```{prf:example} +:label: io_ex_ppf + Consider the example in {eq}`eq:inout_ex`. Suppose we are now given @@ -345,6 +351,7 @@ a_0^\top = \begin{bmatrix} 4 & 100 \end{bmatrix} $$ +``` Then we can find $A_0^\top$ by @@ -507,9 +514,9 @@ This illustrates that an element $l_{ij}$ of $L$ shows the total impact on secto ## Applications of graph theory -We can further study input output networks through applications of {doc}`graph theory `. +We can further study input-output networks through applications of {doc}`graph theory `. -An input output network can be represented by a weighted directed graph induced by the adjacency matrix $A$. +An input-output network can be represented by a weighted directed graph induced by the adjacency matrix $A$. The set of nodes $V = [n]$ is the list of sectors and the set of edges is given by @@ -550,7 +557,7 @@ The above figure indicates that manufacturing is the most dominant sector in the ### Output multipliers -Another way to rank sectors in input output networks is via output multipliers. +Another way to rank sectors in input-output networks is via output multipliers. The **output multiplier** of sector $j$ denoted by $\mu_j$ is usually defined as the total sector-wide impact of a unit change of demand in sector $j$. 
diff --git a/lectures/intro_supply_demand.md b/lectures/intro_supply_demand.md index baacb821..aea36eb5 100644 --- a/lectures/intro_supply_demand.md +++ b/lectures/intro_supply_demand.md @@ -33,7 +33,7 @@ Exports were regarded as good because they brought in bullion (gold flowed into Imports were regarded as bad because bullion was required to pay for them (gold flowed out). -This [zero-sum](https://en.wikipedia.org/wiki/Zero-sum_game) view of economics was eventually overturned by the work of the classical economists such as [Adam Smith](https://en.wikipedia.org/wiki/Adam_Smith) and [David Ricado](https://en.wikipedia.org/wiki/David_Ricardo), who showed how freeing domestic and international trade can enhance welfare. +This [zero-sum](https://en.wikipedia.org/wiki/Zero-sum_game) view of economics was eventually overturned by the work of the classical economists such as [Adam Smith](https://en.wikipedia.org/wiki/Adam_Smith) and [David Ricardo](https://en.wikipedia.org/wiki/David_Ricardo), who showed how freeing domestic and international trade can enhance welfare. There are many different expressions of this idea in economics. @@ -68,6 +68,9 @@ Before we look at the model of supply and demand, it will be helpful to have som ### A discrete example +```{prf:example} +:label: isd_ex_cs + Regarding consumer surplus, suppose that we have a single good and 10 consumers. These 10 consumers have different preferences; in particular, the amount they would be willing to pay for one unit of the good differs. @@ -79,6 +82,7 @@ Suppose that the willingness to pay for each of the 10 consumers is as follows: | willing to pay | 98 | 72 | 41 | 38 | 29 | 21 | 17 | 12 | 11 | 10 | (We have ordered consumers by willingness to pay, in descending order.) +``` If $p$ is the price of the good and $w_i$ is the amount that consumer $i$ is willing to pay, then $i$ buys when $w_i \geq p$. @@ -253,6 +257,9 @@ Let $v_i$ be the price at which producer $i$ is willing to sell the good. 
When the price is $p$, producer surplus for producer $i$ is $\max\{p - v_i, 0\}$. +```{prf:example} +:label: isd_ex_dc + For example, a producer willing to sell at \$10 and selling at price \$20 makes a surplus of \$10. Total producer surplus is given by @@ -273,6 +280,7 @@ p = 2 q^2 $$ The shaded area is the total producer surplus in this continuous model. +``` ```{code-cell} ipython3 --- @@ -351,7 +359,7 @@ Many of these rules relate to one of the most beautiful and powerful results in We will not try to cover these ideas here, partly because the subject is too big, and partly because you only need to know one rule for this lecture, stated below. -If $f(x) = c + \mathrm{d} x$, then +If $f(x) = c + dx$, then $$ \int_a^b f(x) \mathrm{d} x = c (b - a) + \frac{d}{2}(b^2 - a^2) @@ -670,7 +678,7 @@ This is the competitive equilibrium quantity. Observe that the equilibrium quantity equals the same $q$ given by equation {eq}`eq:old1`. The outcome that the quantity determined by equation {eq}`eq:old1` equates -supply to demand brings us a **key finding:** +supply to demand brings us a *key finding*: * a competitive equilibrium quantity maximizes our welfare criterion @@ -689,11 +697,11 @@ Our generalizations will extend the preceding analysis of a market for a single In addition -* we'll derive **demand curves** from a consumer problem that maximizes a - **utility function** subject to a **budget constraint**. +* we'll derive *demand curves* from a consumer problem that maximizes a + *utility function* subject to a *budget constraint*. -* we'll derive **supply curves** from the problem of a producer who is price - taker and maximizes his profits minus total costs that are described by a **cost function**. +* we'll derive *supply curves* from the problem of a producer who is price + taker and maximizes his profits minus total costs that are described by a *cost function*. 
## Exercises diff --git a/lectures/laffer_adaptive.md b/lectures/laffer_adaptive.md index fd7b7f37..684f2e6f 100644 --- a/lectures/laffer_adaptive.md +++ b/lectures/laffer_adaptive.md @@ -33,7 +33,7 @@ that we adopted in lectures {doc}`money_inflation` and lectures {doc}`money_infl We shall discover that changing our hypothesis about expectations formation in this way will change some our findings and leave others intact. In particular, we shall discover that * replacing rational expectations with adaptive expectations leaves the two stationary inflation rates unchanged, but that $\ldots$ -* it reverse the perverse dynamics by making the **lower** stationary inflation rate the one to which the system typically converges +* it reverses the perverse dynamics by making the **lower** stationary inflation rate the one to which the system typically converges * a more plausible comparative dynamic outcome emerges in which now inflation can be **reduced** by running **lower** government deficits These more plausible comparative dynamics underlie the "old time religion" that states that @@ -50,7 +50,7 @@ by dropping rational expectations and instead assuming that people form expecta {cite}`marcet2003recurrent` and {cite}`sargent2009conquest` extended that work and applied it to study recurrent high-inflation episodes in Latin America. ``` -## The Model +## The model Let @@ -88,9 +88,9 @@ $$ (eq:adaptex) where $\delta \in (0,1)$ -## Computing An Equilibrium Sequence +## Computing an equilibrium sequence -Equation the expressions for $m_{t+1}$ promided by {eq}`eq:ada_mdemand` and {eq}`eq:ada_msupply2` and use equation {eq}`eq:adaptex` to eliminate $\pi_t^*$ to obtain +Equation the expressions for $m_{t+1}$ provided by {eq}`eq:ada_mdemand` and {eq}`eq:ada_msupply2` and use equation {eq}`eq:adaptex` to eliminate $\pi_t^*$ to obtain the following equation for $p_t$: $$ @@ -99,7 +99,7 @@ $$ (eq:pequation) **Pseudo-code** -Here is pseudo code for our algorithm. 
+Here is the pseudo-code for our algorithm. Starting at time $0$ with initial conditions $(m_0, \pi_{-1}^*, p_{-1})$, for each $t \geq 0$ deploy the following steps in order: @@ -111,14 +111,14 @@ deploy the following steps in order: This completes the algorithm. -## Claims or Conjectures +## Claims or conjectures It will turn out that * if they exist, limiting values $\overline \pi$ and $\overline \mu$ will be equal -* if limiting values exists, there are two possible limiting values, one high, one low +* if limiting values exist, there are two possible limiting values, one high, one low * unlike the outcome in lecture {doc}`money_inflation_nonlinear`, for almost all initial log price levels and expected inflation rates $p_0, \pi_{t}^*$, the limiting $\overline \pi = \overline \mu$ is the **lower** steady state value @@ -128,7 +128,7 @@ It will turn out that * the preceding equation for $p_0$ comes from $m_1 - p_0 = - \alpha \bar \pi$ -## Limiting Values of Inflation Rate +## Limiting values of inflation rate As in our earlier lecture {doc}`money_inflation_nonlinear`, we can compute the two prospective limiting values for $\bar \pi$ by studying the steady-state Laffer curve. @@ -213,15 +213,15 @@ print(f'The two steady state of π are: {π_l, π_u}') We find two steady state $\bar \pi$ values -## Steady State Laffer Curve +## Steady-state Laffer curve -The following figure plots the steady state Laffer curve together with the two stationary inflation rates. +The following figure plots the steady-state Laffer curve together with the two stationary inflation rates. ```{code-cell} ipython3 --- mystnb: figure: - caption: Seigniorage as function of steady state inflation. The dashed brown lines + caption: Seigniorage as function of steady-state inflation. The dashed brown lines indicate $\pi_l$ and $\pi_u$. 
name: laffer_curve_adaptive width: 500px @@ -258,11 +258,11 @@ def plot_laffer(model, πs): plot_laffer(model, (π_l, π_u)) ``` -## Associated Initial Price Levels +## Associated initial price levels Now that we have our hands on the two possible steady states, we can compute two initial log price levels $p_{-1}$, which as initial conditions, imply that $\pi_t = \bar \pi $ for all $t \geq 0$. -In particular, to initiate a fixed point of the dynamic Laffer curve dynamics we set +In particular, to initiate a fixed point of the dynamic Laffer curve dynamics, we set $$ p_{-1} = m_0 + \alpha \pi^* @@ -348,7 +348,7 @@ eq_g = lambda x: np.exp(-model.α * x) - np.exp(-(1 + model.α) * x) print('eq_g == g:', np.isclose(eq_g(m_seq[-1] - m_seq[-2]), model.g)) ``` -## Slippery Side of Laffer Curve Dynamics +## Slippery side of Laffer curve dynamics We are now equipped to compute time series starting from different $p_{-1}, \pi_{-1}^*$ settings, analogous to those in this lecture {doc}`money_inflation` and this lecture {doc}`money_inflation_nonlinear`. diff --git a/lectures/lake_model.md b/lectures/lake_model.md index ba11f07f..f70da94f 100644 --- a/lectures/lake_model.md +++ b/lectures/lake_model.md @@ -36,7 +36,7 @@ The "flows" between the two lakes are as follows: 3. employed workers separate from their jobs at rate $\alpha$. 4. unemployed workers find jobs at rate $\lambda$. -The below graph illustrates the lake model. +The graph below illustrates the lake model. ```{figure} /_static/lecture_specific/lake_model/lake_model_worker.png :name: lake_model_graphviz @@ -216,7 +216,7 @@ Moreover, the times series of unemployment and employment seems to grow at some Since by intuition if we consider unemployment pool and employment pool as a closed system, the growth should be similar to the labor force. -We next ask whether the long run growth rates of $e_t$ and $u_t$ +We next ask whether the long-run growth rates of $e_t$ and $u_t$ also dominated by $1+b-d$ as labor force. 
The answer will be clearer if we appeal to {ref}`Perron-Frobenius theorem`. diff --git a/lectures/lp_intro.md b/lectures/lp_intro.md index 27c8093f..102ad4fd 100644 --- a/lectures/lp_intro.md +++ b/lectures/lp_intro.md @@ -40,6 +40,11 @@ We provide a standard form of a linear program and methods to transform other fo We tell how to solve a linear programming problem using [SciPy](https://scipy.org/) and [Google OR-Tools](https://developers.google.com/optimization). +```{seealso} +In another lecture, we will employ the linear programming method to solve the +{doc}`optimal transport problem `. +``` + Let's start with some standard imports. ```{code-cell} ipython3 @@ -91,6 +96,8 @@ $$ The following graph illustrates the firm's constraints and iso-revenue lines. +Iso-revenue lines show all the combinations of materials and labor that produce the same revenue. + ```{code-cell} ipython3 --- tags: [hide-input] @@ -335,7 +342,7 @@ OR-Tools tells us that the best investment strategy is: 3. At the beginning of the third year, the bank balance should be $ \$75,072.245 $. -4. At the end of the third year, the mutual fund will get payouts from the annuity and corporate bond and repay its loan from the bank. At the end it will own $ \$141018.24 $, so that it's total net rate of return over the three periods is $ 41.02\%$. +4. At the end of the third year, the mutual fund will get payouts from the annuity and corporate bond and repay its loan from the bank. At the end it will own $ \$141,018.24 $, so that it's total net rate of return over the three periods is $ 41.02\%$. 
@@ -542,14 +549,14 @@ c_ex2 = np.array([1.30*3, 0, 0, 1.06, 1.30]) A_ex2 = np.array([[1, 1, 0, 0, 0], [1, -rate, 1, 0, 1], [1, 0, -rate, 1, 0]]) -b_ex2 = np.array([100000, 0, 0]) +b_ex2 = np.array([100_000, 0, 0]) # Bounds on decision variables bounds_ex2 = [( 0, None), - (-20000, None), - (-20000, None), - (-20000, None), - ( 0, 50000)] + (-20_000, None), + (-20_000, None), + (-20_000, None), + ( 0, 50_000)] ``` Let's solve the problem and check the status using `success` attribute. @@ -581,7 +588,7 @@ SciPy tells us that the best investment strategy is: 3. At the beginning of the third year, the mutual fund should borrow $ \$20,000$ from the bank and invest in the annuity. -4. At the end of the third year, the mutual fund will get payouts from the annuity and corporate bond and repay its loan from the bank. At the end it will own $ \$141018.24 $, so that it's total net rate of return over the three periods is $ 41.02\% $. +4. At the end of the third year, the mutual fund will get payouts from the annuity and corporate bond and repay its loan from the bank. At the end it will own $ \$141,018.24 $, so that it's total net rate of return over the three periods is $ 41.02\% $. diff --git a/lectures/markov_chains_II.md b/lectures/markov_chains_II.md index 59b8e23c..fe2ad9b3 100644 --- a/lectures/markov_chains_II.md +++ b/lectures/markov_chains_II.md @@ -71,6 +71,8 @@ that The stochastic matrix $P$ is called **irreducible** if all states communicate; that is, if $x$ and $y$ communicate for all $(x, y)$ in $S \times S$. +```{prf:example} +:label: mc2_ex_ir For example, consider the following transition probabilities for wealth of a fictitious set of households @@ -95,6 +97,7 @@ $$ It's clear from the graph that this stochastic matrix is irreducible: we can eventually reach any state from any other state. 
+``` We can also test this using [QuantEcon.py](http://quantecon.org/quantecon-py)'s MarkovChain class @@ -107,6 +110,9 @@ mc = qe.MarkovChain(P, ('poor', 'middle', 'rich')) mc.is_irreducible ``` +```{prf:example} +:label: mc2_ex_pf + Here's a more pessimistic scenario in which poor people remain poor forever ```{image} /_static/lecture_specific/markov_chains_II/Irre_2.png @@ -116,6 +122,7 @@ Here's a more pessimistic scenario in which poor people remain poor forever This stochastic matrix is not irreducible since, for example, rich is not accessible from poor. +``` Let's confirm this @@ -272,6 +279,9 @@ In any of these cases, ergodicity will hold. ### Example: a periodic chain +```{prf:example} +:label: mc2_ex_pc + Let's look at the following example with states 0 and 1: $$ @@ -291,7 +301,7 @@ The transition graph shows that this model is irreducible. ``` Notice that there is a periodic cycle --- the state cycles between the two states in a regular way. - +``` Not surprisingly, this property is called [periodicity](https://stats.libretexts.org/Bookshelves/Probability_Theory/Probability_Mathematical_Statistics_and_Stochastic_Processes_(Siegrist)/16%3A_Markov_Processes/16.05%3A_Periodicity_of_Discrete-Time_Chains). @@ -392,7 +402,7 @@ plt.show() ````{exercise} :label: mc_ex1 -Benhabib el al. {cite}`benhabib_wealth_2019` estimated that the transition matrix for social mobility as the following +Benhabib et al. {cite}`benhabib_wealth_2019` estimated that the transition matrix for social mobility as the following $$ P:= diff --git a/lectures/mle.md b/lectures/mle.md index ee00c399..8a15d6ac 100644 --- a/lectures/mle.md +++ b/lectures/mle.md @@ -39,6 +39,8 @@ $$ where $w$ is wealth. +```{prf:example} +:label: mle_ex_wt For example, if $a = 0.05$, $b = 0.1$, and $\bar w = 2.5$, this means @@ -46,7 +48,7 @@ For example, if $a = 0.05$, $b = 0.1$, and $\bar w = 2.5$, this means * a 10% tax on wealth in excess of 2.5. The unit is 100,000, so $w= 2.5$ means 250,000 dollars. 
- +``` Let's go ahead and define $h$: ```{code-cell} ipython3 @@ -242,7 +244,7 @@ num = (ln_sample - μ_hat)**2 σ_hat ``` -Let's plot the log-normal pdf using the estimated parameters against our sample data. +Let's plot the lognormal pdf using the estimated parameters against our sample data. ```{code-cell} ipython3 dist_lognorm = lognorm(σ_hat, scale = exp(μ_hat)) diff --git a/lectures/money_inflation.md b/lectures/money_inflation.md index 17ec7e0f..d7e37ecd 100644 --- a/lectures/money_inflation.md +++ b/lectures/money_inflation.md @@ -35,7 +35,7 @@ Our model equates the demand for money to the supply at each time $t \geq 0$. Equality between those demands and supply gives a *dynamic* model in which money supply and price level *sequences* are simultaneously determined by a set of simultaneous linear equations. -These equations take the form of what are often called vector linear **difference equations**. +These equations take the form of what is often called vector linear **difference equations**. In this lecture, we'll roll up our sleeves and solve those equations in two different ways. @@ -49,19 +49,19 @@ In this lecture we will encounter these concepts from macroeconomics: * perverse dynamics under rational expectations in which the system converges to the higher stationary inflation tax rate * a peculiar comparative stationary-state outcome connected with that stationary inflation rate: it asserts that inflation can be *reduced* by running *higher* government deficits, i.e., by raising more resources by printing money. -The same qualitive outcomes prevail in this lecture {doc}`money_inflation_nonlinear` that studies a nonlinear version of the model in this lecture. +The same qualitative outcomes prevail in this lecture {doc}`money_inflation_nonlinear` that studies a nonlinear version of the model in this lecture. 
These outcomes set the stage for the analysis to be presented in this lecture {doc}`laffer_adaptive` that studies a nonlinear version of the present model; it assumes a version of "adaptive expectations" instead of rational expectations. That lecture will show that * replacing rational expectations with adaptive expectations leaves the two stationary inflation rates unchanged, but that $\ldots$ -* it reverse the pervese dynamics by making the *lower* stationary inflation rate the one to which the system typically converges +* it reverses the perverse dynamics by making the *lower* stationary inflation rate the one to which the system typically converges * a more plausible comparative dynamic outcome emerges in which now inflation can be *reduced* by running *lower* government deficits -This outcome will be used to justify a selection of a stationary inflation rate that underlies the analysis of unpleasant monetarist arithmetic to be studies in this lecture {doc}`unpleasant`. +This outcome will be used to justify a selection of a stationary inflation rate that underlies the analysis of unpleasant monetarist arithmetic to be studied in this lecture {doc}`unpleasant`. -We'll use theses tools from linear algebra: +We'll use these tools from linear algebra: * matrix multiplication * matrix inversion @@ -349,7 +349,7 @@ g2 = seign(msm.R_l, msm) print(f'R_l, g_l = {msm.R_l:.4f}, {g2:.4f}') ``` -Now let's compute the maximum steady-state amount of seigniorage that could be gathered by printing money and the state state rate of return on money that attains it. +Now let's compute the maximum steady-state amount of seigniorage that could be gathered by printing money and the state-state rate of return on money that attains it. 
## Two computation strategies @@ -385,13 +385,21 @@ m_t & = b_{t-1} p_t \end{aligned} $$ (eq:method1) -**Remark 1:** method 1 uses an indirect approach to computing an equilibrium by first computing an equilibrium $\{R_t, b_t\}_{t=0}^\infty$ sequence and then using it to back out an equilibrium $\{p_t, m_t\}_{t=0}^\infty$ sequence. - +```{prf:remark} +:label: method_1 +Method 1 uses an indirect approach to computing an equilibrium by first computing an equilibrium $\{R_t, b_t\}_{t=0}^\infty$ sequence and then using it to back out an equilibrium $\{p_t, m_t\}_{t=0}^\infty$ sequence. +``` -**Remark 2:** notice that method 1 starts by picking an **initial condition** $R_0$ from a set $[\frac{\gamma_2}{\gamma_1}, R_u]$. Equilibrium $\{p_t, m_t\}_{t=0}^\infty$ sequences are not unique. There is actually a continuum of equilibria indexed by a choice of $R_0$ from the set $[\frac{\gamma_2}{\gamma_1}, R_u]$. +```{prf:remark} +:label: initial_condition +Notice that method 1 starts by picking an **initial condition** $R_0$ from a set $[\frac{\gamma_2}{\gamma_1}, R_u]$. Equilibrium $\{p_t, m_t\}_{t=0}^\infty$ sequences are not unique. There is actually a continuum of equilibria indexed by a choice of $R_0$ from the set $[\frac{\gamma_2}{\gamma_1}, R_u]$. +``` -**Remark 3:** associated with each selection of $R_0$ there is a unique $p_0$ described by +```{prf:remark} +:label: unique_selection +Associated with each selection of $R_0$ there is a unique $p_0$ described by equation {eq}`eq:p0fromR0`. +``` ### Method 2 @@ -950,7 +958,7 @@ Those dynamics are "perverse" not only in the sense that they imply that the mon ```{note} -The same qualitive outcomes prevail in this lecture {doc}`money_inflation_nonlinear` that studies a nonlinear version of the model in this lecture. +The same qualitative outcomes prevail in this lecture {doc}`money_inflation_nonlinear` that studies a nonlinear version of the model in this lecture. 
``` diff --git a/lectures/money_inflation_nonlinear.md b/lectures/money_inflation_nonlinear.md index 7bd8306a..07922373 100644 --- a/lectures/money_inflation_nonlinear.md +++ b/lectures/money_inflation_nonlinear.md @@ -35,17 +35,17 @@ As in that lecture, we discussed these topics: * an **inflation tax** that a government gathers by printing paper or electronic money * a dynamic **Laffer curve** in the inflation tax rate that has two stationary equilibria * perverse dynamics under rational expectations in which the system converges to the higher stationary inflation tax rate -* a peculiar comparative stationary-state analysis connected with that stationary inflation rate that assert that inflation can be *reduced* by running *higher* government deficits +* a peculiar comparative stationary-state analysis connected with that stationary inflation rate that asserts that inflation can be *reduced* by running *higher* government deficits These outcomes will set the stage for the analysis of {doc}`laffer_adaptive` that studies a version of the present model that uses a version of "adaptive expectations" instead of rational expectations. That lecture will show that * replacing rational expectations with adaptive expectations leaves the two stationary inflation rates unchanged, but that $\ldots$ -* it reverse the pervese dynamics by making the *lower* stationary inflation rate the one to which the system typically converges +* it reverses the perverse dynamics by making the *lower* stationary inflation rate the one to which the system typically converges * a more plausible comparative dynamic outcome emerges in which now inflation can be *reduced* by running *lower* government deficits -## The model +## The Model Let @@ -68,58 +68,14 @@ $$ (eq:msupply) where $g$ is the part of government expenditures financed by printing money. 
-**Remark:** Please notice that while equation {eq}`eq:mdemand` is linear in logs of the money supply and price level, equation {eq}`eq:msupply` is linear in levels. This will require adapting the equilibrium computation methods that we deployed in {doc}`money_inflation`. - -## Computing an equilibrium sequence - -We'll deploy a method similar to *Method 2* used in {doc}`money_inflation`. - -We'll take the time $t$ state vector to be $m_t, p_t$. - -* we'll treat $m_t$ as a ''natural state variable'' and $p_t$ as a ''jump'' variable. - -Let - -$$ -\lambda \equiv \frac{\alpha}{1+ \alpha} -$$ - -Let's rewrite equation {eq}`eq:mdemand`, respectively, as - -$$ -p_t = (1-\lambda) m_{t+1} + \lambda p_{t+1} -$$ (eq:mdemand2) - -We'll summarize our algorithm with the following pseudo-code. - -**Pseudo-code** - -* start for $m_0, p_0$ at time $t =0$ - -* solve {eq}`eq:msupply` for $m_{t+1}$ - -* solve {eq}`eq:mdemand2` for $p_{t+1} = \lambda^{-1} p_t + (1 - \lambda^{-1}) m_{t+1}$ - -* compute the inflation rate $\pi_t = p_{t+1} - p_t$ and growth of money supply $\mu_t = m_{t+1} - m_t $ - -* iterate on $t$ to convergence of $\pi_t \rightarrow \overline \pi$ and $\mu_t \rightarrow \overline \mu$ - -It will turn out that - -* if they exist, limiting values $\overline \pi$ and $\overline \mu$ will be equal - -* if limiting values exist, there are two possible limiting values, one high, one low - -* for almost all initial log price levels $p_0$, the limiting $\overline \pi = \overline \mu$ is -the higher value +```{prf:remark} +:label: linear_log +Please notice that while equation {eq}`eq:mdemand` is linear in logs of the money supply and price level, equation {eq}`eq:msupply` is linear in levels. This will require adapting the equilibrium computation methods that we deployed in {doc}`money_inflation`. 
+``` -* for each of the two possible limiting values $\overline \pi$ ,there is a unique initial log price level $p_0$ that implies that $\pi_t = \mu_t = \overline \mu$ for all $t \geq 0$ - * this unique initial log price level solves $\log(\exp(m_0) + g \exp(p_0)) - p_0 = - \alpha \overline \pi $ - - * the preceding equation for $p_0$ comes from $m_1 - p_0 = - \alpha \overline \pi$ -## Limiting values of inflation rate +## Limiting Values of Inflation Rate We can compute the two prospective limiting values for $\overline \pi$ by studying the steady-state Laffer curve. @@ -203,7 +159,7 @@ print(f'The two steady state of π are: {π_l, π_u}') We find two steady state $\overline \pi$ values. -## Steady state Laffer curve +## Steady State Laffer curve The following figure plots the steady state Laffer curve together with the two stationary inflation rates. @@ -247,9 +203,16 @@ def plot_laffer(model, πs): plot_laffer(model, (π_l, π_u)) ``` -## Associated initial price levels +## Initial Price Levels + +Now that we have our hands on the two possible steady states, we can compute two functions $\underline p(m_0)$ and +$\overline p(m_0)$, which as initial conditions for $p_t$ at time $t$, imply that $\pi_t = \overline \pi $ for all $t \geq 0$. + +The function $\underline p(m_0)$ will be associated with $\pi_l$ the lower steady-state inflation rate. + +The function $\overline p(m_0)$ will be associated with $\pi_u$ the lower steady-state inflation rate. + -Now that we have our hands on the two possible steady states, we can compute two initial log price levels $p_0$, which as initial conditions, imply that $\pi_t = \overline \pi $ for all $t \geq 0$. 
```{code-cell} ipython3 def solve_p0(p0, m0, α, g, π): @@ -312,7 +275,68 @@ eq_g = lambda x: np.exp(-model.α * x) - np.exp(-(1 + model.α) * x) print('eq_g == g:', np.isclose(eq_g(m_seq[-1] - m_seq[-2]), model.g)) ``` -## Slippery side of Laffer curve dynamics +## Computing an Equilibrium Sequence + +We'll deploy a method similar to *Method 2* used in {doc}`money_inflation`. + +We'll take the time $t$ state vector to be the pair $(m_t, p_t)$. + +We'll treat $m_t$ as a ``natural state variable`` and $p_t$ as a ``jump`` variable. + +Let + +$$ +\lambda \equiv \frac{\alpha}{1+ \alpha} +$$ + +Let's rewrite equation {eq}`eq:mdemand` as + +$$ +p_t = (1-\lambda) m_{t+1} + \lambda p_{t+1} +$$ (eq:mdemand2) + +We'll summarize our algorithm with the following pseudo-code. + +**Pseudo-code** + +The heart of the pseudo-code iterates on the following mapping from state vector $(m_t, p_t)$ at time $t$ +to state vector $(m_{t+1}, p_{t+1})$ at time $t+1$. + + +* starting from a given pair $(m_t, p_t)$ at time $t \geq 0$ + + * solve {eq}`eq:msupply` for $m_{t+1}$ + + * solve {eq}`eq:mdemand2` for $p_{t+1} = \lambda^{-1} p_t + (1 - \lambda^{-1}) m_{t+1}$ + + * compute the inflation rate $\pi_t = p_{t+1} - p_t$ and growth of money supply $\mu_t = m_{t+1} - m_t $ + +Next, compute the two functions $\underline p(m_0)$ and $\overline p(m_0)$ described above + +Now initiate the algorithm as follows. 
+ + * set $m_0 >0$ + * set a value of $p_0 \in [\underline p(m_0), \overline p(m_0)]$ and form the pair $(m_0, p_0)$ at time $t =0$ + +Starting from $(m_0, p_0)$ iterate on $t$ to convergence of $\pi_t \rightarrow \overline \pi$ and $\mu_t \rightarrow \overline \mu$ + +It will turn out that + +* if they exist, limiting values $\overline \pi$ and $\overline \mu$ will be equal + +* if limiting values exist, there are two possible limiting values, one high, one low + +* for almost all initial log price levels $p_0$, the limiting $\overline \pi = \overline \mu$ is +the higher value + +* for each of the two possible limiting values $\overline \pi$ ,there is a unique initial log price level $p_0$ that implies that $\pi_t = \mu_t = \overline \mu$ for all $t \geq 0$ + + * this unique initial log price level solves $\log(\exp(m_0) + g \exp(p_0)) - p_0 = - \alpha \overline \pi $ + + * the preceding equation for $p_0$ comes from $m_1 - p_0 = - \alpha \overline \pi$ + + +## Slippery Side of Laffer Curve Dynamics We are now equipped to compute time series starting from different $p_0$ settings, like those in {doc}`money_inflation`. @@ -399,7 +423,7 @@ Those dynamics are "perverse" not only in the sense that they imply that the mon * the figure indicates that inflation can be *reduced* by running *higher* government deficits, i.e., by raising more resources through printing money. ```{note} -The same qualitive outcomes prevail in {doc}`money_inflation` that studies a linear version of the model in this lecture. +The same qualitative outcomes prevail in {doc}`money_inflation` that studies a linear version of the model in this lecture. 
``` We discovered that diff --git a/lectures/prob_dist.md b/lectures/prob_dist.md index 09174da3..22defb18 100644 --- a/lectures/prob_dist.md +++ b/lectures/prob_dist.md @@ -4,14 +4,13 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.14.5 + jupytext_version: 1.16.1 kernelspec: display_name: Python 3 (ipykernel) language: python name: python3 --- - # Distributions and Probabilities ```{index} single: Distributions and Probabilities @@ -23,6 +22,7 @@ In this lecture we give a quick introduction to data and probability distributio ```{code-cell} ipython3 :tags: [hide-output] + !pip install --upgrade yfinance ``` @@ -35,7 +35,6 @@ import scipy.stats import seaborn as sns ``` - ## Common distributions In this section we recall the definitions of some well-known distributions and explore how to manipulate them with SciPy. @@ -46,18 +45,22 @@ Let's start with discrete distributions. A discrete distribution is defined by a set of numbers $S = \{x_1, \ldots, x_n\}$ and a **probability mass function** (PMF) on $S$, which is a function $p$ from $S$ to $[0,1]$ with the property -$$ \sum_{i=1}^n p(x_i) = 1 $$ +$$ +\sum_{i=1}^n p(x_i) = 1 +$$ We say that a random variable $X$ **has distribution** $p$ if $X$ takes value $x_i$ with probability $p(x_i)$. That is, -$$ \mathbb P\{X = x_i\} = p(x_i) \quad \text{for } i= 1, \ldots, n $$ +$$ +\mathbb P\{X = x_i\} = p(x_i) \quad \text{for } i= 1, \ldots, n +$$ The **mean** or **expected value** of a random variable $X$ with distribution $p$ is $$ - \mathbb{E}[X] = \sum_{i=1}^n x_i p(x_i) +\mathbb{E}[X] = \sum_{i=1}^n x_i p(x_i) $$ Expectation is also called the *first moment* of the distribution. 
@@ -67,7 +70,7 @@ We also refer to this number as the mean of the distribution (represented by) $p The **variance** of $X$ is defined as $$ - \mathbb{V}[X] = \sum_{i=1}^n (x_i - \mathbb{E}[X])^2 p(x_i) +\mathbb{V}[X] = \sum_{i=1}^n (x_i - \mathbb{E}[X])^2 p(x_i) $$ Variance is also called the *second central moment* of the distribution. @@ -75,8 +78,8 @@ Variance is also called the *second central moment* of the distribution. The **cumulative distribution function** (CDF) of $X$ is defined by $$ - F(x) = \mathbb{P}\{X \leq x\} - = \sum_{i=1}^n \mathbb 1\{x_i \leq x\} p(x_i) +F(x) = \mathbb{P}\{X \leq x\} + = \sum_{i=1}^n \mathbb 1\{x_i \leq x\} p(x_i) $$ Here $\mathbb 1\{ \textrm{statement} \} = 1$ if "statement" is true and zero otherwise. @@ -95,7 +98,6 @@ n = 10 u = scipy.stats.randint(1, n+1) ``` - Here's the mean and variance: ```{code-cell} ipython3 @@ -115,7 +117,6 @@ u.pmf(1) u.pmf(2) ``` - Here's a plot of the probability mass function: ```{code-cell} ipython3 @@ -129,7 +130,6 @@ ax.set_ylabel('PMF') plt.show() ``` - Here's a plot of the CDF: ```{code-cell} ipython3 @@ -143,10 +143,8 @@ ax.set_ylabel('CDF') plt.show() ``` - The CDF jumps up by $p(x_i)$ at $x_i$. - ```{exercise} :label: prob_ex1 @@ -162,55 +160,56 @@ Check that your answers agree with `u.mean()` and `u.var()`. Another useful distribution is the Bernoulli distribution on $S = \{0,1\}$, which has PMF: $$ -p(x_i)= -\begin{cases} -p & \text{if $x_i = 1$}\\ -1-p & \text{if $x_i = 0$} -\end{cases} +p(i) = \theta^i (1 - \theta)^{1-i} +\qquad (i = 0, 1) $$ -Here $x_i \in S$ is the outcome of the random variable. +Here $\theta \in [0,1]$ is a parameter. + +We can think of this distribution as modeling probabilities for a random trial with success probability $\theta$. 
+ +* $p(1) = \theta$ means that the trial succeeds (takes value 1) with probability $\theta$ +* $p(0) = 1 - \theta$ means that the trial fails (takes value 0) with + probability $1-\theta$ + +The formula for the mean is $\theta$, and the formula for the variance is $\theta(1-\theta)$. We can import the Bernoulli distribution on $S = \{0,1\}$ from SciPy like so: ```{code-cell} ipython3 -p = 0.4 -u = scipy.stats.bernoulli(p) +θ = 0.4 +u = scipy.stats.bernoulli(θ) ``` - -Here's the mean and variance: +Here's the mean and variance at $\theta=0.4$ ```{code-cell} ipython3 u.mean(), u.var() ``` -The formula for the mean is $p$, and the formula for the variance is $p(1-p)$. - - -Now let's evaluate the PMF: +We can evaluate the PMF as follows ```{code-cell} ipython3 -u.pmf(0) -u.pmf(1) +u.pmf(0), u.pmf(1) ``` - #### Binomial distribution Another useful (and more interesting) distribution is the **binomial distribution** on $S=\{0, \ldots, n\}$, which has PMF: $$ - p(i) = \binom{n}{i} \theta^i (1-\theta)^{n-i} +p(i) = \binom{n}{i} \theta^i (1-\theta)^{n-i} $$ -Here $\theta \in [0,1]$ is a parameter. +Again, $\theta \in [0,1]$ is a parameter. The interpretation of $p(i)$ is: the probability of $i$ successes in $n$ independent trials with success probability $\theta$. For example, if $\theta=0.5$, then $p(i)$ is the probability of $i$ heads in $n$ flips of a fair coin. -The mean and variance are: +The formula for the mean is $n \theta$ and the formula for the variance is $n \theta (1-\theta)$. + +Let's investigate an example ```{code-cell} ipython3 n = 10 @@ -218,11 +217,17 @@ n = 10 u = scipy.stats.binom(n, θ) ``` +According to our formulas, the mean and variance are + ```{code-cell} ipython3 -u.mean(), u.var() +n * θ, n * θ * (1 - θ) ``` -The formula for the mean is $n \theta$ and the formula for the variance is $n \theta (1-\theta)$. 
+Let's see if SciPy gives us the same results: + +```{code-cell} ipython3 +u.mean(), u.var() +``` Here's the PMF: @@ -241,7 +246,6 @@ ax.set_ylabel('PMF') plt.show() ``` - Here's the CDF: ```{code-cell} ipython3 @@ -255,7 +259,6 @@ ax.set_ylabel('CDF') plt.show() ``` - ```{exercise} :label: prob_ex3 @@ -285,24 +288,65 @@ We can see that the output graph is the same as the one above. ```{solution-end} ``` +#### Geometric distribution + +The geometric distribution has infinite support $S = \{0, 1, 2, \ldots\}$ and its PMF is given by + +$$ +p(i) = (1 - \theta)^i \theta +$$ + +where $\theta \in [0,1]$ is a parameter + +(A discrete distribution has infinite support if the set of points to which it assigns positive probability is infinite.) + +To understand the distribution, think of repeated independent random trials, each with success probability $\theta$. + +The interpretation of $p(i)$ is: the probability there are $i$ failures before the first success occurs. + +It can be shown that the mean of the distribution is $1/\theta$ and the variance is $(1-\theta)/\theta^2$. + +Here's an example. + +```{code-cell} ipython3 +θ = 0.1 +u = scipy.stats.geom(θ) +u.mean(), u.var() +``` + +Here's part of the PMF: + +```{code-cell} ipython3 +fig, ax = plt.subplots() +n = 20 +S = np.arange(n) +ax.plot(S, u.pmf(S), linestyle='', marker='o', alpha=0.8, ms=4) +ax.vlines(S, 0, u.pmf(S), lw=0.2) +ax.set_xticks(S) +ax.set_xlabel('S') +ax.set_ylabel('PMF') +plt.show() +``` + #### Poisson distribution -Poisson distribution on $S = \{0, 1, \ldots\}$ with parameter $\lambda > 0$ has PMF +The Poisson distribution on $S = \{0, 1, \ldots\}$ with parameter $\lambda > 0$ has PMF $$ - p(i) = \frac{\lambda^i}{i!} e^{-\lambda} +p(i) = \frac{\lambda^i}{i!} e^{-\lambda} $$ -The interpretation of $p(i)$ is: the probability of $i$ events in a fixed time interval, where the events occur at a constant rate $\lambda$ and independently of each other. 
+The interpretation of $p(i)$ is: the probability of $i$ events in a fixed time interval, where the events occur independently at a constant rate $\lambda$. + +It can be shown that the mean is $\lambda$ and the variance is also $\lambda$. + +Here's an example. -The mean and variance are: ```{code-cell} ipython3 λ = 2 u = scipy.stats.poisson(λ) u.mean(), u.var() ``` - -The expectation of the Poisson distribution is $\lambda$ and the variance is also $\lambda$. Here's the PMF: @@ -321,18 +365,19 @@ ax.set_ylabel('PMF') plt.show() ``` - ### Continuous distributions -Continuous distributions are represented by a **probability density function**, which is a function $p$ over $\mathbb R$ (the set of all real numbers) such that $p(x) \geq 0$ for all $x$ and +A continuous distribution is represented by a **probability density function**, which is a function $p$ over $\mathbb R$ (the set of all real numbers) such that $p(x) \geq 0$ for all $x$ and -$$ \int_{-\infty}^\infty p(x) dx = 1 $$ +$$ +\int_{-\infty}^\infty p(x) dx = 1 +$$ We say that random variable $X$ has distribution $p$ if $$ - \mathbb P\{a < X < b\} = \int_a^b p(x) dx +\mathbb P\{a < X < b\} = \int_a^b p(x) dx $$ for all $a \leq b$. @@ -342,14 +387,14 @@ The definition of the mean and variance of a random variable $X$ with distributi For example, the mean of $X$ is $$ - \mathbb{E}[X] = \int_{-\infty}^\infty x p(x) dx +\mathbb{E}[X] = \int_{-\infty}^\infty x p(x) dx $$ The **cumulative distribution function** (CDF) of $X$ is defined by $$ - F(x) = \mathbb P\{X \leq x\} - = \int_{-\infty}^x p(x) dx +F(x) = \mathbb P\{X \leq x\} + = \int_{-\infty}^x p(x) dx $$ @@ -358,15 +403,15 @@ $$ Perhaps the most famous distribution is the **normal distribution**, which has density $$ - p(x) = \frac{1}{\sqrt{2\pi}\sigma} - \exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right) +p(x) = \frac{1}{\sqrt{2\pi}\sigma} + \exp\left(-\frac{(x-\mu)^2}{2\sigma^2}\right) $$ -This distribution has two parameters, $\mu$ and $\sigma$. 
+This distribution has two parameters, $\mu \in \mathbb R$ and $\sigma \in (0, \infty)$. -It can be shown that, for this distribution, the mean is $\mu$ and the variance is $\sigma^2$. +Using calculus, it can be shown that, for this distribution, the mean is $\mu$ and the variance is $\sigma^2$. -We can obtain the moments, PDF and CDF of the normal density as follows: +We can obtain the moments, PDF and CDF of the normal density via SciPy as follows: ```{code-cell} ipython3 μ, σ = 0.0, 1.0 @@ -396,7 +441,6 @@ plt.legend() plt.show() ``` - Here's a plot of the CDF: ```{code-cell} ipython3 @@ -413,23 +457,23 @@ plt.legend() plt.show() ``` - #### Lognormal distribution The **lognormal distribution** is a distribution on $\left(0, \infty\right)$ with density $$ - p(x) = \frac{1}{\sigma x \sqrt{2\pi}} - \exp \left(- \frac{\left(\log x - \mu\right)^2}{2 \sigma^2} \right) +p(x) = \frac{1}{\sigma x \sqrt{2\pi}} + \exp \left(- \frac{\left(\log x - \mu\right)^2}{2 \sigma^2} \right) $$ This distribution has two parameters, $\mu$ and $\sigma$. It can be shown that, for this distribution, the mean is $\exp\left(\mu + \sigma^2/2\right)$ and the variance is $\left[\exp\left(\sigma^2\right) - 1\right] \exp\left(2\mu + \sigma^2\right)$. -It has a nice interpretation: if $X$ is lognormally distributed, then $\log X$ is normally distributed. +It can be proved that -It is often used to model variables that are "multiplicative" in nature, such as income or asset prices. +* if $X$ is lognormally distributed, then $\log X$ is normally distributed, and +* if $X$ is normally distributed, then $\exp X$ is lognormally distributed. 
We can obtain the moments, PDF, and CDF of the lognormal density as follows: @@ -477,15 +521,16 @@ plt.show() #### Exponential distribution -The **exponential distribution** is a distribution on $\left(0, \infty\right)$ with density +The **exponential distribution** is a distribution supported on $\left(0, \infty\right)$ with density $$ - p(x) = \lambda \exp \left( - \lambda x \right) +p(x) = \lambda \exp \left( - \lambda x \right) +\qquad (x > 0) $$ -This distribution has one parameter, $\lambda$. +This distribution has one parameter $\lambda$. -It is related to the Poisson distribution as it describes the distribution of the length of the time interval between two consecutive events in a Poisson process. +The exponential distribution can be thought of as the continuous analog of the geometric distribution. It can be shown that, for this distribution, the mean is $1/\lambda$ and the variance is $1/\lambda^2$. @@ -535,8 +580,8 @@ plt.show() The **beta distribution** is a distribution on $(0, 1)$ with density $$ - p(x) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha) \Gamma(\beta)} - x^{\alpha - 1} (1 - x)^{\beta - 1} +p(x) = \frac{\Gamma(\alpha + \beta)}{\Gamma(\alpha) \Gamma(\beta)} + x^{\alpha - 1} (1 - x)^{\beta - 1} $$ where $\Gamma$ is the [gamma function](https://en.wikipedia.org/wiki/Gamma_function). @@ -591,14 +636,13 @@ plt.legend() plt.show() ``` - #### Gamma distribution The **gamma distribution** is a distribution on $\left(0, \infty\right)$ with density $$ - p(x) = \frac{\beta^\alpha}{\Gamma(\alpha)} - x^{\alpha - 1} \exp(-\beta x) +p(x) = \frac{\beta^\alpha}{\Gamma(\alpha)} + x^{\alpha - 1} \exp(-\beta x) $$ This distribution has two parameters, $\alpha > 0$ and $\beta > 0$. @@ -675,7 +719,6 @@ df = pd.DataFrame(data, columns=['name', 'income']) df ``` - In this situation, we might refer to the set of their incomes as the "income distribution." 
The terminology is confusing because this set is not a probability distribution @@ -694,32 +737,33 @@ Suppose we have an observed distribution with values $\{x_1, \ldots, x_n\}$ The **sample mean** of this distribution is defined as $$ - \bar x = \frac{1}{n} \sum_{i=1}^n x_i +\bar x = \frac{1}{n} \sum_{i=1}^n x_i $$ The **sample variance** is defined as $$ - \frac{1}{n} \sum_{i=1}^n (x_i - \bar x)^2 +\frac{1}{n} \sum_{i=1}^n (x_i - \bar x)^2 $$ For the income distribution given above, we can calculate these numbers via ```{code-cell} ipython3 -x = np.asarray(df['income']) -``` - -```{code-cell} ipython3 +x = df['income'] x.mean(), x.var() ``` - ```{exercise} :label: prob_ex4 -Check that the formulas given above produce the same numbers. +If you try to check that the formulas given above for the sample mean and sample +variance produce the same numbers, you will see that the variance isn't quite +right. This is because SciPy uses $1/(n-1)$ instead of $1/n$ as the term at the +front of the variance. (Some books define the sample variance this way.) +Confirm. ``` + ### Visualization Let's look at different ways that we can visualize one or more observed distributions. @@ -730,16 +774,12 @@ We will cover - kernel density estimates and - violin plots -+++ {"user_expressions": []} #### Histograms -+++ {"user_expressions": []} - We can histogram the income distribution we just constructed as follows ```{code-cell} ipython3 -x = df['income'] fig, ax = plt.subplots() ax.hist(x, bins=5, density=True, histtype='bar') ax.set_xlabel('income') @@ -747,11 +787,9 @@ ax.set_ylabel('density') plt.show() ``` -+++ {"user_expressions": []} - Let's look at a distribution from real data. -In particular, we will look at the monthly return on Amazon shares between 2000/1/1 and 2023/1/1. +In particular, we will look at the monthly return on Amazon shares between 2000/1/1 and 2024/1/1. The monthly return is calculated as the percent change in the share price over each month. 
@@ -759,28 +797,21 @@ So we will have one observation for each month. ```{code-cell} ipython3 :tags: [hide-output] -df = yf.download('AMZN', '2000-1-1', '2023-1-1', interval='1mo' ) + +df = yf.download('AMZN', '2000-1-1', '2024-1-1', interval='1mo') prices = df['Adj Close'] -data = prices.pct_change()[1:] * 100 -data.head() +x_amazon = prices.pct_change()[1:] * 100 +x_amazon.head() ``` -+++ {"user_expressions": []} - The first observation is the monthly return (percent change) over January 2000, which was ```{code-cell} ipython3 -data[0] +x_amazon.iloc[0] ``` -+++ {"user_expressions": []} - Let's turn the return observations into an array and histogram it. -```{code-cell} ipython3 -x_amazon = np.asarray(data) -``` - ```{code-cell} ipython3 fig, ax = plt.subplots() ax.hist(x_amazon, bins=20) @@ -789,13 +820,14 @@ ax.set_ylabel('density') plt.show() ``` -+++ {"user_expressions": []} - #### Kernel density estimates -Kernel density estimate (KDE) is a non-parametric way to estimate and visualize the PDF of a distribution. +Kernel density estimates (KDE) provide a simple way to estimate and visualize the density of a distribution. + +If you are not familiar with KDEs, you can think of them as a smoothed +histogram. -KDE will generate a smooth curve that approximates the PDF. +Let's have a look at a KDE formed from the Amazon return data. ```{code-cell} ipython3 fig, ax = plt.subplots() @@ -825,9 +857,8 @@ A suitable bandwidth is not too smooth (underfitting) or too wiggly (overfitting #### Violin plots -+++ {"user_expressions": []} -Yet another way to display an observed distribution is via a violin plot. +Another way to display an observed distribution is via a violin plot. ```{code-cell} ipython3 fig, ax = plt.subplots() @@ -837,43 +868,42 @@ ax.set_xlabel('KDE') plt.show() ``` -+++ {"user_expressions": []} - Violin plots are particularly useful when we want to compare different distributions. 
-For example, let's compare the monthly returns on Amazon shares with the monthly return on Apple shares. +For example, let's compare the monthly returns on Amazon shares with the monthly return on Costco shares. ```{code-cell} ipython3 :tags: [hide-output] -df = yf.download('AAPL', '2000-1-1', '2023-1-1', interval='1mo' ) + +df = yf.download('COST', '2000-1-1', '2024-1-1', interval='1mo') prices = df['Adj Close'] -data = prices.pct_change()[1:] * 100 -x_apple = np.asarray(data) +x_costco = prices.pct_change()[1:] * 100 ``` ```{code-cell} ipython3 fig, ax = plt.subplots() -ax.violinplot([x_amazon, x_apple]) +ax.violinplot([x_amazon, x_costco]) ax.set_ylabel('monthly return (percent change)') -ax.set_xlabel('KDE') +ax.set_xlabel('retailers') + +ax.set_xticks([1, 2]) +ax.set_xticklabels(['Amazon', 'Costco']) plt.show() ``` -+++ {"user_expressions": []} - ### Connection to probability distributions -+++ {"user_expressions": []} - Let's discuss the connection between observed distributions and probability distributions. Sometimes it's helpful to imagine that an observed distribution is generated by a particular probability distribution. For example, we might look at the returns from Amazon above and imagine that they were generated by a normal distribution. -Even though this is not true, it might be a helpful way to think about the data. +(Even though this is not true, it *might* be a helpful way to think about the data.) -Here we match a normal distribution to the Amazon monthly returns by setting the sample mean to the mean of the normal distribution and the sample variance equal to the variance. +Here we match a normal distribution to the Amazon monthly returns by setting the +sample mean to the mean of the normal distribution and the sample variance equal +to the variance. Then we plot the density and the histogram. 
@@ -894,14 +924,10 @@ ax.set_ylabel('density') plt.show() ``` -+++ {"user_expressions": []} - -The match between the histogram and the density is not very bad but also not very good. +The match between the histogram and the density is not bad but also not very good. One reason is that the normal distribution is not really a good fit for this observed data --- we will discuss this point again when we talk about {ref}`heavy tailed distributions`. -+++ {"user_expressions": []} - Of course, if the data really *is* generated by the normal distribution, then the fit will be better. Let's see this in action @@ -923,9 +949,6 @@ ax.set_ylabel('density') plt.show() ``` -+++ {"user_expressions": []} - Note that if you keep increasing $N$, which is the number of observations, the fit will get better and better. This convergence is a version of the "law of large numbers", which we will discuss {ref}`later`. - diff --git a/lectures/solow.md b/lectures/solow.md index 484f7664..0a5160b0 100644 --- a/lectures/solow.md +++ b/lectures/solow.md @@ -55,9 +55,11 @@ $$ Production functions with this property include * the **Cobb-Douglas** function $F(K, L) = A K^{\alpha} - L^{1-\alpha}$ with $0 \leq \alpha \leq 1$ and + L^{1-\alpha}$ with $0 \leq \alpha \leq 1$. * the **CES** function $F(K, L) = \left\{ a K^\rho + b L^\rho \right\}^{1/\rho}$ - with $a, b, \rho > 0$. + with $a, b, \rho > 0$. + +Here, $\alpha$ is the output elasticity of capital and $\rho$ is a parameter that determines the elasticity of substitution between capital and labor. We assume a closed economy, so aggregate domestic investment equals aggregate domestic saving. 
@@ -81,6 +83,7 @@ Setting $k_t := K_t / L$ and using homogeneity of degree one now yields $$ k_{t+1} + = s \frac{F(K_t, L)}{L} + (1 - \delta) \frac{K_t}{L} = s \frac{F(K_t, L)}{L} + (1 - \delta) k_t = s F(k_t, 1) + (1 - \delta) k_t $$ diff --git a/lectures/time_series_with_matrices.md b/lectures/time_series_with_matrices.md index 1e88336d..e73566ce 100644 --- a/lectures/time_series_with_matrices.md +++ b/lectures/time_series_with_matrices.md @@ -4,7 +4,7 @@ jupytext: extension: .md format_name: myst format_version: 0.13 - jupytext_version: 1.16.2 + jupytext_version: 1.16.1 kernelspec: display_name: Python 3 (ipykernel) language: python @@ -49,7 +49,12 @@ We will use the following imports: import numpy as np import matplotlib.pyplot as plt from matplotlib import cm -plt.rcParams["figure.figsize"] = (11, 5) #set default figure size + +# Custom figsize for this lecture +plt.rcParams["figure.figsize"] = (11, 5) + +# Set decimal printing to 3 decimal places +np.set_printoptions(precision=3, suppress=True) ``` ## Samuelson's model @@ -142,8 +147,8 @@ T = 80 α_1 = 1.53 α_2 = -.9 -y_neg1 = 28. # y_{-1} -y_0 = 24. +y_neg1 = 28.0 # y_{-1} +y_0 = 24.0 ``` Now we construct $A$ and $b$. @@ -197,6 +202,13 @@ point precision: np.allclose(y, y_second_method) ``` +$A$ is invertible as it is lower triangular and [its diagonal entries are non-zero](https://www.statlect.com/matrix-algebra/triangular-matrix) + +```{code-cell} ipython3 +# Check if A is lower triangular +np.allclose(A, np.tril(A)) +``` + ```{note} In general, `np.linalg.solve` is more numerically stable than using `np.linalg.inv` directly. 
@@ -392,7 +404,13 @@ class population_moments: Parameters: α_0, α_1, α_2, T, y_neg1, y_0 """ - def __init__(self, α_0, α_1, α_2, T, y_neg1, y_0, σ_u): + def __init__(self, α_0=10.0, + α_1=1.53, + α_2=-.9, + T=80, + y_neg1=28.0, + y_0=24.0, + σ_u=1): # compute A A = np.identity(T) @@ -437,8 +455,7 @@ class population_moments: return self.μ_y, self.Σ_y -series_process = population_moments( - α_0=10.0, α_1=1.53, α_2=-.9, T=80, y_neg1=28., y_0=24., σ_u=1) +series_process = population_moments() μ_y, Σ_y = series_process.get_moments() A_inv = series_process.A_inv @@ -483,12 +500,17 @@ Notice how the population variance increases and asymptotes. Let's print out the covariance matrix $\Sigma_y$ for a time series $y$. ```{code-cell} ipython3 -series_process = population_moments( - α_0=0, α_1=.8, α_2=0, T=6, y_neg1=0., y_0=0., σ_u=1) +series_process = population_moments(α_0=0, + α_1=.8, + α_2=0, + T=6, + y_neg1=0., + y_0=0., + σ_u=1) μ_y, Σ_y = series_process.get_moments() print("μ_y = ", μ_y) -print("Σ_y = ", Σ_y) +print("Σ_y = \n", Σ_y) ``` Notice that the covariance between $y_t$ and $y_{t-1}$ -- the elements on the superdiagonal -- are *not* identical. @@ -502,9 +524,9 @@ We describe how to do that in [Linear State Space Models](https://python.quantec But just to set the stage for that analysis, let's print out the bottom right corner of $\Sigma_y$. ```{code-cell} ipython3 -series_process = population_moments( - α_0=10.0, α_1=1.53, α_2=-.9, T=80, y_neg1=28., y_0=24., σ_u=1) +series_process = population_moments() μ_y, Σ_y = series_process.get_moments() + print("bottom right corner of Σ_y = \n", Σ_y[72:,72:]) ``` @@ -529,26 +551,13 @@ To study the structure of $A^{-1}$, we shall print just up to $3$ decimals. Let's begin by printing out just the upper left hand corner of $A^{-1}$. ```{code-cell} ipython3 -with np.printoptions(precision=3, suppress=True): - print(A_inv[0:7,0:7]) +print(A_inv[0:7,0:7]) ``` Evidently, $A^{-1}$ is a lower triangular matrix. 
- -Let's print out the lower right hand corner of $A^{-1}$ and stare at it. - -```{code-cell} ipython3 -with np.printoptions(precision=3, suppress=True): - print(A_inv[72:,72:]) -``` - Notice how every row ends with the previous row's pre-diagonal entries. - - - - Since $A^{-1}$ is lower triangular, each row represents $ y_t$ for a particular $t$ as the sum of - a time-dependent function $A^{-1} b$ of the initial conditions incorporated in $b$, and - a weighted sum of current and past values of the IID shocks $\{u_t\}$. @@ -566,9 +575,6 @@ This is a **moving average** representation with time-varying coefficients. Just as system {eq}`eq:eqma` constitutes a **moving average** representation for $y$, system {eq}`eq:eqar` constitutes an **autoregressive** representation for $y$. - - - ## A forward looking model Samuelson’s model is *backward looking* in the sense that we give it *initial conditions* and let it @@ -638,7 +644,7 @@ for i in range(T): ``` ```{code-cell} ipython3 -B +print(B) ``` ```{code-cell} ipython3 diff --git a/lectures/unpleasant.md b/lectures/unpleasant.md index 676edfa1..5eb80e31 100644 --- a/lectures/unpleasant.md +++ b/lectures/unpleasant.md @@ -241,6 +241,7 @@ p_T = \frac{m_0}{\gamma_1 - \overline g - \gamma_2 R_u^{-1}} = \gamma_1^{-1} $$ (eq:pTformula) ```{prf:remark} +:label: equivalence We can verify the equivalence of the two formulas on the right sides of {eq}`eq:pTformula` by recalling that $R_u$ is a root of the quadratic equation {eq}`eq:up_steadyquadratic` that determines steady state rates of return on currency. ```