From 747eeab9eb6b650a061273c511119e716bf73b46 Mon Sep 17 00:00:00 2001
From: "Matthew N. White"
Date: Fri, 8 Mar 2024 16:55:18 -0500
Subject: [PATCH] Made basic ConsPortfolio simplified solver

Matches behavior exactly for the default dictionary. Need to add vFunc
functionality, then expand to discretized version. This commit has the new
solver commented out in the type init method so as not to create a ton of
new failing tests while it's still a WIP.
---
 HARK/ConsumptionSaving/ConsPortfolioModel.py | 379 +++++++++++++++++++
 HARK/distribution.py                         |   2 +-
 2 files changed, 380 insertions(+), 1 deletion(-)

diff --git a/HARK/ConsumptionSaving/ConsPortfolioModel.py b/HARK/ConsumptionSaving/ConsPortfolioModel.py
index 731f603de..ea9ba5054 100644
--- a/HARK/ConsumptionSaving/ConsPortfolioModel.py
+++ b/HARK/ConsumptionSaving/ConsPortfolioModel.py
@@ -29,6 +29,8 @@
     ValueFuncCRRA,
 )
 from HARK.metric import MetricObject
+from HARK.rewards import UtilityFuncCRRA
+from HARK.distribution import expected
 
 
 # Define a class to represent the single period solution of the portfolio choice problem
@@ -180,6 +182,7 @@ def __init__(self, verbose=False, quiet=False, **kwds):
         else:
             solver = ConsPortfolioJointDistSolver
         self.solve_one_period = make_one_period_oo_solver(solver)
+        # self.solve_one_period = solve_one_period_ConsPortfolio
 
         self.update()
 
@@ -328,6 +331,382 @@ def __init__(self, verbose=False, quiet=False, **kwds):
 
         # Set the solver for the portfolio model, and update various constructed attributes
         self.solve_one_period = make_one_period_oo_solver(ConsSequentialPortfolioSolver)
+
+
+def solve_one_period_ConsPortfolio(
+    solution_next,
+    ShockDstn,
+    IncShkDstn,
+    RiskyDstn,
+    LivPrb,
+    DiscFac,
+    CRRA,
+    Rfree,
+    PermGroFac,
+    BoroCnstArt,
+    aXtraGrid,
+    ShareGrid,
+    AdjustPrb,
+    ShareLimit,
+    vFuncBool,
+    DiscreteShareBool,
+    IndepDstnBool,
+):
+    """
+    Solve one period of a consumption-saving problem with portfolio allocation
+    between a riskless and risky asset. This function handles various sub-cases
+    or variations on the problem, including the possibility that the agent does
+    not necessarily get to update their portfolio share in every period, or that
+    they must choose a discrete rather than continuous risky share.
+
+    Parameters
+    ----------
+    solution_next : PortfolioSolution
+        Solution to next period's problem.
+    ShockDstn : Distribution
+        Joint distribution of permanent income shocks, transitory income shocks,
+        and risky returns. This is only used if the input IndepDstnBool is False,
+        indicating that income and return distributions can't be assumed to be
+        independent.
+    IncShkDstn : Distribution
+        Discrete distribution of permanent income shocks and transitory income
+        shocks. This is only used if the input IndepDstnBool is True, indicating
+        that income and return distributions are independent.
+    RiskyDstn : Distribution
+        Distribution of risky asset returns. This is only used if the input
+        IndepDstnBool is True, indicating that income and return distributions
+        are independent.
+    LivPrb : float
+        Survival probability; likelihood of being alive at the beginning of
+        the succeeding period.
+    DiscFac : float
+        Intertemporal discount factor for future utility.
+    CRRA : float
+        Coefficient of relative risk aversion.
+    Rfree : float
+        Risk free interest factor on end-of-period assets.
+    PermGroFac : float
+        Expected permanent income growth factor at the end of this period.
+    BoroCnstArt : float or None
+        Borrowing constraint for the minimum allowable assets to end the
+        period with. In this model, it is *required* to be zero.
+    aXtraGrid : np.array
+        Array of "extra" end-of-period asset values-- assets above the
+        absolute minimum acceptable level.
+    ShareGrid : np.array
+        Array of risky portfolio shares on which to define the interpolation
+        of the consumption function when Share is fixed. Also used when the
+        risky share choice is specified as discrete rather than continuous.
+    AdjustPrb : float
+        Probability that the agent will be able to update his portfolio share.
+    ShareLimit : float
+        Limiting lower bound of risky portfolio share as mNrm approaches infinity.
+    vFuncBool : boolean
+        An indicator for whether the value function should be computed and
+        included in the reported solution.
+    DiscreteShareBool : bool
+        Indicator for whether risky portfolio share should be optimized on the
+        continuous [0,1] interval using the FOC (False), or instead only selected
+        from the discrete set of values in ShareGrid (True). If True, then
+        vFuncBool must also be True.
+    IndepDstnBool : bool
+        Indicator for whether the income and risky return distributions are
+        independent of each other, which can speed up the expectations step.
+
+    Returns
+    -------
+    solution_now : PortfolioSolution
+        Solution to this period's problem.
+    """
+    # Make sure the individual is liquidity constrained. Allowing a consumer to
+    # borrow *and* invest in an asset with unbounded (negative) returns is a bad mix.
+    if BoroCnstArt != 0.0:
+        raise ValueError("PortfolioConsumerType must have BoroCnstArt=0.0!")
+
+    # Make sure that if risky portfolio share is optimized only discretely, then
+    # the value function is also constructed (else this task would be impossible).
+    if DiscreteShareBool and (not vFuncBool):
+        raise ValueError(
+            "PortfolioConsumerType requires vFuncBool to be True when DiscreteShareBool is True!"
+        )
+
+    # Define the current period utility function and effective discount factor
+    uFunc = UtilityFuncCRRA(CRRA)
+    DiscFacEff = DiscFac * LivPrb  # "effective" discount factor
+
+    # Unpack next period's solution for easier access
+    vPfuncAdj_next = solution_next.vPfuncAdj
+    dvdmFuncFxd_next = solution_next.dvdmFuncFxd
+    dvdsFuncFxd_next = solution_next.dvdsFuncFxd
+    vFuncAdj_next = solution_next.vFuncAdj
+    vFuncFxd_next = solution_next.vFuncFxd
+
+    # Set a flag for whether the natural borrowing constraint is zero, which
+    # depends on whether the smallest transitory income shock is zero
+    BoroCnstNat_iszero = np.min(IncShkDstn.atoms[1]) == 0.0
+
+    # Prepare to calculate end-of-period marginal values by creating an array
+    # of market resources that the agent could have next period, considering
+    # the grid of end-of-period assets and the distribution of shocks he might
+    # experience next period.
+
+    # Unpack the risky return shock distribution
+    Risky_next = RiskyDstn.atoms
+    RiskyMax = np.max(Risky_next)
+    RiskyMin = np.min(Risky_next)
+
+    # bNrm represents R*a, balances after asset return shocks but before income.
+    # This just uses the highest risky return as a rough shifter for the aXtraGrid.
+    if BoroCnstNat_iszero:
+        aNrmGrid = aXtraGrid
+        bNrmGrid = np.insert(RiskyMax * aXtraGrid, 0, RiskyMin * aXtraGrid[0])
+    else:
+        # Add an asset point at exactly zero
+        aNrmGrid = np.insert(aXtraGrid, 0, 0.0)
+        bNrmGrid = RiskyMax * np.insert(aXtraGrid, 0, 0.0)
+
+    # Get grid and shock sizes, for easier indexing
+    aNrmCount = aNrmGrid.size
+    ShareCount = ShareGrid.size
+
+    # Make tiled arrays to calculate future realizations of mNrm and Share when integrating over IncShkDstn
+    bNrmNext, ShareNext = np.meshgrid(bNrmGrid, ShareGrid, indexing="ij")
+
+    # Define functions that are used internally to evaluate future realizations
+    def calc_mNrm_next(S, b):
+        """
+        Calculate future realizations of market resources mNrm from the income
+        shock distribution S and normalized bank balances b.
+        """
+        return b / (S["PermShk"] * PermGroFac) + S["TranShk"]
+
+    def calc_dvdm_next(S, b, z):
+        """
+        Evaluate realizations of marginal value of market resources next period,
+        based on the income distribution S, values of bank balances bNrm, and
+        values of the risky share z.
+        """
+        mNrm_next = calc_mNrm_next(S, b)
+        dvdmAdj_next = vPfuncAdj_next(mNrm_next)
+
+        if AdjustPrb < 1.0:
+            # Expand to the same dimensions as mNrm
+            Share_next_expanded = z + np.zeros_like(mNrm_next)
+            dvdmFxd_next = dvdmFuncFxd_next(mNrm_next, Share_next_expanded)
+            # Combine by adjustment probability
+            dvdm_next = AdjustPrb * dvdmAdj_next + (1.0 - AdjustPrb) * dvdmFxd_next
+        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
+            dvdm_next = dvdmAdj_next
+
+        dvdm_next = (S["PermShk"] * PermGroFac) ** (-CRRA) * dvdm_next
+        return dvdm_next
+
+    def calc_dvds_next(S, b, z):
+        """
+        Evaluate realizations of marginal value of risky share next period, based
+        on the income distribution S, values of bank balances bNrm, and values of
+        the risky share z.
+        """
+        mNrm_next = calc_mNrm_next(S, b)
+
+        # No marginal value of Share if it's a free choice!
+        dvdsAdj_next = np.zeros_like(mNrm_next)
+
+        if AdjustPrb < 1.0:
+            # Expand to the same dimensions as mNrm
+            Share_next_expanded = z + np.zeros_like(mNrm_next)
+            dvdsFxd_next = dvdsFuncFxd_next(mNrm_next, Share_next_expanded)
+            # Combine by adjustment probability
+            dvds_next = AdjustPrb * dvdsAdj_next + (1.0 - AdjustPrb) * dvdsFxd_next
+        else:  # Don't bother evaluating if there's no chance that portfolio share is fixed
+            dvds_next = dvdsAdj_next
+
+        dvds_next = (S["PermShk"] * PermGroFac) ** (1.0 - CRRA) * dvds_next
+        return dvds_next
+
+    # Calculate end-of-period marginal value of assets and shares at each point
+    # in aNrm and ShareGrid. Does so by taking expectation of next period marginal
+    # values across income and risky return shocks.
+
+    # Calculate intermediate marginal value of bank balances by taking expectations over income shocks
+    dvdb_intermed = expected(calc_dvdm_next, IncShkDstn, args=(bNrmNext, ShareNext))
+    dvdbNvrs_intermed = uFunc.derinv(dvdb_intermed, order=(1, 0))
+    dvdbNvrsFunc_intermed = BilinearInterp(dvdbNvrs_intermed, bNrmGrid, ShareGrid)
+    dvdbFunc_intermed = MargValueFuncCRRA(dvdbNvrsFunc_intermed, CRRA)
+
+    # Calculate intermediate marginal value of risky portfolio share by taking expectations over income shocks
+    dvds_intermed = expected(calc_dvds_next, IncShkDstn, args=(bNrmNext, ShareNext))
+    dvdsFunc_intermed = BilinearInterp(dvds_intermed, bNrmGrid, ShareGrid)
+
+    # Make tiled arrays to calculate future realizations of bNrm and Share when integrating over RiskyDstn
+    aNrmNow, ShareNext = np.meshgrid(aNrmGrid, ShareGrid, indexing="ij")
+
+    # Define functions for calculating end-of-period marginal value
+    def calc_EndOfPrd_dvda(S, a, z):
+        """
+        Compute end-of-period marginal value of assets at values a, conditional
+        on risky asset return S and risky share z.
+        """
+        # Calculate future realizations of bank balances bNrm
+        Rxs = S - Rfree  # Excess returns
+        Rport = Rfree + z * Rxs  # Portfolio return
+        bNrm_next = Rport * a
+
+        # Ensure shape concordance
+        z_rep = z + np.zeros_like(bNrm_next)
+
+        # Calculate and return dvda
+        EndOfPrd_dvda = Rport * dvdbFunc_intermed(bNrm_next, z_rep)
+        return EndOfPrd_dvda
+
+    def EndOfPrddvds_dist(S, a, z):
+        """
+        Compute end-of-period marginal value of risky share at values a, conditional
+        on risky asset return S and risky share z.
+        """
+        # Calculate future realizations of bank balances bNrm
+        Rxs = S - Rfree  # Excess returns
+        Rport = Rfree + z * Rxs  # Portfolio return
+        bNrm_next = Rport * a
+
+        # Make the shares match the dimension of b, so that it can be vectorized
+        z_rep = z + np.zeros_like(bNrm_next)
+
+        # Calculate and return dvds
+        EndOfPrd_dvds = Rxs * a * dvdbFunc_intermed(bNrm_next, z_rep) + dvdsFunc_intermed(bNrm_next, z_rep)
+        return EndOfPrd_dvds
+
+    # Evaluate realizations of value and marginal value after asset returns are realized
+
+    # Calculate end-of-period marginal value of assets by taking expectations
+    EndOfPrd_dvda = DiscFacEff * expected(calc_EndOfPrd_dvda, RiskyDstn, args=(aNrmNow, ShareNext))
+    EndOfPrd_dvdaNvrs = uFunc.derinv(EndOfPrd_dvda)
+
+    # Calculate end-of-period marginal value of risky portfolio share by taking expectations
+    EndOfPrd_dvds = DiscFacEff * expected(EndOfPrddvds_dist, RiskyDstn, args=(aNrmNow, ShareNext))
+
+    # Now find the optimal (continuous) risky share on [0,1] by solving the first
+    # order condition EndOfPrd_dvds == 0.
+    FOC_s = EndOfPrd_dvds  # Relabel for convenient typing
+
+    # For each value of aNrm, find the value of Share such that FOC_s == 0
+    crossing = np.logical_and(FOC_s[:, 1:] <= 0.0, FOC_s[:, :-1] >= 0.0)
+    share_idx = np.argmax(crossing, axis=1)
+    # This represents the index of the segment of the share grid where dvds flips
+    # from positive to negative, indicating that there's a zero *on* the segment
+
+    # Calculate the fractional distance between those share gridpoints where the
+    # zero should be found, assuming a linear function; call it alpha
+    a_idx = np.arange(aNrmCount)
+    bot_s = ShareGrid[share_idx]
+    top_s = ShareGrid[share_idx + 1]
+    bot_f = FOC_s[a_idx, share_idx]
+    top_f = FOC_s[a_idx, share_idx + 1]
+    bot_c = EndOfPrd_dvdaNvrs[a_idx, share_idx]
+    top_c = EndOfPrd_dvdaNvrs[a_idx, share_idx + 1]
+    alpha = 1.0 - top_f / (top_f - bot_f)
+
+    # Calculate the continuous optimal risky share and optimal consumption
+    ShareAdj_now = (1.0 - alpha) * bot_s + alpha * top_s
+    cNrmAdj_now = (1.0 - alpha) * bot_c + alpha * top_c
+
+    # If agent wants to put more than 100% into risky asset, he is constrained.
+    # Likewise if he wants to put less than 0% into risky asset, he is constrained.
+    constrained_top = FOC_s[:, -1] > 0.0
+    constrained_bot = FOC_s[:, 0] < 0.0
+
+    # Apply those constraints to both risky share and consumption (but lower
+    # constraint should never be relevant)
+    ShareAdj_now[constrained_top] = 1.0
+    ShareAdj_now[constrained_bot] = 0.0
+    cNrmAdj_now[constrained_top] = EndOfPrd_dvdaNvrs[constrained_top, -1]
+    cNrmAdj_now[constrained_bot] = EndOfPrd_dvdaNvrs[constrained_bot, 0]
+
+    # When the natural borrowing constraint is *not* zero, then aNrm=0 is in the
+    # grid, but there's no way to "optimize" the portfolio if a=0, and consumption
+    # can't depend on the risky share if it doesn't meaningfully exist. Apply
+    # a small fix to the bottom gridpoint (aNrm=0) when this happens.
+    if not BoroCnstNat_iszero:
+        ShareAdj_now[0] = 1.0
+        cNrmAdj_now[0] = EndOfPrd_dvdaNvrs[0, -1]
+
+    # Construct functions characterizing the solution for this period
+
+    # Calculate the endogenous mNrm gridpoints when the agent adjusts his portfolio,
+    # then construct the consumption function when the agent can adjust his share
+    mNrmAdj_now = np.insert(aNrmGrid + cNrmAdj_now, 0, 0.0)
+    cNrmAdj_now = np.insert(cNrmAdj_now, 0, 0.0)
+    cFuncAdj_now = LinearInterp(mNrmAdj_now, cNrmAdj_now)
+
+    # Construct the marginal value (of mNrm) function when the agent can adjust
+    vPfuncAdj_now = MargValueFuncCRRA(cFuncAdj_now, CRRA)
+
+    # Construct the consumption function when the agent *can't* adjust the risky
+    # share, as well as the marginal value of Share function
+    cFuncFxd_by_Share = []
+    dvdsFuncFxd_by_Share = []
+    for j in range(ShareCount):
+        cNrmFxd_temp = np.insert(EndOfPrd_dvdaNvrs[:, j], 0, 0.0)
+        mNrmFxd_temp = np.insert(aNrmGrid + cNrmFxd_temp[1:], 0, 0.0)
+        dvdsFxd_temp = np.insert(EndOfPrd_dvds[:, j], 0, EndOfPrd_dvds[0, j])
+        cFuncFxd_by_Share.append(LinearInterp(mNrmFxd_temp, cNrmFxd_temp))
+        dvdsFuncFxd_by_Share.append(LinearInterp(mNrmFxd_temp, dvdsFxd_temp))
+    cFuncFxd_now = LinearInterpOnInterp1D(cFuncFxd_by_Share, ShareGrid)
+    dvdsFuncFxd_now = LinearInterpOnInterp1D(dvdsFuncFxd_by_Share, ShareGrid)
+
+    # The share function when the agent can't adjust his portfolio is trivial
+    ShareFuncFxd_now = IdentityFunction(i_dim=1, n_dims=2)
+
+    # Construct the marginal value of mNrm function when the agent can't adjust his share
+    dvdmFuncFxd_now = MargValueFuncCRRA(cFuncFxd_now, CRRA)
+
+    # Construct the optimal risky share function when adjusting is possible
+    if BoroCnstNat_iszero:
+        Share_lower_bound = ShareLimit
+    else:
+        Share_lower_bound = 1.0
+    ShareAdj_now = np.insert(ShareAdj_now, 0, Share_lower_bound)
+    ShareFuncAdj_now = LinearInterp(mNrmAdj_now, ShareAdj_now, ShareLimit, 0.0)
+
+    # This is a point at which (a,c,share) have consistent length. Take the
+    # snapshot for storing the grid and values in the solution.
+    save_points = {
+        "a": deepcopy(aNrmGrid),
+        "eop_dvda_adj": uFunc.der(cNrmAdj_now),
+        "share_adj": deepcopy(ShareAdj_now),
+        "share_grid": deepcopy(ShareGrid),
+        "eop_dvda_fxd": uFunc.der(EndOfPrd_dvda),
+        "eop_dvds_fxd": EndOfPrd_dvds,
+    }
+
+    # Add the value function if requested TODO
+    if vFuncBool:
+        vFuncAdj_now = NullFunc()
+        vFuncFxd_now = NullFunc()
+    else:  # If vFuncBool is False, fill in dummy values
+        vFuncAdj_now = NullFunc()
+        vFuncFxd_now = NullFunc()
+
+    # Package and return the solution
+    solution_now = PortfolioSolution(
+        cFuncAdj=cFuncAdj_now,
+        ShareFuncAdj=ShareFuncAdj_now,
+        vPfuncAdj=vPfuncAdj_now,
+        vFuncAdj=vFuncAdj_now,
+        cFuncFxd=cFuncFxd_now,
+        ShareFuncFxd=ShareFuncFxd_now,
+        dvdmFuncFxd=dvdmFuncFxd_now,
+        dvdsFuncFxd=dvdsFuncFxd_now,
+        vFuncFxd=vFuncFxd_now,
+        AdjPrb=AdjustPrb,
+        # WHAT IS THIS STUFF FOR??
+        aGrid=save_points["a"],
+        Share_adj=save_points["share_adj"],
+        EndOfPrddvda_adj=save_points["eop_dvda_adj"],
+        ShareGrid=save_points["share_grid"],
+        EndOfPrddvda_fxd=save_points["eop_dvda_fxd"],
+        EndOfPrddvds_fxd=save_points["eop_dvds_fxd"],
+    )
+    return solution_now
 
 
 class ConsPortfolioSolver(MetricObject):
diff --git a/HARK/distribution.py b/HARK/distribution.py
index 3b5ac0463..6a8c4e0ee 100644
--- a/HARK/distribution.py
+++ b/HARK/distribution.py
@@ -1206,7 +1206,7 @@ def expected(
         method in that it uses numpy's vectorization and broadcasting
         rules to avoid costly iteration.
         Note: If you need to use a function that acts on single outcomes
-        of the distribution, consier `distribution.calc_expectation`.
+        of the distribution, consider `distribution.calc_expectation`.
     \*args :
         Other inputs for func, representing the non-stochastic arguments.
         The the expectation is computed at ``f(dstn, *args)``.
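
A quick way to exercise the new solver while it is still disabled by default is sketched below. This is an illustrative smoke test and not part of the patch: it assumes the patched HARK is importable, constructs a PortfolioConsumerType with the default dictionary (which the commit message says the new solver matches exactly), and assigns solve_one_period_ConsPortfolio by hand, mirroring the commented-out line in __init__. The solve(), solution, cFuncAdj, and ShareFuncAdj names are the existing HARK interface.

# Hypothetical smoke test for the new standalone solver (not part of this patch).
from HARK.ConsumptionSaving.ConsPortfolioModel import (
    PortfolioConsumerType,
    solve_one_period_ConsPortfolio,
)

agent = PortfolioConsumerType()  # default dictionary, as referenced in the commit message
agent.solve_one_period = solve_one_period_ConsPortfolio  # mirrors the commented-out assignment
agent.solve()

# Inspect the adjusting-agent policy functions from the terminal-adjacent period
cFunc = agent.solution[0].cFuncAdj
ShareFunc = agent.solution[0].ShareFuncAdj
print(cFunc(10.0), ShareFunc(10.0))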
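
The first-order-condition block in the new solver locates, for each end-of-period asset level, the segment of ShareGrid on which EndOfPrd_dvds changes sign and then interpolates linearly to its zero. The toy example below uses made-up FOC values rather than solver output; it shows that alpha = 1 - top_f / (top_f - bot_f) is the fraction of the distance from the lower to the upper share gridpoint at which a line through the two FOC values crosses zero.

import numpy as np

# Hypothetical FOC values for a single aNrm gridpoint (one row of EndOfPrd_dvds)
ShareGrid = np.linspace(0.0, 1.0, 6)                  # [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]
FOC_s = np.array([0.9, 0.5, 0.1, -0.3, -0.7, -1.1])   # dvds: positive, then negative

# Find the segment where dvds flips from positive to negative (same logic as the solver)
crossing = np.logical_and(FOC_s[1:] <= 0.0, FOC_s[:-1] >= 0.0)
j = np.argmax(crossing)                               # index of the lower gridpoint

bot_s, top_s = ShareGrid[j], ShareGrid[j + 1]
bot_f, top_f = FOC_s[j], FOC_s[j + 1]
alpha = 1.0 - top_f / (top_f - bot_f)                 # fractional distance to the zero
Share_opt = (1.0 - alpha) * bot_s + alpha * top_s

print(j, alpha, Share_opt)  # 2, 0.25, 0.45: the FOC crosses zero a quarter of the way along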
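
Both expectation steps in the new solver, and the docstring touched in HARK/distribution.py, rely on distribution.expected(func, dstn, args=...). As a rough mental model only (the real function operates on HARK Distribution objects and passes labeled shock realizations such as S["PermShk"], using broadcasting rather than a Python loop), it behaves like a probability-weighted sum of func over the distribution's atoms, broadcast against the non-stochastic arguments:

import numpy as np

def expected_like(func, atoms, pmv, args=()):
    """Illustrative stand-in for HARK.distribution.expected on a discrete
    distribution with outcomes `atoms` and probabilities `pmv` (not HARK code)."""
    return sum(p * func(x, *args) for p, x in zip(pmv, atoms))

# Example: E[R * v'(R * a)] over a two-point risky return distribution,
# evaluated on a small grid of end-of-period assets (CRRA = 2 marginal value).
atoms = np.array([0.95, 1.15])   # hypothetical low/high risky returns
pmv = np.array([0.5, 0.5])       # equal probabilities
aGrid = np.array([1.0, 2.0, 4.0])

dvda = expected_like(lambda R, a: R * (R * a) ** (-2.0), atoms, pmv, args=(aGrid,))
print(dvda)  # end-of-period marginal value of assets at each gridpoint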