diff --git a/examples/06_Scalable_GP_Classification_1D/SVGP_Classification_1D.ipynb b/examples/06_Scalable_GP_Classification_1D/SVGP_Classification_1D.ipynb
new file mode 100644
index 000000000..20f502287
--- /dev/null
+++ b/examples/06_Scalable_GP_Classification_1D/SVGP_Classification_1D.ipynb
@@ -0,0 +1,445 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Scalable GP Classification in 1D (w/ SVGP)\n",
+    "\n",
+    "This example shows how to do stochastic variational inference (SVGP) with an `AbstractVariationalGP`, using a `VariationalStrategy` module while learning the inducing point locations.\n",
+    "\n",
+    "Note that, despite the title, the model below is fit with a `GaussianLikelihood` to a cosine curve, i.e. SVGP regression; the same `VariationalStrategy` setup handles classification once a `BernoulliLikelihood` is swapped in. "
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import math\n",
+    "import torch\n",
+    "import gpytorch\n",
+    "from matplotlib import pyplot as plt\n",
+    "\n",
+    "%matplotlib inline\n",
+    "%load_ext autoreload\n",
+    "%autoreload 2"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "train_x = torch.linspace(0, 1, 260)\n",
+    "train_y = torch.cos(train_x * (2 * math.pi))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from gpytorch.models import AbstractVariationalGP\n",
+    "from gpytorch.variational import CholeskyVariationalDistribution\n",
+    "from gpytorch.variational import VariationalStrategy\n",
+    "\n",
+    "class SVGPRegressionModel(AbstractVariationalGP):\n",
+    "    def __init__(self, inducing_points):\n",
+    "        variational_distribution = CholeskyVariationalDistribution(inducing_points.size(-1))\n",
+    "        variational_strategy = VariationalStrategy(self,\n",
+    "                                                   inducing_points,\n",
+    "                                                   variational_distribution,\n",
+    "                                                   learn_inducing_locations=True)\n",
+    "        super(SVGPRegressionModel, self).__init__(variational_strategy)\n",
+    "        self.mean_module = gpytorch.means.ConstantMean()\n",
+    "        self.covar_module = gpytorch.kernels.ScaleKernel(\n",
+    "            gpytorch.kernels.RBFKernel(\n",
+    "                log_lengthscale_prior=gpytorch.priors.SmoothedBoxPrior(0.001, 1., sigma=0.1, log_transform=True)\n",
+    "            )\n",
+    "        )\n",
+    "\n",
+    "    def forward(self, x):\n",
+    "        mean_x = self.mean_module(x)\n",
+    "        covar_x = self.covar_module(x)\n",
+    "        latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)\n",
+    "        return latent_pred\n",
+    "\n",
+    "\n",
+    "model = SVGPRegressionModel(inducing_points=train_x[:25])\n",
+    "likelihood = gpytorch.likelihoods.GaussianLikelihood()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/jrg365/gpytorch/gpytorch/utils/cholesky.py:14: UserWarning: torch.potrf is deprecated in favour of torch.cholesky and will be removed in the next release. Please use torch.cholesky instead and note that the :attr:`upper` argument in torch.cholesky defaults to ``False``.\n",
+      "  potrf_list = [sub_mat.potrf() for sub_mat in mat.view(-1, *mat.shape[-2:])]\n",
+      "/home/jrg365/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py:74: UserWarning: torch.potrf is deprecated in favour of torch.cholesky and will be removed in the next release. 
Please use torch.cholesky instead and note that the :attr:`upper` argument in torch.cholesky defaults to ``False``.\n", + " ld_one = lr_flipped.potrf().diag().log().sum() * 2\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Iter 1 - Loss: 29.468 [-5.286, 24.181, -0.001]\n", + "Iter 2 - Loss: 23.192 [-3.997, 19.195, -0.001]\n", + "Iter 3 - Loss: 18.659 [-4.088, 14.569, -0.001]\n", + "Iter 4 - Loss: 12.908 [-1.447, 11.460, -0.001]\n", + "Iter 5 - Loss: 10.229 [-1.383, 8.844, -0.002]\n", + "Iter 6 - Loss: 8.041 [-1.399, 6.638, -0.004]\n", + "Iter 7 - Loss: 6.311 [-1.405, 4.900, -0.006]\n", + "Iter 8 - Loss: 5.023 [-1.407, 3.608, -0.008]\n", + "Iter 9 - Loss: 4.121 [-1.410, 2.701, -0.010]\n", + "Iter 10 - Loss: 3.509 [-1.400, 2.097, -0.012]\n", + "Iter 11 - Loss: 3.160 [-1.393, 1.752, -0.014]\n", + "Iter 12 - Loss: 2.955 [-1.378, 1.560, -0.016]\n", + "Iter 13 - Loss: 2.820 [-1.380, 1.422, -0.017]\n", + "Iter 14 - Loss: 2.761 [-1.380, 1.362, -0.019]\n", + "Iter 15 - Loss: 2.734 [-1.381, 1.334, -0.019]\n", + "Iter 16 - Loss: 2.694 [-1.381, 1.293, -0.020]\n", + "Iter 17 - Loss: 2.679 [-1.381, 1.278, -0.020]\n", + "Iter 18 - Loss: 2.668 [-1.381, 1.268, -0.020]\n", + "Iter 19 - Loss: 2.654 [-1.380, 1.255, -0.019]\n", + "Iter 20 - Loss: 2.652 [-1.379, 1.255, -0.019]\n", + "Iter 21 - Loss: 2.657 [-1.377, 1.262, -0.018]\n", + "Iter 22 - Loss: 2.662 [-1.373, 1.272, -0.017]\n", + "Iter 23 - Loss: 2.657 [-1.369, 1.273, -0.015]\n", + "Iter 24 - Loss: 2.608 [-1.361, 1.233, -0.014]\n", + "Iter 25 - Loss: 2.538 [-1.353, 1.173, -0.012]\n", + "Iter 26 - Loss: 2.444 [-1.343, 1.090, -0.011]\n", + "Iter 27 - Loss: 2.337 [-1.333, 0.995, -0.010]\n", + "Iter 28 - Loss: 2.227 [-1.322, 0.897, -0.008]\n", + "Iter 29 - Loss: 2.125 [-1.312, 0.806, -0.007]\n", + "Iter 30 - Loss: 2.033 [-1.301, 0.726, -0.006]\n", + "Iter 31 - Loss: 1.950 [-1.290, 0.655, -0.005]\n", + "Iter 32 - Loss: 1.874 [-1.279, 0.591, -0.004]\n", + "Iter 33 - Loss: 1.804 [-1.269, 0.532, -0.003]\n", + "Iter 34 - Loss: 1.741 [-1.258, 0.480, -0.003]\n", + "Iter 35 - Loss: 1.691 [-1.247, 0.441, -0.002]\n", + "Iter 36 - Loss: 1.667 [-1.237, 0.428, -0.002]\n", + "Iter 37 - Loss: 1.622 [-1.227, 0.394, -0.001]\n", + "Iter 38 - Loss: 1.581 [-1.216, 0.364, -0.001]\n", + "Iter 39 - Loss: 1.556 [-1.206, 0.349, -0.001]\n", + "Iter 40 - Loss: 1.534 [-1.196, 0.337, -0.001]\n", + "Iter 41 - Loss: 1.520 [-1.186, 0.333, -0.001]\n", + "Iter 42 - Loss: 1.500 [-1.176, 0.322, -0.001]\n", + "Iter 43 - Loss: 1.464 [-1.166, 0.296, -0.001]\n", + "Iter 44 - Loss: 1.441 [-1.156, 0.284, -0.001]\n", + "Iter 45 - Loss: 1.421 [-1.146, 0.274, -0.001]\n", + "Iter 46 - Loss: 1.401 [-1.135, 0.264, -0.001]\n", + "Iter 47 - Loss: 1.378 [-1.125, 0.252, -0.001]\n", + "Iter 48 - Loss: 1.355 [-1.114, 0.239, -0.001]\n", + "Iter 49 - Loss: 1.328 [-1.104, 0.223, -0.001]\n", + "Iter 50 - Loss: 1.303 [-1.093, 0.209, -0.001]\n", + "Iter 51 - Loss: 1.276 [-1.083, 0.192, -0.001]\n", + "Iter 52 - Loss: 1.250 [-1.072, 0.177, -0.001]\n", + "Iter 53 - Loss: 1.221 [-1.060, 0.160, -0.001]\n", + "Iter 54 - Loss: 1.195 [-1.049, 0.146, -0.001]\n", + "Iter 55 - Loss: 1.171 [-1.037, 0.133, -0.001]\n", + "Iter 56 - Loss: 1.150 [-1.025, 0.123, -0.001]\n", + "Iter 57 - Loss: 1.130 [-1.014, 0.115, -0.001]\n", + "Iter 58 - Loss: 1.108 [-1.003, 0.104, -0.001]\n", + "Iter 59 - Loss: 1.089 [-0.992, 0.096, -0.001]\n", + "Iter 60 - Loss: 1.074 [-0.981, 0.092, -0.001]\n", + "Iter 61 - Loss: 1.063 [-0.971, 0.091, -0.001]\n", + "Iter 62 - Loss: 1.046 [-0.961, 0.084, -0.001]\n", + "Iter 63 - Loss: 
1.030 [-0.950, 0.079, -0.001]\n", + "Iter 64 - Loss: 1.013 [-0.938, 0.074, -0.001]\n", + "Iter 65 - Loss: 0.996 [-0.927, 0.069, -0.001]\n", + "Iter 66 - Loss: 0.979 [-0.914, 0.064, -0.001]\n", + "Iter 67 - Loss: 0.966 [-0.900, 0.065, -0.001]\n", + "Iter 68 - Loss: 0.952 [-0.885, 0.066, -0.001]\n", + "Iter 69 - Loss: 0.940 [-0.871, 0.067, -0.001]\n", + "Iter 70 - Loss: 0.925 [-0.857, 0.067, -0.001]\n", + "Iter 71 - Loss: 0.913 [-0.843, 0.069, -0.001]\n", + "Iter 72 - Loss: 0.898 [-0.828, 0.070, -0.001]\n", + "Iter 73 - Loss: 0.884 [-0.812, 0.071, -0.001]\n", + "Iter 74 - Loss: 0.863 [-0.794, 0.068, -0.001]\n", + "Iter 75 - Loss: 0.844 [-0.773, 0.070, -0.001]\n", + "Iter 76 - Loss: 0.829 [-0.750, 0.078, -0.001]\n", + "Iter 77 - Loss: 0.814 [-0.726, 0.087, -0.001]\n", + "Iter 78 - Loss: 0.783 [-0.702, 0.080, -0.001]\n", + "Iter 79 - Loss: 0.764 [-0.680, 0.084, -0.001]\n", + "Iter 80 - Loss: 0.750 [-0.657, 0.093, -0.001]\n", + "Iter 81 - Loss: 0.723 [-0.632, 0.090, -0.001]\n", + "Iter 82 - Loss: 0.699 [-0.602, 0.096, -0.001]\n", + "Iter 83 - Loss: 0.663 [-0.569, 0.094, -0.001]\n", + "Iter 84 - Loss: 0.642 [-0.532, 0.109, -0.001]\n", + "Iter 85 - Loss: 0.609 [-0.499, 0.110, -0.001]\n", + "Iter 86 - Loss: 0.562 [-0.464, 0.097, -0.001]\n", + "Iter 87 - Loss: 0.542 [-0.430, 0.111, -0.001]\n", + "Iter 88 - Loss: 0.530 [-0.395, 0.134, -0.001]\n", + "Iter 89 - Loss: 0.485 [-0.359, 0.124, -0.001]\n", + "Iter 90 - Loss: 0.426 [-0.321, 0.104, -0.001]\n", + "Iter 91 - Loss: 0.399 [-0.283, 0.115, -0.001]\n", + "Iter 92 - Loss: 0.371 [-0.246, 0.125, -0.001]\n", + "Iter 93 - Loss: 0.312 [-0.211, 0.100, -0.001]\n", + "Iter 94 - Loss: 0.311 [-0.180, 0.130, -0.001]\n", + "Iter 95 - Loss: 0.277 [-0.150, 0.126, -0.001]\n", + "Iter 96 - Loss: 0.229 [-0.113, 0.115, -0.001]\n", + "Iter 97 - Loss: 0.229 [-0.084, 0.145, -0.001]\n", + "Iter 98 - Loss: 0.232 [-0.063, 0.168, -0.001]\n", + "Iter 99 - Loss: 0.167 [-0.036, 0.130, -0.001]\n", + "Iter 100 - Loss: 0.192 [-0.011, 0.180, -0.001]\n", + "Iter 101 - Loss: 0.143 [0.016, 0.158, -0.001]\n", + "Iter 102 - Loss: 0.088 [0.045, 0.133, -0.001]\n", + "Iter 103 - Loss: 0.111 [0.079, 0.189, -0.001]\n", + "Iter 104 - Loss: 0.131 [0.115, 0.246, -0.001]\n", + "Iter 105 - Loss: 0.069 [0.155, 0.223, -0.001]\n", + "Iter 106 - Loss: -0.031 [0.179, 0.148, -0.001]\n", + "Iter 107 - Loss: 0.024 [0.197, 0.220, -0.001]\n", + "Iter 108 - Loss: 0.049 [0.235, 0.283, -0.001]\n", + "Iter 109 - Loss: -0.086 [0.286, 0.199, -0.001]\n", + "Iter 110 - Loss: -0.139 [0.321, 0.182, -0.001]\n", + "Iter 111 - Loss: -0.135 [0.348, 0.212, -0.001]\n", + "Iter 112 - Loss: -0.205 [0.382, 0.176, -0.001]\n", + "Iter 113 - Loss: -0.225 [0.432, 0.206, -0.001]\n", + "Iter 114 - Loss: -0.231 [0.457, 0.225, -0.001]\n", + "Iter 115 - Loss: -0.308 [0.484, 0.175, -0.001]\n", + "Iter 116 - Loss: -0.382 [0.527, 0.144, -0.001]\n", + "Iter 117 - Loss: -0.367 [0.555, 0.188, -0.001]\n", + "Iter 118 - Loss: -0.383 [0.610, 0.227, -0.001]\n", + "Iter 119 - Loss: -0.469 [0.638, 0.168, -0.001]\n", + "Iter 120 - Loss: -0.441 [0.646, 0.204, -0.001]\n", + "Iter 121 - Loss: -0.487 [0.703, 0.216, -0.001]\n", + "Iter 122 - Loss: -0.487 [0.696, 0.208, -0.001]\n", + "Iter 123 - Loss: -0.585 [0.766, 0.180, -0.001]\n", + "Iter 124 - Loss: -0.642 [0.783, 0.140, -0.001]\n", + "Iter 125 - Loss: -0.613 [0.801, 0.188, -0.001]\n", + "Iter 126 - Loss: -0.662 [0.866, 0.203, -0.001]\n", + "Iter 127 - Loss: -0.701 [0.867, 0.165, -0.001]\n", + "Iter 128 - Loss: -0.696 [0.889, 0.192, -0.001]\n", + "Iter 129 - Loss: -0.817 [0.988, 0.170, -0.001]\n", 
+ "Iter 130 - Loss: -0.734 [0.899, 0.163, -0.001]\n", + "Iter 131 - Loss: -0.646 [0.920, 0.274, -0.001]\n", + "Iter 132 - Loss: -0.858 [1.102, 0.243, -0.001]\n", + "Iter 133 - Loss: -0.800 [0.989, 0.188, -0.001]\n", + "Iter 134 - Loss: -0.653 [0.832, 0.179, -0.001]\n", + "Iter 135 - Loss: -0.969 [1.137, 0.167, -0.001]\n", + "Iter 136 - Loss: -0.914 [1.089, 0.174, -0.001]\n", + "Iter 137 - Loss: -0.859 [1.135, 0.275, -0.001]\n", + "Iter 138 - Loss: -0.926 [1.158, 0.231, -0.001]\n", + "Iter 139 - Loss: -1.058 [1.268, 0.210, -0.001]\n", + "Iter 140 - Loss: -0.999 [1.212, 0.211, -0.001]\n", + "Iter 141 - Loss: -0.976 [1.203, 0.227, -0.001]\n", + "Iter 142 - Loss: -1.142 [1.371, 0.228, -0.001]\n", + "Iter 143 - Loss: -1.046 [1.311, 0.264, -0.001]\n", + "Iter 144 - Loss: -1.123 [1.352, 0.228, -0.001]\n", + "Iter 145 - Loss: -1.018 [1.240, 0.222, -0.001]\n", + "Iter 146 - Loss: -1.215 [1.413, 0.197, -0.001]\n", + "Iter 147 - Loss: -1.131 [1.362, 0.231, -0.001]\n", + "Iter 148 - Loss: -1.213 [1.428, 0.214, -0.001]\n", + "Iter 149 - Loss: -1.099 [1.346, 0.246, -0.001]\n", + "Iter 150 - Loss: -1.190 [1.476, 0.284, -0.001]\n", + "Iter 151 - Loss: -1.220 [1.490, 0.269, -0.001]\n", + "Iter 152 - Loss: -0.717 [0.933, 0.216, -0.001]\n", + "Iter 153 - Loss: 0.511 [-0.279, 0.231, -0.001]\n", + "Iter 154 - Loss: 0.508 [-0.230, 0.277, -0.001]\n", + "Iter 155 - Loss: 2.351 [-1.855, 0.495, -0.001]\n", + "Iter 156 - Loss: 0.558 [-0.036, 0.521, -0.001]\n", + "Iter 157 - Loss: -0.400 [1.001, 0.600, -0.001]\n", + "Iter 158 - Loss: 0.260 [0.800, 1.059, -0.001]\n", + "Iter 159 - Loss: 0.697 [0.917, 1.613, -0.001]\n", + "Iter 160 - Loss: 0.275 [1.392, 1.666, -0.001]\n", + "Iter 161 - Loss: -0.025 [1.173, 1.147, -0.001]\n", + "Iter 162 - Loss: -0.505 [1.191, 0.685, -0.001]\n", + "Iter 163 - Loss: -0.910 [1.341, 0.430, -0.001]\n", + "Iter 164 - Loss: -0.741 [1.158, 0.416, -0.001]\n", + "Iter 165 - Loss: -0.732 [1.245, 0.511, -0.001]\n", + "Iter 166 - Loss: -0.518 [1.111, 0.592, -0.001]\n", + "Iter 167 - Loss: -0.553 [1.185, 0.631, -0.001]\n", + "Iter 168 - Loss: -0.655 [1.232, 0.576, -0.001]\n", + "Iter 169 - Loss: -0.602 [1.104, 0.500, -0.001]\n", + "Iter 170 - Loss: -0.867 [1.263, 0.395, -0.001]\n", + "Iter 171 - Loss: -0.904 [1.233, 0.329, -0.001]\n", + "Iter 172 - Loss: -0.939 [1.238, 0.299, -0.001]\n", + "Iter 173 - Loss: -1.001 [1.271, 0.269, -0.001]\n", + "Iter 174 - Loss: -0.910 [1.215, 0.303, -0.001]\n", + "Iter 175 - Loss: -0.982 [1.251, 0.268, -0.001]\n", + "Iter 176 - Loss: -0.839 [1.068, 0.229, -0.001]\n", + "Iter 177 - Loss: -0.950 [1.153, 0.202, -0.001]\n", + "Iter 178 - Loss: -1.048 [1.280, 0.231, -0.001]\n", + "Iter 179 - Loss: -0.966 [1.223, 0.257, -0.001]\n", + "Iter 180 - Loss: -1.059 [1.328, 0.268, -0.001]\n", + "Iter 181 - Loss: -0.794 [1.093, 0.298, -0.001]\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Iter 182 - Loss: -1.030 [1.312, 0.282, -0.001]\n", + "Iter 183 - Loss: -0.902 [1.160, 0.257, -0.001]\n", + "Iter 184 - Loss: -1.067 [1.310, 0.242, -0.001]\n", + "Iter 185 - Loss: -1.109 [1.335, 0.225, -0.001]\n", + "Iter 186 - Loss: -1.116 [1.360, 0.243, -0.001]\n", + "Iter 187 - Loss: -0.871 [1.162, 0.290, -0.001]\n", + "Iter 188 - Loss: -1.147 [1.389, 0.242, -0.001]\n", + "Iter 189 - Loss: -1.124 [1.334, 0.209, -0.001]\n", + "Iter 190 - Loss: -1.220 [1.454, 0.234, -0.001]\n", + "Iter 191 - Loss: -1.077 [1.315, 0.238, -0.001]\n", + "Iter 192 - Loss: -1.324 [1.553, 0.228, -0.001]\n", + "Iter 193 - Loss: -1.118 [1.348, 0.229, -0.001]\n", + "Iter 194 - Loss: -1.284 
[1.538, 0.253, -0.001]\n",
+      "Iter 195 - Loss: -1.232 [1.476, 0.243, -0.001]\n",
+      "Iter 196 - Loss: -1.407 [1.638, 0.230, -0.001]\n",
+      "Iter 197 - Loss: -1.325 [1.515, 0.188, -0.001]\n",
+      "Iter 198 - Loss: -1.405 [1.638, 0.232, -0.001]\n",
+      "Iter 199 - Loss: -1.364 [1.587, 0.222, -0.001]\n",
+      "Iter 200 - Loss: -1.421 [1.648, 0.227, -0.001]\n",
+      "CPU times: user 19.8 s, sys: 18.3 s, total: 38.1 s\n",
+      "Wall time: 5.56 s\n"
+     ]
+    }
+   ],
+   "source": [
+    "from gpytorch.mlls.variational_elbo import VariationalELBO\n",
+    "\n",
+    "# Find optimal model hyperparameters\n",
+    "model.train()\n",
+    "likelihood.train()\n",
+    "\n",
+    "# Use the adam optimizer\n",
+    "optimizer = torch.optim.Adam([\n",
+    "    {'params': model.parameters()},\n",
+    "    {'params': likelihood.parameters()}\n",
+    "], lr=0.1)\n",
+    "\n",
+    "# \"Loss\" for GPs - the variational ELBO, a lower bound on the marginal log likelihood\n",
+    "# the third argument is the number of training data points\n",
+    "mll = VariationalELBO(likelihood, model, train_y.size(0), combine_terms=False)\n",
+    "\n",
+    "def train():\n",
+    "    num_iter = 200\n",
+    "    for i in range(num_iter):\n",
+    "        optimizer.zero_grad()\n",
+    "        output = model(train_x)\n",
+    "        # Calc loss and backprop gradients\n",
+    "        log_lik, kl_div, log_prior = mll(output, train_y)\n",
+    "        loss = -(log_lik - kl_div + log_prior)\n",
+    "        loss.backward()\n",
+    "        print('Iter %d - Loss: %.3f [%.3f, %.3f, %.3f]' % (i + 1, loss.item(), log_lik.item(), kl_div.item(), log_prior.item()))\n",
+    "        optimizer.step()\n",
+    "\n",
+    "# Get clock time\n",
+    "with gpytorch.beta_features.diagonal_correction():\n",
+    "    %time train()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/jrg365/gpytorch/gpytorch/utils/cholesky.py:14: UserWarning: torch.potrf is deprecated in favour of torch.cholesky and will be removed in the next release. Please use torch.cholesky instead and note that the :attr:`upper` argument in torch.cholesky defaults to ``False``.\n",
+      "  potrf_list = [sub_mat.potrf() for sub_mat in mat.view(-1, *mat.shape[-2:])]\n",
+      "/home/jrg365/gpytorch/gpytorch/lazy/added_diag_lazy_tensor.py:74: UserWarning: torch.potrf is deprecated in favour of torch.cholesky and will be removed in the next release. Please use torch.cholesky instead and note that the :attr:`upper` argument in torch.cholesky defaults to ``False``.\n",
+      "  ld_one = lr_flipped.potrf().diag().log().sum() * 2\n"
+     ]
+    },
+    {
+     "data": {
+      "text/plain": [
+       ""
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "image/png": "[base64 PNG omitted - figure: observed data (black stars) and SVGP predictive mean (blue line) on [0, 1], y-limits [-3, 3]]",
+      "text/plain": [
+       ""
+      ]
+     },
+     "metadata": {
+      "needs_background": "light"
+     },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "# Set model and likelihood into eval mode\n",
+    "model.eval()\n",
+    "likelihood.eval()\n",
+    "\n",
+    "# Initialize axes\n",
+    "f, ax = plt.subplots(1, 1, figsize=(4, 3))\n",
+    "\n",
+    "with torch.no_grad():\n",
+    "    test_x = torch.linspace(0, 1, 101)\n",
+    "    predictions = likelihood(model(test_x)).mean\n",
+    "\n",
+    "ax.plot(train_x.numpy(), train_y.numpy(), 'k*')\n",
+    "ax.plot(test_x.numpy(), predictions.numpy(), 'b')\n",
+    "ax.set_ylim([-3, 3])\n",
+    "ax.legend(['Observed Data', 'Mean'])"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "MultivariateNormal(loc: torch.Size([101]))\n"
+     ]
+    }
+   ],
+   "source": [
+    "with gpytorch.beta_features.fast_pred_var():\n",
+    "    print(model(test_x))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "anaconda-cloud": {},
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.0"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 1
+}
diff --git a/examples/06_Scalable_GP_Classification_1D/index.rst b/examples/06_Scalable_GP_Classification_1D/index.rst
index bb7a1a6d4..5ca1a9b57 100644
--- a/examples/06_Scalable_GP_Classification_1D/index.rst
+++ b/examples/06_Scalable_GP_Classification_1D/index.rst
@@ -5,4 +5,5 @@
    :maxdepth: 1
    :hidden:

+   SVGP_Classification_1D.ipynb
    KISSGP_Classification_1D.ipynb
diff --git a/gpytorch/beta_features.py b/gpytorch/beta_features.py
index f2e572ae3..42647eb12 100644
--- a/gpytorch/beta_features.py
+++ b/gpytorch/beta_features.py
@@ -37,7 +37,7 @@ class diagonal_correction(_feature_flag):
     Add a diagonal correction to scalable inducing point methods
     """

-    pass
+    _state = True


 class fast_pred_samples(_feature_flag):
diff --git a/gpytorch/variational/variational_strategy.py b/gpytorch/variational/variational_strategy.py
index ffff0fe26..3435599df 100644
--- a/gpytorch/variational/variational_strategy.py
+++ b/gpytorch/variational/variational_strategy.py
@@ -1,7 +1,9 @@
 from __future__ import absolute_import, division, print_function, unicode_literals

+import math
 import torch
-from ..lazy import RootLazyTensor, MatmulLazyTensor
+import gpytorch
+from ..lazy import RootLazyTensor, PsdSumLazyTensor, DiagLazyTensor
 from .. import beta_features
 from ..module import Module
 from ..distributions import MultivariateNormal
@@ -39,6 +41,8 @@ def __init__(self, model, inducing_points, variational_distribution, learn_induc
         super(VariationalStrategy, self).__init__()
         object.__setattr__(self, "model", model)

+        inducing_points = inducing_points.clone()
+
         if inducing_points.dim() == 1:
             inducing_points = inducing_points.unsqueeze(-1)
@@ -91,7 +95,7 @@ def forward(self, x):
         test_mean = full_mean[n_induc:]
         induc_mean = full_mean[:n_induc]
-        induc_induc_covar = full_covar[:n_induc, :n_induc]
+        induc_induc_covar = full_covar[:n_induc, :n_induc].add_jitter()
         induc_data_covar = full_covar[:n_induc, n_induc:]
         data_induc_covar = full_covar[n_induc:, :n_induc]
         data_data_covar = full_covar[n_induc:, n_induc:]
@@ -116,7 +120,7 @@ def forward(self, x):
         # Compute predictive covariance
         predictive_covar = data_data_covar
-        if beta_features.fast_pred_var.on():
+        if not self.training and beta_features.fast_pred_var.on():
             correction = RootLazyTensor(data_induc_covar.matmul(self.prior_root_inv)).mul(-1)
             correction = correction + RootLazyTensor(data_induc_covar.matmul(self.variational_root))
             predictive_covar = predictive_covar + correction
@@ -124,8 +128,12 @@ def forward(self, x):
             induc_data_covar = induc_data_covar.evaluate()
             inv_product = induc_induc_covar.inv_matmul(induc_data_covar)
             factor = variational_dist.lazy_covariance_matrix.root_decomposition().matmul(inv_product)
-            right_factor = factor - inv_product
-            left_factor = (factor - induc_data_covar).transpose(-1, -2)
-            predictive_covar = predictive_covar + MatmulLazyTensor(left_factor, right_factor)
+            predictive_covar = RootLazyTensor(factor.transpose(-2, -1))
+
+            if gpytorch.beta_features.diagonal_correction.on():
+                fake_diagonal = (inv_product * induc_data_covar).sum(0)
+                real_diagonal = data_data_covar.diag()
+                diag_correction = DiagLazyTensor((real_diagonal - fake_diagonal).clamp(0, math.inf))
+                predictive_covar = PsdSumLazyTensor(predictive_covar, diag_correction)

         return MultivariateNormal(predictive_mean, predictive_covar)
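For intuition about the `diagonal_correction` branch added above: the Nystrom term `K_fu K_uu^-1 K_uf` under-covers the true prior diagonal `diag(K_ff)`, so the code adds the difference back, clamped at zero, as a `DiagLazyTensor`. Below is a dense-tensor sketch of the same arithmetic; the toy RBF kernel, the sizes, the lengthscale, and the jitter value are assumptions for illustration, and `torch.inverse` stands in for the lazy `inv_matmul`.

```python
import torch

def rbf(a, b, lengthscale=0.2):
    # Toy RBF kernel between column vectors a (n x 1) and b (m x 1); illustrative only.
    return torch.exp(-0.5 * (a - b.t()).pow(2) / lengthscale ** 2)

x_induc = torch.linspace(0, 1, 5).unsqueeze(-1)  # 5 inducing points (assumed sizes)
x_data = torch.rand(10, 1)                       # 10 data points

induc_induc_covar = rbf(x_induc, x_induc) + 1e-3 * torch.eye(5)  # cf. add_jitter()
induc_data_covar = rbf(x_induc, x_data)
real_diagonal = torch.ones(10)  # a plain RBF kernel has unit prior variance

inv_product = torch.inverse(induc_induc_covar) @ induc_data_covar  # K_uu^-1 K_uf
fake_diagonal = (inv_product * induc_data_covar).sum(0)            # diag(K_fu K_uu^-1 K_uf)
diag_correction = (real_diagonal - fake_diagonal).clamp(min=0)     # what DiagLazyTensor wraps
```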
diff --git a/test/examples/test_svgp_gp_classification.py b/test/examples/test_svgp_gp_classification.py
index 03c9ad49d..ab943d5a9 100644
--- a/test/examples/test_svgp_gp_classification.py
+++ b/test/examples/test_svgp_gp_classification.py
@@ -3,7 +3,7 @@
 from __future__ import print_function
 from __future__ import unicode_literals

-from math import exp, pi
+from math import pi

 import os
 import random
@@ -11,40 +11,44 @@
 import unittest

 import gpytorch
 from torch import optim
-from gpytorch.kernels import RBFKernel, ScaleKernel
-from gpytorch.likelihoods import BernoulliLikelihood
-from gpytorch.means import ConstantMean
-from gpytorch.priors import SmoothedBoxPrior
-from gpytorch.distributions import MultivariateNormal
-
-
-train_x = torch.linspace(-1, 1, 10).unsqueeze(-1)
-train_y = torch.sign(torch.cos(train_x * (2 * pi))).squeeze()
-
-
-class GPClassificationModel(gpytorch.models.AbstractVariationalGP):
-    def __init__(self):
-        variational_distribution = gpytorch.variational.CholeskyVariationalDistribution(16)
-        variational_strategy = gpytorch.variational.VariationalStrategy(
-            self, torch.randn(16, 1), variational_distribution, learn_inducing_locations=True
-        )
-
-        super(GPClassificationModel, self).__init__(variational_strategy)
-        self.mean_module = ConstantMean(prior=SmoothedBoxPrior(-5, 5))
-        self.covar_module = ScaleKernel(
-            RBFKernel(log_lengthscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1, log_transform=True)),
-            log_outputscale_prior=SmoothedBoxPrior(exp(-5), exp(6), sigma=0.1, log_transform=True),
+from gpytorch.likelihoods import GaussianLikelihood
+from gpytorch.models import AbstractVariationalGP
+from gpytorch.variational import CholeskyVariationalDistribution
+from gpytorch.variational import VariationalStrategy
+
+
+def train_data(cuda=False):
+    train_x = torch.linspace(0, 1, 260)
+    train_y = torch.cos(train_x * (2 * pi))
+    if cuda:
+        return train_x.cuda(), train_y.cuda()
+    else:
+        return train_x, train_y
+
+
+class SVGPRegressionModel(AbstractVariationalGP):
+    def __init__(self, inducing_points):
+        variational_distribution = CholeskyVariationalDistribution(inducing_points.size(-1))
+        variational_strategy = VariationalStrategy(self,
+                                                   inducing_points,
+                                                   variational_distribution,
+                                                   learn_inducing_locations=True)
+        super(SVGPRegressionModel, self).__init__(variational_strategy)
+        self.mean_module = gpytorch.means.ConstantMean()
+        self.covar_module = gpytorch.kernels.ScaleKernel(
+            gpytorch.kernels.RBFKernel(
+                log_lengthscale_prior=gpytorch.priors.SmoothedBoxPrior(0.001, 1., sigma=0.1, log_transform=True)
+            )
         )
-        self.covar_module.base_kernel.initialize(log_lengthscale=-1)

     def forward(self, x):
         mean_x = self.mean_module(x)
         covar_x = self.covar_module(x)
-        latent_pred = MultivariateNormal(mean_x, covar_x)
+        latent_pred = gpytorch.distributions.MultivariateNormal(mean_x, covar_x)
         return latent_pred


-class TestSVGPClassification(unittest.TestCase):
+class TestSVGPRegression(unittest.TestCase):
     def setUp(self):
         if os.getenv("UNLOCK_SEED") is None or os.getenv("UNLOCK_SEED").lower() == "false":
             self.rng_state = torch.get_rng_state()
@@ -57,18 +61,18 @@ def tearDown(self):
         if hasattr(self, "rng_state"):
             torch.set_rng_state(self.rng_state)

-    def test_kissgp_classification_error(self):
-        model = GPClassificationModel()
-        likelihood = BernoulliLikelihood()
-        mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=train_y.numel())
+    def test_regression_error(self):
+        train_x, train_y = train_data()
+        likelihood = GaussianLikelihood()
+        model = SVGPRegressionModel(train_x[:25])
+        mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=len(train_y))

         # Find optimal model hyperparameters
         model.train()
         likelihood.train()
-
-        optimizer = optim.Adam(model.parameters(), lr=0.1)
+        optimizer = optim.Adam([{'params': model.parameters()}, {'params': likelihood.parameters()}], lr=0.1)
         optimizer.n_iter = 0
-        for _ in range(75):
+        for _ in range(200):
             optimizer.zero_grad()
             output = model(train_x)
             loss = -mll(output, train_y)
@@ -76,10 +80,9 @@ def test_kissgp_classification_error(self):
             optimizer.n_iter += 1
             optimizer.step()

-        for _, param in model.named_parameters():
+        for param in model.parameters():
             self.assertTrue(param.grad is not None)
             self.assertGreater(param.grad.norm().item(), 0)
-
         for param in likelihood.parameters():
             self.assertTrue(param.grad is not None)
             self.assertGreater(param.grad.norm().item(), 0)
@@ -87,9 +90,42 @@ def test_kissgp_classification_error(self):
         # Set back to eval mode
         model.eval()
         likelihood.eval()
-        test_preds = likelihood(model(train_x)).mean.ge(0.5).float().mul(2).sub(1).squeeze()
+        test_preds = likelihood(model(train_x)).mean.squeeze()
         mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
-        self.assertLess(mean_abs_error.squeeze().item(), 1e-5)
+        self.assertLess(mean_abs_error.item(), 1e-1)
+
+    def test_regression_error_cuda(self):
+        if torch.cuda.is_available():
+            train_x, train_y = train_data(cuda=True)
+            likelihood = GaussianLikelihood().cuda()
+            model = SVGPRegressionModel(train_x[:25]).cuda()
+            mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=len(train_y))
+
+            # Find optimal model hyperparameters
+            model.train()
+            likelihood.train()
+            optimizer = optim.Adam([{'params': model.parameters()}, {'params': likelihood.parameters()}], lr=0.1)
+            optimizer.n_iter = 0
+            for _ in range(200):
+                optimizer.zero_grad()
+                output = model(train_x)
+                loss = -mll(output, train_y)
+                loss.backward()
+                optimizer.n_iter += 1
+                optimizer.step()
+
+            for param in model.parameters():
+                self.assertTrue(param.grad is not None)
+                self.assertGreater(param.grad.norm().item(), 0)
+            for param in likelihood.parameters():
+                self.assertTrue(param.grad is not None)
+                self.assertGreater(param.grad.norm().item(), 0)
+
+            # Set back to eval mode
+            model.eval()
+            likelihood.eval()
+            test_preds = likelihood(model(train_x)).mean.squeeze()
+            mean_abs_error = torch.mean(torch.abs(train_y - test_preds) / 2)
+            self.assertLess(mean_abs_error.item(), 1e-1)


 if __name__ == "__main__":
     unittest.main()
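Finally, since the folder and the notebook are titled "classification", here is a hedged sketch of running the `SVGPRegressionModel` defined in the test above as a classifier. The +/-1 labels and the `mean.ge(0.5)` thresholding follow the deleted classification test; the iteration count and learning rate are assumptions carried over from the regression test, and the snippet is illustrative rather than part of the diff.

```python
# Illustrative sketch (assumes the SVGPRegressionModel class from the test file above).
import torch
import gpytorch
from math import pi

train_x = torch.linspace(0, 1, 260)
train_y = torch.sign(torch.cos(train_x * (2 * pi)))  # +/-1 labels, as in the removed test

model = SVGPRegressionModel(train_x[:25])
likelihood = gpytorch.likelihoods.BernoulliLikelihood()
mll = gpytorch.mlls.VariationalELBO(likelihood, model, num_data=train_y.numel())

model.train()
likelihood.train()
optimizer = torch.optim.Adam(list(model.parameters()) + list(likelihood.parameters()), lr=0.1)
for _ in range(200):
    optimizer.zero_grad()
    loss = -mll(model(train_x), train_y)
    loss.backward()
    optimizer.step()

model.eval()
likelihood.eval()
with torch.no_grad():
    # Threshold the predictive probability at 0.5 and map back to +/-1 labels
    pred_labels = likelihood(model(train_x)).mean.ge(0.5).float().mul(2).sub(1).squeeze()
```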