From bdbce8902b1cd7f52251688c28a4cebd5b1af3a1 Mon Sep 17 00:00:00 2001
From: Zaccharie Ramzi
Date: Wed, 20 Mar 2019 14:43:45 +0100
Subject: [PATCH 1/7] added fessler ref for restarting pogm

---
 modopt/opt/__init__.py | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/modopt/opt/__init__.py b/modopt/opt/__init__.py
index bed93544..cce3882c 100644
--- a/modopt/opt/__init__.py
+++ b/modopt/opt/__init__.py
@@ -8,6 +8,10 @@
 References
 ----------
 
+.. [K2018] Kim, D., & Fessler, J. A. (2018).
+    Adaptive restart of the optimized gradient method for convex optimization.
+    Journal of Optimization Theory and Applications, 178(1), 240-263.
+    [https://link.springer.com/content/pdf/10.1007%2Fs10957-018-1287-4.pdf]
 .. [L2018] Liang, Jingwei, and Carola-Bibiane Schönlieb.
     Improving FISTA: Faster, Smarter and Greedier.
     arXiv preprint arXiv:1811.01430 (2018).

From 7d08f74e53ef3bfef50e23382d3610663d04871c Mon Sep 17 00:00:00 2001
From: Zaccharie Ramzi
Date: Wed, 20 Mar 2019 15:08:27 +0100
Subject: [PATCH 2/7] WIP started implementation of POGM with the init

---
 modopt/opt/algorithms.py | 84 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 84 insertions(+)

diff --git a/modopt/opt/algorithms.py b/modopt/opt/algorithms.py
index 9b768b3c..42747d6c 100644
--- a/modopt/opt/algorithms.py
+++ b/modopt/opt/algorithms.py
@@ -1072,3 +1072,87 @@ def retrieve_outputs(self):
         for obs in self._observers['cv_metrics']:
             metrics[obs.name] = obs.retrieve_metrics()
         self.metrics = metrics
+
+class POGM(SetUp):
+    r"""Proximal Optimised Gradient Method
+
+    This class implements algorithm 3 from [K2018]_
+
+    Parameters
+    ----------
+    u : np.ndarray
+        Initial guess for the u variable
+    x : np.ndarray
+        Initial guess for the x variable (primal)
+    y : np.ndarray
+        Initial guess for the y variable
+    z : np.ndarray
+        Initial guess for the z variable
+    grad : class
+        Gradient operator class
+    prox : class
+        Proximity operator class
+    cost : class or str, optional
+        Cost function class (default is 'auto'); Use 'auto' to automatically
+        generate a costObj instance
+    linear : class instance, optional
+        Linear operator class (default is None)
+    beta_param : float, optional
+        Initial value of the beta parameter (default is 1.0). This corresponds
+        to (1 / L) in [K2018]_
+    sigma_bar : float, optional
+        Value of the shrinking parameter sigma bar (default is 1.0)
+    auto_iterate : bool, optional
+        Option to automatically begin iterations upon initialisation (default
+        is 'True')
+    """
+    def __init__(
+        self,
+        u,
+        x,
+        y,
+        z,
+        grad,
+        prox,
+        cost,
+        linear=None,
+        beta_param=1.0,
+        sigma_bar=1.0,
+        auto_iterate=True,
+        metric_call_period=5,
+        metrics={},
+    ):
+        # Set default algorithm properties
+        super(POGM, self).__init__(
+            metric_call_period=metric_call_period,
+            metrics=metrics,
+            linear=linear,
+        )
+
+        # set the initial variable values
+        (self._check_input_data(data) for data in (u, x, y, z))
+        self._u_old = np.copy(u)
+        self._x_old = np.copy(x)
+        self._y_old = np.copy(y)
+        self._z_old = np.copy(z)
+
+        # Set the algorithm operators
+        (self._check_operator(operator) for operator in (grad, prox, cost))
+        self._grad = grad
+        self._prox = prox
+        self._linear = linear
+        if cost == 'auto':
+            self._cost_func = costObj([self._grad, self._prox])
+        else:
+            self._cost_func = cost
+
+        # Set the algorithm parameters
+        (self._check_param(param) for param in (beta_param, sigma_bar))
+        if not (0 <= sigma_bar <= 1):
+            raise ValueError('The sigma bar parameter needs to be in [0, 1]')
+        self._beta = beta_param
+        self._sigma_bar = sigma_bar
+
+        # Automatically run the algorithm
+        if auto_iterate:
+            self.iterate()

From 8963ed2d25babe7e872299b6cd2448775e5edfbe Mon Sep 17 00:00:00 2001
From: Zaccharie Ramzi
Date: Wed, 20 Mar 2019 15:14:45 +0100
Subject: [PATCH 3/7] added surrounding functions for pogm algorithm, and also the fixed initial params (t, xi, sigma)

---
 modopt/opt/algorithms.py | 51 ++++++++++++++++++++++++++++++++++++++++
 1 file changed, 51 insertions(+)

diff --git a/modopt/opt/algorithms.py b/modopt/opt/algorithms.py
index 42747d6c..8ecd2b20 100644
--- a/modopt/opt/algorithms.py
+++ b/modopt/opt/algorithms.py
@@ -1152,7 +1152,58 @@ def __init__(
             raise ValueError('The sigma bar parameter needs to be in [0, 1]')
         self._beta = beta_param
         self._sigma_bar = sigma_bar
+        self._xi = self._sigma = self._t_old = 1.0
 
         # Automatically run the algorithm
         if auto_iterate:
             self.iterate()
+
+    def iterate(self, max_iter=150):
+        r"""Iterate
+
+        This method calls update until either the convergence criterion is
+        met or the maximum number of iterations is reached
+
+        Parameters
+        ----------
+        max_iter : int, optional
+            Maximum number of iterations (default is ``150``)
+
+        """
+
+        self._run_alg(max_iter)
+
+        # retrieve metrics results
+        self.retrieve_outputs()
+        # rename outputs as attributes
+        self.x_final = self._x_new
+
+    def get_notify_observers_kwargs(self):
+        """ Return the mapping between the metrics call and the iterated
+        variables.
+
+        Return
+        ----------
+        notify_observers_kwargs: dict,
+            the mapping between the iterated variables and their names.
+        """
+        return {
+            'u_new': self._u_new,
+            'x_new': self._x_new,
+            'y_new': self._y_new,
+            'z_new': self._z_new,
+            'xi': self._xi,
+            'sigma': self._sigma,
+            't': self._t_new,
+            'idx': self.idx,
+        }
+
+    def retrieve_outputs(self):
+        """ Declare the outputs of the algorithms as attributes: x_final,
+        y_final, metrics.
+        """
+
+        metrics = {}
+        for obs in self._observers['cv_metrics']:
+            metrics[obs.name] = obs.retrieve_metrics()
+        self.metrics = metrics

From 00dc30bd4d174914b123d052b10f8b43756a2b1e Mon Sep 17 00:00:00 2001
From: Zaccharie Ramzi
Date: Wed, 20 Mar 2019 15:40:19 +0100
Subject: [PATCH 4/7] finished implementing POGM

---
 modopt/opt/algorithms.py | 68 ++++++++++++++++++++++++++++++++++++++--
 1 file changed, 66 insertions(+), 2 deletions(-)

diff --git a/modopt/opt/algorithms.py b/modopt/opt/algorithms.py
index 8ecd2b20..17de4d32 100644
--- a/modopt/opt/algorithms.py
+++ b/modopt/opt/algorithms.py
@@ -1134,7 +1134,7 @@ def __init__(
         self._u_old = np.copy(u)
         self._x_old = np.copy(x)
         self._y_old = np.copy(y)
-        self._z_old = np.copy(z)
+        self._z = np.copy(z)
 
         # Set the algorithm operators
         (self._check_operator(operator) for operator in (grad, prox, cost))
@@ -1153,11 +1153,75 @@ def __init__(
         self._beta = beta_param
         self._sigma_bar = sigma_bar
         self._xi = self._sigma = self._t_old = 1.0
+        self._grad.get_grad(self._x_old)
+        self._g_old = self._grad.grad
 
         # Automatically run the algorithm
         if auto_iterate:
             self.iterate()
 
+    def _update(self):
+        r"""Update
+
+        This method updates the current reconstruction
+
+        Notes
+        -----
+        Implements algorithm 3 from [K2018]_
+
+        """
+        # Step 4 from alg. 3
+        self._grad.get_grad(self._x_old)
+        self._u_new = self._x_old - self._beta * self._grad.grad
+
+        # Step 5 from alg. 3
+        self._t_new = 0.5 * (1 + np.sqrt(1 + 4 * self._t_old**2))
+
+        # Step 6 from alg. 3
+        t_shifted_ratio = (self._t_old - 1) / self._t_new
+        sigma_t_ratio = self._sigma * self._t_old / self._t_new
+        beta_xi_t_shifted_ratio = t_shifted_ratio * self._beta / self._xi
+        self._z = - beta_xi_t_shifted_ratio * (self._x_old - self._z)
+        self._z += self._u_new
+        self._z += t_shifted_ratio * (self._u_new - self._u_old)
+        self._z += sigma_t_ratio * (self._u_new - self._x_old)
+
+        # Step 7 from alg. 3
+        self._xi = self._beta * (1 + t_shifted_ratio + sigma_t_ratio)
+
+        # Step 8 from alg. 3
+        self._x_new = self._prox.op(self._z, extra_factor=self._xi)
+
+        # Restarting and gamma-Decreasing
+        # Step 9 from alg. 3
+        self._g_new = self._grad.grad - (self._x_new - self._z) / self._xi
+
+        # Step 10 from alg. 3
+        self._y_new = self._x_old - self._beta * self._g_new
+
+        # Step 11 from alg. 3
+        restart_crit = np.vdot(- self._g_new, self._y_new - self._y_old) < 0
+        if restart_crit:
+            self._t_new = 1
+            self._sigma = 1
+
+        # Step 13 from alg. 3
+        elif np.vdot(self._g_new, self._g_old) < 0:
+            self._sigma *= self._sigma_bar
+
+        # updating variables
+        np.copyto(self._u_old, self._u_new)
+        np.copyto(self._t_old, self._t_new)
+        np.copyto(self._x_old, self._x_new)
+        np.copyto(self._g_old, self._g_new)
+        np.copyto(self._y_old, self._y_new)
+
+        # Test cost function for convergence.
+        if self._cost_func:
+            self.converge = self.any_convergence_flag() or \
+                self._cost_func.get_cost(self._x_new)
+
+
     def iterate(self, max_iter=150):
         r"""Iterate
 
@@ -1191,7 +1255,7 @@ def get_notify_observers_kwargs(self):
             'u_new': self._u_new,
             'x_new': self._x_new,
             'y_new': self._y_new,
-            'z_new': self._z_new,
+            'z_new': self._z,
             'xi': self._xi,
             'sigma': self._sigma,
             't': self._t_new,

From a4b844f4f658e54e2dda29caad74322cef4503be Mon Sep 17 00:00:00 2001
From: Zaccharie Ramzi
Date: Wed, 20 Mar 2019 15:44:24 +0100
Subject: [PATCH 5/7] added default cost value auto

---
 modopt/opt/algorithms.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modopt/opt/algorithms.py b/modopt/opt/algorithms.py
index 17de4d32..002f6c9e 100644
--- a/modopt/opt/algorithms.py
+++ b/modopt/opt/algorithms.py
@@ -1114,7 +1114,7 @@ def __init__(
         z,
         grad,
         prox,
-        cost,
+        cost='auto',
         linear=None,
         beta_param=1.0,
         sigma_bar=1.0,

From 28379b8d4a57670ca6f71b2d946f7fe45b20c3c9 Mon Sep 17 00:00:00 2001
From: Zaccharie Ramzi
Date: Wed, 20 Mar 2019 15:52:08 +0100
Subject: [PATCH 6/7] corrected np copy for float bug

---
 modopt/opt/algorithms.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modopt/opt/algorithms.py b/modopt/opt/algorithms.py
index 002f6c9e..e45667dd 100644
--- a/modopt/opt/algorithms.py
+++ b/modopt/opt/algorithms.py
@@ -1210,8 +1210,8 @@ def _update(self):
             self._sigma *= self._sigma_bar
 
         # updating variables
+        self._t_old = self._t_new
         np.copyto(self._u_old, self._u_new)
-        np.copyto(self._t_old, self._t_new)
         np.copyto(self._x_old, self._x_new)
         np.copyto(self._g_old, self._g_new)
         np.copyto(self._y_old, self._y_new)

From 1e867ff60c5a4a36a7499f567546d45a51eb9a52 Mon Sep 17 00:00:00 2001
From: Zaccharie Ramzi
Date: Wed, 20 Mar 2019 16:00:20 +0100
Subject: [PATCH 7/7] added unit tests for POGM

---
 modopt/tests/test_opt.py | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/modopt/tests/test_opt.py b/modopt/tests/test_opt.py
index e5304090..3b16d788 100644
--- a/modopt/tests/test_opt.py
+++ b/modopt/tests/test_opt.py
@@ -97,6 +97,14 @@ def setUp(self):
                                          prox_dual=prox_dual_inst,
                                          linear=dummy(), cost=cost_inst,
                                          auto_iterate=False)
+        self.pogm1 = algorithms.POGM(
+            u=self.data1,
+            x=self.data1,
+            y=self.data1,
+            z=self.data1,
+            grad=grad_inst,
+            prox=prox_inst,
+        )
         self.dummy = dummy()
         self.dummy.cost = lambda x: x
         self.setup._check_operator(self.dummy.cost)
@@ -172,6 +180,12 @@ def test_condat(self):
         npt.assert_almost_equal(self.condat2.x_final, self.data1,
                                 err_msg='Incorrect Condat result.')
 
+    def test_pogm(self):
+        npt.assert_almost_equal(
+            self.pogm1.x_final,
+            self.data1,
+            err_msg='Incorrect POGM result.',
+        )
 
 class CostTestCase(TestCase):
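
Usage note (editorial sketch, not part of the patch series): the snippet below shows how the new POGM class is meant to be driven, mirroring the unit test added in PATCH 7/7. It assumes the helper operators already used in modopt/tests/test_opt.py, namely gradient.GradBasic(data, op, trans_op), whose get_grad()/grad correspond to the gradient of 0.5 * ||op(x) - data||^2, and proximity.Positivity(), whose op() accepts the extra_factor keyword used in step 8 of the algorithm. Neither helper is introduced by these patches, so treat their signatures as assumptions.

import numpy as np
from modopt.opt import algorithms, gradient, proximity

# NOTE: GradBasic and Positivity are assumed from modopt.opt.gradient and
# modopt.opt.proximity (as in the existing test suite); they are not defined
# by this patch series.
data = np.arange(9).reshape(3, 3).astype(float)

# With identity forward/adjoint operators the gradient of
# 0.5 * ||op(x) - data||^2 reduces to x - data, so the minimiser under the
# positivity constraint is simply `data`.
grad_inst = gradient.GradBasic(data, lambda x: x, lambda x: x)
prox_inst = proximity.Positivity()

x_init = np.zeros_like(data)
pogm = algorithms.POGM(
    u=x_init,
    x=x_init,
    y=x_init,
    z=x_init,
    grad=grad_inst,
    prox=prox_inst,
    beta_param=1.0,      # 1 / L, with L = 1 for the identity operator
    sigma_bar=1.0,       # keep the full restart parameter (no shrinking)
    auto_iterate=False,  # call iterate() explicitly below
)
pogm.iterate(max_iter=150)
print(pogm.x_final)  # should be close to `data` after convergence

For comparison, test_pogm initialises u, x, y and z with self.data1 itself; since that data is non-negative the gradient is zero at the starting point, the iterates never move, and x_final matches data1 to the precision checked by npt.assert_almost_equal.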