diff --git a/ff b/ff
new file mode 100644
index 000000000..5cd60d55d
--- /dev/null
+++ b/ff
@@ -0,0 +1,27 @@
+1. Add xtol and ftol as separate, independent criteria (so OR, not AND).
+2. They will not be set by default, so users can enable them (or disable them)
+   for methods and problems where they think this is appropriate.
+3. Adding relative versions means doubling the workload, and dealing with the
+   case where x or f is 0 at its optimum. The only benefit is that the user
+   can write set_xtol(generic_tol) instead of
+   set_xtol(abs_tol / sensible_scaling_value). I propose we do absolute only,
+   leaving scaling to the user (who has the information needed for this
+   decision).
+4. Allow xtol to be an array (one tolerance per parameter); see the sketch at
+   the end of these notes.
+
+Both matlab and scipy seem to be phasing out the "xtol"/"ftol" terminology
+(i.e. scipy is using xatol, or ratol when relative, and matlab is saying
+"StepTolerance"/"FunctionTolerance"), so we might follow that and:
+
+5. Call the methods set_step_tolerance and set_function_tolerance. The current
+   names are max_iterations, max_evaluations, max_unchanged_iterations, and
+   threshold (stop if f < threshold). So we could have set_step_tolerance and
+   set_tolerance instead? Or set_min_step and set_min_change?
+
+----------------------
+
+I'm happy with those proposals. Doing absolute ones makes more sense, as
+relative would presumably be with respect to the initial guess or something,
+which would be a bit strange to vary from run to run. I haven't ever seen a
+package accept a vector of xtol, but it makes sense; maybe I have just been
+doing [tol] * n_params behind the scenes automatically. A friendly message
+saying "xtol should be a vector of length n_parameters specifying separate
+tolerances for each parameter" would make it easy enough to see what needs to
+be done.
+
+I think matlab's optimset (the common optimisation options object used by a
+range of optimisers) is the thing to look at; it uses 'TolX' and 'TolFun'
+nowadays: optimset docs
+
+Although the matlab optimisation toolbox itself has a different optimoptions,
+which gets the options for a particular optimiser. Those seem to more commonly
+use ObjectiveLimit (for "got to the top of Ben Nevis" stopping),
+OptimalityTolerance or FunctionTolerance for TolFun, and StepTolerance for
+TolX, presumably based on how far it moved in parameter space (might even be a
+Euclidean distance or something?).
+
+To-do:
+
+ - tests for maximising (including for threshold)
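+
+A rough sketch of how 1, 3 and 4 above could fit together. The helper name,
+the message wording and the use of numpy are placeholders, not something this
+PR commits to:
+
+    import numpy as np
+
+    def check_step_tolerance(x_new, x_last, xtol, n_parameters):
+        """Absolute, optionally per-parameter step tolerance check."""
+        xtol = np.atleast_1d(np.asarray(xtol, dtype=float))
+        if len(xtol) == 1:
+            xtol = np.repeat(xtol, n_parameters)
+        elif len(xtol) != n_parameters:
+            raise ValueError(
+                'xtol should be a scalar or a vector of length n_parameters'
+                ' specifying separate tolerances for each parameter.')
+        return np.all(np.abs(np.asarray(x_new) - np.asarray(x_last)) < xtol)
+
+This would be used as an independent criterion (OR, not AND): stop as soon as
+either this check or the equivalent absolute ftol check passes.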
diff --git a/pints/_optimisers/__init__.py b/pints/_optimisers/__init__.py
index 5887051d3..ae6a28502 100644
--- a/pints/_optimisers/__init__.py
+++ b/pints/_optimisers/__init__.py
@@ -442,8 +442,15 @@ def __init__(
         # :meth:`run` can only be called once
         self._has_run = False
 
+        # Post-run statistics
+        self._evaluations = None
+        self._iterations = None
+        self._time = None
+
         #
         # Stopping criteria
+        # Note that we always minimise: likelihoods are wrapped in an Error
+        # class that multiplies by -1
         #
 
         # Maximum iterations
@@ -458,13 +465,70 @@ def __init__(
         # Maximum evaluations
         self._max_evaluations = None
 
-        # Threshold value
-        self._threshold = None
-
-        # Post-run statistics
-        self._evaluations = None
-        self._iterations = None
-        self._time = None
+        # Function threshold: stop if f(x) < threshold
+        self._function_threshold = None
+        # Function tolerance: stop if abs(f[i] - f[i - 1]) < tolerance
+        self._function_tolerance = None
+        # Parameter tolerance: stop if all(abs(x[i] - x[i - 1]) < tolerance)
+        self._parameter_tolerance = None
+
+    def _check_stopping_criteria(self, iterations, unchanged_iterations,
+                                 evaluations, f_new, f_last,
+                                 x_new, x_last):
+        """
+        Checks the stopping criteria and returns either ``None`` or a string
+        explaining why to stop.
+
+        Note: The 'error in optimiser' criterion is not checked here.
+
+        Parameters
+        ----------
+        iterations
+            The current number of iterations.
+        unchanged_iterations
+            The current number of iterations without significant change.
+        evaluations
+            The current number of function evaluations.
+        f_new
+            The current function value (either ``f_best`` or ``f_guessed``).
+        f_last
+            The previous function value (either ``f_best`` or ``f_guessed``).
+        x_new
+            The current position (either ``x_best`` or ``x_guessed``).
+        x_last
+            The previous position (either ``x_best`` or ``x_guessed``).
+
+        """
+        # Maximum number of iterations
+        if (self._max_iterations is not None and
+                iterations >= self._max_iterations):
+            return f'Maximum number of iterations ({iterations}) reached.'
+
+        # Maximum number of iterations without significant change
+        if (self._unchanged_max_iterations is not None and
+                unchanged_iterations >= self._unchanged_max_iterations):
+            return (f'No significant change for {unchanged_iterations}'
+                    ' iterations.')
+
+        # Maximum number of evaluations
+        if (self._max_evaluations is not None and
+                evaluations >= self._max_evaluations):
+            return (f'Maximum number of evaluations ({self._max_evaluations})'
+                    ' reached.')
+
+        # Threshold function value
+        if (self._function_threshold is not None and
+                f_new < self._function_threshold):
+            return ('Objective function crossed threshold ('
+                    f'{self._function_threshold}).')
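+        # Sketch, not yet part of this patch: the checks that
+        # self._function_tolerance, self._parameter_tolerance and the
+        # f_last / x_last arguments point towards, using absolute differences
+        # as proposed in ff. Message wording, the handling of the first
+        # iteration (f_last / x_last still None) and the use of numpy (as np)
+        # are placeholders.
+        if (self._function_tolerance is not None and
+                f_last is not None and
+                abs(f_new - f_last) < self._function_tolerance):
+            return ('Change in objective function below tolerance ('
+                    f'{self._function_tolerance}).')
+
+        if (self._parameter_tolerance is not None and
+                x_last is not None and
+                np.all(np.abs(np.asarray(x_new) - np.asarray(x_last))
+                       < self._parameter_tolerance)):
+            return ('Change in parameters below tolerance ('
+                    f'{self._parameter_tolerance}).')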
""" + return any( + self._max_iterations is not None, + self._unchanged_max_iterations is not None, + self._max_evaluations is not None, + self._function_threshold is not None, + self._function_tolerance is not None, + self._parameter_tolerance is not None, + ) + def iterations(self): """ Returns the number of iterations performed during the last run, or @@ -546,12 +621,7 @@ def run(self): self._has_run = True # Check stopping criteria - has_stopping_criterion = False - has_stopping_criterion |= (self._max_iterations is not None) - has_stopping_criterion |= (self._unchanged_max_iterations is not None) - has_stopping_criterion |= (self._max_evaluations is not None) - has_stopping_criterion |= (self._threshold is not None) - if not has_stopping_criterion: + if not self._has_stopping_criterion(): raise ValueError('At least one stopping criterion must be set.') # Iterations and function evaluations @@ -652,6 +722,8 @@ def run(self): self._optimiser.tell(fs) # Update current scores + xb + fb = self._optimiser.f_best() fg = self._optimiser.f_guessed() fb_user, fg_user = (fb, fg) if self._minimising else (-fb, -fg) @@ -688,36 +760,7 @@ def run(self): # Check stopping criteria # - # Maximum number of iterations - if (self._max_iterations is not None and - iteration >= self._max_iterations): - running = False - halt_message = ('Maximum number of iterations (' - + str(iteration) + ') reached.') - - # Maximum number of iterations without significant change - halt = (self._unchanged_max_iterations is not None and - unchanged_iterations >= self._unchanged_max_iterations) - if running and halt: - running = False - halt_message = ('No significant change for ' + - str(unchanged_iterations) + ' iterations.') - - # Maximum number of evaluations - if (self._max_evaluations is not None and - evaluations >= self._max_evaluations): - running = False - halt_message = ( - 'Maximum number of evaluations (' - + str(self._max_evaluations) + ') reached.') - - # Threshold value - halt = (self._threshold is not None - and f_new < self._threshold) - if running and halt: - running = False - halt_message = ('Objective function crossed threshold: ' - + str(self._threshold) + '.') + # Error in optimiser error = self._optimiser.stop() @@ -811,9 +854,9 @@ def set_log_interval(self, iters=20, warm_up=3): Parameters ---------- - ``interval`` + interval A log message will be shown every ``iters`` iterations. - ``warm_up`` + warm_up A log message will be shown every iteration, for the first ``warm_up`` iterations. """ @@ -849,8 +892,8 @@ def set_log_to_screen(self, enabled): def set_max_evaluations(self, evaluations=None): """ - Adds a stopping criterion, allowing the routine to halt after the - given number of ``evaluations``. + Adds a stopping criterion so that the routine halts after the given + number of ``evaluations``. This criterion is disabled by default. To enable, pass in any positive integer. To disable again, use ``set_max_evaluations(None)``. @@ -864,8 +907,8 @@ def set_max_evaluations(self, evaluations=None): def set_max_iterations(self, iterations=10000): """ - Adds a stopping criterion, allowing the routine to halt after the - given number of ``iterations``. + Adds a stopping criterion so that the routine halts after the given + number of ``iterations``. This criterion is enabled by default. To disable it, use ``set_max_iterations(None)``. 
+
                 # Error in optimiser
                 error = self._optimiser.stop()
@@ -811,9 +854,9 @@ def set_log_interval(self, iters=20, warm_up=3):
 
         Parameters
         ----------
-        ``interval``
+        iters
             A log message will be shown every ``iters`` iterations.
-        ``warm_up``
+        warm_up
             A log message will be shown every iteration, for the first
             ``warm_up`` iterations.
         """
@@ -849,8 +892,8 @@ def set_log_to_screen(self, enabled):
 
     def set_max_evaluations(self, evaluations=None):
         """
-        Adds a stopping criterion, allowing the routine to halt after the
-        given number of ``evaluations``.
+        Adds a stopping criterion so that the routine halts after the given
+        number of ``evaluations``.
 
         This criterion is disabled by default. To enable, pass in any
         positive integer. To disable again, use ``set_max_evaluations(None)``.
@@ -864,8 +907,8 @@ def set_max_iterations(self, iterations=10000):
         """
-        Adds a stopping criterion, allowing the routine to halt after the
-        given number of ``iterations``.
+        Adds a stopping criterion so that the routine halts after the given
+        number of ``iterations``.
 
         This criterion is enabled by default. To disable it, use
         ``set_max_iterations(None)``.
@@ -879,9 +922,9 @@ def set_max_iterations(self, iterations=10000):
 
     def set_max_unchanged_iterations(self, iterations=200, threshold=1e-11):
         """
-        Adds a stopping criterion, allowing the routine to halt if the
-        objective function doesn't change by more than ``threshold`` for the
-        given number of ``iterations``.
+        Adds a stopping criterion so that the routine halts if the objective
+        function does not change by more than ``threshold`` for the given
+        number of ``iterations``.
 
         This criterion is enabled by default. To disable it, use
         ``set_max_unchanged_iterations(None)``.
@@ -922,7 +965,8 @@ def set_parallel(self, parallel=False):
 
     def set_threshold(self, threshold):
         """
-        Adds a stopping criterion, allowing the routine to halt once the
+        Adds a stopping criterion causing the routine to stop once the
+        objective function is less than the given ``threshold`` (when maximi
         objective function goes below a set ``threshold``.
 
         This criterion is disabled by default, but can be enabled by calling