From f7f4f2c7ca8a587c53bb457c302bebd0bfd7c36b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Petr=20Nevyho=C5=A1t=C4=9Bn=C3=BD?=
Date: Wed, 15 Nov 2023 12:51:51 +0100
Subject: [PATCH] docs: overhaul the documentation

---
 .github/workflows/{main.yml => ci.yml}   |   2 +-
 .github/workflows/publish.yml            |   2 +-
 README.md                                |  78 +++---
 examples/custom_algorithm.rs             |  75 ++++++
 examples/{rosenbrock.rs => equations.rs} |  22 +-
 examples/optimization.rs                 |  53 ++++
 gomez-bench/benches/main.rs              | 106 ++++----
 src/algo.rs                              |   2 +-
 src/algo/lipo.rs                         |  78 +++---
 src/algo/nelder_mead.rs                  |  90 ++++---
 src/algo/steffensen.rs                   |  94 +++----
 src/algo/trust_region.rs                 | 202 +++++++--------
 src/analysis.rs                          |  50 ++--
 src/core.rs                              |   9 -
 src/core/base.rs                         |  23 +-
 src/core/domain.rs                       |  16 +-
 src/core/function.rs                     |  10 +-
 src/core/optimizer.rs                    |  40 ++-
 src/core/solver.rs                       |  75 +++---
 src/core/system.rs                       |  37 ++-
 src/derivatives.rs                       | 100 ++++----
 src/driver.rs                            | 200 ++++++++-------
 src/lib.rs                               | 300 +++++++++++++----------
 src/testing.rs                           | 187 +++++++-------
 24 files changed, 1012 insertions(+), 839 deletions(-)
 rename .github/workflows/{main.yml => ci.yml} (99%)
 create mode 100644 examples/custom_algorithm.rs
 rename examples/{rosenbrock.rs => equations.rs} (67%)
 create mode 100644 examples/optimization.rs

diff --git a/.github/workflows/main.yml b/.github/workflows/ci.yml
similarity index 99%
rename from .github/workflows/main.yml
rename to .github/workflows/ci.yml
index dad855b..17f6328 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/ci.yml
@@ -1,4 +1,4 @@
-name: Gomez tests
+name: CI
 
 on:
   push:
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index e9f65bc..cf6d7bd 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -1,4 +1,4 @@
-name: Gomez publish
+name: Publish
 
 on:
   push:
diff --git a/README.md b/README.md
index e1c2082..873637d 100644
--- a/README.md
+++ b/README.md
@@ -1,47 +1,67 @@
-# Gomez
+# gomez
 
-A pure Rust framework and implementation of (derivative-free) methods for
-solving nonlinear (bound-constrained) systems of equations.
+[![Build](https://img.shields.io/github/actions/workflow/status/datamole-ai/gomez/ci.yml?branch=main)](https://github.com/datamole-ai/gomez/actions)
+[![License](https://img.shields.io/badge/license-MIT-blue.svg)](https://github.com/datamole-ai/gomez/blob/main/LICENSE)
+[![Cargo](https://img.shields.io/crates/v/gomez.svg)](https://crates.io/crates/gomez)
+[![Documentation](https://docs.rs/gomez/badge.svg)](https://docs.rs/gomez)
 
-**Warning:** The code and API are still quite rough. Expect changes.
+_gomez_ is a framework and implementation for **mathematical optimization** and
+solving **non-linear systems of equations**.
 
-This library provides a variety of solvers of nonlinear equation systems with
-*n* equations and *n* unknowns written entirely in Rust. Bound constraints for
-variables are supported first-class, which is useful for engineering
-applications. All solvers implement the same interface which is designed to give
-full control over the process and allows to combine different components to
-achieve the desired solution. The implemented methods are historically proven
-numerical methods or global optimization algorithms.
+The library is written completely in Rust. Its focus is on being useful for
+**practical problems** and having an API that is simple for easy cases as well
+as flexible for complicated ones. The name stands for **g**lobal
+**o**ptimization & **n**on-linear **e**quations **s**olving, with a few typos.
-The convergence of the numerical methods is tested on several problems and the
-implementation is benchmarked against with
-[GSL](https://www.gnu.org/software/gsl/doc/html/multiroots.html) library.
+## Practical problems
+
+The main goal is to be useful for practical problems. This is manifested by the
+following features:
+
+* _Derivative-free_. No algorithm requires an analytical derivative (gradient,
+  Hessian, Jacobian). Methods that use derivatives approximate them using the
+  finite difference method<sup>1</sup>.
+* _Constraints_ support. It is possible to specify the problem domain with
+  constraints<sup>2</sup>, which is necessary for many engineering
+  applications.
+* Non-naive implementations. The code is not a direct translation of textbook
+  pseudocode. It's written with performance in mind and applies important
+  techniques from numerical mathematics. It also tries to handle situations
+  that hurt the methods but occur in practice.
+
+<sup>1</sup> There is a plan to provide ways to override this approximation with
+a real derivative.
+
+<sup>2</sup> Currently, only unconstrained and box-bounded domains are
+supported.
 
 ## Algorithms
 
-* Trust region -- Recommended method to be used as a default and it will just
-  work in most of the cases.
-* LIPO -- A global optimization algorithm useful for initial guesses search in
-  combination with a numerical solver.
-* Steffensen -- Fast and lightweight method for one-dimensional systems.
-* Nelder-Mead -- Not generally recommended, but may be useful for
-  low-dimensionality problems with an ill-defined Jacobian matrix.
+* [Trust region](algo::trust_region) – Recommended method to be used as the
+  default; it will just work in most cases.
+* [LIPO](algo::lipo) – Global optimization algorithm useful for searching good
+  initial guesses in combination with a numerical algorithm.
+* [Steffensen](algo::steffensen) – Fast and lightweight method for solving
+  one-dimensional systems of equations.
+* [Nelder-Mead](algo::nelder_mead) – Direct search optimization method that does
+  not use any derivatives.
+
+This list will be extended in the future. But at the same time, having as many
+algorithms as possible is _not_ the goal. Instead, the focus is on providing
+quality implementations of battle-tested methods.
 
 ## Roadmap
 
-Listed *not* in order of priority.
+Listed *not* in priority order.
 
 * [Homotopy continuation method](http://homepages.math.uic.edu/~jan/srvart/node4.html) to compare the
-  performance with the Trust region method.
+  performance with the trust region method
 * Conjugate gradient method
-* Experimentation with various global optimization techniques for initial
-  guesses search
+* Experimentation with various global optimization techniques for initial guess
+  search
 * Evolutionary/nature-inspired algorithms
 * Bayesian optimization
-* Focus on initial guesses search and tools in general
-* High-level drivers encapsulating the low-level API for users that do not need
-  the fine-grained control.
+* Focus on initial guesses search and tools for analysis in general
 
 ## License
 
 Licensed under [MIT](LICENSE).
 
 There are `gsl-wrapper` and `gsl-sys` crates which are licensed under the
 [GPLv3](http://www.gnu.org/licenses/gpl-3.0.html) identically as
 [GSL](https://www.gnu.org/software/gsl/) itself. This code is part of the
-repository, but is not part of the Gomez library. Its purpose is solely for
+repository but is not part of the Gomez library. Its purpose is solely for
 comparison in Gomez benchmarks.
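To make the constraints support above concrete, here is a minimal sketch of a bound-constrained problem definition. It is assembled from the `Rosenbrock` example that this patch adds in `examples/custom_algorithm.rs`; the bound values are illustrative only.

```rust
use gomez::nalgebra as na;
use gomez::{Domain, Function, Problem};
use na::{Dyn, IsContiguous};

// https://en.wikipedia.org/wiki/Rosenbrock_function
struct Rosenbrock {
    a: f64,
    b: f64,
}

impl Problem for Rosenbrock {
    type Field = f64;

    // A rectangular (box-bounded) domain: both variables are
    // constrained to [-10, 10].
    fn domain(&self) -> Domain<Self::Field> {
        Domain::rect(vec![-10.0, -10.0], vec![10.0, 10.0])
    }
}

impl Function for Rosenbrock {
    fn apply<Sx>(&self, x: &na::Vector<Self::Field, Dyn, Sx>) -> Self::Field
    where
        Sx: na::Storage<Self::Field, Dyn> + IsContiguous,
    {
        (self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2)
    }
}

fn main() {
    let f = Rosenbrock { a: 1.0, b: 1.0 };
    // Evaluate the function at a point inside the domain.
    println!("{}", f.apply(&na::dvector![0.0, 0.0]));
}
```

Drivers such as `OptimizerDriver` and `SolverDriver` (used in the examples below) then pass this domain to whichever algorithm is selected.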
diff --git a/examples/custom_algorithm.rs b/examples/custom_algorithm.rs new file mode 100644 index 0000000..82e21cf --- /dev/null +++ b/examples/custom_algorithm.rs @@ -0,0 +1,75 @@ +use fastrand::Rng; +use gomez::nalgebra as na; +use gomez::{Domain, Function, Optimizer, OptimizerDriver, Problem, Sample}; +use na::{storage::StorageMut, Dyn, IsContiguous, Vector}; + +struct Random { + rng: Rng, +} + +impl Random { + fn new(rng: Rng) -> Self { + Self { rng } + } +} + +impl Optimizer for Random +where + F::Field: Sample, +{ + const NAME: &'static str = "Random"; + type Error = std::convert::Infallible; + + fn opt_next( + &mut self, + f: &F, + dom: &Domain, + x: &mut Vector, + ) -> Result + where + Sx: StorageMut + IsContiguous, + { + // Randomly sample in the domain. + dom.sample(x, &mut self.rng); + + // We must compute the value. + Ok(f.apply(x)) + } +} + +// https://en.wikipedia.org/wiki/Rosenbrock_function +struct Rosenbrock { + a: f64, + b: f64, +} + +impl Problem for Rosenbrock { + type Field = f64; + + fn domain(&self) -> Domain { + Domain::rect(vec![-10.0, -10.0], vec![10.0, 10.0]) + } +} + +impl Function for Rosenbrock { + fn apply(&self, x: &na::Vector) -> Self::Field + where + Sx: na::Storage + IsContiguous, + { + (self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2) + } +} + +fn main() { + let f = Rosenbrock { a: 1.0, b: 1.0 }; + let mut optimizer = OptimizerDriver::builder(&f) + .with_algo(|_, _| Random::new(Rng::new())) + .build(); + + optimizer + .find(|state| { + println!("f(x) = {}\tx = {:?}", state.fx(), state.x()); + state.iter() >= 100 + }) + .unwrap(); +} diff --git a/examples/rosenbrock.rs b/examples/equations.rs similarity index 67% rename from examples/rosenbrock.rs rename to examples/equations.rs index 1341a97..7dea1d7 100644 --- a/examples/rosenbrock.rs +++ b/examples/equations.rs @@ -17,31 +17,31 @@ impl Problem for Rosenbrock { } impl System for Rosenbrock { - fn eval( + fn eval( &self, x: &na::Vector, - fx: &mut na::Vector, + rx: &mut na::Vector, ) where Sx: na::storage::Storage + IsContiguous, - Sfx: na::storage::StorageMut, + Srx: na::storage::StorageMut, { - fx[0] = (self.a - x[0]).powi(2); - fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); + rx[0] = (self.a - x[0]).powi(2); + rx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); } } fn main() -> Result<(), String> { - let f = Rosenbrock { a: 1.0, b: 1.0 }; - let mut solver = SolverDriver::builder(&f) - .with_initial(vec![-10.0, -5.0]) + let r = Rosenbrock { a: 1.0, b: 1.0 }; + let mut solver = SolverDriver::builder(&r) + .with_initial(vec![10.0, -5.0]) .build(); let tolerance = 1e-6; - let (_, fx) = solver + let (_, norm) = solver .find(|state| { println!( - "iter = {}\t|| fx || = {}\tx = {:?}", + "iter = {}\t|| r(x) || = {}\tx = {:?}", state.iter(), state.norm(), state.x() @@ -50,7 +50,7 @@ fn main() -> Result<(), String> { }) .map_err(|error| format!("{error}"))?; - if fx <= tolerance { + if norm <= tolerance { Ok(()) } else { Err("did not converge".to_string()) diff --git a/examples/optimization.rs b/examples/optimization.rs new file mode 100644 index 0000000..c232a41 --- /dev/null +++ b/examples/optimization.rs @@ -0,0 +1,53 @@ +use gomez::nalgebra as na; +use gomez::{Domain, Function, OptimizerDriver, Problem}; +use na::{Dyn, IsContiguous}; + +// https://en.wikipedia.org/wiki/Rosenbrock_function +struct Rosenbrock { + a: f64, + b: f64, +} + +impl Problem for Rosenbrock { + type Field = f64; + + fn domain(&self) -> Domain { + Domain::unconstrained(2) + } +} + +impl Function for Rosenbrock { + fn 
apply(&self, x: &na::Vector) -> Self::Field + where + Sx: na::Storage + IsContiguous, + { + (self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2) + } +} + +fn main() -> Result<(), String> { + let f = Rosenbrock { a: 1.0, b: 1.0 }; + let mut optimizer = OptimizerDriver::builder(&f) + .with_initial(vec![10.0, -5.0]) + .build(); + + let tolerance = 1e-6; + + let (_, value) = optimizer + .find(|state| { + println!( + "iter = {}\tf(x) = {}\tx = {:?}", + state.iter(), + state.fx(), + state.x() + ); + state.fx() <= tolerance || state.iter() >= 100 + }) + .map_err(|error| format!("{error}"))?; + + if value <= tolerance { + Ok(()) + } else { + Err("did not converge".to_string()) + } +} diff --git a/gomez-bench/benches/main.rs b/gomez-bench/benches/main.rs index 3f65818..6a94cfe 100644 --- a/gomez-bench/benches/main.rs +++ b/gomez-bench/benches/main.rs @@ -165,12 +165,12 @@ mod bullard_biegler { } } -fn bench_solve(bencher: divan::Bencher, with_system: GF, with_solver: GS) +fn bench_solve(bencher: divan::Bencher, with_system: GR, with_solver: GA) where - GF: Fn() -> (F, usize), - GS: Fn(&F, &Domain, &na::OVector) -> S, - F: TestSystem, - S: Solver, + GR: Fn() -> (R, usize), + GA: Fn(&R, &Domain, &na::OVector) -> A, + R: TestSystem, + A: Solver, { bencher .with_inputs(move || { @@ -181,14 +181,14 @@ where (f, dom, x, solver) }) .bench_local_values(|(f, dom, mut x, mut solver)| { - let mut fx = x.clone_owned(); + let mut rx = x.clone_owned(); let mut iter = 0; loop { - if solver.solve_next(&f, &dom, &mut x, &mut fx).is_err() { + if solver.solve_next(&f, &dom, &mut x, &mut rx).is_err() { panic!("solver error"); } - if fx.norm() < na::convert(TOLERANCE) { + if rx.norm() < na::convert(TOLERANCE) { return true; } @@ -201,55 +201,55 @@ where }); } -fn with_trust_region( - f: &F, - dom: &Domain, - _: &na::OVector, -) -> TrustRegion +fn with_trust_region( + r: &R, + dom: &Domain, + _: &na::OVector, +) -> TrustRegion where - F: Problem, + R: Problem, { - TrustRegion::new(f, dom) + TrustRegion::new(r, dom) } -fn with_nelder_mead( - f: &F, - dom: &Domain, - _: &na::OVector, -) -> NelderMead +fn with_nelder_mead( + r: &R, + dom: &Domain, + _: &na::OVector, +) -> NelderMead where - F: Problem, + R: Problem, { - NelderMead::new(f, dom) + NelderMead::new(r, dom) } -fn with_gsl_hybrids( - f: &F, - _: &Domain, - x: &na::OVector, -) -> GslSolverWrapper> +fn with_gsl_hybrids( + r: &R, + _: &Domain, + x: &na::OVector, +) -> GslSolverWrapper> where - F: TestSystem + Clone, + R: TestSystem + Clone, { GslSolverWrapper::new(GslFunctionWrapper::new( - f.clone(), + r.clone(), GslVec::from(x.as_slice()), )) } -pub struct GslFunctionWrapper { - f: F, +pub struct GslFunctionWrapper { + r: R, init: GslVec, } -impl GslFunctionWrapper { - pub fn new(f: F, init: GslVec) -> Self { - Self { f, init } +impl GslFunctionWrapper { + pub fn new(r: R, init: GslVec) -> Self { + Self { r, init } } } -impl> GslFunction for GslFunctionWrapper { - fn eval(&self, x: &GslVec, f: &mut GslVec) -> GslStatus { +impl> GslFunction for GslFunctionWrapper { + fn eval(&self, x: &GslVec, rx: &mut GslVec) -> GslStatus { use na::DimName; let dim = Dyn(x.len()); @@ -258,13 +258,13 @@ impl> GslFunction for GslFunctionWrapper { dim, na::U1::name(), ); - let mut fx = na::MatrixViewMut::::from_slice_generic( - f.as_mut_slice(), + let mut rx = na::MatrixViewMut::::from_slice_generic( + rx.as_mut_slice(), dim, na::U1::name(), ); - self.f.eval(&x, &mut fx); + self.r.eval(&x, &mut rx); GslStatus::ok() } @@ -273,37 +273,37 @@ impl> GslFunction for 
GslFunctionWrapper { } } -pub struct GslSolverWrapper { - solver: GslSolver, +pub struct GslSolverWrapper { + solver: GslSolver, } -impl GslSolverWrapper { - pub fn new(f: F) -> Self { +impl GslSolverWrapper { + pub fn new(r: R) -> Self { Self { - solver: GslSolver::new(f, HybridScaled), + solver: GslSolver::new(r, HybridScaled), } } } -impl> Solver for GslSolverWrapper> { +impl> Solver for GslSolverWrapper> { const NAME: &'static str = "GSL hybrids"; type Error = String; - fn solve_next( + fn solve_next( &mut self, - _f: &F, - _dom: &Domain, - x: &mut na::Vector, - fx: &mut na::Vector, + _r: &R, + _dom: &Domain, + x: &mut na::Vector, + rx: &mut na::Vector, ) -> Result<(), Self::Error> where - Sx: na::storage::StorageMut + IsContiguous, - Sfx: na::storage::StorageMut, + Sx: na::storage::StorageMut + IsContiguous, + Srx: na::storage::StorageMut, { let result = self.solver.step().to_result(); x.copy_from_slice(self.solver.root()); - fx.copy_from_slice(self.solver.residuals()); + rx.copy_from_slice(self.solver.residuals()); result } diff --git a/src/algo.rs b/src/algo.rs index 2c26adc..c200a9f 100644 --- a/src/algo.rs +++ b/src/algo.rs @@ -1,4 +1,4 @@ -//! The collection of implemented solvers. +//! The collection of implemented algorithms. pub mod lipo; pub mod nelder_mead; diff --git a/src/algo/lipo.rs b/src/algo/lipo.rs index 28c7aa3..a35ff49 100644 --- a/src/algo/lipo.rs +++ b/src/algo/lipo.rs @@ -44,13 +44,13 @@ pub enum PotentialMinimizer { /// Options for [`Lipo`] solver. #[derive(Debug, Clone, CopyGetters, Setters)] #[getset(get_copy = "pub", set = "pub")] -pub struct LipoOptions { +pub struct LipoOptions { /// Probability for Bernoulli distribution of exploring (evaluating sampled /// point unconditionally) the space. p_explore: f64, /// Alpha parameter for (1 + alpha)^i meshgrid for Lipschitz constant /// estimation. - alpha: AlphaInit, + alpha: AlphaInit, /// Number of sampling trials. If no potential minimizer is found after this /// number of trials, the solver returns error. sampling_trials: usize, @@ -60,7 +60,7 @@ pub struct LipoOptions { local_optimization_iters: usize, } -impl Default for LipoOptions { +impl Default for LipoOptions
<P>
{ fn default() -> Self { Self { p_explore: 0.1, @@ -72,31 +72,33 @@ impl Default for LipoOptions { } } -/// LIPO solver. See [module](self) documentation for more details. -pub struct Lipo { - options: LipoOptions, - alpha: F::Field, - xs: Vec>, - ys: Vec, +/// LIPO solver. +/// +/// See [module](self) documentation for more details. +pub struct Lipo { + options: LipoOptions
<P>
, + alpha: P::Field, + xs: Vec>, + ys: Vec, best: usize, - k: F::Field, - k_inf: F::Field, + k: P::Field, + k_inf: P::Field, rng: Rng, p_explore: f64, - tmp: OVector, - x_tmp: OVector, - local_optimizer: NelderMead, + tmp: OVector, + x_tmp: OVector, + local_optimizer: NelderMead
<P>
, iter: usize, } -impl Lipo { +impl Lipo
<P>
{ /// Initializes LIPO solver with default options. - pub fn new(f: &F, dom: &Domain, rng: Rng) -> Self { - Self::with_options(f, dom, LipoOptions::default(), rng) + pub fn new(p: &P, dom: &Domain, rng: Rng) -> Self { + Self::with_options(p, dom, LipoOptions::default(), rng) } /// Initializes LIPO solver with given options. - pub fn with_options(f: &F, dom: &Domain, options: LipoOptions, rng: Rng) -> Self { + pub fn with_options(p: &P, dom: &Domain, options: LipoOptions
<P>
, rng: Rng) -> Self { let dim = Dyn(dom.dim()); let p_explore = options.p_explore.clamp(0.0, 1.0); @@ -112,13 +114,13 @@ impl Lipo { xs: Vec::new(), ys: Vec::new(), best: 0, - k: F::Field::zero(), - k_inf: F::Field::zero(), + k: P::Field::zero(), + k_inf: P::Field::zero(), rng, p_explore, tmp: OVector::zeros_generic(dim, U1::name()), x_tmp: OVector::zeros_generic(dim, U1::name()), - local_optimizer: NelderMead::new(f, dom), + local_optimizer: NelderMead::new(p, dom), iter: 0, } } @@ -128,8 +130,8 @@ impl Lipo { self.xs.clear(); self.ys.clear(); self.best = 0; - self.k = F::Field::zero(); - self.k_inf = F::Field::zero(); + self.k = P::Field::zero(); + self.k_inf = P::Field::zero(); self.iter = 0; } @@ -140,8 +142,8 @@ impl Lipo { /// the LIPO solver gives extra information for free. pub fn add_evaluation( &mut self, - x: OVector, - y: F::Field, + x: OVector, + y: P::Field, ) -> Result<(), LipoError> { let alpha = self.alpha; @@ -177,9 +179,9 @@ impl Lipo { debug!("|| x - xi || = {}", dist); } - let it = try_convert(((*k_inf).ln() / (F::Field::one() + alpha).ln()).ceil()) + let it = try_convert(((*k_inf).ln() / (P::Field::one() + alpha).ln()).ceil()) .unwrap_or_default() as i32; - *k = (F::Field::one() + alpha).powi(it); + *k = (P::Field::one() + alpha).powi(it); if !k.is_finite() { return Err(LipoError::InfiniteLipschitzConstant); @@ -396,27 +398,27 @@ where } } -impl Solver for Lipo +impl Solver for Lipo where - F::Field: Sample, + R::Field: Sample, { const NAME: &'static str = "LIPO"; type Error = LipoError; - fn solve_next( + fn solve_next( &mut self, - f: &F, - dom: &Domain<::Field>, - x: &mut Vector<::Field, Dyn, Sx>, - fx: &mut Vector<::Field, Dyn, Sfx>, + r: &R, + dom: &Domain<::Field>, + x: &mut Vector<::Field, Dyn, Sx>, + rx: &mut Vector<::Field, Dyn, Srx>, ) -> Result<(), Self::Error> where - Sx: StorageMut<::Field, Dyn> + IsContiguous, - Sfx: StorageMut<::Field, Dyn>, + Sx: StorageMut<::Field, Dyn> + IsContiguous, + Srx: StorageMut<::Field, Dyn>, { - self.next_inner(f, dom, x)?; - f.eval(x, fx); + self.next_inner(r, dom, x)?; + r.eval(x, rx); Ok(()) } } diff --git a/src/algo/nelder_mead.rs b/src/algo/nelder_mead.rs index b4e6b66..ba2c08a 100644 --- a/src/algo/nelder_mead.rs +++ b/src/algo/nelder_mead.rs @@ -2,9 +2,9 @@ //! //! [Nelder-Mead](https://en.wikipedia.org/wiki/Nelder%E2%80%93Mead_method) //! simplex-reflection method is a popular derivative-free optimization -//! algorithm. It keeps a [simplex](https://en.wikipedia.org/wiki/Simplex) of *n -//! + 1* points and the simplex is reflected, expanded or contracted based on -//! squared norm of residuals comparison. +//! algorithm. It keeps a [simplex](https://en.wikipedia.org/wiki/Simplex) of +//! _n + 1_ points and the simplex is reflected, expanded or contracted based on +//! the function values comparison. //! //! # References //! @@ -53,23 +53,23 @@ pub enum CoefficientsFamily { /// Options for [`NelderMead`] solver. #[derive(Debug, Clone, CopyGetters, Setters)] #[getset(get_copy = "pub", set = "pub")] -pub struct NelderMeadOptions { +pub struct NelderMeadOptions { /// Family for coefficients adaptation or fixed coefficients. Default: /// balanced (see [`CoefficientsFamily`]). family: CoefficientsFamily, /// Coefficient for reflection operation. Default: `-1`. - reflection_coeff: F::Field, + reflection_coeff: P::Field, /// Coefficient for expansion operation. Default: `-2`. - expansion_coeff: F::Field, + expansion_coeff: P::Field, /// Coefficient for outer contraction operation. Default: `-0.5`. 
- outer_contraction_coeff: F::Field, + outer_contraction_coeff: P::Field, /// Coefficient for inner contraction operation. Default: `0.5`. - inner_contraction_coeff: F::Field, + inner_contraction_coeff: P::Field, /// Coefficient for shrinking operation. Default: `0.5`. - shrink_coeff: F::Field, + shrink_coeff: P::Field, } -impl Default for NelderMeadOptions { +impl Default for NelderMeadOptions
<P>
{ fn default() -> Self { Self { family: CoefficientsFamily::Standard, @@ -82,8 +82,8 @@ impl Default for NelderMeadOptions { } } -impl NelderMeadOptions { - fn overwrite_coeffs(&mut self, dom: &Domain) { +impl NelderMeadOptions
<P>
{ + fn overwrite_coeffs(&mut self, dom: &Domain) { let Self { family, reflection_coeff, @@ -102,14 +102,14 @@ impl NelderMeadOptions { *shrink_coeff = convert(0.5); } CoefficientsFamily::Balanced => { - let n: F::Field = convert(dom.dim() as f64); - let n_inv = F::Field::one() / n; + let n: P::Field = convert(dom.dim() as f64); + let n_inv = P::Field::one() / n; *reflection_coeff = convert(-1.0); *expansion_coeff = -(n_inv * convert(2.0) + convert(1.0)); - *outer_contraction_coeff = -(F::Field::one() - n_inv); + *outer_contraction_coeff = -(P::Field::one() - n_inv); *inner_contraction_coeff = -*outer_contraction_coeff; - *shrink_coeff = F::Field::one() - n_inv; + *shrink_coeff = P::Field::one() - n_inv; } CoefficientsFamily::GoldenSection => { let alpha = 1.0 / (0.5 * (5f64.sqrt() + 1.0)); @@ -126,27 +126,29 @@ impl NelderMeadOptions { } } -/// Nelder-Mead solver. See [module](self) documentation for more details. -pub struct NelderMead { - options: NelderMeadOptions, - scale: OVector, - centroid: OVector, - reflection: OVector, - expansion: OVector, - contraction: OVector, - simplex: Vec>, - errors: Vec, +/// Nelder-Mead solver. +/// +/// See [module](self) documentation for more details. +pub struct NelderMead { + options: NelderMeadOptions
<P>
, + scale: OVector, + centroid: OVector, + reflection: OVector, + expansion: OVector, + contraction: OVector, + simplex: Vec>, + errors: Vec, sort_perm: Vec, } -impl NelderMead { +impl NelderMead
<P>
{ /// Initializes Nelder-Mead solver with default options. - pub fn new(f: &F, dom: &Domain) -> Self { - Self::with_options(f, dom, NelderMeadOptions::default()) + pub fn new(p: &P, dom: &Domain) -> Self { + Self::with_options(p, dom, NelderMeadOptions::default()) } /// Initializes Nelder-Mead solver with given options. - pub fn with_options(_: &F, dom: &Domain, mut options: NelderMeadOptions) -> Self { + pub fn with_options(_: &P, dom: &Domain, mut options: NelderMeadOptions
<P>
) -> Self { let dim = Dyn(dom.dim()); options.overwrite_coeffs(dom); @@ -261,8 +263,6 @@ impl NelderMead { } sort_perm.extend(0..=n); - // Stable sort is important for sort_perm[0] being consistent with - // fx_best. sort_perm.sort_by(|a, b| { errors[*a] .partial_cmp(&errors[*b]) @@ -402,8 +402,7 @@ impl NelderMead { } }; - // Establish the ordering of simplex points. Stable sort is important - // for sort_perm[0] being consistent with fx_best. + // Establish the ordering of simplex points. sort_perm.sort_by(|a, b| { errors[*a] .partial_cmp(&errors[*b]) @@ -411,7 +410,7 @@ impl NelderMead { }); debug!( - "performed {}{},\t|| fx || = {} - {}", + "performed {}{},\tfx = {} - {}", transformation.as_str(), if not_feasible { " with projection" } else { "" }, errors[sort_perm[0]], @@ -420,7 +419,6 @@ impl NelderMead { // Return the best simplex point. x.copy_from(&simplex[sort_perm[0]]); - // fx corresponding to x is stored in `self.fx_best`. if transformation == Transformation::Shrinkage || transformation == Transformation::InnerContraction @@ -467,24 +465,24 @@ impl Optimizer for NelderMead { } } -impl Solver for NelderMead { +impl Solver for NelderMead { const NAME: &'static str = "Nelder-Mead"; type Error = NelderMeadError; - fn solve_next( + fn solve_next( &mut self, - f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + r: &R, + dom: &Domain, + x: &mut Vector, + rx: &mut Vector, ) -> Result<(), Self::Error> where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + Sx: StorageMut + IsContiguous, + Srx: StorageMut, { - self.next_inner(f, dom, x)?; - f.eval(x, fx); + self.next_inner(r, dom, x)?; + r.eval(x, rx); Ok(()) } } diff --git a/src/algo/steffensen.rs b/src/algo/steffensen.rs index 7e64b22..a70d2f6 100644 --- a/src/algo/steffensen.rs +++ b/src/algo/steffensen.rs @@ -7,7 +7,7 @@ //! //! # References //! -//! \[1\] [Wikipedia](https://en.wikipedia.org/wiki/Steffensen%27s_method) +//! \[1\] [Steffensen's method](https://en.wikipedia.org/wiki/Steffensen%27s_method) on Wikipedia //! //! \[2\] [A variant of Steffensen's method of fourth-order convergence and its //! applications](https://www.sciencedirect.com/science/article/pii/S0096300310002705) @@ -34,15 +34,15 @@ pub enum SteffensenVariant { /// Options for [`Steffensen`] solver. #[derive(Debug, Clone, CopyGetters, Setters)] #[getset(get_copy = "pub", set = "pub")] -pub struct SteffensenOptions { +pub struct SteffensenOptions { /// Variant of the Steffensen's method. Default: Liu (see /// [`SteffensenVariant`]). variant: SteffensenVariant, #[getset(skip)] - _phantom: PhantomData, + _phantom: PhantomData, } -impl Default for SteffensenOptions { +impl Default for SteffensenOptions
<P>
{ fn default() -> Self { Self { variant: SteffensenVariant::Liu, @@ -51,19 +51,21 @@ impl Default for SteffensenOptions { } } -/// Steffensen solver. See [module](self) documentation for more details. -pub struct Steffensen { - options: SteffensenOptions, +/// Steffensen solver. +/// +/// See [module](self) documentation for more details. +pub struct Steffensen { + options: SteffensenOptions
<P>
, } -impl Steffensen { +impl Steffensen
<P>
{ /// Initializes Steffensen solver with default options. - pub fn new(f: &F, dom: &Domain) -> Self { - Self::with_options(f, dom, SteffensenOptions::default()) + pub fn new(p: &P, dom: &Domain) -> Self { + Self::with_options(p, dom, SteffensenOptions::default()) } /// Initializes Steffensen solver with given options. - pub fn with_options(_f: &F, _dom: &Domain, options: SteffensenOptions) -> Self { + pub fn with_options(_p: &P, _dom: &Domain, options: SteffensenOptions
<P>
) -> Self {
        Self { options }
    }
 
@@ -79,21 +81,21 @@ pub enum SteffensenError {
     InvalidDimensionality,
 }
 
-impl Solver for Steffensen {
+impl Solver for Steffensen {
     const NAME: &'static str = "Steffensen";
 
     type Error = SteffensenError;
 
-    fn solve_next(
+    fn solve_next(
         &mut self,
-        f: &F,
-        dom: &Domain,
-        x: &mut Vector,
-        fx: &mut Vector,
+        r: &R,
+        dom: &Domain,
+        x: &mut Vector,
+        rx: &mut Vector,
     ) -> Result<(), Self::Error>
     where
-        Sx: StorageMut + IsContiguous,
-        Sfx: StorageMut,
+        Sx: StorageMut + IsContiguous,
+        Srx: StorageMut,
     {
         if dom.dim() != 1 {
             return Err(SteffensenError::InvalidDimensionality);
@@ -103,53 +105,53 @@ impl Solver for Steffensen {
 
         let x0 = x[0];
 
-        // Compute f(x).
-        f.eval(x, fx);
-        let fx0 = fx[0];
+        // Compute r0(x).
+        r.eval(x, rx);
+        let r0x = rx[0];
 
-        if fx0 == convert(0.0) {
+        if r0x == convert(0.0) {
             // No more solving to be done.
             return Ok(());
         }
 
         match variant {
             SteffensenVariant::Standard => {
-                // Compute z = f(x + f(x)) and f(z).
-                x[0] += fx0;
-                f.eval(x, fx);
-                let fz0 = fx[0];
+                // Compute z = x + r0(x) and r0(z).
+                x[0] += r0x;
+                r.eval(x, rx);
+                let r0z = rx[0];
 
                 // Compute the next point.
-                x[0] = x0 - (fx0 * fx0) / (fz0 - fx0);
+                x[0] = x0 - (r0x * r0x) / (r0z - r0x);
 
-                // Compute f(x).
-                f.eval(x, fx);
+                // Compute r0(x).
+                r.eval(x, rx);
             }
             SteffensenVariant::Liu => {
-                // Compute z = f(x + f(x)) and f(z).
-                x[0] += fx0;
+                // Compute z = x + r0(x) and r0(z).
+                x[0] += r0x;
                 let z0 = x[0];
-                f.eval(x, fx);
-                let fz0 = fx[0];
+                r.eval(x, rx);
+                let r0z = rx[0];
 
-                // Compute f[x, z].
-                let f_xz = (fz0 - fx0) / (z0 - x0);
+                // Compute r0[x, z].
+                let r0_xz = (r0z - r0x) / (z0 - x0);
 
-                // Compute y = x - f(x) / f[x, z] and f(y).
-                x[0] = x0 - fx0 / f_xz;
+                // Compute y = x - r0(x) / r0[x, z] and r0(y).
+                x[0] = x0 - r0x / r0_xz;
                 let y0 = x[0];
-                f.eval(x, fx);
-                let fy0 = fx[0];
+                r.eval(x, rx);
+                let r0y = rx[0];
 
-                // Compute f[x, y] and f[y, z].
-                let f_xy = (fy0 - fx0) / (y0 - x0);
-                let f_yz = (fz0 - fy0) / (z0 - y0);
+                // Compute r0[x, y] and r0[y, z].
+                let r0_xy = (r0y - r0x) / (y0 - x0);
+                let r0_yz = (r0z - r0y) / (z0 - y0);
 
                 // Compute the next point.
-                x[0] = y0 - (f_xy - f_yz + f_xz) / (f_xy * f_xy) * fy0;
+                x[0] = y0 - (r0_xy - r0_yz + r0_xz) / (r0_xy * r0_xy) * r0y;
 
-                // Compute f(x).
-                f.eval(x, fx);
+                // Compute r0(x).
+                r.eval(x, rx);
             }
         }
 
diff --git a/src/algo/trust_region.rs b/src/algo/trust_region.rs
index 225eb17..a8c22da 100644
--- a/src/algo/trust_region.rs
+++ b/src/algo/trust_region.rs
@@ -53,25 +53,25 @@ pub enum DeltaInit {
 /// Options for [`TrustRegion`] solver.
 #[derive(Debug, Clone, CopyGetters, Setters)]
 #[getset(get_copy = "pub", set = "pub")]
-pub struct TrustRegionOptions {
+pub struct TrustRegionOptions {
     /// Minimum allowed trust region size. Default: `f64::EPSILON.sqrt()`.
-    delta_min: F::Field,
+    delta_min: P::Field,
     /// Maximum allowed trust region size. Default: `1e9`.
-    delta_max: F::Field,
+    delta_max: P::Field,
     /// Initial trust region size. Default: estimated (see [`DeltaInit`]).
-    delta_init: DeltaInit,
+    delta_init: DeltaInit,
     /// Minimum scaling factor for lambda in Levenberg-Marquardt step. Default:
     /// `1e-10`.
-    mu_min: F::Field,
+    mu_min: P::Field,
     /// Threshold for gain ratio to shrink trust region size if lower. Default:
     /// `0.25`.
-    shrink_thresh: F::Field,
+    shrink_thresh: P::Field,
     /// Threshold for gain ratio to expand trust region size if higher. Default:
     /// `0.75`.
- expand_thresh: F::Field, + expand_thresh: P::Field, /// Threshold for gain ratio that needs to be exceeded to accept the /// calculated step. Default: `0.0001`. - accept_thresh: F::Field, + accept_thresh: P::Field, /// Number of step rejections that are allowed to happen before returning /// [`TrustRegionError::NoProgress`] error. Default: `10`. rejections_thresh: usize, @@ -82,7 +82,7 @@ pub struct TrustRegionOptions { prefer_greater_magnitude_in_cauchy: bool, } -impl TrustRegionOptions { +impl TrustRegionOptions
<P>
{ // XXX: This is a hack. Setting this to true influences the solver in a way // that is not based on mathematical theory. However, when used, this makes // the solver to work substantially better for specific cases. I can't @@ -99,10 +99,10 @@ impl TrustRegionOptions { } } -impl Default for TrustRegionOptions { +impl Default for TrustRegionOptions
<P>
{ fn default() -> Self { Self { - delta_min: F::Field::EPSILON_SQRT, + delta_min: P::Field::EPSILON_SQRT, delta_max: convert(1e9), delta_init: DeltaInit::Estimated, mu_min: convert(1e-10), @@ -116,39 +116,41 @@ impl Default for TrustRegionOptions { } } -/// Trust region solver. See [module](self) documentation for more details. -pub struct TrustRegion { - options: TrustRegionOptions, - delta: F::Field, - mu: F::Field, - scale: OVector, - jac: Jacobian, - grad: Gradient, - hes: Hessian, - q_tr_fx_neg: OVector, - newton: OVector, - grad_neg: OVector, - cauchy: OVector, - jac_tr_jac: OMatrix, - p: OVector, - temp: OVector, +/// Trust region solver. +/// +/// See [module](self) documentation for more details. +pub struct TrustRegion { + options: TrustRegionOptions
<P>
, + delta: P::Field, + mu: P::Field, + scale: OVector, + jac: Jacobian
<P>
, + grad: Gradient
<P>
, + hes: Hessian
<P>
, + q_tr_rx_neg: OVector, + newton: OVector, + grad_neg: OVector, + cauchy: OVector, + jac_tr_jac: OMatrix, + p: OVector, + temp: OVector, iter: usize, rejections_cnt: usize, } -impl TrustRegion { +impl TrustRegion
<P>
{ /// Initializes trust region solver with default options. - pub fn new(f: &F, dom: &Domain) -> Self { - Self::with_options(f, dom, TrustRegionOptions::default()) + pub fn new(p: &P, dom: &Domain) -> Self { + Self::with_options(p, dom, TrustRegionOptions::default()) } /// Initializes trust region solver with given options. - pub fn with_options(f: &F, dom: &Domain, options: TrustRegionOptions) -> Self { + pub fn with_options(p: &P, dom: &Domain, options: TrustRegionOptions
<P>
) -> Self { let dim = Dyn(dom.dim()); let delta_init = match options.delta_init { DeltaInit::Fixed(fixed) => fixed, // Zero is recognized in the function `next`. - DeltaInit::Estimated => F::Field::zero(), + DeltaInit::Estimated => P::Field::zero(), }; let scale = dom .scale() @@ -160,10 +162,10 @@ impl TrustRegion { delta: delta_init, mu: convert(0.5), scale, - jac: Jacobian::zeros(f), - grad: Gradient::zeros(f), - hes: Hessian::zeros(f), - q_tr_fx_neg: OVector::zeros_generic(dim, U1::name()), + jac: Jacobian::zeros(p), + grad: Gradient::zeros(p), + hes: Hessian::zeros(p), + q_tr_rx_neg: OVector::zeros_generic(dim, U1::name()), newton: OVector::zeros_generic(dim, U1::name()), grad_neg: OVector::zeros_generic(dim, U1::name()), cauchy: OVector::zeros_generic(dim, U1::name()), @@ -179,7 +181,7 @@ impl TrustRegion { pub fn reset(&mut self) { self.delta = match self.options.delta_init { DeltaInit::Fixed(fixed) => fixed, - DeltaInit::Estimated => F::Field::zero(), + DeltaInit::Estimated => P::Field::zero(), }; self.mu = convert(0.5); self.iter = 1; @@ -198,21 +200,21 @@ pub enum TrustRegionError { NoProgress, } -impl Solver for TrustRegion { +impl Solver for TrustRegion { const NAME: &'static str = "Trust-region"; type Error = TrustRegionError; - fn solve_next( + fn solve_next( &mut self, - f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, + r: &R, + dom: &Domain, + x: &mut Vector, + rx: &mut Vector, ) -> Result<(), Self::Error> where - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + Sx: StorageMut + IsContiguous, + Srx: StorageMut, { let TrustRegionOptions { delta_min, @@ -232,7 +234,7 @@ impl Solver for TrustRegion { mu, scale, jac, - q_tr_fx_neg, + q_tr_rx_neg, newton, grad_neg, cauchy, @@ -245,9 +247,9 @@ impl Solver for TrustRegion { } = self; #[allow(clippy::needless_late_init)] - let scaled_newton: &mut OVector; - let scale_inv2: &mut OVector; - let cauchy_scaled: &mut OVector; + let scaled_newton: &mut OVector; + let scale_inv2: &mut OVector; + let cauchy_scaled: &mut OVector; #[derive(Debug, Clone, Copy, PartialEq)] enum StepType { @@ -258,13 +260,13 @@ impl Solver for TrustRegion { Dogleg, } - // Compute F(x) and F'(x). - f.eval(x, fx); - jac.compute(f, x, scale, fx); + // Compute r(x) and r'(x). + r.eval(x, rx); + jac.compute(r, x, scale, rx); - let fx_norm = fx.norm(); + let rx_norm = rx.norm(); - let estimate_delta = *delta == F::Field::zero(); + let estimate_delta = *delta == R::Field::zero(); if estimate_delta { // Zero delta signifies that the initial delta is to be set // automatically and it has not been done yet. @@ -278,8 +280,8 @@ impl Solver for TrustRegion { // where K = 100. The approach is taken from GSL. for (j, col) in jac.column_iter().enumerate() { temp[j] = col.norm(); - if temp[j] == F::Field::zero() { - temp[j] = F::Field::one(); + if temp[j] == R::Field::zero() { + temp[j] = R::Field::one(); } } temp.component_mul_assign(x); @@ -287,21 +289,21 @@ impl Solver for TrustRegion { let factor = convert(100.0); *delta = temp.norm() * factor; - if *delta == F::Field::zero() { + if *delta == R::Field::zero() { *delta = factor; } } // Perform QR decomposition of F'(x). - let (q, r) = jac.clone_owned().qr().unpack(); + let (qr_q, qr_r) = jac.clone_owned().qr().unpack(); - // Compute -Q^T F(x). - q.tr_mul_to(fx, q_tr_fx_neg); - q_tr_fx_neg.neg_mut(); + // Compute -Q^T r(x). + qr_q.tr_mul_to(rx, q_tr_rx_neg); + q_tr_rx_neg.neg_mut(); - // Find the Newton step by solving the system R newton = -Q^T F(x). 
- newton.copy_from(q_tr_fx_neg); - let is_newton_valid = r.solve_upper_triangular_mut(newton); + // Find the Newton step by solving the system R newton = -Q^T r(x). + newton.copy_from(q_tr_rx_neg); + let is_newton_valid = qr_r.solve_upper_triangular_mut(newton); if !is_newton_valid { // TODO: Moore-Penrose pseudoinverse? @@ -309,7 +311,7 @@ impl Solver for TrustRegion { "Newton step is invalid for ill-defined Jacobian (zero columns: {:?})", jac.column_iter() .enumerate() - .filter(|(_, col)| col.norm() == F::Field::zero()) + .filter(|(_, col)| col.norm() == R::Field::zero()) .map(|(i, _)| i) .collect::>() ); @@ -330,12 +332,12 @@ impl Solver for TrustRegion { // Newton step is outside the trust region. We need to involve the // gradient. - // Compute -grad F(x) = -F'(x)^T F(x) = -R^T Q^T F(x). - r.tr_mul_to(q_tr_fx_neg, grad_neg); + // Compute -grad r(x) = -r'(x)^T r(x) = -R^T Q^T r(x). + qr_r.tr_mul_to(q_tr_rx_neg, grad_neg); let grad_norm = grad_neg.norm(); - if grad_norm == F::Field::zero() { + if grad_norm == R::Field::zero() { // Gradient is zero, it is useless to compute the dogleg step. // Instead, we take the Newton direction to the trust region // boundary. @@ -354,7 +356,7 @@ impl Solver for TrustRegion { // Compute D^(-2) (use p for storage). scale_inv2 = p; scale_inv2.copy_from(scale); - scale_inv2.apply(|s| *s = F::Field::one() / (*s * *s)); + scale_inv2.apply(|s| *s = R::Field::one() / (*s * *s)); // Compute g = -D^(-2) grad F, the steepest descent direction in // scaled space (use cauchy for storage). @@ -362,7 +364,7 @@ impl Solver for TrustRegion { cauchy.component_mul_assign(grad_neg); let p = scale_inv2; - // Compute tau = -(grad F)^T g / || F'(x) g ||^2. + // Compute tau = -(grad r)^T g / || r'(x) g ||^2. jac.mul_to(cauchy, temp); let jac_g_norm2 = temp.norm_squared(); let grad_neg_g = if !prefer_greater_magnitude_in_cauchy { @@ -452,7 +454,7 @@ impl Solver for TrustRegion { #[allow(clippy::suspicious_operation_groupings)] let d = (b * b + a * c_neg).sqrt(); - let alpha = if b <= F::Field::zero() { + let alpha = if b <= R::Field::zero() { (-b + d) / a } else { c_neg / (b + d) @@ -474,33 +476,33 @@ impl Solver for TrustRegion { // which overcomes this issue but at higher computational // expense. We are looking for lambda such that // - // (B + lambda I) p = - grad F(x) + // (B + lambda I) p = - grad r(x) // // such that p is the solution to // - // min 1/2 || F'(x) p + F(x) ||^2 s.t. || D p || <= delta. + // min 1/2 || r'(x) p + r(x) ||^2 s.t. || D p || <= delta. // // Determine lambda for Levenberg-Marquardt. A common choice // proven to lead to quadratic convergence is // - // lambda = || F(x) ||^d, + // lambda = || r(x) ||^d, // // where d is from (0, 2]. An adaptive choice for d is: // - // d = 1 / || F(x) || if || F(x) || >= 1 and 1 + 1 / k otherwise, + // d = 1 / || r(x) || if || r(x) || >= 1 and 1 + 1 / k otherwise, // // where k denotes the current iteration. Such choice // ensures that lambda is not large when the point is far - // from the solution (i.e., for large || F(x) ||). + // from the solution (i.e., for large || r(x) ||). // Determine lambda. - let d = if fx_norm >= F::Field::one() { - F::Field::one() / fx_norm + let d = if rx_norm >= R::Field::one() { + R::Field::one() / rx_norm } else { - F::Field::one() + F::Field::one() / convert(*iter as f64) + R::Field::one() + R::Field::one() / convert(*iter as f64) }; - let lambda = *mu * fx_norm.powf(d); + let lambda = *mu * rx_norm.powf(d); // Compute B = F'(x)^T F'(x), which is a symmetric matrix. 
jac.tr_mul_to(jac, jac_tr_jac); @@ -511,7 +513,7 @@ impl Solver for TrustRegion { jac_tr_jac_lambda[(i, i)] += lambda; } - // Solve p for (B + lambda I) p = - grad F(x). + // Solve p for (B + lambda I) p = - grad r(x). p.copy_from(grad_neg); let is_levenberg_marquardt_valid = @@ -551,7 +553,7 @@ impl Solver for TrustRegion { // Vectors for Newton and Cauchy steps are no longed used, so we reuse // their allocations for another purpose. let x_trial = newton; - let fx_trial = cauchy; + let rx_trial = cauchy; // Get candidate x' for the next iterate. x.add_to(p, x_trial); @@ -565,27 +567,27 @@ impl Solver for TrustRegion { x_trial.sub_to(x, p); } - // Compute F(x'). - f.eval(x_trial, fx_trial); - let is_trial_valid = fx_trial.iter().all(|fxi| fxi.is_finite()); - let fx_trial_norm = fx_trial.norm(); + // Compute r(x'). + r.eval(x_trial, rx_trial); + let is_trial_valid = rx_trial.iter().all(|rix| rix.is_finite()); + let rx_trial_norm = rx_trial.norm(); let gain_ratio = if is_trial_valid { // Compute the gain ratio. jac.mul_to(p, temp); - *temp += &*fx; - let predicted = fx_norm - temp.norm(); + *temp += &*rx; + let predicted = rx_norm - temp.norm(); let deny = if allow_ascent { // If ascent is allowed, then check only for zero, which would // make the gain ratio calculation ill-defined. - predicted == F::Field::zero() + predicted == R::Field::zero() } else { // If ascent is not allowed, test positivity of the predicted // gain. Note that even if the actual reduction was positive, // the step would be rejected anyway because the gain ratio // would be negative. - predicted <= F::Field::zero() + predicted <= R::Field::zero() }; if deny { @@ -594,9 +596,9 @@ impl Solver for TrustRegion { } else { debug!("predicted gain <= 0"); } - F::Field::zero() + R::Field::zero() } else { - let actual = fx_norm - fx_trial_norm; + let actual = rx_norm - rx_trial_norm; let gain_ratio = actual / predicted; debug!("gain ratio = {} / {} = {}", actual, predicted, gain_ratio); @@ -604,17 +606,17 @@ impl Solver for TrustRegion { } } else { debug!("trial step is invalid, gain ratio = 0"); - F::Field::zero() + R::Field::zero() }; // Decide if the step is accepted or not. if gain_ratio > accept_thresh { // Accept the trial step. x.copy_from(x_trial); - fx.copy_from(fx_trial); + rx.copy_from(rx_trial); debug!( - "step accepted, || fx || = {}, x = {:?}", - fx_trial_norm, + "step accepted, || rx || = {}, x = {:?}", + rx_trial_norm, x_trial.as_slice() ); @@ -710,7 +712,7 @@ impl Optimizer for TrustRegion { scale, hes, grad, - q_tr_fx_neg: q_tr_grad_neg, + q_tr_rx_neg: q_tr_grad_neg, newton, grad_neg, cauchy, @@ -744,16 +746,16 @@ impl Optimizer for TrustRegion { } // Perform QR decomposition of H(x). - let (q, r) = hes.clone_owned().qr().unpack(); + let (qr_q, qr_r) = hes.clone_owned().qr().unpack(); // Compute -Q^T grad f(x). grad_neg.copy_from(grad); grad_neg.neg_mut(); - q.tr_mul_to(grad_neg, q_tr_grad_neg); + qr_q.tr_mul_to(grad_neg, q_tr_grad_neg); // Find the Newton step by solving the system R newton = -Q^T grad f(x). newton.copy_from(q_tr_grad_neg); - let is_newton_valid = r.solve_upper_triangular_mut(newton); + let is_newton_valid = qr_r.solve_upper_triangular_mut(newton); if !is_newton_valid { // TODO: Moore-Penrose pseudoinverse? diff --git a/src/analysis.rs b/src/analysis.rs index 11c1006..a02f156 100644 --- a/src/analysis.rs +++ b/src/analysis.rs @@ -1,4 +1,4 @@ -//! Various analyses for supporting the solving. +//! Various supporting analyses. 
use nalgebra::{ convert, storage::StorageMut, ComplexField, DimName, Dyn, IsContiguous, OVector, RealField, @@ -36,16 +36,16 @@ pub fn estimate_magnitude_from_bounds(lower: T, upper: T) - /// /// \[1\] [On the Choice of Initial Guesses for the Newton-Raphson /// Algorithm](https://arxiv.org/abs/1911.12433) -pub fn detect_non_linear_vars_in_system( - f: &F, - dom: &Domain, - x: &mut Vector, - fx: &mut Vector, +pub fn detect_non_linear_vars_in_system( + r: &R, + dom: &Domain, + x: &mut Vector, + rx: &mut Vector, ) -> Vec where - F: System, - Sx: StorageMut + IsContiguous, - Sfx: StorageMut, + R: System, + Sx: StorageMut + IsContiguous, + Srx: StorageMut, { let dim = Dyn(dom.dim()); let scale = dom @@ -53,24 +53,24 @@ where .map(|scale| OVector::from_iterator_generic(dim, U1::name(), scale.iter().copied())) .unwrap_or_else(|| OVector::from_element_generic(dim, U1::name(), convert(1.0))); - // Compute F'(x) in the initial point. - f.eval(x, fx); - let jac1 = Jacobian::new(f, x, &scale, fx); + // Compute r'(x) in the initial point. + r.eval(x, rx); + let jac1 = Jacobian::new(r, x, &scale, rx); // Compute Newton step. - let mut p = fx.clone_owned(); + let mut p = rx.clone_owned(); p.neg_mut(); let qr = jac1.clone_owned().qr(); qr.solve_mut(&mut p); // Do Newton step. - p *= convert::<_, F::Field>(0.001); + p *= convert::<_, R::Field>(0.001); *x += p; - // Compute F'(x) after one Newton step. - f.eval(x, fx); - let jac2 = Jacobian::new(f, x, &scale, fx); + // Compute r'(x) after one Newton step. + r.eval(x, rx); + let jac2 = Jacobian::new(r, x, &scale, rx); // Linear variables have no effect on the Jacobian matrix. They can be // recognized by observing no change in corresponding columns (i.e., @@ -85,7 +85,7 @@ where .filter(|(_, (c1, c2))| { c1.iter() .zip(c2.iter()) - .any(|(a, b)| (*a - *b).abs() > F::Field::EPSILON_SQRT) + .any(|(a, b)| (*a - *b).abs() > R::Field::EPSILON_SQRT) }) .map(|(col, _)| col) .collect() @@ -134,16 +134,16 @@ mod tests { } impl System for NonLinearTest { - fn eval( + fn eval( &self, x: &Vector, - fx: &mut Vector, + rx: &mut Vector, ) where Sx: nalgebra::Storage + IsContiguous, - Sfx: StorageMut, + Srx: StorageMut, { - fx[0] = x[0]; - fx[1] = x[1].powi(2); + rx[0] = x[0]; + rx[1] = x[1].powi(2); } } @@ -153,10 +153,10 @@ mod tests { let dom = f.domain(); let mut x = nalgebra::dvector![2.0, 2.0]; - let mut fx = x.clone_owned(); + let mut rx = x.clone_owned(); assert_eq!( - detect_non_linear_vars_in_system(&f, &dom, &mut x, &mut fx), + detect_non_linear_vars_in_system(&f, &dom, &mut x, &mut rx), vec![1] ); } diff --git a/src/core.rs b/src/core.rs index a17c9e0..ab7f39c 100644 --- a/src/core.rs +++ b/src/core.rs @@ -1,12 +1,3 @@ -//! Core abstractions and types for Gomez. -//! -//! *Users* are mainly interested in implementing the [`System`] and [`Problem`] -//! traits, optionally specifying the [domain](Domain). -//! -//! Algorithms *developers* are interested in implementing the [`Solver`] trait -//! (or possibly the [`Optimizer`] trait too) as well as tools in -//! [derivatives](crate::derivatives). - mod base; mod domain; mod function; diff --git a/src/core/base.rs b/src/core/base.rs index 7524fbc..fdb6ddf 100644 --- a/src/core/base.rs +++ b/src/core/base.rs @@ -5,14 +5,16 @@ use super::domain::Domain; /// Trait implemented by real numbers. pub trait RealField: nalgebra::RealField { - /// Square root of double precision machine epsilon. This value is a - /// standard constant for epsilons in approximating first-order - /// derivate-based concepts. 
+    /// Square root of machine epsilon.
+    ///
+    /// This value is a standard constant for epsilons in approximating
+    /// first-order derivative-based concepts.
     const EPSILON_SQRT: Self;
 
-    /// Cubic root of double precision machine epsilon. This value is a standard
-    /// constant for epsilons in approximating second-order derivate-based
-    /// concepts.
+    /// Cubic root of machine epsilon.
+    ///
+    /// This value is a standard constant for epsilons in approximating
+    /// second-order derivative-based concepts.
     const EPSILON_CBRT: Self;
 }
 
@@ -26,14 +28,13 @@ impl RealField for f64 {
     const EPSILON_CBRT: Self = 0.0000060554544523933395;
 }
 
-/// The base trait for [`System`](super::system::System) and
-/// [`Function`](super::function::Function).
+/// The base trait for [`Function`](super::function::Function) and
+/// [`System`](super::system::System).
 pub trait Problem {
-    /// Type of the field, usually f32 or f64.
+    /// Field type, f32 or f64.
     type Field: RealField + Copy;
 
-    /// Get the domain (bound constraints) of the system. If not overridden, the
-    /// system is unconstrained.
+    /// Domain of the problem.
     fn domain(&self) -> Domain;
 }
diff --git a/src/core/domain.rs b/src/core/domain.rs
index 152b3af..f495baf 100644
--- a/src/core/domain.rs
+++ b/src/core/domain.rs
@@ -1,4 +1,4 @@
-//! Problem domain definition such as bound constraints for variables.
+//! Problem domain definition (dimensionality, constraints).
 
 use std::iter::FromIterator;
 
@@ -19,7 +19,7 @@ pub struct Domain {
 }
 
 impl Domain {
-    /// Creates unconstrained domain with given dimension.
+    /// Creates unconstrained domain with given dimensionality.
     pub fn unconstrained(dim: usize) -> Self {
         assert!(dim > 0, "empty domain");
 
@@ -34,9 +34,9 @@ impl Domain {
         }
     }
 
-    /// Creates rectangular domain with given bounds.
+    /// Creates rectangular domain with given lower and upper bounds.
     ///
-    /// Positive and negative infinity can be used to indicate value unbounded
+    /// Positive and negative infinity can be used to indicate a value unbounded
     /// in that dimension and direction. If the entire domain is unconstrained,
     /// use [`Domain::unconstrained`] instead.
     pub fn rect(lower: Vec, upper: Vec) -> Self {
@@ -68,8 +68,10 @@ impl Domain {
 
     /// Sets a custom scale for the domain.
     ///
-    /// Scale value of a variable is the inverse of the expected magnitude of
-    /// that variable.
+    /// Scale of a variable is the inverse of its expected magnitude.
+    /// Appropriate scaling may be crucial for an algorithm to work well on
+    /// "poorly scaled" problems with highly varying magnitudes of their
+    /// variables.
     pub fn with_scale(mut self, scale: Vec) -> Self {
         assert!(
             scale.len() == self.lower.nrows(),
@@ -83,7 +85,7 @@ impl Domain {
         self
     }
 
-    /// Gets the dimension of the domain.
+    /// Gets the dimensionality of the domain.
     pub fn dim(&self) -> usize {
         self.lower.nrows()
     }
diff --git a/src/core/function.rs b/src/core/function.rs
index 99dedc2..d653536 100644
--- a/src/core/function.rs
+++ b/src/core/function.rs
@@ -2,37 +2,31 @@ use nalgebra::{storage::Storage, Dyn, IsContiguous, Vector};
 
 use super::{base::Problem, system::System};
 
-/// The trait for defining functions.
+/// Definition of a function.
 ///
 /// ## Defining a function
 ///
 /// A function is any type that implements [`Function`] and [`Problem`] traits.
-/// There is one required associated type (field) and one required method
-/// ([`apply`](Function::apply)).
 ///
 /// ```rust
 /// use gomez::nalgebra as na;
 /// use gomez::{Domain, Function, Problem};
 /// use na::{Dyn, IsContiguous};
 ///
-/// // A problem is represented by a type.
 /// struct Rosenbrock {
 ///     a: f64,
 ///     b: f64,
 /// }
 ///
 /// impl Problem for Rosenbrock {
-///     // The numeric type. Usually f64 or f32.
 ///     type Field = f64;
 ///
-///     // The domain of the problem (could be bound-constrained).
 ///     fn domain(&self) -> Domain {
 ///         Domain::unconstrained(2)
 ///     }
 /// }
 ///
 /// impl Function for Rosenbrock {
-///     // Apply trial values of variables to the function.
 ///     fn apply(&self, x: &na::Vector) -> Self::Field
 ///     where
 ///         Sx: na::storage::Storage + IsContiguous,
 ///     {
 ///         (self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2)
 ///     }
 /// }
 /// ```
 pub trait Function: Problem {
-    /// Calculate the function value given values of the variables.
+    /// Calculates the function value at a given point.
     fn apply(&self, x: &Vector) -> Self::Field
     where
         Sx: Storage + IsContiguous;
diff --git a/src/core/optimizer.rs b/src/core/optimizer.rs
index 5fbfbae..cf6da30 100644
--- a/src/core/optimizer.rs
+++ b/src/core/optimizer.rs
@@ -2,26 +2,23 @@ use nalgebra::{storage::StorageMut, Dyn, IsContiguous, Vector};
 
 use super::{domain::Domain, function::Function};
 
-/// Common interface for all optimizers.
+/// Interface of an optimizer.
 ///
-/// All optimizers implement a common interface defined by the [`Optimizer`]
-/// trait. The essential method is [`opt_next`](Optimizer::opt_next) which takes
-/// variables *x* and computes the next step. Thus it represents one iteration
-/// in the process. Repeated call to this method should move *x* towards the
-/// minimum in successful cases.
+/// An optimizer is an iterative algorithm which takes a point _x_ and computes
+/// the next step in the optimization process. Repeated calls to the next step
+/// should eventually converge to a minimum _x'_.
 ///
-/// If you implement an optimizer, please consider make it a contribution to
-/// this library.
+/// If you implement an optimizer, please reach out to discuss if we could
+/// include it in gomez.
 ///
 /// ## Implementing an optimizer
 ///
-/// Here is an implementation of a random optimizer (if such a thing can be
-/// called an optimizer) which randomly generates values in a hope that
-/// eventually goes to the minimum with enough luck.
+/// Here is an implementation of a random "optimizer" which randomly generates
+/// values in the hope that a minimum can be found with enough luck.
 ///
 /// ```rust
 /// use gomez::nalgebra as na;
-/// use gomez::*;
+/// use gomez::{Domain, Function, Optimizer, Sample};
 /// use na::{storage::StorageMut, Dyn, IsContiguous, Vector};
 /// use fastrand::Rng;
 ///
@@ -63,23 +60,18 @@ pub trait Optimizer {
     /// Name of the optimizer.
     const NAME: &'static str;
 
-    /// Error type of the iteration. Represents an invalid operation during
-    /// computing the next step.
+    /// Error while computing the next step.
     type Error;
 
     /// Computes the next step in the optimization process.
     ///
-    /// The value of `x` is the current values of variables. After the method
-    /// returns, `x` should hold the variable values of the performed step and
-    /// the return value *must* be the function value of that step as computed
-    /// by [`Function::apply`].
+    /// The value of `x` is the current point. After the method returns, `x`
+    /// should hold the variable values of the performed step and the return
+    /// value _must_ be the function value of that step as computed by
+    /// [`Function::apply`].
     ///
-    /// It is implementation error not to return function value corresponding to
-    /// the computed step.
-    ///
-    /// The implementations *can* assume that subsequent calls to `next` pass
-    /// the value of `x` as was outputted in the previous iteration by the same
-    /// method.
+    /// The implementations _can_ assume that subsequent calls to `opt_next`
+    /// pass the value of `x` as was returned in the previous iteration.
     fn opt_next(
         &mut self,
         f: &F,
diff --git a/src/core/solver.rs b/src/core/solver.rs
index 9fbbbff..6acc28e 100644
--- a/src/core/solver.rs
+++ b/src/core/solver.rs
@@ -2,26 +2,23 @@ use nalgebra::{storage::StorageMut, Dyn, IsContiguous, Vector};
 
 use super::{domain::Domain, system::System};
 
-/// Common interface for all solvers.
+/// Interface of a solver.
 ///
-/// All solvers implement a common interface defined by the [`Solver`] trait.
-/// The essential method is [`solve_next`](Solver::solve_next) which takes
-/// variables *x* and computes the next step. Thus it represents one iteration
-/// in the process. Repeated call to this method should converge *x* to the
-/// solution in successful cases.
+/// A solver is an iterative algorithm which takes a point _x_ and computes the
+/// next step in the solving process. Repeated calls to the next step should
+/// eventually converge to a solution _x'_ in successful cases.
 ///
-/// If you implement a solver, please consider make it a contribution to this
-/// library.
+/// If you implement a solver, please reach out to discuss if we could include
+/// it in gomez.
 ///
 /// ## Implementing a solver
 ///
-/// Here is an implementation of a random solver (if such a thing can be called
-/// a solver) which randomly generates values in a hope that a solution can be
-/// found with enough luck.
+/// Here is an implementation of a random "solver" which randomly generates
+/// values in the hope that a solution can be found with enough luck.
 ///
 /// ```rust
 /// use gomez::nalgebra as na;
-/// use gomez::*;
+/// use gomez::{Domain, Sample, Solver, System};
 /// use na::{storage::StorageMut, Dyn, IsContiguous, Vector};
 /// use fastrand::Rng;
 ///
@@ -35,63 +32,57 @@ use super::{domain::Domain, system::System};
 ///     }
 /// }
 ///
-/// impl Solver for Random
+/// impl Solver for Random
 /// where
-///     F::Field: Sample,
+///     R::Field: Sample,
 /// {
 ///     const NAME: &'static str = "Random";
 ///     type Error = std::convert::Infallible;
 ///
-///     fn solve_next(
+///     fn solve_next(
 ///         &mut self,
-///         f: &F,
-///         dom: &Domain,
-///         x: &mut Vector,
-///         fx: &mut Vector,
+///         r: &R,
+///         dom: &Domain,
+///         x: &mut Vector,
+///         rx: &mut Vector,
 ///     ) -> Result<(), Self::Error>
 ///     where
-///         Sx: StorageMut + IsContiguous,
-///         Sfx: StorageMut,
+///         Sx: StorageMut + IsContiguous,
+///         Srx: StorageMut,
 ///     {
 ///         // Randomly sample in the domain.
 ///         dom.sample(x, &mut self.rng);
 ///
 ///         // We must compute the residuals.
-///         f.eval(x, fx);
+///         r.eval(x, rx);
 ///
 ///         Ok(())
 ///     }
 /// }
 /// ```
-pub trait Solver {
+pub trait Solver {
     /// Name of the solver.
     const NAME: &'static str;
 
-    /// Error type of the iteration. Represents an invalid operation during
-    /// computing the next step.
+    /// Error while computing the next step.
     type Error;
 
     /// Computes the next step in the solving process.
     ///
-    /// The value of `x` is the current values of variables. After the method
-    /// returns, `x` should hold the variable values of the performed step and
-    /// `fx` *must* contain residuals of that step as computed by
-    /// [`System::eval`].
     ///
-    /// It is implementation error not to compute the residuals of the computed
-    /// step.
-    ///
-    /// The implementations *can* assume that subsequent calls to `next` pass
-    /// the value of `x` as was outputted in the previous iteration by the same
-    /// method.
-    fn solve_next<Sx, Sfx>(
+    /// The implementations _can_ assume that subsequent calls to `solve_next`
+    /// pass the value of `x` as it was returned in the previous iteration.
+    fn solve_next<Sx, Srx>(
         &mut self,
-        f: &F,
-        dom: &Domain<F::Field>,
-        x: &mut Vector<F::Field, Dyn, Sx>,
-        fx: &mut Vector<F::Field, Dyn, Sfx>,
+        r: &R,
+        dom: &Domain<R::Field>,
+        x: &mut Vector<R::Field, Dyn, Sx>,
+        rx: &mut Vector<R::Field, Dyn, Srx>,
     ) -> Result<(), Self::Error>
     where
-        Sx: StorageMut<F::Field, Dyn> + IsContiguous,
-        Sfx: StorageMut<F::Field, Dyn>;
+        Sx: StorageMut<R::Field, Dyn> + IsContiguous,
+        Srx: StorageMut<R::Field, Dyn>;
 }
diff --git a/src/core/system.rs b/src/core/system.rs
index 8a8a230..a5df1e9 100644
--- a/src/core/system.rs
+++ b/src/core/system.rs
@@ -5,72 +5,67 @@ use nalgebra::{
 
 use super::base::Problem;
 
-/// The trait for defining equations systems.
+/// Definition of a system of equations.
 ///
 /// ## Defining a system
 ///
 /// A system is any type that implements [`System`] and [`Problem`] traits.
-/// There is one required associated type (field type) and one required method
-/// ([`eval`](System::eval)).
 ///
 /// ```rust
 /// use gomez::nalgebra as na;
 /// use gomez::{Domain, Problem, System};
 /// use na::{Dyn, IsContiguous};
 ///
-/// // A problem is represented by a type.
 /// struct Rosenbrock {
 ///     a: f64,
 ///     b: f64,
 /// }
 ///
 /// impl Problem for Rosenbrock {
-///     // The numeric type. Usually f64 or f32.
 ///     type Field = f64;
 ///
-///     // The domain of the problem (could be bound-constrained).
 ///     fn domain(&self) -> Domain<Self::Field> {
 ///         Domain::unconstrained(2)
 ///     }
 /// }
 ///
 /// impl System for Rosenbrock {
-///     // Evaluate trial values of variables to the system.
-///     fn eval<Sx, Sfx>(
+///     fn eval<Sx, Srx>(
 ///         &self,
 ///         x: &na::Vector<Self::Field, Dyn, Sx>,
-///         fx: &mut na::Vector<Self::Field, Dyn, Sfx>,
+///         rx: &mut na::Vector<Self::Field, Dyn, Srx>,
 ///     ) where
 ///         Sx: na::storage::Storage<Self::Field, Dyn> + IsContiguous,
-///         Sfx: na::storage::StorageMut<Self::Field, Dyn>,
+///         Srx: na::storage::StorageMut<Self::Field, Dyn>,
 ///     {
 ///         // Compute the residuals of all equations.
-///         fx[0] = (self.a - x[0]).powi(2);
-///         fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2);
+///         rx[0] = (self.a - x[0]).powi(2);
+///         rx[1] = self.b * (x[1] - x[0].powi(2)).powi(2);
 ///     }
 /// }
 /// ```
 pub trait System: Problem {
-    /// Calculate the residuals of the system given values of the variables.
-    fn eval<Sx, Sfx>(
+    /// Calculates the system residuals at a given point.
+    fn eval<Sx, Srx>(
         &self,
         x: &Vector<Self::Field, Dyn, Sx>,
-        fx: &mut Vector<Self::Field, Dyn, Sfx>,
+        rx: &mut Vector<Self::Field, Dyn, Srx>,
     ) where
         Sx: Storage<Self::Field, Dyn> + IsContiguous,
-        Sfx: StorageMut<Self::Field, Dyn>;
+        Srx: StorageMut<Self::Field, Dyn>;
 
-    /// Calculate the residuals vector norm.
+    /// Calculates the norm of the residuals vector.
     ///
     /// The default implementation allocates a temporary vector for the
     /// residuals on every call. If you plan to solve the system by an
-    /// optimizer, consider overriding the default implementation.
+    /// optimizer, consider overriding the default implementation to avoid this
+    /// allocation.
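+    ///
+    /// A minimal sketch of such an override, assuming the implementor keeps a
+    /// preallocated buffer in a hypothetical `residuals: RefCell<...>` field
+    /// (which is not part of this trait):
+    ///
+    /// ```ignore
+    /// fn norm<Sx>(&self, x: &Vector<Self::Field, Dyn, Sx>) -> Self::Field
+    /// where
+    ///     Sx: Storage<Self::Field, Dyn> + IsContiguous,
+    /// {
+    ///     // Reuse the buffer instead of allocating a fresh vector on every call.
+    ///     let mut rx = self.residuals.borrow_mut();
+    ///     self.eval(x, &mut *rx);
+    ///     rx.norm()
+    /// }
+    /// ```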
     fn norm<Sx>(&self, x: &Vector<Self::Field, Dyn, Sx>) -> Self::Field
     where
         Sx: Storage<Self::Field, Dyn> + IsContiguous,
     {
-        let mut fx = x.clone_owned();
-        self.eval(x, &mut fx);
-        fx.norm()
+        let mut rx = x.clone_owned();
+        self.eval(x, &mut rx);
+        rx.norm()
     }
 }
diff --git a/src/derivatives.rs b/src/derivatives.rs
index 99e3260..dcbd183 100644
--- a/src/derivatives.rs
+++ b/src/derivatives.rs
@@ -10,44 +10,44 @@ use num_traits::{One, Zero};
 
 use crate::core::{Function, Problem, RealField as _, System};
 
-/// Jacobian matrix of a system.
+/// Jacobian matrix of a system of equations.
 #[derive(Debug)]
-pub struct Jacobian<F: Problem> {
-    jac: OMatrix<F::Field, Dyn, Dyn>,
+pub struct Jacobian<R: Problem> {
+    jac: OMatrix<R::Field, Dyn, Dyn>,
 }
 
-impl<F: Problem> Jacobian<F> {
+impl<R: Problem> Jacobian<R> {
     /// Initializes the Jacobian matrix with zeros.
-    pub fn zeros(f: &F) -> Self {
-        let dim = Dyn(f.domain().dim());
+    pub fn zeros(r: &R) -> Self {
+        let dim = Dyn(r.domain().dim());
         Self {
             jac: OMatrix::zeros_generic(dim, dim),
         }
     }
 }
 
-impl<F: System> Jacobian<F> {
-    /// Compute Compute the Jacobian matrix of the system in given point with
-    /// given scale of variables. See [`compute`](Jacobian::compute) for more
-    /// details.
-    pub fn new<Sx, Sscale, Sfx>(
-        f: &F,
-        x: &mut Vector<F::Field, Dyn, Sx>,
-        scale: &Vector<F::Field, Dyn, Sscale>,
-        fx: &Vector<F::Field, Dyn, Sfx>,
+impl<R: System> Jacobian<R> {
+    /// Computes the Jacobian matrix of the system of equations at a given
+    /// point, with a given scale of variables. See
+    /// [`compute`](Jacobian::compute) for more details.
+    pub fn new<Sx, Sscale, Srx>(
+        r: &R,
+        x: &mut Vector<R::Field, Dyn, Sx>,
+        scale: &Vector<R::Field, Dyn, Sscale>,
+        rx: &Vector<R::Field, Dyn, Srx>,
     ) -> Self
     where
-        Sx: StorageMut<F::Field, Dyn> + IsContiguous,
-        Sscale: Storage<F::Field, Dyn>,
-        Sfx: Storage<F::Field, Dyn>,
+        Sx: StorageMut<R::Field, Dyn> + IsContiguous,
+        Sscale: Storage<R::Field, Dyn>,
+        Srx: Storage<R::Field, Dyn>,
     {
-        let mut jac = Self::zeros(f);
-        jac.compute(f, x, scale, fx);
+        let mut jac = Self::zeros(r);
+        jac.compute(r, x, scale, rx);
         jac
     }
 
-    /// Compute the Jacobian matrix of the system in given point with given
-    /// scale of variables.
+    /// Computes the Jacobian matrix of the system of equations at a given
+    /// point, with a given scale of variables.
     ///
     /// The parameter `x` is mutable to allow temporary mutations avoiding
     /// unnecessary allocations, but after this method ends, the content of the
@@ -55,19 +55,19 @@ impl<F: System> Jacobian<F> {
     ///
     /// Information about variable scale is useful for problematic cases of
     /// finite differentiation (e.g., when the value is near zero).
-    pub fn compute<Sx, Sscale, Sfx>(
+    pub fn compute<Sx, Sscale, Srx>(
         &mut self,
-        f: &F,
-        x: &mut Vector<F::Field, Dyn, Sx>,
-        scale: &Vector<F::Field, Dyn, Sscale>,
-        fx: &Vector<F::Field, Dyn, Sfx>,
+        r: &R,
+        x: &mut Vector<R::Field, Dyn, Sx>,
+        scale: &Vector<R::Field, Dyn, Sscale>,
+        rx: &Vector<R::Field, Dyn, Srx>,
     ) -> &mut Self
     where
-        Sx: StorageMut<F::Field, Dyn> + IsContiguous,
-        Sscale: Storage<F::Field, Dyn>,
-        Sfx: Storage<F::Field, Dyn>,
+        Sx: StorageMut<R::Field, Dyn> + IsContiguous,
+        Sscale: Storage<R::Field, Dyn>,
+        Srx: Storage<R::Field, Dyn>,
     {
-        let eps = F::Field::EPSILON_SQRT;
+        let eps = R::Field::EPSILON_SQRT;
 
         for (j, mut col) in self.jac.column_iter_mut().enumerate() {
             let xj = x[j];
@@ -75,22 +75,22 @@ impl<F: System> Jacobian<F> {
             // Compute the step size. We would like to have the step as small
             // as possible (to be as close to the zero -- i.e., the real
             // derivative -- as possible). But at the same time, very
-            // small step could cause F(x + e_j * step_j) ~= F(x) with very
+            // small step could cause r(x + e_j * step_j) ~= r(x) with very
             // small number of good digits.
             //
             // A reasonable way to balance these competing needs is to scale
             // each component by x_j itself. To avoid problems when x_j is close
             // to zero, it is modified to take the typical magnitude instead.
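             //
             // Concretely, with EPS denoting the machine epsilon, the step
             // computed below is
             //
             //   step_j = sqrt(EPS) * max(|x_j|, 1 / scale_j) * sign(x_j),
             //
             // falling back to sqrt(EPS) alone if this evaluates to zero.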
- let magnitude = F::Field::one() / scale[j]; - let step = eps * xj.abs().max(magnitude) * xj.copysign(F::Field::one()); - let step = if step == F::Field::zero() { eps } else { step }; + let magnitude = R::Field::one() / scale[j]; + let step = eps * xj.abs().max(magnitude) * xj.copysign(R::Field::one()); + let step = if step == R::Field::zero() { eps } else { step }; // Update the point. x[j] = xj + step; - f.eval(x, &mut col); + r.eval(x, &mut col); - // Compute the derivative approximation: J[i, j] = (F(x + e_j * step_j) - F(x)) / step_j. - col -= fx; + // Compute the derivative approximation: J[i, j] = (r(x + e_j * step_j) - r(x)) / step_j. + col -= rx; col /= step; // Restore the original value. @@ -101,8 +101,8 @@ impl Jacobian { } } -impl Deref for Jacobian { - type Target = OMatrix; +impl Deref for Jacobian { + type Target = OMatrix; fn deref(&self) -> &Self::Target { &self.jac @@ -116,7 +116,7 @@ pub struct Gradient { } impl Gradient { - /// Initializes the Gradient matrix with zeros. + /// Initializes the gradient vector with zeros. pub fn zeros(f: &F) -> Self { let dim = Dyn(f.domain().dim()); Self { @@ -126,9 +126,8 @@ impl Gradient { } impl Gradient { - /// Compute Compute the gradient vector of the function in given point with - /// given scale of variables. See [`compute`](Gradient::compute) for more - /// details. + /// Computes the gradient vector of the function in given point with given + /// scale of variables. See [`compute`](Gradient::compute) for more details. pub fn new( f: &F, x: &mut Vector, @@ -144,7 +143,7 @@ impl Gradient { grad } - /// Compute the gradient vector of the function in given point with given + /// Computes the gradient vector of the function in given point with given /// scale of variables. /// /// The parameter `x` is mutable to allow temporary mutations avoiding @@ -178,7 +177,7 @@ impl Gradient { x[i] = xi + step; let fxi = f.apply(x); - // Compute the derivative approximation: grad[i] = (F(x + e_i * step_i) - F(x)) / step_i. + // Compute the derivative approximation: grad[i] = (f(x + e_i * step_i) - f(x)) / step_i. self.grad[i] = (fxi - fx) / step; // Restore the original value. @@ -197,7 +196,7 @@ impl Deref for Gradient { } } -/// Hessian matrix of a system. +/// Hessian matrix of a function. #[derive(Debug)] pub struct Hessian { hes: OMatrix, @@ -218,9 +217,8 @@ impl Hessian { } impl Hessian { - /// Compute Compute the Hessian matrix of the function in given point with - /// given scale of variables. See [`compute`](Hessian::compute) for more - /// details. + /// Computes the Hessian matrix of the function in given point with given + /// scale of variables. See [`compute`](Hessian::compute) for more details. pub fn new( f: &F, x: &mut Vector, @@ -236,7 +234,7 @@ impl Hessian { hes } - /// Compute the Hessian matrix of the function in given point with given + /// Computes the Hessian matrix of the function in given point with given /// scale of variables. /// /// The parameter `x` is mutable to allow temporary mutations avoiding diff --git a/src/driver.rs b/src/driver.rs index 63dbe37..8a6fc70 100644 --- a/src/driver.rs +++ b/src/driver.rs @@ -1,8 +1,8 @@ -//! High-level API for solving and optimization. +//! High-level API for optimization and solving. //! //! This module contains "drivers" that encapsulate all internal state and -//! provide a simple API to run the iterative process for solving or -//! optimization. This documentation describes usage for the solving systems of +//! 
provide a simple API to run the iterative process for optimization or +//! solving. This documentation describes usage for the solving systems of //! equations, but the API is basically the same for optimization. //! //! The simplest way of using the driver is to initialize it with the defaults: @@ -27,9 +27,9 @@ //! # } //! # } //! -//! let f = MySystem::new(); +//! let r = MySystem::new(); //! -//! let mut solver = SolverDriver::new(&f); +//! let mut solver = SolverDriver::new(&r); //! ``` //! If you need to specify additional settings, use the builder: //! @@ -53,9 +53,9 @@ //! # } //! # } //! -//! let f = MySystem::new(); +//! let r = MySystem::new(); //! -//! let mut solver = SolverDriver::builder(&f) +//! let mut solver = SolverDriver::builder(&r) //! .with_initial(vec![10.0, -10.0]) //! .with_algo(gomez::algo::NelderMead::new) //! .build(); @@ -85,23 +85,24 @@ //! # } //! # //! # impl System for MySystem { -//! # fn eval( +//! # fn eval( //! # &self, //! # x: &na::Vector, -//! # fx: &mut na::Vector, +//! # rx: &mut na::Vector, //! # ) where //! # Sx: na::storage::Storage + IsContiguous, -//! # Sfx: na::storage::StorageMut, +//! # Srx: na::storage::StorageMut, //! # { -//! # fx[0] = x[0] + x[1] + 1.0; -//! # fx[1] = (x[0] + x[1] - 1.0).powi(2); +//! # rx[0] = x[0] + x[1] + 1.0; +//! # rx[1] = (x[0] + x[1] - 1.0).powi(2); //! # } //! # } //! # -//! # let f = MySystem::new(); +//! # let r = MySystem::new(); //! # -//! # let mut solver = SolverDriver::new(&f); +//! # let mut solver = SolverDriver::new(&r); //! # +//! // Solution or solver error. //! let result = solver.find(|state| state.norm() <= 1e-6 || state.iter() >= 100); //! ``` //! @@ -130,25 +131,26 @@ //! # } //! # //! # impl System for MySystem { -//! # fn eval( +//! # fn eval( //! # &self, //! # x: &na::Vector, -//! # fx: &mut na::Vector, +//! # rx: &mut na::Vector, //! # ) where //! # Sx: na::storage::Storage + IsContiguous, -//! # Sfx: na::storage::StorageMut, +//! # Srx: na::storage::StorageMut, //! # { -//! # fx[0] = x[0] + x[1] + 1.0; -//! # fx[1] = (x[0] + x[1] - 1.0).powi(2); +//! # rx[0] = x[0] + x[1] + 1.0; +//! # rx[1] = (x[0] + x[1] - 1.0).powi(2); //! # } //! # } //! # -//! # let f = MySystem::new(); +//! # let r = MySystem::new(); //! # -//! # let mut solver = SolverDriver::new(&f); +//! # let mut solver = SolverDriver::new(&r); //! # //! loop { -//! let norm = solver.next().expect("no solver error"); +//! // Current point or solver error. +//! let result = solver.next(); //! // ... //! # break; //! } @@ -158,40 +160,40 @@ use nalgebra::{convert, DimName, Dyn, OVector, U1}; use crate::{algo::TrustRegion, Domain, Function, Optimizer, Problem, Solver, System}; -struct Builder<'a, F: Problem, A> { - f: &'a F, - dom: Domain, +struct Builder<'a, P: Problem, A> { + p: &'a P, + dom: Domain, algo: A, - x0: OVector, + x0: OVector, } -impl<'a, F: Problem> Builder<'a, F, TrustRegion> { - fn new(f: &'a F) -> Self { - let dom = f.domain(); - let algo = TrustRegion::new(f, &dom); +impl<'a, P: Problem> Builder<'a, P, TrustRegion
> { + fn new(p: &'a P) -> Self { + let dom = p.domain(); + let algo = TrustRegion::new(p, &dom); let dim = Dyn(dom.dim()); let x0 = OVector::from_element_generic(dim, U1::name(), convert(0.0)); - Self { f, dom, algo, x0 } + Self { p, dom, algo, x0 } } } -impl<'a, F: Problem, A> Builder<'a, F, A> { - fn with_initial(mut self, x0: Vec) -> Self { +impl<'a, P: Problem, A> Builder<'a, P, A> { + fn with_initial(mut self, x0: Vec) -> Self { let dim = Dyn(self.dom.dim()); self.x0 = OVector::from_vec_generic(dim, U1::name(), x0); self } - fn with_algo(self, factory: FA) -> Builder<'a, F, S2> + fn with_algo(self, factory: FA) -> Builder<'a, P, S2> where - FA: FnOnce(&F, &Domain) -> S2, + FA: FnOnce(&P, &Domain) -> S2, { - let algo = factory(self.f, &self.dom); + let algo = factory(self.p, &self.dom); Builder { - f: self.f, + p: self.p, dom: self.dom, algo, x0: self.x0, @@ -205,11 +207,11 @@ impl<'a, F: Problem, A> Builder<'a, F, A> { } /// Builder for the [`SolverDriver`]. -pub struct SolverBuilder<'a, F: Problem, A>(Builder<'a, F, A>); +pub struct SolverBuilder<'a, R: Problem, A>(Builder<'a, R, A>); -impl<'a, F: Problem, A> SolverBuilder<'a, F, A> { +impl<'a, R: Problem, A> SolverBuilder<'a, R, A> { /// Sets the initial point from which the iterative process starts. - pub fn with_initial(self, x0: Vec) -> Self { + pub fn with_initial(self, x0: Vec) -> Self { Self(self.0.with_initial(x0)) } @@ -218,24 +220,29 @@ impl<'a, F: Problem, A> SolverBuilder<'a, F, A> { /// This builder method accepts a closure that takes the reference to the /// problem and its domain. For many algorithms in gomez, you can simply /// pass the `new` constructor directly (e.g., `TrustRegion::new`). - pub fn with_algo(self, factory: FA) -> SolverBuilder<'a, F, S2> + pub fn with_algo(self, factory: FA) -> SolverBuilder<'a, R, S2> where - FA: FnOnce(&F, &Domain) -> S2, + FA: FnOnce(&R, &Domain) -> S2, { SolverBuilder(self.0.with_algo(factory)) } /// Builds the [`SolverDriver`]. - pub fn build(self) -> SolverDriver<'a, F, A> { - let Builder { f, dom, algo, x0 } = self.0.build(); - let fx = x0.clone_owned(); + pub fn build(self) -> SolverDriver<'a, R, A> { + let Builder { + p: r, + dom, + algo, + x0, + } = self.0.build(); + let rx = x0.clone_owned(); SolverDriver { - f, + r, dom, algo, x: x0, - fx, + rx, } } } @@ -245,57 +252,57 @@ impl<'a, F: Problem, A> SolverBuilder<'a, F, A> { /// For default settings, use [`SolverDriver::new`]. For more flexibility, use /// [`SolverDriver::builder`]. For the usage of the driver, see [module](self) /// documentation. -pub struct SolverDriver<'a, F: Problem, A> { - f: &'a F, - dom: Domain, +pub struct SolverDriver<'a, R: Problem, A> { + r: &'a R, + dom: Domain, algo: A, - x: OVector, - fx: OVector, + x: OVector, + rx: OVector, } -impl<'a, F: Problem> SolverDriver<'a, F, TrustRegion> { +impl<'a, R: Problem> SolverDriver<'a, R, TrustRegion> { /// Returns the builder for specifying additional settings. - pub fn builder(f: &'a F) -> SolverBuilder<'a, F, TrustRegion> { - SolverBuilder(Builder::new(f)) + pub fn builder(r: &'a R) -> SolverBuilder<'a, R, TrustRegion> { + SolverBuilder(Builder::new(r)) } /// Initializes the driver with the default settings. - pub fn new(f: &'a F) -> Self { - SolverDriver::builder(f).build() + pub fn new(r: &'a R) -> Self { + SolverDriver::builder(r).build() } } -impl<'a, F: Problem, S> SolverDriver<'a, F, S> { +impl<'a, R: Problem, S> SolverDriver<'a, R, S> { /// Returns reference to the current point. 
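    ///
    /// A minimal sketch of inspecting the driver between iterations (assuming
    /// a `solver: SolverDriver` over some system; errors are ignored here):
    ///
    /// ```ignore
    /// let _ = solver.next();
    /// println!("x = {:?}, residual norm = {}", solver.x(), solver.norm());
    /// ```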
- pub fn x(&self) -> &[F::Field] { + pub fn x(&self) -> &[R::Field] { self.x.as_slice() } /// Returns reference to the current residuals. - pub fn fx(&self) -> &[F::Field] { - self.fx.as_slice() + pub fn rx(&self) -> &[R::Field] { + self.rx.as_slice() } /// Returns norm of the residuals. - pub fn norm(&self) -> F::Field { - self.fx.norm() + pub fn norm(&self) -> R::Field { + self.rx.norm() } } -impl<'a, F: System, A: Solver> SolverDriver<'a, F, A> { - /// Does one iteration of the process, returning the norm of the residuals - /// in case of no error. +impl<'a, R: System, A: Solver> SolverDriver<'a, R, A> { + /// Performs one iteration of the process, returning the current point and + /// norm of the residuals in case of no error. #[allow(clippy::should_implement_trait)] - pub fn next(&mut self) -> Result<(&[F::Field], F::Field), A::Error> { + pub fn next(&mut self) -> Result<(&[R::Field], R::Field), A::Error> { self.algo - .solve_next(self.f, &self.dom, &mut self.x, &mut self.fx)?; - Ok((self.x.as_slice(), self.fx.norm())) + .solve_next(self.r, &self.dom, &mut self.x, &mut self.rx)?; + Ok((self.x.as_slice(), self.rx.norm())) } /// Runs the iterative process until given stopping criterion is satisfied. - pub fn find(&mut self, stop: C) -> Result<(&[F::Field], F::Field), A::Error> + pub fn find(&mut self, stop: C) -> Result<(&[R::Field], R::Field), A::Error> where - C: Fn(SolverIterState<'_, F>) -> bool, + C: Fn(SolverIterState<'_, R>) -> bool, { let mut iter = 0; @@ -304,7 +311,7 @@ impl<'a, F: System, A: Solver> SolverDriver<'a, F, A> { let state = SolverIterState { x: &self.x, - fx: &self.fx, + rx: &self.rx, iter, }; @@ -316,33 +323,33 @@ impl<'a, F: System, A: Solver> SolverDriver<'a, F, A> { } } - /// Returns the name of the used solver. + /// Returns the name of the solver. pub fn name(&self) -> &str { A::NAME } } -/// State of the current iteration. -pub struct SolverIterState<'a, F: Problem> { - x: &'a OVector, - fx: &'a OVector, +/// State of the current iteration in the solving process. +pub struct SolverIterState<'a, R: Problem> { + x: &'a OVector, + rx: &'a OVector, iter: usize, } -impl<'a, F: Problem> SolverIterState<'a, F> { +impl<'a, R: Problem> SolverIterState<'a, R> { /// Returns reference to the current point. - pub fn x(&self) -> &[F::Field] { + pub fn x(&self) -> &[R::Field] { self.x.as_slice() } /// Returns reference to the current residuals. - pub fn fx(&self) -> &[F::Field] { - self.fx.as_slice() + pub fn rx(&self) -> &[R::Field] { + self.rx.as_slice() } - /// Returns norm of the residuals. - pub fn norm(&self) -> F::Field { - self.fx.norm() + /// Returns norm of the current residuals. + pub fn norm(&self) -> R::Field { + self.rx.norm() } /// Returns the current iteration number. @@ -374,7 +381,12 @@ impl<'a, F: Problem, A> OptimizerBuilder<'a, F, A> { /// Builds the [`OptimizerDriver`]. pub fn build(self) -> OptimizerDriver<'a, F, A> { - let Builder { f, dom, algo, x0 } = self.0.build(); + let Builder { + p: f, + dom, + algo, + x0, + } = self.0.build(); OptimizerDriver { f, @@ -386,7 +398,7 @@ impl<'a, F: Problem, A> OptimizerBuilder<'a, F, A> { } } -/// The driver for the process of solving a system of equations. +/// The driver for the process of optimizing a function. /// /// For default settings, use [`OptimizerDriver::new`]. For more flexibility, /// use [`OptimizerDriver::builder`]. 
For the usage of the driver, see @@ -424,8 +436,8 @@ impl<'a, F: Problem, A> OptimizerDriver<'a, F, A> { } impl<'a, F: Function, A: Optimizer> OptimizerDriver<'a, F, A> { - /// Does one iteration of the process, returning the function value in case - /// of no error. + /// Performs one iteration of the process, returning the current point and + /// function value in case of no error. #[allow(clippy::should_implement_trait)] pub fn next(&mut self) -> Result<(&[F::Field], F::Field), A::Error> { self.algo @@ -457,13 +469,13 @@ impl<'a, F: Function, A: Optimizer> OptimizerDriver<'a, F, A> { } } - /// Returns the name of the used optimizer. + /// Returns the name of the optimizer. pub fn name(&self) -> &str { A::NAME } } -/// State of the current iteration. +/// State of the current iteration if the optimization process. pub struct OptimizerIterState<'a, F: Problem> { x: &'a OVector, fx: F::Field, @@ -508,8 +520,8 @@ mod tests { #[test] fn solver_basic_use_case() { - let f = Sphere::new(4); - let mut solver = SolverDriver::builder(&f) + let r = Sphere::new(4); + let mut solver = SolverDriver::builder(&r) // Zeros are the root for sphere, there would be no point is such // test. .with_initial(vec![10.0; 4]) @@ -525,8 +537,8 @@ mod tests { #[test] fn solver_custom() { - let f = Sphere::new(1); - let mut solver = SolverDriver::builder(&f) + let r = Sphere::new(1); + let mut solver = SolverDriver::builder(&r) .with_algo(Steffensen::new) // Zeros is the root for sphere, there would be no point is such // test. diff --git a/src/lib.rs b/src/lib.rs index 7ce135f..6714ea8 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -2,147 +2,209 @@ #![allow(clippy::type_complexity)] #![warn(missing_docs)] -//! # Gomez +//! _gomez_ is a framework and implementation for **mathematical optimization** +//! and solving **non-linear systems of equations**. //! -//! A pure Rust framework and implementation of (derivative-free) methods for -//! solving nonlinear (bound-constrained) systems of equations. +//! The library is written completely in Rust. Its focus is on being useful for +//! **practical problems** and having API that is simple for easy cases as well +//! as flexible for complicated ones. The name stands for **g**lobal +//! **o**ptimization & **n**on-linear **e**quations **s**olving, with a few +//! typos. //! -//! This library provides a variety of solvers of nonlinear equation systems -//! with *n* equations and *n* unknowns written entirely in Rust. Bound -//! constraints for variables are supported first-class, which is useful for -//! engineering applications. All solvers implement the same interface which is -//! designed to give full control over the process and allows to combine -//! different components to achieve the desired solution. The implemented -//! methods are historically-proven numerical methods or global optimization -//! algorithms. +//! ## Practical problems //! -//! The convergence of the numerical methods is tested on several problems and -//! the implementation is benchmarked against with -//! [GSL](https://www.gnu.org/software/gsl/doc/html/multiroots.html) library. +//! The main goal is to be useful for practical problems. This is manifested by +//! the following features: +//! +//! * _Derivative-free_. No algorithm requires an analytical derivative +//! (gradient, Hessian, Jacobian). Methods that use derivatives approximate it +//! using finite difference method1. +//! * _Constraints_ support. It is possible to specify the problem domain with +//! 
constraints<sup>2</sup>, which is necessary for many engineering
+//!   applications.
+//! * Non-naive implementations. The code is not a direct translation of
+//!   textbook pseudocode. It's written with performance in mind and applies
+//!   important techniques from numerical mathematics. It also tries to handle
+//!   situations that hurt the methods but occur in practice.
+//!
+//! <sup>1</sup> There is a plan to provide ways to override this approximation
+//! with a real derivative.
+//!
+//! <sup>2</sup> Currently, only unconstrained and box-bounded domains are
+//! supported.
 //!
 //! ## Algorithms
 //!
-//! * [Trust region](algo::trust_region) -- Recommended method to be used as a
-//!   default and it will just work in most of the cases.
-//! * [LIPO](algo::lipo) -- A global optimization algorithm useful for initial
-//!   guesses search in combination with a numerical solver.
-//! * [Steffensen](algo::steffensen) -- Fast and lightweight method for
-//!   one-dimensional systems.
-//! * [Nelder-Mead](algo::nelder_mead) -- Not generally recommended, but may be
-//!   useful for low-dimensionality problems with ill-defined Jacobian matrix.
-//!
-//! ## Problem
-//!
-//! The problem of solving systems of nonlinear equations (multidimensional root
-//! finding) is about finding values of *n* variables given *n* equations that
-//! have to be satisfied. In our case, we consider a general algebraic form of
-//! the equations (compare with specialized solvers for [systems of linear
-//! equations](https://en.wikipedia.org/wiki/System_of_linear_equations)).
+//! * [Trust region](algo::trust_region) – Recommended method to be used as a
+//!   default and it will just work in most cases.
+//! * [LIPO](algo::lipo) – Global optimization algorithm useful for searching
+//!   good initial guesses in combination with a numerical algorithm.
+//! * [Steffensen](algo::steffensen) – Fast and lightweight method for solving
+//!   one-dimensional systems of equations.
+//! * [Nelder-Mead](algo::nelder_mead) – Direct search optimization method that
+//!   does not use any derivatives.
 //!
-//! Mathematically, the problem is formulated as
+//! This list will be extended in the future. But at the same time, having as
+//! many algorithms as possible is _not_ the goal. Instead, the focus is on
+//! providing quality implementations of battle-tested methods.
 //!
-//! ```text
-//! F(x) = 0,
+//! ## Mathematical optimization
 //!
-//! where F(x) = { f1(x), ..., fn(x) }
-//! and x = { x1, ..., xn }
-//! ```
+//! Given a function _f: D → R_ from some domain _D_ (in _R<sup>n</sup>_) to
+//! the real numbers and an initial point _x<sub>0</sub>_, the goal is to find
+//! a point _x'_ such that _f(x')_ is a minimum. Note that gomez does not
+//! guarantee that the minimum is global, although the focus is on global
+//! optimization techniques.
 //!
-//! Moreover, it is possible to add bound constraints to the variables. That is:
+//! ### Example
+//!
+//! ```rust
+//! use gomez::nalgebra as na;
+//! use gomez::{Domain, Function, OptimizerDriver, Problem};
+//! use na::{Dyn, IsContiguous};
+//!
+//! // Objective function is represented by a struct.
+//! struct Rosenbrock {
+//!     a: f64,
+//!     b: f64,
+//! }
+//!
+//! impl Problem for Rosenbrock {
+//!     // Field type, f32 or f64.
+//!     type Field = f64;
 //!
-//! ```text
-//! Li <= xi <= Ui for some bounds [L, U] for every i
-//! ```
+//!     // Domain of the function.
+//!     fn domain(&self) -> Domain<Self::Field> {
+//!         Domain::unconstrained(2)
+//!     }
+//! }
+//!
+//! impl Function for Rosenbrock {
+//!     // Body of the function, taking x and returning f(x).
+//!     fn apply<Sx>(&self, x: &na::Vector<Self::Field, Dyn, Sx>) -> Self::Field
+//!     where
+//!         Sx: na::Storage<Self::Field, Dyn> + IsContiguous,
+//!     {
+//!         (self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2)
+//!     }
+//! }
+//!
+//! let f = Rosenbrock { a: 1.0, b: 1.0 };
+//! let mut optimizer = OptimizerDriver::builder(&f)
+//!     .with_initial(vec![-10.0, -5.0])
+//!     .build();
+//!
+//! let (x, fx) = optimizer
+//!     .find(|state| state.fx() <= 1e-6 || state.iter() >= 100)
+//!     .expect("optimizer error");
+//!
+//! println!("f(x) = {fx}\tx = {x:?}");
+//! # assert!(fx <= 1e-6);
+//! ```
 //!
-//! The bounds can be negative/positive infinity, effectively making the
-//! variable unconstrained.
+//! See [`OptimizerDriver`] and [`OptimizerBuilder`](driver::OptimizerBuilder)
+//! for additional options.
 //!
-//! More sophisticated constraints (such as (in)equalities consisting of
-//! multiple variables) are currently out of the scope of this library. If you
-//! are in need of those, feel free to contribute with the API design
-//! incorporating them and the implementation of appropriate solvers.
+//! ## Systems of equations
+//!
+//! Given a vector function _r: D → R<sup>n</sup>_, with _r<sub>i</sub>: D → R_
+//! from some domain _D_ (in _R<sup>n</sup>_) to the real numbers for _i = 1, 2,
+//! ..., n_, and an initial point _x<sub>0</sub>_, the goal is to find a point
+//! _x'_ such that _r(x') = 0_. Note that there is no constraint on the form of
+//! the equations _r<sub>i</sub>_ (compare with specialized solvers for [systems
+//! of linear
+//! equations](https://en.wikipedia.org/wiki/System_of_linear_equations)).
 //!
-//! When it comes to code, the problem is any type that implements the
-//! [`System`] and [`Problem`] traits.
+//! ### Example
 //!
 //! ```rust
-//! // Gomez is based on `nalgebra` crate.
 //! use gomez::nalgebra as na;
-//! use gomez::{Domain, Problem, System};
+//! use gomez::{Domain, Problem, SolverDriver, System};
 //! use na::{Dyn, IsContiguous};
 //!
-//! // A problem is represented by a type.
+//! // System of equations is represented by a struct.
 //! struct Rosenbrock {
 //!     a: f64,
 //!     b: f64,
 //! }
 //!
 //! impl Problem for Rosenbrock {
-//!     // The numeric type. Usually f64 or f32.
+//!     // Field type, f32 or f64.
 //!     type Field = f64;
 //!
-//!     // Specification for the domain. At the very least, the dimension
-//!     // must be known.
+//!     // Domain of the system.
 //!     fn domain(&self) -> Domain<Self::Field> {
 //!         Domain::unconstrained(2)
 //!     }
 //! }
 //!
 //! impl System for Rosenbrock {
-//!     // Evaluate trial values of variables to the system.
-//!     fn eval<Sx, Sfx>(
+//!     // Evaluation of the system (computing the residuals).
+//!     fn eval<Sx, Srx>(
 //!         &self,
 //!         x: &na::Vector<Self::Field, Dyn, Sx>,
-//!         fx: &mut na::Vector<Self::Field, Dyn, Sfx>,
+//!         rx: &mut na::Vector<Self::Field, Dyn, Srx>,
 //!     ) where
 //!         Sx: na::storage::Storage<Self::Field, Dyn> + IsContiguous,
-//!         Sfx: na::storage::StorageMut<Self::Field, Dyn>,
+//!         Srx: na::storage::StorageMut<Self::Field, Dyn>,
 //!     {
-//!         // Compute the residuals of all equations.
-//!         fx[0] = (self.a - x[0]).powi(2);
-//!         fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2);
+//!         rx[0] = (self.a - x[0]).powi(2);
+//!         rx[1] = self.b * (x[1] - x[0].powi(2)).powi(2);
 //!     }
 //! }
-//! ```
 //!
-//! And that's it. There is no need for defining gradient vector, Hessian or
-//! Jacobian matrices. The library uses [finite
-//! difference](https://en.wikipedia.org/wiki/Finite_difference_method)
-//! technique (usually sufficient in practice) or algorithms that are
-//! derivative-free by definition.
+//! let r = Rosenbrock { a: 1.0, b: 1.0 };
+//! let mut solver = SolverDriver::builder(&r)
+//!     .with_initial(vec![-10.0, -5.0])
+//!     .build();
 //!
-//!
The previous example used unconstrained variable, but it is also possible to -//! specify bounds. +//! let (x, norm) = solver +//! .find(|state| state.norm() <= 1e-6 || state.iter() >= 100) +//! .expect("solver error"); //! -//! ```rust -//! # use gomez::nalgebra as na; -//! # use gomez::*; -//! # -//! # struct Rosenbrock { -//! # a: f64, -//! # b: f64, -//! # } -//! # -//! impl Problem for Rosenbrock { -//! # type Field = f64; -//! // ... -//! -//! fn domain(&self) -> Domain { -//! [(-10.0, 10.0), (-10.0, 10.0)].into_iter().collect() -//! } -//! } +//! println!("|| r(x) || = {norm}\tx = {x:?}"); +//! # assert!(norm <= 1e-6); //! ``` //! -//! ## Solving +//! See [`SolverDriver`] and [`SolverBuilder`](driver::SolverBuilder) for +//! additional options. //! -//! When you have your system available, you can use the [`SolverDriver`] to run -//! the iteration process until a stopping criterion is reached. +//! ## Custom algorithms +//! +//! It is possible to create a custom algorithm by implementing the +//! [`Optimizer`] and/or [`Solver`] trait. Then it can be used by the driver as +//! any other algorithm provided by gomez. Go see the documentation of the +//! traits to get more details. //! //! ```rust -//! use gomez::SolverDriver; //! # use gomez::nalgebra as na; -//! # use gomez::{Domain, Problem, System}; -//! # use na::{Dyn, IsContiguous}; +//! # use gomez::{Domain, Function, Optimizer, OptimizerDriver, Problem}; +//! # use na::{storage::StorageMut, Dyn, IsContiguous, Vector}; +//! # +//! # struct MyAlgo; +//! # +//! # impl MyAlgo { +//! # fn new(_: &Rosenbrock, _: &Domain) -> Self { +//! # Self +//! # } +//! # } +//! # +//! # impl Optimizer for MyAlgo { +//! # const NAME: &'static str = "my algo"; +//! # type Error = std::convert::Infallible; +//! # +//! # fn opt_next( +//! # &mut self, +//! # f: &Rosenbrock, +//! # dom: &Domain, +//! # x: &mut Vector, +//! # ) -> Result +//! # where +//! # Sx: StorageMut + IsContiguous, +//! # { +//! # Ok(0.0) +//! # } +//! # } //! # //! # struct Rosenbrock { //! # a: f64, @@ -153,65 +215,41 @@ //! # type Field = f64; //! # //! # fn domain(&self) -> Domain { -//! # Domain::unconstrained(2) +//! # Domain::rect(vec![-10.0, -10.0], vec![10.0, 10.0]) //! # } //! # } //! # -//! # impl System for Rosenbrock { -//! # fn eval( -//! # &self, -//! # x: &na::Vector, -//! # fx: &mut na::Vector, -//! # ) where -//! # Sx: na::storage::Storage + IsContiguous, -//! # Sfx: na::storage::StorageMut, +//! # impl Function for Rosenbrock { +//! # fn apply(&self, x: &na::Vector) -> Self::Field +//! # where +//! # Sx: na::Storage + IsContiguous, //! # { -//! # fx[0] = (self.a - x[0]).powi(2); -//! # fx[1] = self.b * (x[1] - x[0].powi(2)).powi(2); +//! # (self.a - x[0]).powi(2) + self.b * (x[1] - x[0].powi(2)).powi(2) //! # } //! # } -//! +//! # //! let f = Rosenbrock { a: 1.0, b: 1.0 }; -//! let mut solver = SolverDriver::builder(&f) -//! .with_initial(vec![-10.0, -5.0]) +//! let mut optimizer = OptimizerDriver::builder(&f) +//! .with_algo(|f, dom| MyAlgo::new(f, dom)) //! .build(); -//! -//! let tolerance = 1e-6; -//! -//! let (x, fx) = solver -//! .find(|state| { -//! println!( -//! "iter = {}\t|| fx || = {}\tx = {:?}", -//! state.iter(), -//! state.norm(), -//! state.x() -//! ); -//! state.norm() <= tolerance || state.iter() >= 100 -//! }) -//! .expect("solver encountered an error"); -//! -//! if fx <= tolerance { -//! println!("solved: {x:?}"); -//! } else { -//! println!("maximum number of iteration exceeded"); -//! } //! ``` //! +//! 
If you implement an algorithm, please reach out to discuss if we could +//! include it in gomez. +//! //! ## Roadmap //! -//! Listed *not* in order of priority. +//! Listed *not* in priority order. //! //! * [Homotopy continuation //! method](http://homepages.math.uic.edu/~jan/srvart/node4.html) to compare -//! the performance with Trust region method. +//! the performance with the trust region method //! * Conjugate gradient method //! * Experimentation with various global optimization techniques for initial -//! guesses search +//! guess search //! * Evolutionary/nature-inspired algorithms //! * Bayesian optimization -//! * Focus on initial guesses search and tools in general -//! * High-level drivers encapsulating the low-level API for users that do not -//! need the fine-grained control. +//! * Focus on initial guesses search and tools for analysis in general //! //! ## License //! diff --git a/src/testing.rs b/src/testing.rs index fbf19c1..b92d9a5 100644 --- a/src/testing.rs +++ b/src/testing.rs @@ -1,4 +1,4 @@ -//! Testing systems and utilities useful for benchmarking, debugging and smoke +//! Testing problems and utilities useful for benchmarking, debugging and smoke //! testing. //! //! [`ExtendedRosenbrock`] and [`Sphere`] are recommended for first tests. @@ -31,8 +31,8 @@ use thiserror::Error; use crate::core::{Domain, Function, Optimizer, Problem, Solver, System}; -/// Extension of the [`System`] trait that provides additional information that -/// is useful for testing solvers. +/// Extension of the [`Problem`] trait that provides additional information that +/// is useful for testing algorithms. pub trait TestProblem: Problem { /// Standard initial values for the problem. Using the same initial values is /// essential for fair comparison of methods. @@ -45,22 +45,24 @@ pub trait TestSystem: System + TestProblem where Self::Field: approx::RelativeEq, { - /// A set of roots (if known and finite). This is mostly just for - /// information, for example to know how close a solver got even if it - /// failed. For testing if a given point is root, [`TestSystem::is_root`] - /// should be used. + /// A set of roots (if known and finite). + /// + /// This is mostly just for information, for example to know how close a + /// solver got even if it failed. For testing if a given point is a + /// solution, [`TestSystem::is_root`] should be used. fn roots(&self) -> Vec> { Vec::new() } - /// Test if given point is a root of the system, given the tolerance `eps`. + /// Tests if given point is a solution of the system, given the tolerance + /// `eps`. fn is_root(&self, x: &Vector, eps: Self::Field) -> bool where Sx: Storage + IsContiguous, { - let mut fx = x.clone_owned(); - self.eval(x, &mut fx); - fx.norm() <= eps + let mut rx = x.clone_owned(); + self.eval(x, &mut rx); + rx.norm() <= eps } } @@ -70,15 +72,17 @@ pub trait TestFunction: Function + TestProblem where Self::Field: approx::RelativeEq, { - /// A set of global optima (if known and finite). This is mostly just for - /// information, for example to know how close an optimizer got even if it - /// failed. For testing if a given point is global optimum, [`TestFunction::is_optimum`] - /// should be used. + /// A set of global optima (if known and finite). + /// + /// This is mostly just for information, for example to know how close an + /// optimizer got even if it failed. For testing if a given point is a global + /// optimum, [`TestFunction::is_optimum`] should be used. 
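+    ///
+    /// A minimal sketch of the intended use in a test (the function `f` and
+    /// candidate point `x` are assumed to exist):
+    ///
+    /// ```ignore
+    /// assert!(f.is_optimum(&x, 1e-12));
+    /// ```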
fn optima(&self) -> Vec> { Vec::new() } - /// Test if given point is a root of the system, given the tolerance `eps`. + /// Test if given point is a global optimum of the system, given the + /// tolerance `eps`. fn is_optimum(&self, x: &Vector, eps: Self::Field) -> bool where Sx: Storage + IsContiguous; @@ -135,10 +139,10 @@ impl ExtendedRosenbrock { let x1 = x[i1] * alpha; let x2 = x[i2] / alpha; - let fx1 = 10.0 * (x2 - x1 * x1); - let fx2 = 1.0 - x1; + let rx1 = 10.0 * (x2 - x1 * x1); + let rx2 = 1.0 - x1; - [fx1, fx2].into_iter() + [rx1, rx2].into_iter() }) } } @@ -166,15 +170,15 @@ impl Problem for ExtendedRosenbrock { } impl System for ExtendedRosenbrock { - fn eval( + fn eval( &self, x: &Vector, - fx: &mut Vector, + rx: &mut Vector, ) where Sx: Storage + IsContiguous, - Sfx: StorageMut, + Srx: StorageMut, { - eval(self.residuals(x), fx) + eval(self.residuals(x), rx) } fn norm(&self, x: &Vector) -> Self::Field @@ -251,12 +255,12 @@ impl ExtendedPowell { let i3 = 4 * i + 2; let i4 = 4 * i + 3; - let fx1 = x[i1] + 10.0 * x[i2]; - let fx2 = 5f64.sqrt() * (x[i3] - x[i4]); - let fx3 = (x[i2] - 2.0 * x[i3]).powi(2); - let fx4 = 10f64.sqrt() * (x[i1] - x[i4]).powi(2); + let rx1 = x[i1] + 10.0 * x[i2]; + let rx2 = 5f64.sqrt() * (x[i3] - x[i4]); + let rx3 = (x[i2] - 2.0 * x[i3]).powi(2); + let rx4 = 10f64.sqrt() * (x[i1] - x[i4]).powi(2); - [fx1, fx2, fx3, fx4].into_iter() + [rx1, rx2, rx3, rx4].into_iter() }) } } @@ -276,15 +280,15 @@ impl Problem for ExtendedPowell { } impl System for ExtendedPowell { - fn eval( + fn eval( &self, x: &Vector, - fx: &mut Vector, + rx: &mut Vector, ) where Sx: Storage + IsContiguous, - Sfx: StorageMut, + Srx: StorageMut, { - eval(self.residuals(x), fx) + eval(self.residuals(x), rx) } fn norm(&self, x: &Vector) -> Self::Field @@ -363,15 +367,15 @@ impl Problem for BullardBiegler { } impl System for BullardBiegler { - fn eval( + fn eval( &self, x: &Vector, - fx: &mut Vector, + rx: &mut Vector, ) where Sx: Storage + IsContiguous, - Sfx: StorageMut, + Srx: StorageMut, { - eval(self.residuals(x), fx) + eval(self.residuals(x), rx) } fn norm(&self, x: &Vector) -> Self::Field @@ -442,15 +446,15 @@ impl Problem for Sphere { } impl System for Sphere { - fn eval( + fn eval( &self, x: &Vector, - fx: &mut Vector, + rx: &mut Vector, ) where Sx: Storage + IsContiguous, - Sfx: StorageMut, + Srx: StorageMut, { - eval(self.residuals(x), fx) + eval(self.residuals(x), rx) } fn norm(&self, x: &Vector) -> Self::Field @@ -520,14 +524,14 @@ impl Brown { let n = self.n as f64; let x_sum = x.sum(); - let fx0 = x.iter().product::() - 1.0; - let fxs = x + let r0x = x.iter().product::() - 1.0; + let rix = x .iter() .skip(1) .copied() .map(move |xi| xi + x_sum - n + 1.0); - std::iter::once(fx0).chain(fxs) + std::iter::once(r0x).chain(rix) } } @@ -546,15 +550,15 @@ impl Problem for Brown { } impl System for Brown { - fn eval( + fn eval( &self, x: &Vector, - fx: &mut Vector, + rx: &mut Vector, ) where Sx: Storage + IsContiguous, - Sfx: StorageMut, + Srx: StorageMut, { - eval(self.residuals(x), fx) + eval(self.residuals(x), rx) } fn norm(&self, x: &Vector) -> Self::Field @@ -624,15 +628,15 @@ impl Problem for Exponential { } impl System for Exponential { - fn eval( + fn eval( &self, x: &Vector, - fx: &mut Vector, + rx: &mut Vector, ) where Sx: Storage + IsContiguous, - Sfx: StorageMut, + Srx: StorageMut, { - eval(self.residuals(x), fx) + eval(self.residuals(x), rx) } fn norm(&self, x: &Vector) -> Self::Field @@ -652,7 +656,7 @@ impl TestProblem for Exponential { impl TestSystem for 
Exponential {} -/// This system is true for any assignment of x. +/// Any value of _x_ is a solution. #[derive(Debug, Clone, Copy)] pub struct InfiniteSolutions { n: usize, @@ -681,15 +685,15 @@ impl Problem for InfiniteSolutions { } impl System for InfiniteSolutions { - fn eval( + fn eval( &self, _x: &Vector, - fx: &mut Vector, + rx: &mut Vector, ) where Sx: Storage + IsContiguous, - Sfx: StorageMut, + Srx: StorageMut, { - fx.fill(0.0); + rx.fill(0.0); } fn norm(&self, _: &Vector) -> Self::Field @@ -709,37 +713,37 @@ impl TestProblem for InfiniteSolutions { impl TestSystem for InfiniteSolutions {} -/// Solving or optimization error of the testing solver/optimizer driver (see -/// [`solve`] and [`optimize`]). +/// Optimization or solving error of the testing optimizer/solver driver (see +/// [`optimize`] and [`solve`]). #[derive(Debug, Error)] pub enum TestingError { - /// Error of the solver used. + /// Error of the algorithm used. #[error("{0}")] Inner(#[from] E), - /// Solver did not terminate. - #[error("solver did not terminate")] + /// Algorithm did not terminate. + #[error("algorithm did not terminate")] Termination, } /// A simple solver driver that can be used in tests. -pub fn solve>( - f: &F, - dom: &Domain, - mut solver: S, - mut x: OVector, +pub fn solve>( + r: &R, + dom: &Domain, + mut solver: A, + mut x: OVector, max_iters: usize, - tolerance: F::Field, -) -> Result, TestingError> + tolerance: R::Field, +) -> Result, TestingError> where - S::Error: StdError, + A::Error: StdError, { - let mut fx = x.clone_owned(); + let mut rx = x.clone_owned(); let mut iter = 0; loop { - solver.solve_next(f, dom, &mut x, &mut fx)?; + solver.solve_next(r, dom, &mut x, &mut rx)?; - if fx.norm() <= tolerance { + if rx.norm() <= tolerance { return Ok(x); } @@ -751,7 +755,7 @@ where } } -/// A simple solver driver that can be used in tests. +/// A simple optimization driver that can be used in tests. pub fn optimize>( f: &F, dom: &Domain, @@ -782,37 +786,40 @@ where } } -/// Iterate the solver and inspect it in each iteration. This is useful for -/// testing evolutionary/nature-inspired algorithms. -pub fn iter, G>( - f: &F, - dom: &Domain, - mut solver: S, - mut x: OVector, +/// Iterates the solver and inspects it in each iteration. +/// +/// This is useful for testing evolutionary/nature-inspired algorithms. +pub fn iter, G>( + r: &R, + dom: &Domain, + mut solver: A, + mut x: OVector, iters: usize, mut inspect: G, -) -> Result<(), S::Error> +) -> Result<(), A::Error> where - S::Error: StdError, - G: FnMut(&S, &OVector, F::Field, usize), + A::Error: StdError, + G: FnMut(&A, &OVector, R::Field, usize), { - let mut fx = x.clone_owned(); + let mut rx = x.clone_owned(); for iter in 0..iters { - solver.solve_next(f, dom, &mut x, &mut fx)?; - inspect(&solver, &x, fx.norm(), iter); + solver.solve_next(r, dom, &mut x, &mut rx)?; + inspect(&solver, &x, rx.norm(), iter); } Ok(()) } -fn eval(residuals: impl Iterator, fx: &mut Vector) +fn eval(residuals: impl Iterator, rx: &mut Vector) where - Sfx: StorageMut, + Srx: StorageMut, { - fx.iter_mut().zip(residuals).for_each(|(fxi, v)| *fxi = v); + rx.iter_mut() + .zip(residuals) + .for_each(|(rix, res)| *rix = res); } fn norm(residuals: impl Iterator) -> f64 { - residuals.map(|v| v.powi(2)).sum::().sqrt() + residuals.map(|res| res.powi(2)).sum::().sqrt() }