diff --git a/CMakeLists.txt b/CMakeLists.txt
index d93181e..8d25122 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -29,4 +29,5 @@ endfunction()
 
 set(BASIC_LA_PATH ${CMAKE_CURRENT_SOURCE_DIR}/basic_la)
 add_if_exists(basic_la)
-add_if_exists(qr_decomposition)
\ No newline at end of file
+add_if_exists(qr_decomposition)
+add_if_exists(conjugate_gradient)
\ No newline at end of file
diff --git a/ci_script.sh b/ci_script.sh
index 05fc133..d3b0528 100644
--- a/ci_script.sh
+++ b/ci_script.sh
@@ -7,4 +7,6 @@ mkdir build
 cd build
 cmake ..
 make test_qr_decomposition
-./test_qr_decomposition
\ No newline at end of file
+./test_qr_decomposition
+make test_conjugate_gradient
+./test_conjugate_gradient
\ No newline at end of file
diff --git a/conjugate_gradient/CMakeLists.txt b/conjugate_gradient/CMakeLists.txt
index 37fb1d5..b87085e 100644
--- a/conjugate_gradient/CMakeLists.txt
+++ b/conjugate_gradient/CMakeLists.txt
@@ -2,6 +2,6 @@ add_catch(test_conjugate_gradient test.cpp conjugate_gradient.cpp)
 target_include_directories(test_conjugate_gradient PRIVATE ${BASIC_LA_PATH})
 target_link_libraries(test_conjugate_gradient PRIVATE BASIC_LA)
 
-# add_catch(bench_conjugate_gradient run.cpp conjugate_gradient.cpp)
-# target_include_directories(bench_conjugate_gradient PRIVATE ${BASIC_LA_PATH})
-# target_link_libraries(bench_conjugate_gradient PRIVATE BASIC_LA)
\ No newline at end of file
+add_catch(bench_conjugate_gradient run.cpp conjugate_gradient.cpp)
+target_include_directories(bench_conjugate_gradient PRIVATE ${BASIC_LA_PATH})
+target_link_libraries(bench_conjugate_gradient PRIVATE BASIC_LA)
\ No newline at end of file
diff --git a/conjugate_gradient/README.md b/conjugate_gradient/README.md
index a3585e2..5b4cdbb 100644
--- a/conjugate_gradient/README.md
+++ b/conjugate_gradient/README.md
@@ -1,3 +1,38 @@
 ### A simple version of CG (Conjugate Gradient).
 
-##### Benchmark results in file `bench_results.txt`
\ No newline at end of file
+##### Benchmark results
+
+Benchmark CG
+...............................................................................
+
+benchmark name                       samples       iterations    est run time
+                                     mean          low mean      high mean
+                                     std dev       low std dev   high std dev
+-------------------------------------------------------------------------------
+Size 256                                       5             1     158.155 ms
+                                       30.6362 ms    30.3948 ms    30.8777 ms
+                                       278.589 us    168.836 us     367.73 us
+
+Size 512                                       5             1     474.875 ms
+                                       93.5246 ms    93.1857 ms    93.8352 ms
+                                        384.98 us    251.424 us    502.811 us
+
+Size 1024                                      5             1       1.28954 s
+                                       254.104 ms    253.491 ms    254.573 ms
+                                       642.205 us    488.519 us    765.256 us
+
+Size 2048                                      5             1       3.11173 s
+                                       618.631 ms    616.151 ms    621.485 ms
+                                       2.99239 ms    2.11292 ms    3.53132 ms
+
+Size 4096                                      5             1       6.72483 s
+                                         1.33877 s      1.33262 s      1.3448 s
+                                       7.21186 ms    4.33359 ms    10.6794 ms
+
+Size 8192                                      5             1       17.1783 s
+                                         3.45489 s      3.44111 s     3.48345 s
+                                       21.1881 ms    4.29948 ms    29.4589 ms
+
+Size 16384                                     5             1       38.4892 s
+                                         7.63827 s      7.62766 s     7.65727 s
+                                       15.5452 ms    5.99529 ms    21.0671 ms
\ No newline at end of file
diff --git a/conjugate_gradient/bench_results.txt b/conjugate_gradient/bench_results.txt
deleted file mode 100644
index cb9423e..0000000
--- a/conjugate_gradient/bench_results.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-Randomness seeded to: 4283597719
-
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-bench_conjugate_gradient is a Catch2 v3.5.2 host application.
-Run with -? for options
-
--------------------------------------------------------------------------------
-Benchmark CG
--------------------------------------------------------------------------------
-/home/ypodlesov/cmc-ctm-matrix-algos/conjugate_gradient/run.cpp:13
-...............................................................................
-
-benchmark name                       samples       iterations    est run time
-                                     mean          low mean      high mean
-                                     std dev       low std dev   high std dev
--------------------------------------------------------------------------------
-Size 10                                        5             3     293.685 us
-                                       22.7369 us    21.3548 us    24.4768 us
-                                       1.80988 us   817.926 ns    2.50256 us
-
-Size 500                                       5             1       4.98866 s
-                                       994.748 ms    990.529 ms     1.00007 s
-                                       5.57859 ms    2.41148 ms    7.72436 ms
-
-Size 1000                                      5             1       1.15473 m
-                                          13.864 s      13.805 s      13.904 s
-                                       55.3889 ms    27.3388 ms    81.9235 ms
-
-
-===============================================================================
-All tests passed (6 assertions in 1 test case)
-
diff --git a/conjugate_gradient/conjugate_gradient.cpp b/conjugate_gradient/conjugate_gradient.cpp
index c8a3ae4..d296886 100644
--- a/conjugate_gradient/conjugate_gradient.cpp
+++ b/conjugate_gradient/conjugate_gradient.cpp
@@ -3,35 +3,80 @@
 #include <cassert>
 
 bool ConjugateGradient(const SparseMatrix& a, const Vector& b, Vector& x) {
-    constexpr double eps = 0.000001;
     assert(a.data_ && b.data_ && a.row_cnt_ == b.mem_size_);
     if (!x.data_ || x.mem_size_ != a.col_cnt_) {
         x = Vector(a.col_cnt_);
     }
     Vector current_residual(b);
     Vector current_p(b);
-    Vector current_x(a.col_cnt_);
+    Vector current_x(a.row_cnt_);
     NHelpers::Nullify(current_x.data_, current_x.mem_size_);
     size_t n = b.mem_size_;
 
     double current_alpha, current_beta;
-    for (size_t j = 0; j < x.mem_size_ * x.mem_size_ && !NHelpers::RoughEq(current_residual.Norm2(), 0.0, 0.001); ++j) {
-        Vector ap;
+    for (size_t j = 0; j < x.mem_size_ * x.mem_size_ && !NHelpers::RoughEq(current_residual.Norm2(), 0.0); ++j) {
+        Vector ap(n);
         a.VecMult(current_p, ap);
+
+        double ap_cur_p_dot_prod = NHelpers::InnerProd(ap.data_, current_p.data_, n);
+        if (NHelpers::RoughEq(ap_cur_p_dot_prod, 0.0)) {
+            break;
+        }
+
+        double current_residual_norm = NHelpers::InnerProd(current_residual.data_, current_residual.data_, n);
+        current_alpha = current_residual_norm / ap_cur_p_dot_prod;
+
+        current_x.PlusAX(current_p, current_alpha);
+
+        Vector next_residual(n);
+        next_residual.AXPlusBY(current_residual, 1, ap, -current_alpha);
+
+        if (NHelpers::RoughEq(current_residual_norm, 0.0)) {
+            break;
+        }
+        current_beta = NHelpers::InnerProd(next_residual.data_, next_residual.data_, n) / current_residual_norm;
+        current_p.AXPlusBY(next_residual, 1, current_p, current_beta);
+        current_residual = std::move(next_residual);
+    }
+    x = std::move(current_x);
+    return true;
+}
+
+bool ConjugateGradient(const Matrix& a, const Vector& b, Vector& x) {
+    assert(a.data_ && b.data_ && a.row_cnt_ == b.mem_size_);
+    if (!x.data_ || x.mem_size_ != a.col_cnt_) {
+        x = Vector(a.col_cnt_);
+    }
+    Vector current_residual(b);
+    Vector current_p(b);
+    Vector current_x(a.row_cnt_);
+    NHelpers::Nullify(current_x.data_, current_x.mem_size_);
+    size_t n = b.mem_size_;
+
+    double current_alpha, current_beta;
+    for (size_t j = 0; j < x.mem_size_ * x.mem_size_ && !NHelpers::RoughEq(current_residual.Norm2(), 0.0); ++j) {
+        Vector ap(n);
+        a.VecMult(current_p, ap);
+
         double ap_cur_p_dot_prod = NHelpers::InnerProd(ap.data_, current_p.data_, n);
-        if (NHelpers::RoughEq(ap_cur_p_dot_prod, 0.0, eps)) {
+        if (NHelpers::RoughEq(ap_cur_p_dot_prod, 0.0)) {
             break;
         }
+
         double current_residual_norm = NHelpers::InnerProd(current_residual.data_, current_residual.data_, n);
         current_alpha = current_residual_norm / ap_cur_p_dot_prod;
+
         current_x.PlusAX(current_p, current_alpha);
-        Vector next_residual;
+
+        Vector next_residual(n);
         next_residual.AXPlusBY(current_residual, 1, ap, -current_alpha);
-        if (NHelpers::RoughEq(current_residual_norm, 0.0, eps)) {
+
+        if (NHelpers::RoughEq(current_residual_norm, 0.0)) {
             break;
         }
         current_beta = NHelpers::InnerProd(next_residual.data_, next_residual.data_, n) / current_residual_norm;
         current_p.AXPlusBY(next_residual, 1, current_p, current_beta);
+        current_residual = std::move(next_residual);
     }
     x = std::move(current_x);
     return true;
diff --git a/conjugate_gradient/conjugate_gradient.h b/conjugate_gradient/conjugate_gradient.h
index 567c467..cb0eac7 100644
--- a/conjugate_gradient/conjugate_gradient.h
+++ b/conjugate_gradient/conjugate_gradient.h
@@ -1,5 +1,7 @@
 #pragma once
 #include <sparse_matrix.h>
+#include <matrix.h>
 #include <vector.h>
 
-bool ConjugateGradient(const SparseMatrix& a, const Vector& b, Vector& x);
\ No newline at end of file
+bool ConjugateGradient(const SparseMatrix& a, const Vector& b, Vector& x);
+bool ConjugateGradient(const Matrix& a, const Vector& b, Vector& x);
\ No newline at end of file
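For reference, both overloads above implement the textbook CG recurrence: alpha_j = (r_j, r_j) / (A p_j, p_j), x_{j+1} = x_j + alpha_j p_j, r_{j+1} = r_j - alpha_j A p_j, beta_j = (r_{j+1}, r_{j+1}) / (r_j, r_j), p_{j+1} = r_{j+1} + beta_j p_j. The following self-contained sketch mirrors that loop on plain `std::vector<double>`, with a dense matrix-vector product standing in for `VecMult`; the names (`ConjugateGradientSketch`, `Dot`) are illustrative, not the repo's API:

```cpp
// Illustrative, self-contained sketch of the CG loop added in this patch.
// Plain std::vector<double> replaces the repo's Vector; a dense row-major
// matrix replaces SparseMatrix. Names here are hypothetical.
#include <cmath>
#include <cstddef>
#include <vector>

using Vec = std::vector<double>;

double Dot(const Vec& a, const Vec& b) {
    double s = 0.0;
    for (std::size_t i = 0; i < a.size(); ++i) {
        s += a[i] * b[i];
    }
    return s;
}

// Solves a * x = b for symmetric positive definite a; returns x.
Vec ConjugateGradientSketch(const std::vector<Vec>& a, const Vec& b,
                            double eps = 1e-10) {
    const std::size_t n = b.size();
    Vec x(n, 0.0);  // current iterate, zero-initialized like current_x
    Vec r = b;      // residual b - a*x (x == 0), i.e. current_residual
    Vec p = b;      // search direction, i.e. current_p
    Vec ap(n);
    for (std::size_t j = 0; j < n && std::sqrt(Dot(r, r)) > eps; ++j) {
        for (std::size_t i = 0; i < n; ++i) {  // ap = a * p (VecMult)
            ap[i] = Dot(a[i], p);
        }
        const double rr = Dot(r, r);           // current_residual_norm
        const double pap = Dot(ap, p);         // ap_cur_p_dot_prod
        if (pap == 0.0) {                      // degenerate direction: stop
            break;
        }
        const double alpha = rr / pap;         // current_alpha
        for (std::size_t i = 0; i < n; ++i) {
            x[i] += alpha * p[i];              // current_x.PlusAX(p, alpha)
            r[i] -= alpha * ap[i];             // next_residual = r - alpha*ap
        }
        const double beta = Dot(r, r) / rr;    // current_beta
        for (std::size_t i = 0; i < n; ++i) {
            p[i] = r[i] + beta * p[i];         // p = next_residual + beta*p
        }
    }
    return x;
}
```

In exact arithmetic CG reaches the solution of an n-by-n SPD system in at most n steps, so the `j < x.mem_size_ * x.mem_size_` cap in the patch is a safety bound rather than the expected iteration count; the `RoughEq` check on the residual norm is the operative stopping rule.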
diff --git a/conjugate_gradient/run.cpp b/conjugate_gradient/run.cpp
index cde14cd..0d42637 100644
--- a/conjugate_gradient/run.cpp
+++ b/conjugate_gradient/run.cpp
@@ -1,42 +1,93 @@
+#include <filesystem>
+#include <fstream>
+#include <sstream>
 #include "conjugate_gradient.h"
-#include "helpers.h"
 #include <catch2/benchmark/catch_benchmark.hpp>
 #include <catch2/catch_test_macros.hpp>
 
-void Prepare(TMatrix& A, TVector& b, const size_t n) {
-    std::srand(std::time(nullptr));
-    A = TMatrix::GenerateRandomSymmetricDefinite(n, EDefiniteness::Positive);
-    b = TVector::CreateRandom(n);
+void Prepare(SparseMatrix& a, Vector& b, const size_t n) {
+    std::stringstream file_name;
+    file_name << std::filesystem::current_path().string() << "/../" << "matrix_examples/sparse_spd/" << n;
+    std::ifstream fstream;
+    fstream.open(file_name.str());
+    REQUIRE(fstream.is_open());
+    fstream >> a;
+    fstream.close();
+    REQUIRE(a.data_);
+    NHelpers::GenRandomVector(b, n, true);
+    Vector x(n);
 }
 
 TEST_CASE("Benchmark CG") {
-    TMatrix A;
-    TVector b;
-    TVector x;
-    TVector result;
     {
-        Prepare(A, b, 10);
-        BENCHMARK("Size 10") {
-            ConjugateGradient(A, b, x);
+        SparseMatrix a_sparse;
+        Matrix a_dense;
+        Vector b;
+        Vector x;
+        Prepare(a_sparse, b, 256);
+        BENCHMARK("Size 256") {
+            ConjugateGradient(a_sparse, b, x);
         };
-        REQUIRE(TMatrix::MVMultiply(A, x, result));
-        REQUIRE(RoughEq(TVector::Norm2(result - b), 0, 0.001));
     }
     {
-        Prepare(A, b, 500);
-        BENCHMARK("Size 500") {
-            ConjugateGradient(A, b, x);
+        SparseMatrix a_sparse;
+        Matrix a_dense;
+        Vector b;
+        Vector x;
+        Prepare(a_sparse, b, 512);
+        BENCHMARK("Size 512") {
+            ConjugateGradient(a_sparse, b, x);
         };
-        REQUIRE(TMatrix::MVMultiply(A, x, result));
-        REQUIRE(RoughEq(TVector::Norm2(result - b), 0, 0.001));
     }
     {
-        Prepare(A, b, 1000);
-        BENCHMARK("Size 1000") {
-            ConjugateGradient(A, b, x);
+        SparseMatrix a_sparse;
+        Matrix a_dense;
+        Vector b;
+        Vector x;
+        Prepare(a_sparse, b, 1024);
+        BENCHMARK("Size 1024") {
+            ConjugateGradient(a_sparse, b, x);
+        };
+    }
+    {
+        SparseMatrix a_sparse;
+        Matrix a_dense;
+        Vector b;
+        Vector x;
+        Prepare(a_sparse, b, 2048);
+        BENCHMARK("Size 2048") {
+            ConjugateGradient(a_sparse, b, x);
+        };
+    }
+    {
+        SparseMatrix a_sparse;
+        Matrix a_dense;
+        Vector b;
+        Vector x;
+        Prepare(a_sparse, b, 4096);
+        BENCHMARK("Size 4096") {
+            ConjugateGradient(a_sparse, b, x);
+        };
+    }
+    {
+        SparseMatrix a_sparse;
+        Matrix a_dense;
+        Vector b;
+        Vector x;
+        Prepare(a_sparse, b, 8192);
+        BENCHMARK("Size 8192") {
+            ConjugateGradient(a_sparse, b, x);
+        };
+    }
+    {
+        SparseMatrix a_sparse;
+        Matrix a_dense;
+        Vector b;
+        Vector x;
+        Prepare(a_sparse, b, 16384);
+        BENCHMARK("Size 16384") {
+            ConjugateGradient(a_sparse, b, x);
         };
-        REQUIRE(TMatrix::MVMultiply(A, x, result));
-        REQUIRE(RoughEq(TVector::Norm2(result - b), 0, 0.01));
     }
 }
diff --git a/conjugate_gradient/test.cpp b/conjugate_gradient/test.cpp
index 286534d..b74ac10 100644
--- a/conjugate_gradient/test.cpp
+++ b/conjugate_gradient/test.cpp
@@ -1,59 +1,52 @@
 #include "conjugate_gradient.h"
 #include "helpers.h"
+#include <filesystem>
+#include <fstream>
+#include <sstream>
 #include <catch2/catch_test_macros.hpp>
+#include <iostream>
 #include <cstdint>
 #include <cstdlib>
 #include <ctime>
 
-using Catch::Generators::random;
-using Catch::Generators::take;
-
 static void Test(const uint32_t n) {
-    constexpr double eps = 0.000001;
-    std::srand(std::time(nullptr));
-    auto a = SparseMatrix(n, n, n << 1);
-    NHelpers::GenRandom(a, true);
-    Vector b;
-    b.GenRandom(n, true);
-    Vector x;
+
+    std::stringstream file_name;
+    file_name << std::filesystem::current_path().string() << "/../" << "matrix_examples/sparse_spd/" << n;
+    std::ifstream fstream;
+    fstream.open(file_name.str());
+    REQUIRE(fstream.is_open());
+    SparseMatrix a;
+    fstream >> a;
+    fstream.close();
+    REQUIRE(a.data_);
+    Vector b(n);
+    NHelpers::GenRandomVector(b, n, true);
+
+    Vector x(n);
     REQUIRE(ConjugateGradient(a, b, x));
-    Vector result;
+
+    Vector result(n);
     a.VecMult(x, result);
-    Vector diff;
-    diff.AXPlusBY(b, 1, result, 1);
-    std::cout << diff.Norm2() << std::endl;
-    REQUIRE(NHelpers::RoughEq(diff.Norm2(), 0.0, eps));
-    // std::srand(std::time(nullptr));
-    // const TMatrix A = TMatrix::GenerateRandomSymmetricDefinite(n, EDefiniteness::Positive);
-    // const TVector b = TVector::CreateRandom(n);
-    // TVector x;
-    // ConjugateGradient(A, b, x);
-    // TVector result;
-    // REQUIRE(TMatrix::MVMultiply(A, x, result));
-    // REQUIRE(RoughEq(TVector::Norm2(result - b), 0, 0.001));
+    result.PlusAX(b, -1);
+    constexpr double eps = 0.001;
+    std::cout << result.Norm2() << std::endl;
+    REQUIRE(NHelpers::RoughEq(result.Norm2(), 0.0, eps));
 }
 
-TEST_CASE("Size 3") {
-    Test(3);
+TEST_CASE("Size 128") {
+    Test(128);
 }
 
-// TEST_CASE("Size 512") {
-//     Test(512);
-// }
-
-// TEST_CASE("Size 1024") {
-//     Test(1024);
-// }
+TEST_CASE("Size 256") {
+    Test(256);
+}
 
-// TEST_CASE("Size 2048") {
-//     Test(2048);
-// }
+TEST_CASE("Size 512") {
+    Test(512);
+}
 
-// TEST_CASE("Size 4096") {
-//     Test(4096);
-// }
-
-// TEST_CASE("Size 8192") {
-//     Test(8192);
-// }
\ No newline at end of file
+TEST_CASE("Size 1024") {
+    Test(1024);
+}
\ No newline at end of file
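With the benchmark target re-enabled in conjugate_gradient/CMakeLists.txt, it builds and runs the same way ci_script.sh drives the tests (assuming `add_catch` registers an executable per target, as it does for `test_conjugate_gradient`): `make bench_conjugate_gradient && ./bench_conjugate_gradient` from the build directory. Note that both `Prepare` and `Test` resolve their fixtures as `current_path()/"../matrix_examples/sparse_spd/<n>"`, so the SPD example matrices must sit one level above the working directory. In the README table, the mean time grows by roughly a factor of 2.2 to 3.1 each time the matrix size doubles.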