Merge pull request #2 from ypodlesov/CG
Add CG method.
ypodlesov authored May 18, 2024
2 parents 66e98d4 + 9a8400e commit be218ef
Showing 9 changed files with 209 additions and 112 deletions.
3 changes: 2 additions & 1 deletion CMakeLists.txt
@@ -29,4 +29,5 @@ endfunction()
 
 set(BASIC_LA_PATH ${CMAKE_CURRENT_SOURCE_DIR}/basic_la)
 add_if_exists(basic_la)
-add_if_exists(qr_decomposition)
+add_if_exists(qr_decomposition)
+add_if_exists(conjugate_gradient)
4 changes: 3 additions & 1 deletion ci_script.sh
@@ -7,4 +7,6 @@ mkdir build
 cd build
 cmake ..
 make test_qr_decomposition
-./test_qr_decomposition
+./test_qr_decomposition
+make test_conjugate_gradient
+./test_conjugate_gradient
6 changes: 3 additions & 3 deletions conjugate_gradient/CMakeLists.txt
@@ -2,6 +2,6 @@ add_catch(test_conjugate_gradient test.cpp conjugate_gradient.cpp)
 target_include_directories(test_conjugate_gradient PRIVATE ${BASIC_LA_PATH})
 target_link_libraries(test_conjugate_gradient PRIVATE BASIC_LA)
 
-# add_catch(bench_conjugate_gradient run.cpp conjugate_gradient.cpp)
-# target_include_directories(bench_conjugate_gradient PRIVATE ${BASIC_LA_PATH})
-# target_link_libraries(bench_conjugate_gradient PRIVATE BASIC_LA)
+add_catch(bench_conjugate_gradient run.cpp conjugate_gradient.cpp)
+target_include_directories(bench_conjugate_gradient PRIVATE ${BASIC_LA_PATH})
+target_link_libraries(bench_conjugate_gradient PRIVATE BASIC_LA)
37 changes: 36 additions & 1 deletion conjugate_gradient/README.md
@@ -1,3 +1,38 @@
 ### A simple version of CG (Conjugate Gradient).
 
-##### Benchmark results in file `bench_results.txt`
+##### Benchmark results
+
+Benchmark CG
+...............................................................................
+
+benchmark name                      samples    iterations    est run time
+                                    mean       low mean      high mean
+                                    std dev    low std dev   high std dev
+-------------------------------------------------------------------------------
+Size 256                                  5             1      158.155 ms
+                                 30.6362 ms    30.3948 ms     30.8777 ms
+                                 278.589 us    168.836 us      367.73 us
+
+Size 512                                  5             1      474.875 ms
+                                 93.5246 ms    93.1857 ms     93.8352 ms
+                                  384.98 us    251.424 us     502.811 us
+
+Size 1024                                 5             1       1.28954 s
+                                 254.104 ms    253.491 ms     254.573 ms
+                                 642.205 us    488.519 us     765.256 us
+
+Size 2048                                 5             1       3.11173 s
+                                 618.631 ms    616.151 ms     621.485 ms
+                                 2.99239 ms    2.11292 ms     3.53132 ms
+
+Size 4096                                 5             1       6.72483 s
+                                  1.33877 s     1.33262 s       1.3448 s
+                                 7.21186 ms    4.33359 ms     10.6794 ms
+
+Size 8192                                 5             1       17.1783 s
+                                  3.45489 s     3.44111 s      3.48345 s
+                                 21.1881 ms    4.29948 ms     29.4589 ms
+
+Size 16384                                5             1       38.4892 s
+                                  7.63827 s     7.62766 s      7.65727 s
+                                 15.5452 ms    5.99529 ms     21.0671 ms
32 changes: 0 additions & 32 deletions conjugate_gradient/bench_results.txt

This file was deleted.

59 changes: 52 additions & 7 deletions conjugate_gradient/conjugate_gradient.cpp
@@ -3,35 +3,80 @@
 #include <utility>
 
 bool ConjugateGradient(const SparseMatrix<double>& a, const Vector<double>& b, Vector<double>& x) {
-    constexpr double eps = 0.000001;
+    assert(a.data_ && b.data_ && a.row_cnt_ == b.mem_size_);
+    if (!x.data_ || x.mem_size_ != a.col_cnt_) {
+        x = Vector<double>(a.col_cnt_);
+    }
     Vector<double> current_residual(b);
     Vector<double> current_p(b);
-    Vector<double> current_x(a.col_cnt_);
+    Vector<double> current_x(a.row_cnt_);
     NHelpers::Nullify(current_x.data_, current_x.mem_size_);
     size_t n = b.mem_size_;
 
     double current_alpha, current_beta;
-    for (size_t j = 0; j < x.mem_size_ * x.mem_size_ && !NHelpers::RoughEq<double, double>(current_residual.Norm2(), 0.0, 0.001); ++j) {
-        Vector<double> ap;
+    for (size_t j = 0; j < x.mem_size_ * x.mem_size_ && !NHelpers::RoughEq<double, double>(current_residual.Norm2(), 0.0); ++j) {
+        Vector<double> ap(n);
         a.VecMult(current_p, ap);
 
         double ap_cur_p_dot_prod = NHelpers::InnerProd(ap.data_, current_p.data_, n);
-        if (NHelpers::RoughEq<double, double>(ap_cur_p_dot_prod, 0.0, eps)) {
+        if (NHelpers::RoughEq<double, double>(ap_cur_p_dot_prod, 0.0)) {
             break;
         }
 
         double current_residual_norm = NHelpers::InnerProd(current_residual.data_, current_residual.data_, n);
         current_alpha = current_residual_norm / ap_cur_p_dot_prod;
 
         current_x.PlusAX(current_p, current_alpha);
-        Vector<double> next_residual;
+
+        Vector<double> next_residual(n);
         next_residual.AXPlusBY(current_residual, 1, ap, -current_alpha);
-        if (NHelpers::RoughEq<double, double>(current_residual_norm, 0.0, eps)) {
+
+        if (NHelpers::RoughEq<double, double>(current_residual_norm, 0.0)) {
             break;
         }
         current_beta = NHelpers::InnerProd(next_residual.data_, next_residual.data_, n) / current_residual_norm;
         current_p.AXPlusBY(next_residual, 1, current_p, current_beta);
         current_residual = std::move(next_residual);
     }
     x = std::move(current_x);
     return true;
+}
+
+bool ConjugateGradient(const Matrix<double>& a, const Vector<double>& b, Vector<double>& x) {
+    assert(a.data_ && b.data_ && a.row_cnt_ == b.mem_size_);
+    if (!x.data_ || x.mem_size_ != a.col_cnt_) {
+        x = Vector<double>(a.col_cnt_);
+    }
+    Vector<double> current_residual(b);
+    Vector<double> current_p(b);
+    Vector<double> current_x(a.row_cnt_);
+    NHelpers::Nullify(current_x.data_, current_x.mem_size_);
+    size_t n = b.mem_size_;
+
+    double current_alpha, current_beta;
+    for (size_t j = 0; j < x.mem_size_ * x.mem_size_ && !NHelpers::RoughEq<double, double>(current_residual.Norm2(), 0.0); ++j) {
+        Vector<double> ap(n);
+        a.VecMult(current_p, ap);
+
+        double ap_cur_p_dot_prod = NHelpers::InnerProd(ap.data_, current_p.data_, n);
+        if (NHelpers::RoughEq<double, double>(ap_cur_p_dot_prod, 0.0)) {
+            break;
+        }
+
+        double current_residual_norm = NHelpers::InnerProd(current_residual.data_, current_residual.data_, n);
+        current_alpha = current_residual_norm / ap_cur_p_dot_prod;
+
+        current_x.PlusAX(current_p, current_alpha);
+
+        Vector<double> next_residual(n);
+        next_residual.AXPlusBY(current_residual, 1, ap, -current_alpha);
+
+        if (NHelpers::RoughEq<double, double>(current_residual_norm, 0.0)) {
+            break;
+        }
+        current_beta = NHelpers::InnerProd(next_residual.data_, next_residual.data_, n) / current_residual_norm;
+        current_p.AXPlusBY(next_residual, 1, current_p, current_beta);
+        current_residual = std::move(next_residual);
+    }
+    x = std::move(current_x);
+    return true;
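For reference, the loop body in both overloads follows the standard unpreconditioned CG recurrences for a symmetric positive definite system $Ax = b$, starting from $x_0 = 0$ (the `Nullify` call) and $r_0 = p_0 = b$:

$$\alpha_k = \frac{r_k^\top r_k}{p_k^\top A p_k}, \qquad x_{k+1} = x_k + \alpha_k p_k, \qquad r_{k+1} = r_k - \alpha_k A p_k,$$

$$\beta_k = \frac{r_{k+1}^\top r_{k+1}}{r_k^\top r_k}, \qquad p_{k+1} = r_{k+1} + \beta_k p_k.$$

Here `current_alpha`, `current_x.PlusAX(...)`, `next_residual.AXPlusBY(...)`, `current_beta`, and `current_p.AXPlusBY(...)` compute exactly these updates, and the iteration stops early once either the residual norm or $p_k^\top A p_k$ is roughly zero.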
4 changes: 3 additions & 1 deletion conjugate_gradient/conjugate_gradient.h
@@ -1,5 +1,7 @@
 #pragma once
 #include <sparse_matrix.h>
+#include <matrix.h>
 #include <vector.h>
 
-bool ConjugateGradient(const SparseMatrix<double>& a, const Vector<double>& b, Vector<double>& x);
+bool ConjugateGradient(const SparseMatrix<double>& a, const Vector<double>& b, Vector<double>& x);
+bool ConjugateGradient(const Matrix<double>& a, const Vector<double>& b, Vector<double>& x);
99 changes: 75 additions & 24 deletions conjugate_gradient/run.cpp
@@ -1,42 +1,93 @@
+#include <filesystem>
+#include <fstream>
+#include <sstream>
 #include "conjugate_gradient.h"
 #include "helpers.h"
 
 #include <catch2/catch_test_macros.hpp>
 #include <catch2/benchmark/catch_benchmark.hpp>
 
-void Prepare(TMatrix<double>& A, TVector<double>& b, const size_t n) {
-    std::srand(std::time(nullptr));
-    A = TMatrix<double>::GenerateRandomSymmetricDefinite(n, EDefiniteness::Positive);
-    b = TVector<double>::CreateRandom(n);
+void Prepare(SparseMatrix<double>& a, Vector<double>& b, const size_t n) {
+    std::stringstream file_name;
+    file_name << std::filesystem::current_path().string() << "/../" << "matrix_examples/sparse_spd/" << n;
+    std::ifstream fstream;
+    fstream.open(file_name.str());
+    REQUIRE(fstream.is_open());
+    fstream >> a;
+    fstream.close();
+    REQUIRE(a.data_);
+    NHelpers::GenRandomVector(b, n, true);
+    Vector<double> x(n);
 }
 
 TEST_CASE("Benchmark CG") {
-    TMatrix<double> A;
-    TVector<double> b;
-    TVector<double> x;
-    TVector<double> result;
     {
-        Prepare(A, b, 10);
-        BENCHMARK("Size 10") {
-            ConjugateGradient(A, b, x);
+        SparseMatrix<double> a_sparse;
+        Matrix<double> a_dense;
+        Vector<double> b;
+        Vector<double> x;
+        Prepare(a_sparse, b, 256);
+        BENCHMARK("Size 256") {
+            ConjugateGradient(a_sparse, b, x);
         };
-        REQUIRE(TMatrix<double>::MVMultiply(A, x, result));
-        REQUIRE(RoughEq<double, double>(TVector<double>::Norm2(result - b), 0, 0.001));
     }
     {
-        Prepare(A, b, 500);
-        BENCHMARK("Size 500") {
-            ConjugateGradient(A, b, x);
+        SparseMatrix<double> a_sparse;
+        Matrix<double> a_dense;
+        Vector<double> b;
+        Vector<double> x;
+        Prepare(a_sparse, b, 512);
+        BENCHMARK("Size 512") {
+            ConjugateGradient(a_sparse, b, x);
        };
-        REQUIRE(TMatrix<double>::MVMultiply(A, x, result));
-        REQUIRE(RoughEq<double, double>(TVector<double>::Norm2(result - b), 0, 0.001));
     }
     {
-        Prepare(A, b, 1000);
-        BENCHMARK("Size 1000") {
-            ConjugateGradient(A, b, x);
+        SparseMatrix<double> a_sparse;
+        Matrix<double> a_dense;
+        Vector<double> b;
+        Vector<double> x;
+        Prepare(a_sparse, b, 1024);
+        BENCHMARK("Size 1024") {
+            ConjugateGradient(a_sparse, b, x);
         };
-        REQUIRE(TMatrix<double>::MVMultiply(A, x, result));
-        REQUIRE(RoughEq<double, double>(TVector<double>::Norm2(result - b), 0, 0.01));
     }
+    {
+        SparseMatrix<double> a_sparse;
+        Matrix<double> a_dense;
+        Vector<double> b;
+        Vector<double> x;
+        Prepare(a_sparse, b, 2048);
+        BENCHMARK("Size 2048") {
+            ConjugateGradient(a_sparse, b, x);
+        };
+    }
+    {
+        SparseMatrix<double> a_sparse;
+        Matrix<double> a_dense;
+        Vector<double> b;
+        Vector<double> x;
+        Prepare(a_sparse, b, 4096);
+        BENCHMARK("Size 4096") {
+            ConjugateGradient(a_sparse, b, x);
+        };
+    }
+    {
+        SparseMatrix<double> a_sparse;
+        Matrix<double> a_dense;
+        Vector<double> b;
+        Vector<double> x;
+        Prepare(a_sparse, b, 8192);
+        BENCHMARK("Size 8192") {
+            ConjugateGradient(a_sparse, b, x);
+        };
+    }
+    {
+        SparseMatrix<double> a_sparse;
+        Matrix<double> a_dense;
+        Vector<double> b;
+        Vector<double> x;
+        Prepare(a_sparse, b, 16384);
+        BENCHMARK("Size 16384") {
+            ConjugateGradient(a_sparse, b, x);
+        };
+    }
 }
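Unlike the old TMatrix-based benchmark, the new blocks no longer verify the computed solution: the `REQUIRE(TMatrix<double>::MVMultiply(...))` / `RoughEq(Norm2(result - b), 0, ...)` pairs were dropped. A minimal sketch of an equivalent check, written against the SparseMatrix/Vector calls used elsewhere in this commit, could be placed after any of the BENCHMARK blocks; the variable names, the placement, and the 0.001 tolerance are assumptions rather than anything this commit defines:

    // Hypothetical residual check mirroring the removed REQUIREs.
    // Assumes VecMult, AXPlusBY, Norm2, and RoughEq behave as in conjugate_gradient.cpp.
    Vector<double> ax(b.mem_size_);
    a_sparse.VecMult(x, ax);                  // ax = A * x
    Vector<double> residual(b.mem_size_);
    residual.AXPlusBY(ax, 1, b, -1);          // residual = A * x - b
    REQUIRE(NHelpers::RoughEq<double, double>(residual.Norm2(), 0.0, 0.001));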