Skip to content

Commit

Permalink
Merge pull request #312 from PDoakORNL/clang_fixes
Browse files Browse the repository at this point in the history
Issues revealed by clang apple or mainline on osx
  • Loading branch information
PDoakORNL authored Dec 13, 2023
2 parents e37b11f + 3829aa9 commit 8e7cac8
Show file tree
Hide file tree
Showing 5 changed files with 20 additions and 11 deletions.
2 changes: 1 addition & 1 deletion include/dca/linalg/matrix.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -254,7 +254,7 @@ class Matrix : public ALLOC {

ValueType* data_ = nullptr;

template <class ScalarType2, DeviceType device_name2>
template <class ScalarType2, DeviceType device_name2, class ALLOC2>
friend class dca::linalg::Matrix;
};

Expand Down
8 changes: 4 additions & 4 deletions include/dca/linalg/matrixop.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -295,8 +295,8 @@ void insertRow(Matrix<Scalar, CPU, ALLOC>& mat, int i) {
// Preconditions: mat is a square matrix.
// Postconditions: ipiv and work are resized to the needed dimension.
// \todo consider doing inverse at full precision regardless of incoming Scalar precision
template <typename Scalar, DeviceType device_name, template <typename, DeviceType> class MatrixType>
void inverse(MatrixType<Scalar, device_name>& mat, Vector<int, CPU>& ipiv,
template <typename Scalar, DeviceType device_name, class ALLOC, template <typename, DeviceType, class> class MatrixType>
void inverse(MatrixType<Scalar, device_name, ALLOC>& mat, Vector<int, CPU>& ipiv,
Vector<Scalar, device_name>& work) {
assert(mat.is_square());

Expand All @@ -312,8 +312,8 @@ void inverse(MatrixType<Scalar, device_name>& mat, Vector<int, CPU>& ipiv,
work.ptr(), lwork);
}

template <typename Scalar, DeviceType device_name, template <typename, DeviceType> class MatrixType>
void inverse(MatrixType<Scalar, device_name>& mat) {
template <typename Scalar, DeviceType device_name, class ALLOC, template <typename, DeviceType, class> class MatrixType>
void inverse(MatrixType<Scalar, device_name, ALLOC>& mat) {
Vector<int, CPU> ipiv;
Vector<Scalar, device_name> work;
inverse(mat, ipiv, work);
Expand Down
4 changes: 2 additions & 2 deletions include/dca/linalg/util/util_matrixop.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -33,8 +33,8 @@ namespace util {

// Returns optimal lwork for inverse.
// In: mat
template <typename ScalarType, template <typename, DeviceType> class MatrixType>
int getInverseWorkSize(MatrixType<ScalarType, CPU>& mat) {
template <typename ScalarType, class ALLOC, template <typename, DeviceType, class> class MatrixType>
int getInverseWorkSize(MatrixType<ScalarType, CPU, ALLOC>& mat) {
assert(mat.is_square());

ScalarType tmp;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -100,7 +100,8 @@ const linalg::Matrix<dca::util::ComplexAlias<Scalar>, linalg::CPU>& SpaceTransfo
const auto& r = RDmn::parameter_type::get_elements()[j];
for (int i = 0; i < KDmn::dmn_size(); ++i) {
const auto& k = KDmn::parameter_type::get_elements()[i];
auto temp_exp = std::exp(dca::util::ComplexAlias<dca::util::RealAlias<Scalar>>{0, util::innerProduct(k, r)});
using Real = dca::util::RealAlias<Scalar>;
auto temp_exp = std::exp(dca::util::ComplexAlias<Real>{0, static_cast<Real>(util::innerProduct(k, r))});
T(i, j) = typename decltype(T)::ValueType{temp_exp.real(), temp_exp.imag()};
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -113,7 +113,10 @@ class CtauxClusterSolver {
void setSampleConfiguration(const io::Buffer&) {}

/** used for testing */
auto& getG0() { return g0_; };
auto& getG0() {
return g0_;
};

protected:
void warmUp(Walker& walker);

Expand Down Expand Up @@ -435,8 +438,13 @@ void CtauxClusterSolver<device_t, Parameters, Data, DIST>::computeErrorBars() {
std::vector<typename Data::TpGreensFunction> G4 = accumulator_.get_sign_times_G4();

for (std::size_t channel = 0; channel < G4.size(); ++channel) {
G4[channel] /= TpComplex{parameters_.get_beta() * parameters_.get_beta()} *
TpComplex{accumulator_.get_accumulated_sign().sum()};
if constexpr (dca::util::IsComplex_t<Scalar>::value)
G4[channel] /= TpComplex{parameters_.get_beta() * parameters_.get_beta()} *
TpComplex{static_cast<Scalar>(accumulator_.get_accumulated_sign().sum())};
else
G4[channel] /= TpComplex{parameters_.get_beta() * parameters_.get_beta()} *
TpComplex{static_cast<Real>(accumulator_.get_accumulated_sign().sum())};

concurrency_.average_and_compute_stddev(G4[channel], data_.get_G4_stdv()[channel]);
}
}
Expand Down

0 comments on commit 8e7cac8

Please sign in to comment.