use new tensor class + signed integers
- transition towards removing the buffer layout system
- replace all unsigned integers with signed integers
- new BLAS and dense solver interfaces
- updated TODO

still some cleanup to do
samayala22 committed Nov 5, 2024
1 parent ea75728 commit a18c2a4
Showing 22 changed files with 1,168 additions and 926 deletions.
TODO (6 changes: 3 additions & 3 deletions)

@@ -3,7 +3,7 @@
- Get rid of exceptions
- All backend functions should return a tf::Task so that we can build a single taskflow graph -> most probably cancelled for now
- Retire Mesh object as it is simply not generic enough, all geometry data is simulation specific
-- Replace unsigned ints with signed ints
+- Take care of the narrowing conversions and uint <-> int comparisons
// store kinematics in the backend ??
// store flow properties in the backend ??
// Should Memory be a singleton class ?
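The newly added narrowing-conversion item is where most of the subtle bugs in a signed/unsigned migration hide. An illustrative sketch of the classic pitfall (not from the repository):

#include <cstdio>
#include <vector>

int main() {
    std::vector<int> v; // deliberately empty

    // Unsigned pitfall: with an empty vector, v.size() - 1 wraps to
    // SIZE_MAX instead of -1, so this loop would not be skipped:
    //     for (std::size_t i = 0; i < v.size() - 1; i++) { ... }

    // Signed fix: do the arithmetic in a signed type so the bound is -1
    // and the loop body is correctly skipped.
    for (long long i = 0; i < static_cast<long long>(v.size()) - 1; i++) {
        std::printf("%d\n", v[static_cast<std::size_t>(i)]);
    }
    return 0;
}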
headeronly/tinytypes.hpp (4 changes: 2 additions & 2 deletions)

@@ -7,8 +7,8 @@ namespace tiny {

using u8 = std::uint8_t;
using u16 = std::uint16_t;
using u32 = std::uint32_t;
using u64 = std::uint64_t;
-using i32 = std::uint32_t;
-using i64 = std::uint64_t;
+using i32 = std::int32_t;
+using i64 = std::int64_t;

using i8 = std::int8_t;
using i16 = std::int16_t;
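Given that the whole point of the i32/i64 aliases is signedness, a compile-time guard makes this class of typo impossible to reintroduce. A minimal sketch, assuming only the aliases above (the static_asserts are not part of the commit):

#include <cstdint>
#include <type_traits>

namespace tiny {
using i32 = std::int32_t;
using i64 = std::int64_t;
} // namespace tiny

// Compilation fails if an alias silently reverts to an unsigned type.
static_assert(std::is_signed_v<tiny::i32> && sizeof(tiny::i32) == 4);
static_assert(std::is_signed_v<tiny::i64> && sizeof(tiny::i64) == 8);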
tests/nlvlm_curves.cpp (6 changes: 3 additions & 3 deletions)

@@ -39,10 +39,10 @@ struct ThinAirfoilPolarLiftCurve : public LiftCurveFunctor{
};

template<typename T>
-void linspace(T start, T end, u64 n, std::vector<T>& out) {
+void linspace(T start, T end, i64 n, std::vector<T>& out) {
    out.resize(n);
    T step = (end - start) / (n - 1);
-    for (u64 i = 0; i < n; i++) {
+    for (i64 i = 0; i < n; i++) {
        out[i] = start + i * step;
    }
}
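For reference, linspace fills out with n evenly spaced values from start to end inclusive; a small usage sketch (the values are illustrative, not from the test):

std::vector<float> alphas;
linspace(0.0f, 10.0f, 5, alphas); // alphas = {0.0, 2.5, 5.0, 7.5, 10.0}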
@@ -88,7 +88,7 @@ int main(int /*argc*/, char** /*argv*/) {

std::printf("\n| Alpha | CL | CD | CMx | CMy | CMz | CL Error | CD Error |\n");
std::printf("|------------|------------|------------|------------|------------|------------|-------------|-------------|\n");
for (u64 i = 0; i < test_alphas.size(); i++) {
for (i64 i = 0; i < test_alphas.size(); i++) {
const FlowData flow{test_alphas[i], 0.0f, 1.0f, 1.0f};
auto coeffs = simulation.run(flow, db);

tests/unittests.cpp (94 changes: 94 additions & 0 deletions)

@@ -0,0 +1,94 @@
#include "vlm_backend.hpp"
#include "vlm_memory.hpp"

#include <cstdio>
#include <cassert>

#define CHECK(condition) \
    do { \
        if (!(condition)) { \
            std::fprintf(stderr, \
                "Assertion failed: %s\n" \
                "File: %s, Line: %d\n", \
                #condition, __FILE__, __LINE__); \
            std::abort(); \
        } \
    } while (0)

using namespace vlm;

void print3d(const TensorView<f32, 3, Location::Host>& tensor) {
    // Print the 3D tensor one z-layer at a time
    for (i64 z = 0; z < tensor.shape(2); z++) {
        std::printf("Layer %lld:\n", static_cast<long long>(z));
        for (i64 x = 0; x < tensor.shape(0); x++) {
            for (i64 y = 0; y < tensor.shape(1); y++) {
                std::printf("%6.1f ", tensor(x, y, z));
            }
            std::printf("\n");
        }
        std::printf("\n");
    }
}

int main(int /*argc*/, char** /*argv*/) {
    const std::vector<std::string> backends = get_available_backends();

    for (const auto& backend_name : backends) {
        std::unique_ptr<Backend> backend = create_backend(backend_name);
        std::unique_ptr<Memory> memory = backend->create_memory_manager();

        Tensor<f32, 3, Location::Host> tensor_h{*memory};
        Tensor<f32, 3, Location::Device> tensor_d{*memory};

        const i64 n = 3;
        tensor_d.init({n, n, n});
        tensor_h.init({n, n, n});

        auto& tdv = tensor_d.view();
        auto& thv = tensor_h.view();
        CHECK(tdv.shape() == thv.shape());

        // Fill the host tensor with 0..26; the linear index runs with the
        // first dimension fastest, so thv(x, y, z) == x + 3*y + 9*z.
        for (i64 i = 0; i < tensor_h.size(); i++) {
            tensor_h[i] = static_cast<f32>(i);
        }

        thv.to(tdv); // upload the host values to the device tensor

        {
            // Fixing y = 1 gives a 2D view: bv(x, k) aliases thv(x, 1, k),
            // i.e. the value x + 3 + 9*k.
            auto bv = thv.slice(All, 1, Range{0, 3});

            CHECK(bv(0, 0) == 3.0f);
            CHECK(bv(1, 0) == 4.0f);
            CHECK(bv(2, 1) == 14.0f);
            CHECK(bv(2, 2) == 23.0f);
        }

        {
            // Copy the y = {0,1} columns of layer z = 0 onto the y = {1,2}
            // columns of layer z = 2, entirely on the device, then download
            // and verify on the host.
            auto t = tensor_d.clone();
            auto tv = t.view();
            auto a = tv.slice(All, Range{0, 2}, 0);
            auto b = tv.slice(All, Range{1, 3}, 2);
            a.to(b);
            tv.to(thv);

            CHECK(thv(0, 2, 2) == 3.0f);
            CHECK(thv(1, 1, 2) == 1.0f);
            CHECK(thv(2, 2, 2) == 5.0f);
        }

        {
            // Broadcast the x = 0 plane onto the x = 1 plane on the device.
            auto t = tensor_d.clone();
            auto tv = t.view();
            auto a = tv.slice(0, All, All);
            auto b = tv.slice(1, All, All);
            a.to(b);
            tv.to(thv);

            CHECK(thv(0, 0, 0) == thv(1, 0, 0));
            CHECK(thv(0, 1, 1) == thv(1, 1, 1));
        }
    }

    return 0;
}
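Every expected value in these checks follows from one layout assumption: the linear index runs with the first dimension fastest, so element (x, y, z) of a 3x3x3 tensor sits at flat index x + 3*y + 9*z. A standalone sketch of that index math (the layout is inferred from the checks above, not a documented guarantee of the tensor class):

#include <cstdio>

// Flat index for an n*n*n tensor whose first dimension varies fastest.
long long flat_index(long long x, long long y, long long z, long long n) {
    return x + n * y + n * n * z;
}

int main() {
    const long long n = 3;
    // thv.slice(All, 1, Range{0, 3}) fixes y = 1, so bv(2, 1) reads
    // element (2, 1, 1): 2 + 3*1 + 9*1 = 14, matching CHECK(bv(2, 1) == 14.0f).
    std::printf("%lld\n", flat_index(2, 1, 1, n)); // prints 14
    return 0;
}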
