diff --git a/glm/detail/_swizzle_func_gcc_vec.hpp b/glm/detail/_swizzle_func_gcc_vec.hpp new file mode 100644 index 0000000000..d7ad7da9fd --- /dev/null +++ b/glm/detail/_swizzle_func_gcc_vec.hpp @@ -0,0 +1,1271 @@ +#pragma once + +#ifdef __clang__ +#define GLM_SWIZZLE_GEN_VEC2_ENTRY(L, T, Q, CONST, A, B) \ + GLM_FUNC_QUALIFIER vec<2, T, Q> __attribute__((always_inline)) A ## B() const \ + { \ + using E = ElementCollection; \ + using G = typename vec::GccVec_t; \ + G vin; std::memcpy(&vin, &data, std::min(sizeof(data), sizeof(vin))); \ + GccVec<2, T, Q> vout; \ + static constexpr E testObj{};\ + static constexpr auto pA = &testObj.A;\ + static constexpr auto pB = &testObj.B;\ + vout=__builtin_shufflevector(vin, vin, getOffset(), getOffset()); \ + return vec<2, T, Q>(vout); \ + } + +#define GLM_SWIZZLE_GEN_VEC3_ENTRY(L, T, Q, CONST, A, B, C) \ + GLM_FUNC_QUALIFIER vec<3, T, Q> __attribute__((always_inline)) A ## B ## C() const \ + { \ + using E = ElementCollection; \ + using G = typename vec::GccVec_t; \ + G vin; std::memcpy(&vin, &data, std::min(sizeof(data), sizeof(vin))); \ + GccVec<4, T, Q> vout; \ + static constexpr E testObj{};\ + static constexpr auto pA = &testObj.A;\ + static constexpr auto pB = &testObj.B;\ + static constexpr auto pC = &testObj.C;\ + vout=__builtin_shufflevector(vin, vin, getOffset(), getOffset(), getOffset(), -1); \ + vec<3, T, Q> voutfin; std::memcpy(&voutfin, &vout, sizeof(voutfin)); \ + return voutfin; \ + } + +#define GLM_SWIZZLE_GEN_VEC4_ENTRY(L, T, Q, CONST, A, B, C, D) \ + GLM_FUNC_QUALIFIER vec<4, T, Q> __attribute__((always_inline)) A ## B ## C ## D() const \ + { \ + using E = ElementCollection; \ + using G = typename vec::GccVec_t; \ + G vin; std::memcpy(&vin, &data, std::min(sizeof(data), sizeof(vin))); \ + GccVec<4, T, Q> vout; \ + static constexpr E testObj{};\ + static constexpr auto pA = &testObj.A;\ + static constexpr auto pB = &testObj.B;\ + static constexpr auto pC = &testObj.C;\ + static constexpr auto pD = &testObj.D;\ 
+ vout=__builtin_shufflevector(vin, vin, getOffset(), getOffset(), getOffset(), getOffset()); \ + return vec<4, T, Q>(vout); \ + } + +#define GLM_SWIZZLE_GEN_VEC2_ENTRY_DEF(T, Q, L, CONST, A, B) \ + template \ + GLM_FUNC_QUALIFIER vec vec::A ## B() const \ + { \ + return vec<2, T, Q>(this->A, this->B); \ + } + +#define GLM_SWIZZLE_GEN_VEC3_ENTRY_DEF(T, Q, L, CONST, A, B, C) \ + template \ + GLM_FUNC_QUALIFIER vec<3, T, Q> vec::A ## B ## C() const \ + { \ + return vec<3, T, Q>(this->A, this->B, this->C); \ + } + +#define GLM_SWIZZLE_GEN_VEC4_ENTRY_DEF(T, Q, L, CONST, A, B, C, D) \ + template \ + GLM_FUNC_QUALIFIER vec<4, T, Q> vec::A ## B ## C ## D() const \ + { \ + return vec<4, T, Q>(this->A, this->B, this->C, this->D); \ + } + +#define GLM_MUTABLE + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, Q, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, X, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, X, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, A, X) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, B, X) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, Q) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, Q, x, y) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, Q, r, g) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, Q, s, t) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, Q, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, C, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, B, C) \ + 
GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, X, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, X, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, X, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, A, X) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, B, X) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, C, X) + + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, Q, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, 
Q, GLM_MUTABLE, C, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, X, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, X, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, X, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, X, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, X, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, X, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, X, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, X, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, X, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, B, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, C, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, A, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, C, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, A, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, B, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, A, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, B, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, C, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, A, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, B, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, 
X, C, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, X, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, X, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, X, X, C) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMQ(T, Q, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, Q, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, Q, A, B, C) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, Q) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMQ(T, Q, x, y, z) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMQ(T, Q, r, g, b) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMQ(T, Q, s, t, p) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, A, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, B, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, D, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, D, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, D, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, C, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, A, X) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, B, X) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, C, X) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, D, X) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, X, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, X, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, X, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, 
X, D) +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, 
, B, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, C, D) 
\ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, X, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, X, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, X, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, X, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, X, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, X, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, X, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, X, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, X, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, X, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, X, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, X, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, X, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, X, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, X, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, X, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, X, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, X, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, X, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, X, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, A, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, B, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, C, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , X, D, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, X, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, X, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, X, X) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, X, X) + + + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, C, D) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, C, A) \ + \ + \ + /*one-lane undefined, all unique components:*/\ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, B, D) \ 
+ GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, C, A) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, X, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, X, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, X, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, X, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, X, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, X, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, X, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, X, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, X, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, X, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, X, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, X, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, X, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, X, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, X, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, X, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, X, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, X, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, X, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, X, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, X, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, X, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, X, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, X, C, A) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, X, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, X, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, 
X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, X, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, X, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, X, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, X, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, X, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, X, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, X, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, X, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, X, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, X, A) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, C, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, C, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, C, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, C, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, 
C, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, C, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, C, X) \ + \ + /*swizzles w/ duplicate components:*/\ + \ + /*two lanes undefined: both unique components and duplicate components*/\ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, D, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, D, C) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, X, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, X, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, X, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, X, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, X, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, X, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, X, A) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, X, C) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, C, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, C, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, C, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, C, X) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, X, X ) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, X, X) \ + /* end of two lanes undefined */\ + /* three lanes undefined: */\ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, X, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, 
Q, , X, X, X, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, X, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, X, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, A, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, B, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, C, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, X, D, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, A, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, B, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, C, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , X, D, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, X, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, X, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, X, X, X) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, X, X, X) \ + /* end of three lanes undefined */\ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, 
B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, 
T, Q, , B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, B, D) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, 
A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, D, C) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , 
C, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, B, B) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, D, D) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMQ(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) \ + \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, Q) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMQ(T, Q, x, y, z, w) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMQ(T, Q, r, g, b, a) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMQ(T, Q, s, t, p, q) +#else +#define GLM_SWIZZLE_GEN_VEC2_ENTRY(L, T, Q, CONST, A, B) \ + GLM_FUNC_QUALIFIER vec<2, T, Q> 
__attribute__((always_inline)) A ## B() const \ + { \ + using E = ElementCollection; \ + using G = typename vec::GccVec_t; \ + G vin; std::memcpy(&vin, &data, std::min(sizeof(data), sizeof(vin))); \ + GccVec<2, T, Q> vout; \ + vout = __builtin_shufflevector(vin, vin, offsetof(E, A)/sizeof(A), offsetof(E, B)/sizeof(B)); \ + return vec<2, T, Q>(vout); \ + } + +#define GLM_SWIZZLE_GEN_VEC3_ENTRY(L, T, Q, CONST, A, B, C) \ + GLM_FUNC_QUALIFIER vec<3, T, Q> __attribute__((always_inline)) A ## B ## C() const \ + { \ + using E = ElementCollection; \ + using G = typename vec::GccVec_t; \ + G vin; std::memcpy(&vin, &data, std::min(sizeof(data), sizeof(vin))); \ + GccVec<4, T, Q> vout; \ + vout = __builtin_shufflevector(vin, vin, offsetof(E, A)/sizeof(A), offsetof(E, B)/sizeof(B), offsetof(E, C)/sizeof(C), -1); \ + vec<3, T, Q> voutfin; std::memcpy(&voutfin, &vout, sizeof(voutfin)); \ + return voutfin; \ + } + +#define GLM_SWIZZLE_GEN_VEC4_ENTRY(L, T, Q, CONST, A, B, C, D) \ + GLM_FUNC_QUALIFIER vec<4, T, Q> __attribute__((always_inline)) A ## B ## C ## D() const \ + { \ + using E = ElementCollection; \ + using G = typename vec::GccVec_t; \ + G vin; std::memcpy(&vin, &data, std::min(sizeof(data), sizeof(vin))); \ + GccVec<4, T, Q> vout; \ + vout=__builtin_shufflevector(vin, vin, offsetof(E, A)/sizeof(A), offsetof(E, B)/sizeof(B), offsetof(E, C)/sizeof(C), offsetof(E, D)/sizeof(D)); \ + return vec<4, T, Q>(vout); \ + } + +#define GLM_SWIZZLE_GEN_VEC2_ENTRY_DEF(T, Q, L, CONST, A, B) \ + template \ + GLM_FUNC_QUALIFIER vec vec::A ## B() const \ + { \ + return vec<2, T, Q>(this->A, this->B); \ + } + +#define GLM_SWIZZLE_GEN_VEC3_ENTRY_DEF(T, Q, L, CONST, A, B, C) \ + template \ + GLM_FUNC_QUALIFIER vec<3, T, Q> vec::A ## B ## C() const \ + { \ + return vec<3, T, Q>(this->A, this->B, this->C); \ + } + +#define GLM_SWIZZLE_GEN_VEC4_ENTRY_DEF(T, Q, L, CONST, A, B, C, D) \ + template \ + GLM_FUNC_QUALIFIER vec<4, T, Q> vec::A ## B ## C ## D() const \ + { \ + return vec<4, T, 
Q>(this->A, this->B, this->C, this->D); \ + } + +#define GLM_MUTABLE + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, Q, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(2, T, Q, GLM_MUTABLE, B, B) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, Q) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, Q, x, y) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, Q, r, g) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC2_SWIZZLE(T, Q, s, t) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, Q, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, C, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(3, T, Q, GLM_MUTABLE, C, B) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, Q, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, C, C) \ + 
GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, A, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, B, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(3, T, Q, GLM_MUTABLE, C, B, B) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMQ(T, Q, A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC3_SWIZZLE(T, Q, A, B, C) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC3_SWIZZLE(T, Q, A, B, C) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, Q) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMQ(T, Q, x, y, z) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMQ(T, Q, r, g, b) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC3_COMQ(T, Q, s, t, p) + +#define GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, A, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, A, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, A, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, B, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, B, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, B, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, C, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, C, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, C, D) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, D, A) \ + 
GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, D, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, D, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, A, A) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, B, B) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, C, C) \ + GLM_SWIZZLE_GEN_VEC2_ENTRY(4, T, Q, GLM_MUTABLE, D, D) + +#define GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, D, D) 
\ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , A, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, B, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, A, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , B, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, C, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, C, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, C, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, A, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, B, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , C, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, D, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, D, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, D, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, D, C) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, A, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, B, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, C, D) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, A, A) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, B, B) \ + GLM_SWIZZLE_GEN_VEC3_ENTRY(4, T, Q, , D, C, C) + +#define GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, C, B) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, C, A) \ + /*swizzles w/ duplicate components:*/\ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, A, D, D) \ + \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, B, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, C, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, C, 
C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , A, D, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, A, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, B, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, 
, B, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, C, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , B, D, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, 
T, Q, , C, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, A, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, B, D, C) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, C, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, B, B) \ + 
GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , C, D, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, A, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, D, 
C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, B, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, C, D, D) \ + \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, A, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, A, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, A, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, A, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, B, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, B, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, B, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, B, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, C, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, C, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, C, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, C, D) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, D, A) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, D, B) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, D, C) \ + GLM_SWIZZLE_GEN_VEC4_ENTRY(4, T, Q, , D, D, D, D) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMQ(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC2_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC3_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) \ + GLM_SWIZZLE_GEN_VEC4_FROM_VEC4_SWIZZLE(T, Q, A, B, C, D) + +#define GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, Q) \ + 
GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMQ(T, Q, x, y, z, w) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMQ(T, Q, r, g, b, a) \ + GLM_SWIZZLE_GEN_VEC_FROM_VEC4_COMQ(T, Q, s, t, p, q) +#endif \ No newline at end of file diff --git a/glm/detail/_vectorize.hpp b/glm/detail/_vectorize.hpp index 807b1b1de6..7a03901f83 100644 --- a/glm/detail/_vectorize.hpp +++ b/glm/detail/_vectorize.hpp @@ -1,5 +1,7 @@ #pragma once - +#if GLM_SIMD_CONSTEXPR == 1 +#include "simd_constexpr/_vectorize.hpp" +#else namespace glm{ namespace detail { @@ -228,3 +230,4 @@ namespace detail }; }//namespace detail }//namespace glm +#endif \ No newline at end of file diff --git a/glm/detail/qualifier.hpp b/glm/detail/qualifier.hpp index cb4e108a7a..2e3a009a64 100644 --- a/glm/detail/qualifier.hpp +++ b/glm/detail/qualifier.hpp @@ -88,7 +88,77 @@ namespace detail T data[L]; } type; }; - +#if ((defined(__clang__) || defined(__GNUC__)) && (GLM_LANG_CXX20_FLAG & GLM_LANG)) && GLM_SIMD_CONSTEXPR +# if GLM_HAS_ALIGNOF + template + struct storage + { + typedef struct alignas(L * sizeof(T)) type { + T data[L]; + } type; + }; +# endif + template + static constexpr size_t requiredAlignment = alignof(T); + + template + struct __attribute__((packed,aligned(requiredAlignment))) storage<2, T, false> + { + using VType = std::conditional_t< std::is_same_v, uint8_t, T>; + typedef VType type __attribute__((aligned( requiredAlignment ), vector_size(2*sizeof(VType)))); + }; + template + struct __attribute__((packed,aligned(requiredAlignment))) storage<1, T, false> + { + using VType = std::conditional_t< std::is_same_v, uint8_t, T>; + typedef VType type __attribute__((aligned( requiredAlignment ),vector_size(sizeof(VType)))); + }; + template + struct storage<2, T, true> + { + using VType = std::conditional_t< std::is_same_v, uint8_t, T>; + typedef VType type __attribute__((aligned(2*sizeof(VType)),vector_size(2*sizeof(VType)))); + }; + + template + struct storage<1, T, true> + { + using VType = std::conditional_t< std::is_same_v, 
uint8_t, T>; + typedef VType type __attribute__((aligned(sizeof(VType)),vector_size(sizeof(VType)))); + }; + template + struct __attribute__((packed,aligned(requiredAlignment))) storage<3, T, false> + { + typedef struct __attribute__((packed,aligned(requiredAlignment))) type { + T data[3]; + } type; + }; + template + struct __attribute__((packed,aligned(requiredAlignment))) storage<4, T, false> + { + using VType = std::conditional_t< std::is_same_v, uint8_t, T>; + typedef VType type __attribute__((aligned( requiredAlignment ), vector_size(4*sizeof(VType)))); + }; +# if (!(GLM_ARCH & GLM_ARCH_SIMD_BIT)) + template + struct storage<4, T, true> + { + using VType = std::conditional_t< std::is_same_v, uint8_t, T>; + typedef VType type __attribute__((aligned(4*sizeof(VType)),vector_size(4*sizeof(VType)))); + }; + template + struct storage<3, T, true> + { + using VType = std::conditional_t< std::is_same_v, uint8_t, T>; + typedef VType type __attribute__((aligned(4*sizeof(VType)),vector_size(4*sizeof(VType)))); + }; + template<> + struct storage<4, bool, true> + { + typedef uint8_t type __attribute__((aligned(4*sizeof(uint8_t)),vector_size(4*sizeof(uint8_t)))); + }; +# endif +#else # if GLM_HAS_ALIGNOF template struct storage @@ -106,6 +176,7 @@ namespace detail } type; }; # endif +#endif # if GLM_ARCH & GLM_ARCH_SSE2_BIT template<> diff --git a/glm/detail/setup.hpp b/glm/detail/setup.hpp index 5e94f2dc16..482c5a9f6a 100644 --- a/glm/detail/setup.hpp +++ b/glm/detail/setup.hpp @@ -297,7 +297,7 @@ // N2235 Generalized Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2007/n2235.pdf // N3652 Extended Constant Expressions http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2013/n3652.html -#if (GLM_ARCH & GLM_ARCH_SIMD_BIT) // Compiler SIMD intrinsics don't support constexpr... +#if (GLM_ARCH & GLM_ARCH_SIMD_BIT) && GLM_SIMD_CONSTEXPR == 0 // Compiler SIMD intrinsics don't support constexpr... 
# define GLM_HAS_CONSTEXPR 0 #elif (GLM_COMPILER & GLM_COMPILER_CLANG) # define GLM_HAS_CONSTEXPR __has_feature(cxx_relaxed_constexpr) @@ -883,7 +883,7 @@ namespace detail # define GLM_FORCE_ALIGNED_GENTYPES #endif -#if GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (defined(GLM_FORCE_ALIGNED_GENTYPES) || (GLM_CONFIG_SIMD == GLM_ENABLE)) +#if (GLM_HAS_ALIGNOF && (GLM_LANG & GLM_LANG_CXXMS_FLAG) && (defined(GLM_FORCE_ALIGNED_GENTYPES) || (GLM_CONFIG_SIMD == GLM_ENABLE))) || GLM_SIMD_CONSTEXPR # define GLM_CONFIG_ALIGNED_GENTYPES GLM_ENABLE #else # define GLM_CONFIG_ALIGNED_GENTYPES GLM_DISABLE diff --git a/glm/detail/simd_constexpr/_vectorize.hpp b/glm/detail/simd_constexpr/_vectorize.hpp new file mode 100644 index 0000000000..be775dc847 --- /dev/null +++ b/glm/detail/simd_constexpr/_vectorize.hpp @@ -0,0 +1,92 @@ +#pragma once +namespace glm{ +namespace detail +{ + template class vec, length_t L, typename R, typename T, qualifier Q> + struct functor1 + { + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(R (*Func) (T x), vec const& v) + { + vec ret{v}; + #pragma GCC unroll(4) + for (int i = 0; i < L; i++) { + ret[i] = Func(v[i]); + } + return ret; + } + }; + + + template class vec, length_t L, typename T, qualifier Q> + struct functor2 + { + GLM_FUNC_QUALIFIER static vec call(T (*Func) (T x, T y), vec a, vec const& b) + { + vec ret{a}; + #pragma GCC unroll(4) + for (int i = 0; i < L; i++) { + ret[i] = Func(a[i], b[i]); + } + return ret; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(Fct Func, vec a, vec const& b) + { + vec ret{a}; + #pragma GCC unroll(4) + for (int i = 0; i < L; i++) { + ret[i] = Func(a[i], b[i]); + } + return ret; + } + }; + + template class vec, length_t L, typename T, qualifier Q> + struct functor2_vec_sca{ + GLM_FUNC_QUALIFIER static vec call(T (*Func) (T x, T y), vec a, T b) + { + vec ret{a}; + #pragma GCC unroll(4) + for (int i = 0; i < L; i++) { + ret[i] = Func(a[i], b); + } + return ret; + } + template + 
GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(Fct Func, vec a, T b) + { + vec ret{a}; + #pragma GCC unroll(4) + for (int i = 0; i < L; i++) { + ret[i] = Func(a[i], b); + } + return ret; + } + }; + + template + struct functor2_vec_int { + GLM_FUNC_QUALIFIER static vec call(int (*Func) (T x, int y), vec const& a, vec b) + { + vec ret{b}; + #pragma GCC unroll(4) + for (int i = 0; i < L; i++) { + ret[i] = Func(a[i], b[i]); + } + return ret; + } + + template + GLM_FUNC_QUALIFIER GLM_CONSTEXPR static vec call(Fct Func, vec const& a, vec b) + { + vec ret{b}; + #pragma GCC unroll(4) + for (int i = 0; i < L; i++) { + ret[i] = Func(a[i], b[i]); + } + return ret; + } + }; +}//namespace detail +}//namespace glm diff --git a/glm/detail/simd_constexpr/element.hpp b/glm/detail/simd_constexpr/element.hpp new file mode 100644 index 0000000000..587ebc9476 --- /dev/null +++ b/glm/detail/simd_constexpr/element.hpp @@ -0,0 +1,238 @@ +#include +#include +namespace glm::detail +{ + consteval bool NotEmpty(length_t I, length_t L) { return I <= L; } + struct Empty {}; + struct GLM_TRIVIAL RowTwo { + [[no_unique_address]] Empty y; [[no_unique_address]] Empty g; [[no_unique_address]] Empty t; + }; + struct GLM_TRIVIAL RowThree { + [[no_unique_address]] Empty z; [[no_unique_address]] Empty b; [[no_unique_address]] Empty p; + }; + struct GLM_TRIVIAL RowFour { + [[no_unique_address]] Empty w; [[no_unique_address]] Empty a; [[no_unique_address]] Empty q; + }; + template + struct ElementCollection; + +#ifdef __clang__ + + + template + struct GLM_TRIVIAL ElementCollection<4, T, Q> { + using data_t = typename detail::storage<4, T, detail::is_aligned::value>::type; + static constexpr T X = -1ll; + union + { + VDataArray<4, T, Q> elementArr; + struct { + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + union { T w, a, q; }; + }; + data_t data; + }; + + + template + static consteval int64_t getOffset() { + using E = ElementCollection<4, T, Q>; + + if constexpr (member == 
&baseObj->X) { + return -1ll; + } else if constexpr (member == &baseObj->x || member == &baseObj->r || member == &baseObj->s) { + return (int64_t)(offsetof(E, x)/sizeof(T)); + } else if constexpr (member == &baseObj->y || member == &baseObj->g || member == &baseObj->t) { + return (int64_t)(offsetof(E, y)/sizeof(T)); + } else if constexpr (member == &baseObj->z || member == &baseObj->b || member == &baseObj->p) { + return (int64_t)(offsetof(E, z)/sizeof(T)); + } else if constexpr (member == &baseObj->w || member == &baseObj->a || member == &baseObj->q) { + return (int64_t)(offsetof(E, w)/sizeof(T)); + } else { + static_assert(false); + } + } + GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, Q) + }; + + + template + struct GLM_TRIVIAL ElementCollection<3, T, Q> : RowFour { + using data_t = typename detail::storage<3, T, detail::is_aligned::value>::type; + using RowFour::w; + using RowFour::a; + using RowFour::q; + static constexpr length_t data_len = (Q == aligned) ? 4 : 3; + static constexpr T X = -1ll; + union + { + VDataArray elementArr; + struct { + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + }; + data_t data; + }; + template + static consteval int64_t getOffset() { + using E = ElementCollection<3, T, Q>; + + if constexpr (member == &baseObj->X) { + return -1ll; + } else if constexpr (member == &baseObj->x || member == &baseObj->r || member == &baseObj->s) { + return (int64_t)(offsetof(E, x)/sizeof(T)); + } else if constexpr (member == &baseObj->y || member == &baseObj->g || member == &baseObj->t) { + return (int64_t)(offsetof(E, y)/sizeof(T)); + } else if constexpr (member == &baseObj->z || member == &baseObj->b || member == &baseObj->p) { + return (int64_t)(offsetof(E, z)/sizeof(T)); + } else { + static_assert(false); + } + } + GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, Q) + }; + template + struct GLM_TRIVIAL ElementCollection<2, T, Q> : RowThree, RowFour { + using data_t = typename detail::storage<2, T, detail::is_aligned::value>::type; + using RowThree::z; 
+ using RowThree::b; + using RowThree::p; + using RowFour::w; + using RowFour::a; + using RowFour::q; + static constexpr T X = -1ll; + union + { + VDataArray<2, T, Q> elementArr; + struct { + union { T x, r, s; }; + union { T y, g, t; }; + }; + data_t data; + }; + template + static consteval int64_t getOffset() { + using E = ElementCollection<2, T, Q>; + + if constexpr (member == &baseObj->X) { + return -1ll; + } else if constexpr (member == &baseObj->x || member == &baseObj->r || member == &baseObj->s) { + return (int64_t)(offsetof(E, x)/sizeof(T)); + } else if constexpr (member == &baseObj->y || member == &baseObj->g || member == &baseObj->t) { + return (int64_t)(offsetof(E, y)/sizeof(T)); + } else { + static_assert(false); + } + } + GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, Q) + }; + template + struct GLM_TRIVIAL ElementCollection<1, T, Q> : RowTwo, RowThree, RowFour { + using data_t = typename detail::storage<1, T, detail::is_aligned::value>::type; + using RowTwo::y; + using RowTwo::g; + using RowTwo::t; + using RowThree::z; + using RowThree::b; + using RowThree::p; + using RowFour::w; + using RowFour::a; + using RowFour::q; + static constexpr T X = -1ll; + union + { + VDataArray<1, T, Q> elementArr; + struct { + union { T x, r, s; }; + }; + data_t data; + }; + }; + +#else + template + struct ElementCollection; + template + struct GLM_TRIVIAL ElementCollection<4, T, Q> { + using data_t = typename detail::storage<4, T, detail::is_aligned::value>::type; + union + { + VDataArray<4, T, Q> elementArr; + struct { + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + union { T w, a, q; }; + }; + data_t data; + }; + GLM_SWIZZLE_GEN_VEC_FROM_VEC4(T, Q) + }; + + + template + struct GLM_TRIVIAL ElementCollection<3, T, Q> : RowFour { + using data_t = typename detail::storage<3, T, detail::is_aligned::value>::type; + using RowFour::w; + using RowFour::a; + using RowFour::q; + static constexpr length_t data_len = (Q == aligned) ? 
4 : 3; + union + { + VDataArray elementArr; + struct { + union { T x, r, s; }; + union { T y, g, t; }; + union { T z, b, p; }; + }; + data_t data; + }; + GLM_SWIZZLE_GEN_VEC_FROM_VEC3(T, Q) + }; + template + struct GLM_TRIVIAL ElementCollection<2, T, Q> : RowThree, RowFour { + using data_t = typename detail::storage<2, T, detail::is_aligned::value>::type; + using RowThree::z; + using RowThree::b; + using RowThree::p; + using RowFour::w; + using RowFour::a; + using RowFour::q; + union + { + VDataArray<2, T, Q> elementArr; + struct { + union { T x, r, s; }; + union { T y, g, t; }; + }; + data_t data; + }; + GLM_SWIZZLE_GEN_VEC_FROM_VEC2(T, Q) + }; + template + struct GLM_TRIVIAL ElementCollection<1, T, Q> : RowTwo, RowThree, RowFour { + using data_t = typename detail::storage<1, T, detail::is_aligned::value>::type; + using RowTwo::y; + using RowTwo::g; + using RowTwo::t; + using RowThree::z; + using RowThree::b; + using RowThree::p; + using RowFour::w; + using RowFour::a; + using RowFour::q; + union + { + VDataArray<1, T, Q> elementArr; + struct { + union { T x, r, s; }; + }; + data_t data; + }; + }; +#endif +} + diff --git a/glm/detail/simd_constexpr/simd_helpers.inl b/glm/detail/simd_constexpr/simd_helpers.inl new file mode 100644 index 0000000000..c9794c2901 --- /dev/null +++ b/glm/detail/simd_constexpr/simd_helpers.inl @@ -0,0 +1,252 @@ +namespace glm::detail +{ + template + struct SimdHelpers + { + template + struct GetFirstType + { + using FirstTx = Tx0; + }; + template + using GccVec = typename detail::GccVExt::GccV; + using gcc_vec_t = GccVec; + using data_t = typename detail::storage::value>::type; + + static inline auto __attribute__((always_inline)) gcc_vec_to_data(auto v) { + static constexpr auto size = std::min(sizeof(v), sizeof(data_t)); + static constexpr auto biggerSize = std::max(sizeof(v), sizeof(data_t)); + if constexpr (size == biggerSize) { + if constexpr (L != 3 || (detail::is_aligned::value)) { + return reinterpret_cast(v); + } else { + data_t 
d; + std::memcpy(&d, &v, size); + return d; + } + } else { + data_t d; + std::memcpy(&d, &v, size); + return d; + } + } + + static inline auto __attribute__((always_inline)) simd_ctor_scalar(arithmetic auto scalar) { + gcc_vec_t v = ( (T)scalar ) - gcc_vec_t{}; + using Tx = decltype(scalar); + scalar.Tx::~Tx(); + return gcc_vec_to_data(v); + } + template + static inline auto __attribute__((always_inline)) fetch_vec3_as_vec4(::glm::vec<3, Tx, Qx> const& v) { + using OtherVec = GccVec<3, Tx, Qx>; +#ifdef __clang__ + //On clang, simply doing memcpy results in better overall codegen + //Also, this allows clang to avoid spilling registers to the stack, when this function is run on local lvalues + //The local lvalues thing only matters for clang, because gcc seems to always emit memory load/stores when going from packed vec3 -> vec4/aligned_vec3 :( + OtherVec o{}; + std::memcpy(&o, &v, sizeof(v)); + return o; +#else + typedef Tx v2_packed __attribute__((aligned(alignof(Tx)),vector_size(2*sizeof(Tx)))); + struct __attribute__((packed,aligned(alignof(Tx)))) padded { + Tx data0; + v2_packed v2; + }; + auto const& reinterpreted = reinterpret_cast(v); + OtherVec initialPart{}; + initialPart[0] = v[0]; + OtherVec fetched = __builtin_shufflevector(reinterpreted.v2, reinterpreted.v2, -1, -1, 0, 1); + initialPart = __builtin_shufflevector(initialPart, fetched, 0, 6, 7, -1 ); + return initialPart; +#endif + } + template + static inline auto __attribute__((always_inline)) fetch_vec3_as_vec4(::glm::vec<3, Tx, Qx>&& v) { + union M { + gcc_vec_t ourType; + ::glm::vec<3, Tx, Qx> other; + }; + M m {.ourType{}}; + m.other = v; + return m.ourType; + } + template requires (Lx == L) + static inline auto __attribute__((always_inline)) simd_ctor_same_size_conversions(auto&& v) { + using OtherVec = GccVec; + static_assert(sizeof(v) == sizeof(data_t)); + if constexpr (std::is_same_v<::glm::vec, ::glm::vec>) { + return v.data; + } else if constexpr (L == 3 && !BIsAlignedQ()) { + if constexpr 
(std::is_same_v) { + return v.data; + } else { + using Vec4 = GccVec<4, T, Qx>; + gcc_vec_t converted = __builtin_convertvector(fetch_vec3_as_vec4(v), Vec4); + return gcc_vec_to_data(converted); + } + } else { + gcc_vec_t converted = __builtin_convertvector(v.data, gcc_vec_t); + return gcc_vec_to_data(converted); + } + } + + template requires (Lx == L) + static inline auto __attribute__((always_inline)) simd_ctor(::glm::vec&& v) + { + using OtherVec = GccVec; + if constexpr (sizeof(v) == sizeof(data_t)) { + return simd_ctor_same_size_conversions(v); + } else if constexpr (BIsAlignedQ() && !BIsAlignedQ() && Lx == 3) { + auto o = fetch_vec3_as_vec4(v); + if constexpr (std::is_same_v) { + return gcc_vec_to_data(o); + } else { + gcc_vec_t converted = __builtin_convertvector(o, gcc_vec_t); + return gcc_vec_to_data(converted); + } + } else { + OtherVec o; + static constexpr auto size = std::min(sizeof(v.data), sizeof(o)); + std::memcpy(&o, &(v.data), size); + //using o_vec_t = decltype(v); + //v.o_vec_t::~o_vec_t(); + gcc_vec_t converted = __builtin_convertvector(o, gcc_vec_t); + return gcc_vec_to_data(converted); + } + } + template requires (Lx == L) + static inline auto __attribute__((always_inline)) simd_ctor(::glm::vec const& v) + { + using OtherVec = GccVec; + if constexpr (sizeof(v) == sizeof(data_t)) { + return simd_ctor_same_size_conversions(v); + } else if constexpr (BIsAlignedQ() && !BIsAlignedQ() && Lx == 3) { + auto o = fetch_vec3_as_vec4(v); + if constexpr (std::is_same_v) { + return gcc_vec_to_data(o); + } else { + gcc_vec_t converted = __builtin_convertvector(o, gcc_vec_t); + return gcc_vec_to_data(converted); + } + } else { + OtherVec o; + static constexpr auto size = std::min(sizeof(v.data), sizeof(o)); + std::memcpy(&o, &(v.data), size); + //using o_vec_t = decltype(v); + //v.o_vec_t::~o_vec_t(); + gcc_vec_t converted = __builtin_convertvector(o, gcc_vec_t); + return gcc_vec_to_data(converted); + } + } + + template requires (Lx != L) + static inline 
auto __attribute__((always_inline)) simd_ctor(::glm::vec v) + { + using OtherVec = GccVec; + if constexpr ( ((Lx != 3 || L == 3) && (!BIsAlignedQ() || !BIsAlignedQ())) + && L != 3 && L > 0 && L <= 4 ) { + static constexpr int64_t posOne = 0; + static constexpr int64_t posTwo = Lx > 1 ? 1 : -1; + static constexpr int64_t posThree = Lx > 2 ? 2 : -1; + static constexpr int64_t posFour = Lx > 3 ? 3 : -1; + if constexpr (L == 4) { + OtherVec o = v.data; + auto oExt = __builtin_shufflevector(o, o, posOne, posTwo, posThree, posFour); + if constexpr (std::is_same_v) { + return gcc_vec_to_data(oExt); + } else { + return gcc_vec_to_data(__builtin_convertvector(oExt, gcc_vec_t)); + } + } else if constexpr (L == 2) { + OtherVec o = v.data; + auto oExt = __builtin_shufflevector(o, o, posOne, posTwo); + if constexpr (std::is_same_v) { + return gcc_vec_to_data(oExt); + } else { + return gcc_vec_to_data(__builtin_convertvector(oExt, gcc_vec_t)); + } + } else if constexpr (L == 1) { + OtherVec o = v.data; + auto oExt = __builtin_shufflevector(o, o, posOne); + if constexpr (std::is_same_v) { + return gcc_vec_to_data(oExt); + } else { + return gcc_vec_to_data(__builtin_convertvector(oExt, gcc_vec_t)); + } + } else { + static_assert(false, "unreachable"); + } + } else { + using OurSizeTheirType = GccVec; + static constexpr auto size = std::min(sizeof(OurSizeTheirType), sizeof(v.data)); + OurSizeTheirType oExpanded; + std::memcpy(&oExpanded, &(v.data), size); + using o_vec_t = decltype(v); + v.o_vec_t::~o_vec_t(); + + gcc_vec_t converted = __builtin_convertvector(oExpanded, gcc_vec_t); + return gcc_vec_to_data(converted); + } + } + + template + static consteval bool isLengthOfVector() { + return sizeof...(A) == L; + } + + template + static inline auto __attribute__((always_inline)) simd_ctor_multi_scalars(A... 
scalars) requires ( isLengthOfVector() && SameTypes()) + { + using OtherType = GetFirstType::FirstTx; + using other_vec_t = GccVec; + other_vec_t o {scalars...}; + if constexpr (std::is_same_v) { + return gcc_vec_to_data(o); + } else { + return gcc_vec_to_data(__builtin_convertvector(o, gcc_vec_t)); + } + } + template + static inline auto __attribute__((always_inline)) simd_ctor_multi_scalars(A... scalars) requires ( isLengthOfVector() && std::is_floating_point_v && AllIntegralTypes() && !SameTypes()) + { + using OtherType = GetCommonType::Type; + using other_vec_t = GccVec; + other_vec_t o {scalars...}; + + if constexpr (std::is_same_v) { + return gcc_vec_to_data(o); + } else { + return gcc_vec_to_data(__builtin_convertvector(o, gcc_vec_t)); + } + } + + template + static inline auto __attribute__((always_inline)) simd_ctor_multi_scalars(A... scalars) requires ( isLengthOfVector() && std::is_integral_v && AllFloatTypes() && !SameTypes()) + { + using OtherType = GetCommonType::Type; + using other_vec_t = GccVec; + other_vec_t o {scalars...}; + + if constexpr (std::is_same_v) { + return gcc_vec_to_data(o); + } else { + return gcc_vec_to_data(__builtin_convertvector(o, gcc_vec_t)); + } + } + + template + static inline auto __attribute__((always_inline)) simd_ctor_multi_scalars(A... 
scalars) requires ( isLengthOfVector() && !SameTypes() && SameArithmeticTypes()) + { + gcc_vec_t v; + std::array pack{static_cast(scalars)...}; + for (int i = 0; i != sizeof...(scalars); i++ ) { + v[i] = pack[i]; + pack[i].T::~T(); + } + return gcc_vec_to_data(v); + } + + + }; +} \ No newline at end of file diff --git a/glm/detail/simd_constexpr/vec.hpp b/glm/detail/simd_constexpr/vec.hpp new file mode 100644 index 0000000000..1e93a8b790 --- /dev/null +++ b/glm/detail/simd_constexpr/vec.hpp @@ -0,0 +1,1448 @@ +/// @ref core +/// @defgroup simd_constexpr_vec c++20 vec implementation that supports using both constexpr constructors, and simd constructors & functions +/// @file glm/detail/simd_constexpr/vec.hpp + +#pragma once + +#include "../qualifier.hpp" +#ifdef GLM_CONFIG_SWIZZLE +# undef GLM_CONFIG_SWIZZLE +#endif + +#define GLM_CONFIG_SWIZZLE GLM_SWIZZLE_FUNCTION + +//sharkautarch: IMO, the GLM swizzle 'operators' are too hacky to me, plus they actually *increase the size of the vec's*, and lastly, I wasn't confident that they'd work well here. +//Instead, we'll just always provide swizzle *functions*, which don't bloat the binary/stack space, and also utilizes simd __builtin_shufflevector intrinsics (for *both* aligned and packed vec's). This'll make them actually be *more performant* compared to separately accessing more than one x/y/z/w(etc) member. +//So no real reason not to simply enable swizzle functions by default! + +// NOTE: swizzle functions only return by value. 
+// also all swizzles require you to select at least two members (ex: v.xy(); v2.yzw(); ) +#include "../_swizzle_func_gcc_vec.hpp" + +#include +#include +#include +#include +#include +#include +namespace glm +{ +#ifdef __clang__ +#define GLM_TRIVIAL __attribute__((trivial_abi)) +#else +#define GLM_TRIVIAL +#endif + + //improve vectorization by widening length-three vectors that are floating point + //since the compiler will have a harder time vectorizing glm::vec3's + template + concept NotVec1 = !std::is_same_v, std::integral_constant>; + template + consteval bool BIsAlignedQ() { + return detail::is_aligned::value; + } + + template + consteval qualifier PackedToAligned() { +#ifdef GLM_CONFIG_ALIGNED_GENTYPES + if constexpr (BIsAlignedQ()) { + return Q; + } else if constexpr (Q == packed_highp) { + return aligned_highp; + } else if constexpr (Q == packed_mediump) { + return aligned_mediump; + } else if constexpr (Q == packed_lowp) { + return aligned_lowp; + } else { + static_assert(false, "Invalid qualifier"); + } +#else + return Q; +#endif + } + + template + consteval bool BShouldWidenVec() { + return (L==3) && BIsAlignedQ() && std::is_floating_point_v; + } + template + concept arithmetic = std::integral || std::floating_point; + template + consteval bool SameArithmeticTypes() { + return (std::is_same_v, std::common_type_t> && ...); + } + template + consteval bool SameTypes() { + return (std::is_same_v && ...); + } + + template + consteval bool AllIntegralTypes() { + return (std::is_integral_v && ...); + } + + template + struct GetCommonType; + + + template + struct GetCommonType { + using Type = GetCommonType::Type>::Type; + }; + + template + struct GetCommonType { + using Type = std::common_type_t; + }; + + template + struct GetCommonType { + using Type = T; + }; + + template + consteval bool AllFloatTypes() { + return (std::is_floating_point_v && ...); + } + + template + consteval bool NotSameArithmeticTypes() { + return ( (!(std::is_integral_v || 
std::is_floating_point_v) || ...) || !(SameArithmeticTypes()) ); + } + + namespace detail + { + template + using _ArrT = std::array; + + template + using _data_t = typename detail::storage::value>::type; + + template + struct GccVExt { + static constexpr qualifier k_qual = Q; + static constexpr length_t v_length = (L == 3) ? 4 : L; + using VType = std::conditional_t< std::is_same_v, uint8_t, T>; + typedef VType GccV __attribute__(( vector_size(sizeof(VType)*v_length), aligned(alignof(_data_t)) )); + }; + template + consteval bool BDataNeedsPadding() { + return sizeof(_data_t) > sizeof(_ArrT); + } + template + consteval bool BVecNeedsPadding() { + return sizeof(_data_t) > sizeof(typename GccVExt::GccV); + } + template + struct VecDataArray; + + template + struct VecDataArray { + using ArrT = _ArrT; + using data_t = _data_t; + ArrT p; + constexpr auto cbegin() const { + return std::ranges::cbegin(p); + } + constexpr auto cend() const { + return std::ranges::cend(p); + } + + constexpr auto begin() { + return std::ranges::begin(p); + } + constexpr auto end() { + return std::ranges::end(p); + } + + std::byte padding[sizeof(data_t) - sizeof(ArrT)]; + }; + template + struct VecDataArray { + using ArrT = _ArrT; + ArrT p; + constexpr auto cbegin() const { + return std::ranges::cbegin(p); + } + constexpr auto cend() const { + return std::ranges::cend(p); + } + + constexpr auto begin() { + return std::ranges::begin(p); + } + constexpr auto end() { + return std::ranges::end(p); + } + + }; + + template + struct PaddedGccVec; + + template + struct PaddedGccVec { + using GccV = typename GccVExt::GccV; + using data_t = _data_t; + GccV gcc_vec; + std::byte padding[sizeof(data_t) - sizeof(GccV)]; + }; + + template + struct PaddedGccVec { + using GccV = typename GccVExt::GccV; + GccV gcc_vec; + }; + } + + /*template + using PaddedGccVec = detail::PaddedGccVec()>;*/ + template + using GccVec = typename detail::GccVExt::GccV; + template + using VDataArray = detail::VecDataArray()>; + 
static_assert(!detail::BDataNeedsPadding<3, float, (glm::qualifier)0>()); +} +#include "element.hpp" +#include "simd_helpers.inl" +#include "../compute_vector_relational.hpp" +#include "../compute_vector_decl.hpp" +#if GLM_CONFIG_SIMD == GLM_ENABLE +# include "../type_vec_simd.inl" +#endif +namespace glm +{ + template + struct GLM_TRIVIAL vec : detail::ElementCollection + { + // -- Data -- + using EC = detail::ElementCollection; + using EC::x; + using EC::y; + using EC::z; + using EC::w; + using EC::r; + using EC::g; + using EC::b; + using EC::a; + using EC::s; + using EC::t; + using EC::p; + using EC::q; + using EC::data; + using EC::elementArr; + + using SimdHlp = detail::SimdHelpers; + static constexpr length_t data_len = (Q == aligned && L == 3) ? 4 : L; + using DataArray = VDataArray; + using data_t = typename detail::storage::value>::type; + using GccVec_t = GccVec; + + // -- Implementation detail -- + typedef T value_type; + typedef vec type; + typedef vec bool_type; + static constexpr qualifier k_qual = Q; + static constexpr length_t k_len = L; + + enum is_aligned + { + value = detail::is_aligned::value + }; + + // -- Component Access -- + static constexpr length_t length(){ return L; } + + inline constexpr T& __attribute__((always_inline,flatten)) operator[](length_t i) + { + if (!std::is_constant_evaluated() && !__builtin_constant_p(i) ) { + GLM_ASSERT_LENGTH(i, L); + } + return elementArr.p[i]; + } + + inline constexpr T __attribute__((always_inline,flatten)) operator[](length_t i) const + { + if (!std::is_constant_evaluated() && !__builtin_constant_p(i) ) { + GLM_ASSERT_LENGTH(i, L); + } + + return elementArr.p[i]; + } + /// @addtogroup simd_constexpr_vec + /// @{ + + //! Evaluates the component-wise expression: this ? v1 : v2, + //! where the vector pointed to by this is the predicate vector + //! the predicate vector must be of an integral or boolean type, + //! All vectors must have the same length, + //! 
and vector v1 and vector v2 must have the same type (the predicate vector can be a different type + template requires(std::is_integral_v) + inline vec __attribute((pure, leaf, nothrow, no_stack_protector)) compWiseTernary(vec v1, vec v2) const { + if constexpr (L == 3 && (!BIsAlignedQ() || !BIsAlignedQ())) { + vec<3, T, PackedToAligned()> predicateWidened(*this); + vec<3, Tx, PackedToAligned()> v1Widened(v1); + vec<3, Tx, PackedToAligned()> v2Widened(v2); + return vec<3, Tx, Qx>( vec<3, Tx, PackedToAligned()>( predicateWidened.data ? v1Widened.data : v2Widened.data ) ); + } else { + return vec( this->data ? v1.data : v2.data ); + } + } + + //! Evaluates the component-wise expression: this ? v1 : v2, + //! where the vector pointed to by this is the predicate vector + //! the predicate vector must be of an integral or boolean type, + //! All vectors must have the same length, + //! and vector v1 and vector v2 must have the same type (the predicate vector can be a different type + template requires(std::is_integral_v) + inline vec __attribute((pure, leaf, nothrow, no_stack_protector)) compWiseTernary(vec v1, vec v2) { + if constexpr (L == 3 && (!BIsAlignedQ() || !BIsAlignedQ())) { + vec<3, T, PackedToAligned()> predicateWidened(*this); + vec<3, Tx, PackedToAligned()> v1Widened(v1); + vec<3, Tx, PackedToAligned()> v2Widened(v2); + return vec<3, Tx, Qx>( vec<3, Tx, PackedToAligned()>( predicateWidened.data ? v1Widened.data : v2Widened.data ) ); + } else { + return vec( this->data ? v1.data : v2.data ); + } + } + + //! Returns a vector created from blending two vectors together, based on the non-type template parameter boolean array mask. + //! Vector lhs is the vector pointed to by this, vector rhs is the second vector. + //! The nth false/0 element in the mask will select the nth element from the first vector. + //! The nth true/1 element in the mask will select the nth element from the second vector. + //! Vector lhs & rhs must have the same type, length & qualifier. 
+ //! The returned vector has the same type, length & qualifer as vectors lhs & rhs. + template mask> + inline vec __attribute((pure, leaf, nothrow, no_stack_protector)) blend(vec rhs) const { + if constexpr (L == 1) { + return vec<1, T, Q>(__builtin_shufflevector(this->data, rhs.data, mask[0]?1:0)); + } else if constexpr (L == 2) { + return vec<2, T, Q>(__builtin_shufflevector(this->data, rhs.data, mask[0]?2:0, mask[1]?3:1)); + } else if constexpr (L == 3 && BIsAlignedQ()) { + //aligned vec3 data is just vec4 data + return vec<3, T, Q>(__builtin_shufflevector(this->data, rhs.data, mask[0]?4:0, mask[1]?5:1, mask[2]?6:2, -1)); + } else if constexpr (L == 3 && !BIsAlignedQ()) { + vec<3, T, PackedToAligned()> lhsWidened(*this); + vec<3, T, PackedToAligned()> rhsWidened(rhs); + return vec<3, T, Q>( vec<3, T, PackedToAligned()>(__builtin_shufflevector(lhsWidened.data, rhsWidened.data, mask[0]?4:0, mask[1]?5:1, mask[2]?6:2, -1)) ); + } else if constexpr (L == 4) { + return vec<4, T, Q>(__builtin_shufflevector(this->data, rhs.data, mask[0]?4:0, mask[1]?5:1, mask[2]?6:2, mask[3]?7:3)); + } else { + static_assert(false, "vec.blend() can only be run on a vec of length 1<=length<=4" ); + } + } + + + /// @} + static constexpr auto __attribute__((always_inline,flatten)) ctor_scalar(arithmetic auto scalar) { + if (std::is_constant_evaluated()) { + DataArray a{}; + for (length_t i = 0; i < L; i++) { + a.p[i]=scalar; + } + return EC{.elementArr=a}; + } else { + return EC{.data=SimdHlp::simd_ctor_scalar(scalar)}; + } + } + + template + static constexpr auto __attribute__((always_inline,flatten)) ctor(::glm::vec&& vec) { + if (std::is_constant_evaluated()) { + DataArray a; + using ArrX = VDataArray; + ArrX ax = std::bit_cast(vec.elementArr); + for (length_t i = 0; i < std::min(Lx, L); i++) { + a.p[i] = (T)ax.p[i]; + } + + return EC{.elementArr=a}; + } else { + return EC{.data=SimdHlp::simd_ctor(vec)}; + } + } + + template + static constexpr auto 
__attribute__((always_inline,flatten)) ctor(::glm::vec const& vec) { + if (std::is_constant_evaluated()) { + DataArray a; + using ArrX = VDataArray; + ArrX ax = std::bit_cast(vec.elementArr); + for (length_t i = 0; i < std::min(Lx, L); i++) { + a.p[i] = (T)ax.p[i]; + } + + return EC{.elementArr=a}; + } else { + return EC{.data=SimdHlp::simd_ctor(vec)}; + } + } + + template + using RetArr = std::array; + + template + static constexpr length_t __attribute__((always_inline)) ctor_mixed_constexpr_single_get_length() + { + if constexpr ( std::is_integral_v || std::is_floating_point_v ) { + return 1; + } else if constexpr ( ( requires { Vs0::k_len; }) ) { + return Vs0::k_len; + } else { + return 1; + } + } + static constexpr decltype(auto) __attribute__((always_inline)) ctor_mixed_constexpr_single(auto vs0) + { + using VTX = decltype(vs0); + if constexpr ( std::is_integral_v || std::is_floating_point_v ) { + return RetArr<1>{(T)vs0}; + } else if constexpr ( ( requires { VTX::k_len; }) ) { + using Tx = VTX::value_type; + using ArrX = VDataArray; + + ArrX ax = std::bit_cast(vs0.data); + return ax; + } else { + using Tx = VTX::value_type; + return RetArr<1>{(Tx)vs0}; + } + } + + constexpr __attribute__((always_inline)) vec() = default; + constexpr __attribute__((always_inline)) vec(arithmetic auto scalar) : EC{ ctor_scalar(scalar)} {} + + template requires (Lx == 1 && NotVec1) + constexpr __attribute__((always_inline)) vec(vec v) : EC{ [d=std::bit_cast>(v.elementArr)](){ auto s = [scalar=d.p[0]](){ return scalar; }; return ctor_scalar(s); }() } {} + + template requires (Lx != 1) + constexpr __attribute__((always_inline)) vec(vec&& v) : EC{ ctor(v) } {} + + template requires (Lx != 1) + constexpr __attribute__((always_inline)) vec(vec const& v) : EC{ ctor(v) } {} + + constexpr __attribute__((always_inline)) vec(GccVec_t d) : EC{.data=reinterpret_cast(d)} {} + + template requires (sizeof...(Scalar) == L) + constexpr auto __attribute__((always_inline)) 
ctor_multi_scalar_func(Scalar... scalar) { + if ( std::is_constant_evaluated() || (L == 3 && !BIsAlignedQ()) ) { + DataArray a = {.p={ static_cast(scalar)... }}; + return EC{.elementArr=a}; + } else { + return EC{.data=SimdHlp::simd_ctor_multi_scalars(scalar...)}; + } + } + + template requires (sizeof...(Scalar) == L) + constexpr __attribute__((always_inline)) vec(Scalar... scalar) + : EC + { ctor_multi_scalar_func(scalar...) + } {} + + template requires (sizeof...(VecOrScalar) >= 1 && NotSameArithmeticTypes()) + constexpr EC __attribute__((always_inline)) ctor_multi_mixed_func(VecOrScalar0 const&__restrict__ vecOrScalar0, VecOrScalar... vecOrScalar) + { + //type_vecx.inl never had any simd versions for ctor from mixes of scalars & vectors, + //so I don't really need to figure out how I'd make a generic simd version for this ctor + + constexpr auto i = ctor_mixed_constexpr_single_get_length(); + + struct PaddedA { + VDataArray a; + unsigned char padding[sizeof(VDataArray) - sizeof(VDataArray)]; + }; + auto destArr = std::bit_cast>(PaddedA{.a=ctor_mixed_constexpr_single(vecOrScalar0)}); + constexpr std::array lengths = { ctor_mixed_constexpr_single_get_length()...}; + const auto params = std::tuple{vecOrScalar...}; + + const auto arr = ctor_mixed_constexpr_single(std::get<0>(params)); + std::ranges::copy_n(arr.cbegin(), lengths[0], destArr.p.begin()+i); + constexpr auto i2 = i + lengths[0]; + + if constexpr (sizeof...(VecOrScalar) > 1) { + const auto arr2 = ctor_mixed_constexpr_single(std::get<1>(params)); + std::ranges::copy_n(arr2.cbegin(), lengths[1], destArr.p.begin()+i2); + constexpr auto i3 = i2 + lengths[1]; + if constexpr (sizeof...(VecOrScalar) > 2) { + const auto arr3 = ctor_mixed_constexpr_single(std::get<2>(params)); + std::ranges::copy_n(arr3.cbegin(), lengths[2], destArr.p.begin()+i3); + } + } + + return std::bit_cast(destArr); + } + template requires (sizeof...(VecOrScalar) >= 1 && NotSameArithmeticTypes()) + constexpr __attribute__((always_inline)) 
vec(VecOrScalar0 const&__restrict__ vecOrScalar0, VecOrScalar... vecOrScalar) + : EC + {ctor_multi_mixed_func(vecOrScalar0, vecOrScalar...)} {} + + + + inline GLM_CONSTEXPR vec& __attribute__((always_inline)) operator+=(arithmetic auto scalar) + { + if (std::is_constant_evaluated()) { + return *this += vec(scalar); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data += scalar; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data += scalar; + return *this; + } else { + return (*this = detail::compute_vec_add::value>::call(*this, vec(scalar))); + } + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator+=(vec<1, Tx, Q> v) requires (NotVec1) + { + if (std::is_constant_evaluated()) { + return *this += vec(v); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data += v.x; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data += v.x; + return *this; + } else + return (*this = detail::compute_vec_add::value>::call(*this, vec(v.x))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator+=(vec const& v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + + std::ranges::transform(this->elementArr.p, v.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> T { return lhs + rhs; }); + this->elementArr.p = result; + return *this; + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data += vec<3,T,PackedToAligned()>(v).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data += v.data; + return *this; + } else + return (*this = detail::compute_vec_add::value>::call(*this, vec(v))); + } + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator-=(arithmetic auto scalar) + { + if (std::is_constant_evaluated()) { + return *this -= 
vec(scalar); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data -= vec<3,T,PackedToAligned()>(scalar).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data -= scalar; + return *this; + } else + return (*this = detail::compute_vec_sub::value>::call(*this, vec(scalar))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator-=(vec<1, Tx, Q> v) requires (NotVec1) + { + if (std::is_constant_evaluated()) { + return *this += vec(v); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data -= vec<3,T,PackedToAligned()>(v.x).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data -= v.x; + return *this; + } else + return (*this = detail::compute_vec_sub::value>::call(*this, vec(v.x))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator-=(vec const& v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + + std::ranges::transform(this->elementArr.p, v.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> T { return lhs - rhs; }); + this->elementArr.p = result; + return *this; + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data -= vec<3,T,PackedToAligned()>(v).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data -= v.data; + return *this; + } else + return (*this = detail::compute_vec_sub::value>::call(*this, vec(v))); + } + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator*=(arithmetic auto scalar) + { + if (std::is_constant_evaluated()) { + return *this *= vec(scalar); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data *= vec<3,T,PackedToAligned()>(scalar).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + 
this->data *= scalar; + return *this; + } else + return (*this = detail::compute_vec_mul::value>::call(*this, vec(scalar))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator*=(vec<1, Tx, Q> v) requires (NotVec1) + { + if (std::is_constant_evaluated()) { + return *this *= vec(v); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data *= vec<3,T,PackedToAligned()>(v.x).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data *= v.x; + return *this; + } else + return (*this = detail::compute_vec_mul::value>::call(*this, vec(v.x))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator*=(vec const& v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + + std::ranges::transform(this->elementArr.p, v.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> T { return lhs * rhs; }); + this->elementArr.p = result; + return *this; + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data *= vec<3,T,PackedToAligned()>(v).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data *= v.data; + return *this; + } else + return (*this = detail::compute_vec_mul::value>::call(*this, vec(v))); + } + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator/=(arithmetic auto scalar) + { + if (std::is_constant_evaluated()) { + return *this /= vec(scalar); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data /= vec<3,T,PackedToAligned()>(scalar).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data /= scalar; + return *this; + } else + return (*this = detail::compute_vec_div::value>::call(*this, vec(scalar))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator/=(vec<1, Tx, Q> v) requires 
(NotVec1) + { + if (std::is_constant_evaluated()) { + return *this /= vec(v); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data /= vec<3,T,PackedToAligned()>(v.x).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data /= v.x; + return *this; + } else + return (*this = detail::compute_vec_div::value>::call(*this, vec(v.x))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator/=(vec const& v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + + std::ranges::transform(this->elementArr.p, v.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> T { return lhs + rhs; }); + this->elementArr.p = result; + return *this; + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data /= vec<3,T,PackedToAligned()>(v).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data /= v.data; + return *this; + } else + return (*this = detail::compute_vec_div::value>::call(*this, vec(v))); + } + + // -- Increment and decrement operators -- + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator++() + { + constexpr T one = T(1); + *this += one; + return *this; + } + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator--() + { + constexpr T one = T(1); + *this -= one; + return *this; + } + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator++(int) + { + vec Result(*this); + constexpr T one = T(1); + ++*this; + return Result; + } + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator--(int) + { + vec Result(*this); + constexpr T one = T(1); + --*this; + return Result; + } + + // -- Unary bit operators -- + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator%=(arithmetic auto scalar) + { + if (std::is_constant_evaluated()) { + return *this %= vec(scalar); + } else if constexpr 
(BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data %= vec<3,T,PackedToAligned()>(scalar).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data %= scalar; + return *this; + } else + return (*this = detail::compute_vec_mod::value>::call(*this, vec(scalar))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator%=(vec<1, Tx, Q> v) requires (NotVec1) + { + if (std::is_constant_evaluated()) { + return *this %= vec(v); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data %= vec<3,T,PackedToAligned()>(v.x).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data %= v.x; + return *this; + } else + return (*this = detail::compute_vec_mod::value>::call(*this, vec(v))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator%=(vec v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + + std::ranges::transform(this->elementArr.p, v.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> T { return lhs % rhs; }); + this->elementArr.p = result; + return *this; + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data %= vec<3,T,PackedToAligned()>(v).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data %= v.data; + return *this; + } else + return (*this = detail::compute_vec_mod::value>::call(*this, vec(v))); + } + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator&=(arithmetic auto scalar) + { + if (std::is_constant_evaluated()) { + return *this &= vec(scalar); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data &= vec<3,T,PackedToAligned()>(scalar).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data &= scalar; + return *this; + } else + 
return (*this = detail::compute_vec_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(scalar))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator&=(vec<1, Tx, Q> v) requires (NotVec1) + { + if (std::is_constant_evaluated()) { + return *this &= vec(v); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data &= vec<3,T,PackedToAligned()>(v.x).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data &= v.x; + return *this; + } else + return (*this = detail::compute_vec_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(v))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator&=(vec v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + + std::ranges::transform(this->elementArr.p, v.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> T { return lhs & rhs; }); + this->elementArr.p = result; + return *this; + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data &= vec<3,T,PackedToAligned()>(v).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data &= v.data; + return *this; + } else + return (*this = detail::compute_vec_and::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(v))); + } + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator|=(arithmetic auto scalar) + { + if (std::is_constant_evaluated()) { + return *this |= vec(scalar); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data |= vec<3,T,PackedToAligned()>(scalar).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data |= scalar; + return *this; + } else + return (*this = detail::compute_vec_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(scalar))); + } + + 
template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator|=(vec<1, Tx, Q> v) requires (NotVec1) + { + if (std::is_constant_evaluated()) { + return *this |= vec(v); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data |= vec<3,T,PackedToAligned()>(v.x).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data |= v.x; + return *this; + } else + return (*this = detail::compute_vec_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(v.x))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator|=(vec v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + + std::ranges::transform(this->elementArr.p, v.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> T { return lhs | rhs; }); + this->elementArr.p = result; + return *this; + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data |= vec<3,T,PackedToAligned()>(v).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data |= v.data; + return *this; + } else + return (*this = detail::compute_vec_or::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(v))); + } + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator^=(arithmetic auto scalar) + { + if (std::is_constant_evaluated()) { + return *this ^= vec(scalar); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data ^= vec<3,T,PackedToAligned()>(scalar).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data ^= scalar; + return *this; + } else + return (*this = detail::compute_vec_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(scalar))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator^=(vec<1, Tx, Q> v) requires (NotVec1) + { + if 
(std::is_constant_evaluated()) { + return *this ^= vec(v); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data ^= vec<3,T,PackedToAligned()>(v.x).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data ^= v.x; + return *this; + } else + return (*this = detail::compute_vec_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(v.x))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator^=(vec v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + + std::ranges::transform(this->elementArr.p, v.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> T { return lhs ^ rhs; }); + this->elementArr.p = result; + return *this; + } else { + return (*this = detail::compute_vec_xor::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(v))); + } + } + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator<<=(arithmetic auto scalar) + { + if (std::is_constant_evaluated()) { + return *this <<= vec(scalar); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data <<= vec<4,T,PackedToAligned()>(scalar).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data <<= scalar; + return *this; + } else + return (*this = detail::compute_vec_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(scalar))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator<<=(vec<1, Tx, Q> v) requires (NotVec1) + { + if (std::is_constant_evaluated()) { + return *this <<= vec(v); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data <<= vec<3,T,PackedToAligned()>(v.x).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data <<= v.x; + return *this; + } else + return (*this = 
detail::compute_vec_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(v.x))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator<<=(vec v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + + std::ranges::transform(this->elementArr.p, v.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> T { return lhs << rhs; }); + this->elementArr.p = result; + return *this; + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data <<= vec<3,T,PackedToAligned()>(v).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data <<= v.data; + return *this; + } else + return (*this = detail::compute_vec_shift_left::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(v))); + } + + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator>>=(arithmetic auto scalar) + { + if (std::is_constant_evaluated()) { + return *this >>= vec(scalar); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data >>= vec<3,T,PackedToAligned()>(scalar).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data >>= scalar; + return *this; + } else + return (*this = detail::compute_vec_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(scalar))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator>>=(vec<1, Tx, Q> v) requires (NotVec1) + { + if (std::is_constant_evaluated()) { + return *this >>= vec(v); + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data >>= vec<3,T,PackedToAligned()>(v.x).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data >>= v.x; + return *this; + } else + return (*this = detail::compute_vec_shift_right::value, sizeof(T) * 8, 
detail::is_aligned::value>::call(*this, vec(v.x))); + } + + template + inline GLM_CONSTEXPR vec & __attribute__((always_inline)) operator>>=(vec v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + + std::ranges::transform(this->elementArr.p, v.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> T { return lhs >> rhs; }); + this->elementArr.p = result; + return *this; + } else if constexpr (BShouldWidenVec()) { + vec<3,T,PackedToAligned()> widened(*this); + + widened.data >>= vec<3,T,PackedToAligned()>(v).data; + return (*this = vec<3,T,Q>(widened)); + } else if constexpr (L != 3) { + this->data >>= v.data; + return *this; + } else + return (*this = detail::compute_vec_shift_right::value, sizeof(T) * 8, detail::is_aligned::value>::call(*this, vec(v))); + } + + // -- Unary constant operators -- + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator+() + { + return *this; + } + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator-() + { + return vec(0) -= *this; + } + + // -- Binary arithmetic operators -- + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator+(T scalar) + { + return vec(*this) += scalar; + } + + template + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator+(vec v2) requires (!NotVec1 && NotVec1) + { + return vec(*this) += v2; + } + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator+(T scalar, vec v) + { + return vec(v) += scalar; + } + + /*friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator+(vec<1, T, Q> v1, vec v2) + { + return vec(v2) += v1; + }*/ + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator+(vec v2) + { + return vec(*this) += v2; + } + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator+(vec v2) const + { + return vec(*this) += v2; + } + + + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator-(T scalar) + { + return vec(*this) -= scalar; + } + + template + 
inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator-(vec v2) requires (!NotVec1 && NotVec1) + { + return vec(*this) -= v2; + } + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator-(T scalar, vec v) + { + return vec(scalar) -= v; + } + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator-(vec<1, T, Q> v1, vec v2) requires (NotVec1) + { + return vec(v1.x) -= v2; + } + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator-(vec v1, vec v2) + { + return vec(v1) -= v2; + } + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator*(T scalar) + { + return vec(*this) *= scalar; + } + + template + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator*(vec v2) requires (!NotVec1 && NotVec1) + { + return vec(*this) *= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator*(T scalar, vec v) + { + return vec(v) *= scalar; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator*(vec<1, T, Q> v1, vec v2) requires (NotVec1) + { + return vec(v2) *= v1; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator*(vec v1, vec const& v2) + { + return vec(v2) *= v1; + } + + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator/(T scalar) + { + return vec(*this) /= scalar; + } + + + template + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator/(vec v2) requires (!NotVec1 && NotVec1) + { + return vec(*this) /= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator/(T scalar, vec v) + { + return vec(scalar) /= v; + } + + template + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator/(vec v1, vec v2) requires (!NotVec1 && NotVec1) + { + return vec(v1.x) /= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator/(vec v1, vec v2) + { + return vec(v1) /= v2; + } + + // -- Binary bit operators -- + + + friend 
inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator%(vec v, T scalar) + { + return vec(v) %= scalar; + } + + template + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator%(vec v2) requires (!NotVec1 && NotVec1) + { + return vec(*this) %= v2.x; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator%(T scalar, vec v) + { + return vec(scalar) %= v; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator%(vec<1, T, Q> scalar, vec v) requires (NotVec1) + { + return vec(scalar.x) %= v; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator%(vec v1, vec v2) + { + return vec(v1) %= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator&(vec v, T scalar) + { + return vec(v) &= scalar; + } + + + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator&(vec<1, T, Q> scalar) requires (NotVec1) + { + return vec(*this) &= scalar; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator&(T scalar, vec v) + { + return vec(scalar) &= v; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator&(vec<1, T, Q> v1, vec v2) requires (NotVec1) + { + return vec(v1.x) &= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator&(vec v1, vec v2) + { + return vec(v1) &= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator|(vec v, T scalar) + { + return vec(v) |= scalar; + } + + template + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator|(vec v2) requires (!NotVec1 && NotVec1) + { + return vec(*this) |= v2.x; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator|(T scalar, vec v) + { + return vec(scalar) |= v; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator|(vec<1, T, Q> v1, vec v2) requires (NotVec1) + { + return vec(v1.x) |= v2; + } + + + friend inline 
GLM_CONSTEXPR vec __attribute__((always_inline)) operator|(vec v1, vec v2) + { + return vec(v1) |= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator^(vec v, T scalar) + { + return vec(v) ^= scalar; + } + + template + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator^(vec v2) requires (!NotVec1 && NotVec1) + { + return vec(*this) ^= v2.x; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator^(T scalar, vec v) + { + return vec(scalar) ^= v; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator^(vec<1, T, Q> v1, vec v2) requires (NotVec1) + { + return vec(v1.x) ^= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator^(vec v1, vec v2) + { + return vec(v1) ^= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator<<(vec v, T scalar) + { + return vec(v) <<= scalar; + } + + template + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator<<(vec v2) requires (!NotVec1 && NotVec1) + { + return vec(*this) <<= v2.x; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator<<(T scalar, vec v) + { + return vec(scalar) <<= v; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator<<(vec<1, T, Q> v1, vec v2) requires (NotVec1) + { + return vec(v1.x) <<= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator<<(vec v1, vec v2) + { + return vec(v1) <<= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator>>(vec v, T scalar) + { + return vec(v) >>= scalar; + } + + template + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator>>(vec v2) requires (!NotVec1 && NotVec1) + { + return vec(*this) >>= v2.x; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator>>(T scalar, vec v) + { + return vec(scalar) >>= v; + } + + + friend inline GLM_CONSTEXPR vec 
__attribute__((always_inline)) operator>>(vec<1, T, Q> v1, vec v2) requires (NotVec1) + { + return vec(v1.x) >>= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator>>(vec v1, vec v2) + { + return vec(v1) >>= v2; + } + + + friend inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator~(vec v) + { + if (std::is_constant_evaluated()) { + std::array result{}; + std::ranges::transform(v.elementArr.p, result.begin(), [](auto const& lhs) -> T { return ~lhs; }); + vec ret{T(0)}; + ret.elementArr.p = result; + return ret; + } else { + return detail::compute_vec_bitwise_not::value, sizeof(T) * 8, detail::is_aligned::value>::call(v); + } + } + + // -- Boolean operators -- + + friend inline GLM_CONSTEXPR bool __attribute__((always_inline)) operator==(vec v1, vec v2) + { + if (std::is_constant_evaluated()) { + std::array result{}; + std::ranges::transform(v1.elementArr.p, v2.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> bool { return lhs == rhs; }); + return std::ranges::all_of(result, [](bool lhs) -> bool { return lhs; }); + } else { + return detail::compute_vec_equal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); + } + } + + + friend inline GLM_CONSTEXPR bool __attribute__((always_inline)) operator!=(vec v1, vec v2) + { + if (std::is_constant_evaluated()) { + std::array result{}; + std::ranges::transform(v1.elementArr.p, v2.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> bool { return lhs != rhs; }); + return std::ranges::any_of(result, [](bool lhs) -> bool { return lhs; }); + } else { + return detail::compute_vec_nequal::value, sizeof(T) * 8, detail::is_aligned::value>::call(v1, v2); + } + } + }; + + template requires (std::is_same_v) + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator&&(vec v1, vec v2) + { + if (std::is_constant_evaluated()) { + std::array::data_len> result{}; + + std::ranges::transform(v1.elementArr.p, v2.elementArr.p, result.begin(), 
[](auto const& lhs, auto const& rhs) -> Tx { return lhs && rhs; }); + vec ret{false}; + ret.elementArr.p = result; + return ret; + } else { + using GVec_t = typename detail::GccVExt::GccV; + using VT = typename vec::type; + GVec_t gv1, gv2; + std::memcpy(&v1, &gv1, std::min(sizeof(v1), sizeof(gv1))); + std::memcpy(&v2, &gv2, std::min(sizeof(v2), sizeof(gv2))); + v1.VT::~VT(); + v2.VT::~VT(); + return vec(gv1 && gv2); + } + } + template requires (std::is_same_v) + inline GLM_CONSTEXPR vec __attribute__((always_inline)) operator||(vec v1, vec v2) + { + if (std::is_constant_evaluated()) { + std::array::data_len> result{}; + + std::ranges::transform(v1.elementArr.p, v2.elementArr.p, result.begin(), [](auto const& lhs, auto const& rhs) -> Tx { return lhs || rhs; }); + vec ret{false}; + ret.elementArr.p = result; + return ret; + } else { + using GVec_t = typename detail::GccVExt::GccV; + using VT = typename vec::type; + GVec_t gv1, gv2; + std::memcpy(&v1, &gv1, std::min(sizeof(v1), sizeof(gv1))); + std::memcpy(&v2, &gv2, std::min(sizeof(v2), sizeof(gv2))); + v1.VT::~VT(); + v2.VT::~VT(); + return vec(gv1 || gv2); + } + } +} +static_assert( glm::detail::is_aligned<(glm::qualifier)0>::value == false); +static_assert(sizeof(glm::vec<3, float, (glm::qualifier)0>) == 12); \ No newline at end of file diff --git a/glm/detail/type_vec1.hpp b/glm/detail/type_vec1.hpp index 0cc7b5d4c9..27860146c4 100644 --- a/glm/detail/type_vec1.hpp +++ b/glm/detail/type_vec1.hpp @@ -1,6 +1,6 @@ /// @ref core /// @file glm/detail/type_vec1.hpp - +#if GLM_SIMD_CONSTEXPR == 0 #pragma once #include "qualifier.hpp" @@ -306,3 +306,4 @@ namespace glm #ifndef GLM_EXTERNAL_TEMPLATE #include "type_vec1.inl" #endif//GLM_EXTERNAL_TEMPLATE +#endif diff --git a/glm/detail/type_vec2.hpp b/glm/detail/type_vec2.hpp index 66c6137cee..e8e043a7ac 100644 --- a/glm/detail/type_vec2.hpp +++ b/glm/detail/type_vec2.hpp @@ -1,9 +1,9 @@ /// @ref core /// @file glm/detail/type_vec2.hpp - #pragma once #include 
"qualifier.hpp" +#if GLM_SIMD_CONSTEXPR == 0 #if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR # include "_swizzle.hpp" #elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION @@ -404,3 +404,4 @@ namespace glm #ifndef GLM_EXTERNAL_TEMPLATE #include "type_vec2.inl" #endif//GLM_EXTERNAL_TEMPLATE +#endif diff --git a/glm/detail/type_vec3.hpp b/glm/detail/type_vec3.hpp index 90de2f8a41..f2a2a6d65f 100644 --- a/glm/detail/type_vec3.hpp +++ b/glm/detail/type_vec3.hpp @@ -1,9 +1,9 @@ /// @ref core /// @file glm/detail/type_vec3.hpp - #pragma once #include "qualifier.hpp" +#if GLM_SIMD_CONSTEXPR == 0 #if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR # include "_swizzle.hpp" #elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION @@ -445,3 +445,5 @@ namespace glm #ifndef GLM_EXTERNAL_TEMPLATE #include "type_vec3.inl" #endif//GLM_EXTERNAL_TEMPLATE + +#endif diff --git a/glm/detail/type_vec4.hpp b/glm/detail/type_vec4.hpp index 9ba11229f6..9f02f03bd6 100644 --- a/glm/detail/type_vec4.hpp +++ b/glm/detail/type_vec4.hpp @@ -1,9 +1,9 @@ /// @ref core /// @file glm/detail/type_vec4.hpp - #pragma once #include "qualifier.hpp" +#if GLM_SIMD_CONSTEXPR == 0 #if GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_OPERATOR # include "_swizzle.hpp" #elif GLM_CONFIG_SWIZZLE == GLM_SWIZZLE_FUNCTION @@ -512,3 +512,5 @@ namespace glm #ifndef GLM_EXTERNAL_TEMPLATE #include "type_vec4.inl" #endif//GLM_EXTERNAL_TEMPLATE + +#endif diff --git a/glm/detail/type_vec4.inl b/glm/detail/type_vec4.inl index 66539e3049..d4947f7726 100644 --- a/glm/detail/type_vec4.inl +++ b/glm/detail/type_vec4.inl @@ -1,5 +1,5 @@ /// @ref core - +#if GLM_SIMD_CONSTEXPR == 0 #include "compute_vector_relational.hpp" #include "compute_vector_decl.hpp" @@ -1128,3 +1128,5 @@ namespace glm { } #endif + +#endif diff --git a/glm/glm.hpp b/glm/glm.hpp index 8b375459a7..a551e102c1 100644 --- a/glm/glm.hpp +++ b/glm/glm.hpp @@ -101,6 +101,14 @@ /// included a specific file. 
/// +#ifndef GLM_SIMD_CONSTEXPR +#define GLM_SIMD_CONSTEXPR 0 +#endif + +#if GLM_SIMD_CONSTEXPR == 1 +# define GLM_FORCE_INTRINSICS 1 +#endif + #include "detail/_fixes.hpp" #include "detail/setup.hpp" @@ -114,9 +122,14 @@ #include #include "fwd.hpp" -#include "vec2.hpp" -#include "vec3.hpp" -#include "vec4.hpp" +#if GLM_SIMD_CONSTEXPR == 0 +# include "vec2.hpp" +# include "vec3.hpp" +# include "vec4.hpp" +#else +# include "simd_constexpr/vec.hpp" +#endif + #include "mat2x2.hpp" #include "mat2x3.hpp" #include "mat2x4.hpp" diff --git a/glm/matrix.hpp b/glm/matrix.hpp index 4584c92c3c..3d1a62b7a5 100644 --- a/glm/matrix.hpp +++ b/glm/matrix.hpp @@ -15,9 +15,14 @@ // Dependencies #include "detail/qualifier.hpp" #include "detail/setup.hpp" -#include "vec2.hpp" -#include "vec3.hpp" -#include "vec4.hpp" +#if GLM_SIMD_CONSTEXPR == 0 +# include "vec2.hpp" +# include "vec3.hpp" +# include "vec4.hpp" +#else +# include"simd_constexpr/vec.hpp" +#endif + #include "mat2x2.hpp" #include "mat2x3.hpp" #include "mat2x4.hpp" diff --git a/glm/simd/platform.h b/glm/simd/platform.h index a318b098f9..9dd60071a4 100644 --- a/glm/simd/platform.h +++ b/glm/simd/platform.h @@ -373,7 +373,7 @@ #elif defined(GLM_FORCE_SSE) # define GLM_ARCH (GLM_ARCH_SSE) # define GLM_FORCE_INTRINSICS -#elif defined(GLM_FORCE_INTRINSICS) && !defined(GLM_FORCE_XYZW_ONLY) +#elif ( (defined(GLM_FORCE_INTRINSICS) && !defined(GLM_FORCE_XYZW_ONLY)) || GLM_SIMD_CONSTEXPR == 1 ) # if defined(__AVX2__) # define GLM_ARCH (GLM_ARCH_AVX2) # elif defined(__AVX__) diff --git a/glm/simd_constexpr/vec.hpp b/glm/simd_constexpr/vec.hpp new file mode 100644 index 0000000000..bff1b2e8db --- /dev/null +++ b/glm/simd_constexpr/vec.hpp @@ -0,0 +1,27 @@ +/// @ref core +/// @file glm/simd_constexpr/vec4.hpp + +#pragma once +namespace glm +{ + typedef vec<1, float, defaultp> vec1; + typedef vec<2, float, defaultp> vec2; + typedef vec<3, float, defaultp> vec3; + typedef vec<4, float, defaultp> vec4; + + typedef vec<1, int, defaultp> 
ivec1; + typedef vec<2, int, defaultp> ivec2; + typedef vec<3, int, defaultp> ivec3; + typedef vec<4, int, defaultp> ivec4; + + typedef vec<1, unsigned int, defaultp> uvec1; + typedef vec<2, unsigned int, defaultp> uvec2; + typedef vec<3, unsigned int, defaultp> uvec3; + typedef vec<4, unsigned int, defaultp> uvec4; + + typedef vec<1, bool, defaultp> bvec1; + typedef vec<2, bool, defaultp> bvec2; + typedef vec<3, bool, defaultp> bvec3; + typedef vec<4, bool, defaultp> bvec4; +} +#include "../detail/simd_constexpr/vec.hpp" diff --git a/test/core/core_c++20_simd_constexpr.cpp b/test/core/core_c++20_simd_constexpr.cpp new file mode 100644 index 0000000000..e440811ddd --- /dev/null +++ b/test/core/core_c++20_simd_constexpr.cpp @@ -0,0 +1,51 @@ +#define GLM_SIMD_CONSTEXPR 1 +#include +#include +#include +#include +#include +#include +#include +#define GLM_FORCE_ALIGNED_GENTYPES 1 +#include +#if GLM_COMPILER & GLM_COMPILER_CLANG +# pragma clang diagnostic push +# pragma clang diagnostic ignored "-Wglobal-constructors" +# pragma clang diagnostic ignored "-Wunused-variable" +#endif + +int main() +{ +#if defined(__x86_64__) || defined(__aarch64__) + static_assert(GLM_ARCH & GLM_ARCH_SIMD_BIT); + static_assert(GLM_CONFIG_SIMD); + static_assert(GLM_ARCH_SIMD_BIT); +#endif + + using avec4 = glm::vec<4, float, glm::aligned_highp>; + static constexpr avec4 v{1.0f};//, 1.1f, 1.2f, 1.0f}; + avec4 v1{static_cast(rand() % 2)}; + avec4 v2{static_cast(rand() % 2)};//, static_cast(rand() % 255), static_cast(rand() % 255), static_cast(rand() % 255)}; + static constexpr avec4 v3 = avec4{1.5f,2.0f,3.0f,4.0f}; + static constexpr avec4 v4 = v3; + printf("v1 = %f %f %f %f\n", v1[0], v1[1], v1[2], v1[3]); + printf("v2 = %f %f %f %f\n", v2[0], v2[1], v2[2], v2[3]); + v1.x; + avec4 vfin = glm::max(v1, v2) + v3; + static_assert(sizeof(vfin)>0); + double w = v3.w; + printf("vfin = %f %f %f %f\n", vfin[0], vfin[1], vfin[2], vfin[3]); + printf("v3 = %f %f %f %f\n", v3[0], v3[1], v3.z, w); + auto v5 
= v3.xyzw(); + printf("v3.xyzw() = %f %f %f %f\n", v5.x, v5.y, v5.z, v5.w); + #ifdef __clang__ + auto v6 = v3.Xyzw(); + printf("v3.Xyzw() = %f %f %f %f\n", -1.0, v6.y, v6.z, v6.w); + #endif + + auto v7 = v3.blend<{0, 1, 0, 1}>(vfin); + printf("v3.blend(vfin) = %f %f %f %f\n", v7.x, v7.y, v7.z, v7.w); + static constexpr auto v8 = v + v4; + printf("static constexpr auto v8 = v + v4 = %f %f %f %f\n", v8.x, v8.y, v8.z, v8.w); + return 0; +}