diff --git a/Engine/source/math/impl/float3_impl.inl b/Engine/source/math/impl/float3_impl.inl index 14fce3f3f..ddf9886d7 100644 --- a/Engine/source/math/impl/float3_impl.inl +++ b/Engine/source/math/impl/float3_impl.inl @@ -47,7 +47,7 @@ namespace math_backend::float3 { f32x4 va = v_load3(a); f32x4 vb = v_load3(b); - f32x4 vr = v_div_fast(va, vb); + f32x4 vr = v_div(va, vb); v_store3(r, vr); } @@ -56,7 +56,7 @@ namespace math_backend::float3 { f32x4 va = v_load3(a); f32x4 vs = v_set1(s); - f32x4 vr = v_div_fast(va, vs); + f32x4 vr = v_div(va, vs); v_store3(r, vr); } @@ -112,4 +112,12 @@ namespace math_backend::float3 v_store3(r, vr); } + inline void float3_cross_impl(const float* a, const float* b, float* r) + { + f32x4 va = v_load3(a); + f32x4 vb = v_load3(b); + f32x4 vcross = v_cross(va, vb); + v_store3(r, vcross); + } + } diff --git a/Engine/source/math/impl/float4_c.cpp b/Engine/source/math/impl/float4_c.cpp deleted file mode 100644 index 9063d20fa..000000000 --- a/Engine/source/math/impl/float4_c.cpp +++ /dev/null @@ -1,61 +0,0 @@ -#include "math/public/float4_dispatch.h" -#include "math/mConstants.h" -#include // for sqrtf, etc. 
- -namespace math_backend::float4::dispatch -{ - void install_scalar() - { - gFloat4.add = [](const float* a, const float* b, float* r) { - for (int i = 0; i < 4; i++) r[i] = a[i] + b[i]; - }; - - gFloat4.sub = [](const float* a, const float* b, float* r) { - for (int i = 0; i < 4; i++) r[i] = a[i] - b[i]; - }; - - gFloat4.mul = [](const float* a, const float* b, float* r) { - for (int i = 0; i < 4; i++) r[i] = a[i] * b[i]; - }; - - gFloat4.mul_scalar = [](const float* a, float s, float* r) { - for (int i = 0; i < 4; i++) r[i] = a[i] * s; - }; - - gFloat4.div = [](const float* a, const float* b, float* r) { - for (int i = 0; i < 4; i++) r[i] = a[i] / b[i]; - }; - - gFloat4.div_scalar = [](const float* a, float s, float* r) { - float denom = 1.0f / s; - for (int i = 0; i < 4; i++) r[i] = a[i] * denom; - }; - - gFloat4.dot = [](const float* a, const float* b) { - float sum = 0.f; - for (int i = 0; i < 4; i++) sum += a[i] * b[i]; - return sum; - }; - - gFloat4.length = [](const float* a) { - float sum = 0.f; - for (int i = 0; i < 4; i++) sum += a[i] * a[i]; - return std::sqrt(sum); - }; - - gFloat4.lengthSquared = [](const float* a) { - float sum = 0.f; - for (int i = 0; i < 4; i++) sum += a[i] * a[i]; - return (sum); - }; - - gFloat4.normalize = [](float* a) { - float len = gFloat4.length(a); - if (len > POINT_EPSILON) for (int i = 0; i < 4; i++) a[i] /= len; - }; - - gFloat4.lerp = [](const float* from, const float* to, float f, float* r) { - for (int i = 0; i < 4; i++) r[i] = from[i] + (to[i] - from[i]) * f; - }; - } -} diff --git a/Engine/source/math/impl/float4_impl.inl b/Engine/source/math/impl/float4_impl.inl index cb61ed4fc..1359adcef 100644 --- a/Engine/source/math/impl/float4_impl.inl +++ b/Engine/source/math/impl/float4_impl.inl @@ -111,4 +111,12 @@ namespace math_backend::float4 v_store(r, vr); } + inline void float4_cross_impl(const float* a, const float* b, float* r) + { + f32x4 va = v_load(a); + f32x4 vb = v_load(b); + f32x4 vcross = v_cross(va, vb); + 
v_store(r, vcross); + } + } // namespace math_backend::float4 diff --git a/Engine/source/math/impl/math_c.cpp b/Engine/source/math/impl/math_c.cpp new file mode 100644 index 000000000..3b6bf9f43 --- /dev/null +++ b/Engine/source/math/impl/math_c.cpp @@ -0,0 +1,176 @@ +#include "math/public/float4_dispatch.h" +#include "math/public/float3_dispatch.h" +#include "math/mConstants.h" +#include // for sqrtf, etc. + +namespace math_backend::float4::dispatch +{ + void install_scalar() + { + gFloat4.add = [](const float* a, const float* b, float* r) { + for (int i = 0; i < 4; i++) r[i] = a[i] + b[i]; + }; + + gFloat4.sub = [](const float* a, const float* b, float* r) { + for (int i = 0; i < 4; i++) r[i] = a[i] - b[i]; + }; + + gFloat4.mul = [](const float* a, const float* b, float* r) { + for (int i = 0; i < 4; i++) r[i] = a[i] * b[i]; + }; + + gFloat4.mul_scalar = [](const float* a, float s, float* r) { + for (int i = 0; i < 4; i++) r[i] = a[i] * s; + }; + + gFloat4.div = [](const float* a, const float* b, float* r) { + for (int i = 0; i < 4; i++) r[i] = a[i] / b[i]; + }; + + gFloat4.div_scalar = [](const float* a, float s, float* r) { + float denom = 1.0f / s; + for (int i = 0; i < 4; i++) r[i] = a[i] * denom; + }; + + gFloat4.dot = [](const float* a, const float* b) { + float sum = 0.f; + for (int i = 0; i < 4; i++) sum += a[i] * b[i]; + return sum; + }; + + gFloat4.length = [](const float* a) { + float sum = 0.f; + for (int i = 0; i < 4; i++) sum += a[i] * a[i]; + return std::sqrt(sum); + }; + + gFloat4.lengthSquared = [](const float* a) { + float sum = 0.f; + for (int i = 0; i < 4; i++) sum += a[i] * a[i]; + return (sum); + }; + + gFloat4.normalize = [](float* a) { + float len = gFloat4.length(a); + if (len > POINT_EPSILON) + { + float denom = 1.0f / len; + for (int i = 0; i < 4; i++) + a[i] *= denom; + } + }; + + gFloat4.normalize_mag = [](float* a, float f) { + float len = gFloat4.length(a); + if (len > POINT_EPSILON) + { + float denom = f / len; + for (int i = 0; i 
< 4; i++) a[i] *= denom;
+            }
+        };
+
+        gFloat4.lerp = [](const float* from, const float* to, float f, float* r) {
+            for (int i = 0; i < 4; i++) r[i] = from[i] + (to[i] - from[i]) * f;
+        };
+
+        gFloat4.cross = [](const float* a, const float* b, float* r) {
+            const float ax = a[0];
+            const float ay = a[1];
+            const float az = a[2];
+
+            const float bx = b[0];
+            const float by = b[1];
+            const float bz = b[2];
+
+            r[0] = ay * bz - az * by;
+            r[1] = az * bx - ax * bz;
+            r[2] = ax * by - ay * bx; r[3] = 0.0f; // keep w-lane consistent with SIMD v_cross, which stores 0 in lane 3
+        };
+    }
+}
+
+namespace math_backend::float3::dispatch
+{
+    void install_scalar()
+    {
+        gFloat3.add = [](const float* a, const float* b, float* r) {
+            for (int i = 0; i < 3; i++) r[i] = a[i] + b[i];
+        };
+
+        gFloat3.sub = [](const float* a, const float* b, float* r) {
+            for (int i = 0; i < 3; i++) r[i] = a[i] - b[i];
+        };
+
+        gFloat3.mul = [](const float* a, const float* b, float* r) {
+            for (int i = 0; i < 3; i++) r[i] = a[i] * b[i];
+        };
+
+        gFloat3.mul_scalar = [](const float* a, float s, float* r) {
+            for (int i = 0; i < 3; i++) r[i] = a[i] * s;
+        };
+
+        gFloat3.div = [](const float* a, const float* b, float* r) {
+            for (int i = 0; i < 3; i++) r[i] = a[i] / b[i];
+        };
+
+        gFloat3.div_scalar = [](const float* a, float s, float* r) {
+            float denom = 1.0f / s;
+            for (int i = 0; i < 3; i++) r[i] = a[i] * denom;
+        };
+
+        gFloat3.dot = [](const float* a, const float* b) {
+            float sum = 0.f;
+            for (int i = 0; i < 3; i++) sum += a[i] * b[i];
+            return sum;
+        };
+
+        gFloat3.length = [](const float* a) {
+            float sum = 0.f;
+            for (int i = 0; i < 3; i++) sum += a[i] * a[i];
+            return std::sqrt(sum);
+        };
+
+        gFloat3.lengthSquared = [](const float* a) {
+            float sum = 0.f;
+            for (int i = 0; i < 3; i++) sum += a[i] * a[i];
+            return (sum);
+        };
+
+        gFloat3.normalize = [](float* a) {
+            float len = gFloat3.length(a);
+            if (len > POINT_EPSILON)
+            {
+                float denom = 1.0f / len; // was '1.0' — double-precision divide plus narrowing; match the f suffix used everywhere else
+                for (int i = 0; i < 3; i++) a[i] *= denom;
+            }
+        };
+
+        gFloat3.normalize_mag = [](float* a, float f) {
+            float len =
gFloat3.length(a); + if (len > POINT_EPSILON) + { + float denom = f / len; + for (int i = 0; i < 3; i++) a[i] *= denom; + } + }; + + gFloat3.lerp = [](const float* from, const float* to, float f, float* r) { + for (int i = 0; i < 3; i++) r[i] = from[i] + (to[i] - from[i]) * f; + }; + + gFloat3.cross = [](const float* a, const float* b, float* r) { + const float ax = a[0]; + const float ay = a[1]; + const float az = a[2]; + + const float bx = b[0]; + const float by = b[1]; + const float bz = b[2]; + + r[0] = ay * bz - az * by; + r[1] = az * bx - ax * bz; + r[2] = ax * by - ay * bx; + }; + } +} + diff --git a/Engine/source/math/isa/avx/float3.cpp b/Engine/source/math/isa/avx/float3.cpp new file mode 100644 index 000000000..391ae8f27 --- /dev/null +++ b/Engine/source/math/isa/avx/float3.cpp @@ -0,0 +1,98 @@ +#include "float3_dispatch.h" +#include // AVX/AVX2 intrinsics + +namespace +{ + typedef __m128 f32x4; + + // Load 3 floats into 4-wide SIMD, zero the 4th lane + inline f32x4 v_load3(const float* p) { return _mm_set_ps(0.0f, p[2], p[1], p[0]); } + + // Store 3 floats from SIMD register back to memory + inline void v_store3(float* dst, f32x4 v) + { + alignas(16) float tmp[4]; // temp storage + _mm_store_ps(tmp, v); // store all 4 lanes + dst[0] = tmp[0]; + dst[1] = tmp[1]; + dst[2] = tmp[2]; + } + + // extract just the first lane. 
+    inline float v_extract0(f32x4 v) { return _mm_cvtss_f32(v); }
+
+    // Broadcast a single float across all 4 lanes
+    inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
+
+    // Element-wise multiply
+    inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
+
+    // Element-wise divide fast (approximate 1/b)
+    inline f32x4 v_div_fast(f32x4 a, f32x4 b)
+    {
+        f32x4 rcp = _mm_rcp_ps(b);
+        rcp = _mm_sub_ps(_mm_add_ps(rcp, rcp), _mm_mul_ps(b, _mm_mul_ps(rcp, rcp))); // one Newton-Raphson step: rcp ~12-bit -> ~23-bit accurate
+        return _mm_mul_ps(a, rcp);
+    }
+
+    // Element-wise divide (to change from fast use _mm_div_ps(a,b)
+    inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); }
+
+    // Element-wise add
+    inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
+
+    // Element-wise subtract
+    inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
+
+    // Horizontal sum of all elements (for dot product, length, etc.)
+    inline f32x4 v_hadd3(f32x4 a)
+    {
+        __m128 t1 = _mm_hadd_ps(a, a); // sums pairs: [a0+a1, a2+a3, ...]
+        __m128 t2 = _mm_hadd_ps(t1, t1); // sums again: first element = a0+a1+a2+a3
+        return t2;
+    }
+
+    float float3_dot_avx(const float* a, const float* b)
+    {
+        f32x4 va = v_load3(a);
+        f32x4 vb = v_load3(b);
+        __m128 dp = _mm_dp_ps(va, vb, 0x71); // multiply 3 (0x71), sum, lowest lane
+        return _mm_cvtss_f32(dp);
+    }
+
+    inline f32x4 v_cross(f32x4 a, f32x4 b)
+    {
+        f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
+        f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
+
+        f32x4 c = _mm_sub_ps(
+            _mm_mul_ps(a, b_yzx),
+            _mm_mul_ps(a_yzx, b)
+        );
+
+        return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
+    }
+}
+
+#include "float3_impl.inl"
+
+namespace math_backend::float3::dispatch
+{
+    // Install AVX backend
+    void install_avx()
+    {
+        gFloat3.add = float3_add_impl;
+        gFloat3.sub = float3_sub_impl;
+        gFloat3.mul = float3_mul_impl;
+        gFloat3.mul_scalar = float3_mul_scalar_impl;
+        gFloat3.div = float3_div_impl;
+        gFloat3.div_scalar = float3_div_scalar_impl;
+        gFloat3.dot = float3_dot_avx;
+        gFloat3.length =
float3_length_impl; + gFloat3.lengthSquared = float3_length_squared_impl; + gFloat3.normalize = float3_normalize_impl; + gFloat3.normalize_mag = float3_normalize_mag_impl; + gFloat3.lerp = float3_lerp_impl; + gFloat3.cross = float3_cross_impl; + } +} diff --git a/Engine/source/math/isa/avx/float4.cpp b/Engine/source/math/isa/avx/float4.cpp index 08f534718..45e4e3062 100644 --- a/Engine/source/math/isa/avx/float4.cpp +++ b/Engine/source/math/isa/avx/float4.cpp @@ -18,8 +18,16 @@ namespace // Element-wise multiply inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); } + // Element-wise divide fast (1/b) + inline f32x4 v_div_fast(f32x4 a, f32x4 b) + { + f32x4 rcp = _mm_rcp_ps(b); + // Optional refinement here + return _mm_mul_ps(a, rcp); + } + // Element-wise divide - inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); } + inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); } // Element-wise add inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); } @@ -43,6 +51,19 @@ namespace __m128 dp = _mm_dp_ps(va, vb, 0xF1); // multiply all 4, sum all 4, lowest lane return _mm_cvtss_f32(dp); } + + inline f32x4 v_cross(f32x4 a, f32x4 b) + { + f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); + f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); + + f32x4 c = _mm_sub_ps( + _mm_mul_ps(a, b_yzx), + _mm_mul_ps(a_yzx, b) + ); + + return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1)); + } } #include "float4_impl.inl" @@ -52,17 +73,18 @@ namespace math_backend::float4::dispatch // Install AVX backend void install_avx() { - gFloat4.add = float4_add_impl; - gFloat4.sub = float4_sub_impl; - gFloat4.mul = float4_mul_impl; - gFloat4.mul_scalar = float4_mul_scalar_impl; - gFloat4.div = float4_div_impl; - gFloat4.div_scalar = float4_div_scalar_impl; - gFloat4.dot = float4_dot_avx; - gFloat4.length = float4_length_impl; + gFloat4.add = float4_add_impl; + gFloat4.sub = float4_sub_impl; + gFloat4.mul = float4_mul_impl; + 
gFloat4.mul_scalar = float4_mul_scalar_impl; + gFloat4.div = float4_div_impl; + gFloat4.div_scalar = float4_div_scalar_impl; + gFloat4.dot = float4_dot_avx; + gFloat4.length = float4_length_impl; gFloat4.lengthSquared = float4_length_squared_impl; - gFloat4.normalize = float4_normalize_impl; + gFloat4.normalize = float4_normalize_impl; gFloat4.normalize_mag = float4_normalize_mag_impl; - gFloat4.lerp = float4_lerp_impl; + gFloat4.lerp = float4_lerp_impl; + gFloat4.cross = float4_cross_impl; } } diff --git a/Engine/source/math/isa/avx2/float3.cpp b/Engine/source/math/isa/avx2/float3.cpp index 46b64a7dd..d7b1c5709 100644 --- a/Engine/source/math/isa/avx2/float3.cpp +++ b/Engine/source/math/isa/avx2/float3.cpp @@ -27,9 +27,6 @@ namespace // Element-wise multiply inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); } - // Element-wise divide precise - inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); } - // Element-wise divide fast (1/b) inline f32x4 v_div_fast(f32x4 a, f32x4 b) { @@ -38,6 +35,9 @@ namespace return _mm_mul_ps(a, rcp); } + // Element-wise divide (to change from fast use _mm_div_ps(a,b) + inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); } + // Element-wise add inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); } @@ -54,11 +54,24 @@ namespace float float3_dot_avx(const float* a, const float* b) { - f32x4 va = _mm_loadu_ps(a); - f32x4 vb = _mm_loadu_ps(b); - __m128 dp = _mm_dp_ps(va, vb, 0x71); // multiply 3 (0x71), sum all 4, lowest lane + f32x4 va = v_load3(a); + f32x4 vb = v_load3(b); + __m128 dp = _mm_dp_ps(va, vb, 0x71); // multiply 3 (0x71), sum 3, lowest lane return _mm_cvtss_f32(dp); } + + inline f32x4 v_cross(f32x4 a, f32x4 b) + { + f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); + f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); + + f32x4 c = _mm_sub_ps( + _mm_mul_ps(a, b_yzx), + _mm_mul_ps(a_yzx, b) + ); + + return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1)); + } } 
#include "float3_impl.inl" @@ -80,5 +93,6 @@ namespace math_backend::float3::dispatch gFloat3.normalize = float3_normalize_impl; gFloat3.normalize_mag = float3_normalize_mag_impl; gFloat3.lerp = float3_lerp_impl; + gFloat3.cross = float3_cross_impl; } } diff --git a/Engine/source/math/isa/avx2/float4.cpp b/Engine/source/math/isa/avx2/float4.cpp index a1e1836b1..e0897f40f 100644 --- a/Engine/source/math/isa/avx2/float4.cpp +++ b/Engine/source/math/isa/avx2/float4.cpp @@ -18,8 +18,16 @@ namespace // Element-wise multiply inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); } - // Element-wise divide - inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); } + // Element-wise divide fast (1/b) + inline f32x4 v_div_fast(f32x4 a, f32x4 b) + { + f32x4 rcp = _mm_rcp_ps(b); + // Optional refinement here + return _mm_mul_ps(a, rcp); + } + + // Element-wise divide (to change from fast use _mm_div_ps(a,b) + inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); } // Element-wise add inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); } @@ -43,6 +51,19 @@ namespace __m128 dp = _mm_dp_ps(va, vb, 0xF1); // multiply all 4, sum all 4, lowest lane return _mm_cvtss_f32(dp); } + + inline f32x4 v_cross(f32x4 a, f32x4 b) + { + f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); + f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); + + f32x4 c = _mm_sub_ps( + _mm_mul_ps(a, b_yzx), + _mm_mul_ps(a_yzx, b) + ); + + return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1)); + } } #include "float4_impl.inl" @@ -64,5 +85,6 @@ namespace math_backend::float4::dispatch gFloat4.normalize = float4_normalize_impl; gFloat4.normalize_mag = float4_normalize_mag_impl; gFloat4.lerp = float4_lerp_impl; + gFloat4.cross = float4_cross_impl; } } diff --git a/Engine/source/math/isa/sse2/float3.cpp b/Engine/source/math/isa/sse2/float3.cpp new file mode 100644 index 000000000..e3c471378 --- /dev/null +++ b/Engine/source/math/isa/sse2/float3.cpp @@ -0,0 
+1,92 @@ +#include "float3_dispatch.h" +#include // SSE2 intrinsics + +namespace +{ + typedef __m128 f32x4; + + // Load 3 floats into 4-wide SIMD, zero the 4th lane + inline f32x4 v_load3(const float* p) { return _mm_set_ps(0.0f, p[2], p[1], p[0]); } + + // Store 3 floats from SIMD register back to memory + inline void v_store3(float* dst, f32x4 v) + { + alignas(16) float tmp[4]; // temp storage + _mm_store_ps(tmp, v); // store all 4 lanes + dst[0] = tmp[0]; + dst[1] = tmp[1]; + dst[2] = tmp[2]; + } + + // extract just the first lane. + inline float v_extract0(f32x4 v) { return _mm_cvtss_f32(v); } + + // Broadcast a single float across all 4 lanes + inline f32x4 v_set1(float s) { return _mm_set1_ps(s); } + + // Element-wise multiply + inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); } + + // Element-wise divide fast (1/b) + inline f32x4 v_div_fast(f32x4 a, f32x4 b) + { + f32x4 rcp = _mm_rcp_ps(b); + // Optional refinement here + return _mm_mul_ps(a, rcp); + } + + // Element-wise divide (to change from fast use _mm_div_ps(a,b) + inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); } + + // Element-wise add + inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); } + + // Element-wise subtract + inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); } + + // Horizontal sum of all elements (for dot product, length, etc.) 
+ inline f32x4 v_hadd3(f32x4 a) + { + __m128 shuf = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 0, 1)); // swap pairs + __m128 sums = _mm_add_ps(a, shuf); // sums: [a0+a1 a1+a0 a2+a3 a3+a2] + shuf = _mm_shuffle_ps(sums, sums, _MM_SHUFFLE(1, 0, 3, 2)); // move high pair to low + sums = _mm_add_ps(sums, shuf); // total sum in lower float + return sums; + } + + inline f32x4 v_cross(f32x4 a, f32x4 b) + { + f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); + f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); + + f32x4 c = _mm_sub_ps( + _mm_mul_ps(a, b_yzx), + _mm_mul_ps(a_yzx, b) + ); + + return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1)); + } +} + +#include "float3_impl.inl" + +namespace math_backend::float3::dispatch +{ + // Install SSE2 backend + void install_sse2() + { + gFloat3.add = float3_add_impl; + gFloat3.sub = float3_sub_impl; + gFloat3.mul = float3_mul_impl; + gFloat3.mul_scalar = float3_mul_scalar_impl; + gFloat3.div = float3_div_impl; + gFloat3.div_scalar = float3_div_scalar_impl; + gFloat3.dot = float3_dot_impl; + gFloat3.length = float3_length_impl; + gFloat3.lengthSquared = float3_length_squared_impl; + gFloat3.normalize = float3_normalize_impl; + gFloat3.normalize_mag = float3_normalize_mag_impl; + gFloat3.lerp = float3_lerp_impl; + gFloat3.cross = float3_cross_impl; + } +} diff --git a/Engine/source/math/isa/sse2/float4.cpp b/Engine/source/math/isa/sse2/float4.cpp index 22a4f76bd..00136611f 100644 --- a/Engine/source/math/isa/sse2/float4.cpp +++ b/Engine/source/math/isa/sse2/float4.cpp @@ -34,9 +34,22 @@ namespace sums = _mm_add_ps(sums, shuf); // total sum in lower float return _mm_cvtss_f32(sums); } + + inline f32x4 v_cross(f32x4 a, f32x4 b) + { + f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1)); + f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1)); + + f32x4 c = _mm_sub_ps( + _mm_mul_ps(a, b_yzx), + _mm_mul_ps(a_yzx, b) + ); + + return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1)); + } } -#include 
"../../impl/float4_impl.inl" +#include "float4_impl.inl" namespace math_backend::float4::dispatch { @@ -55,5 +68,6 @@ namespace math_backend::float4::dispatch gFloat4.normalize = float4_normalize_impl; gFloat4.normalize_mag = float4_normalize_mag_impl; gFloat4.lerp = float4_lerp_impl; + gFloat4.cross = float4_cross_impl; } } diff --git a/Engine/source/math/isa/sse41/float3.cpp b/Engine/source/math/isa/sse41/float3.cpp new file mode 100644 index 000000000..52f675ab9 --- /dev/null +++ b/Engine/source/math/isa/sse41/float3.cpp @@ -0,0 +1,92 @@ + +#include "float3_dispatch.h" +#include // SSE41 intrinsics + +namespace +{ + typedef __m128 f32x4; + + // Load 3 floats into 4-wide SIMD, zero the 4th lane + inline f32x4 v_load3(const float* p) { return _mm_set_ps(0.0f, p[2], p[1], p[0]); } + + // Store 3 floats from SIMD register back to memory + inline void v_store3(float* dst, f32x4 v) + { + alignas(16) float tmp[4]; // temp storage + _mm_store_ps(tmp, v); // store all 4 lanes + dst[0] = tmp[0]; + dst[1] = tmp[1]; + dst[2] = tmp[2]; + } + + // extract just the first lane. + inline float v_extract0(f32x4 v) { return _mm_cvtss_f32(v); } + + // Broadcast a single float across all 4 lanes + inline f32x4 v_set1(float s) { return _mm_set1_ps(s); } + + // Element-wise multiply + inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); } + + // Element-wise divide + inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); } + + // Element-wise add + inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); } + + // Element-wise subtract + inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); } + + // Horizontal sum of all 4 elements (for dot product, length, etc.) + inline f32x4 v_hadd3(f32x4 a) + { + __m128 t1 = _mm_hadd_ps(a, a); // sums pairs: [a0+a1, a2+a3, ...] 
+        __m128 t2 = _mm_hadd_ps(t1, t1); // sums again: lane 0 = a0+a1+a2+a3
+        return t2;                       // BUGFIX: was 'return t1', which holds only pair sums -> wrong dot/length fallback
+    }
+
+    // specialized dot product for SSE4.1
+    float float3_dot_sse41(const float* a, const float* b)
+    {
+        f32x4 va = v_load3(a); // BUGFIX: _mm_loadu_ps read 4 floats from a 3-float array (out-of-bounds); v_load3 zeroes lane 3
+        f32x4 vb = v_load3(b);
+        __m128 dp = _mm_dp_ps(va, vb, 0x71); // multiply 3 (0x71), sum, lowest lane
+        return _mm_cvtss_f32(dp);
+    }
+
+    inline f32x4 v_cross(f32x4 a, f32x4 b)
+    {
+        f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
+        f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
+
+        f32x4 c = _mm_sub_ps(
+            _mm_mul_ps(a, b_yzx),
+            _mm_mul_ps(a_yzx, b)
+        );
+
+        return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
+    }
+}
+
+#include "float3_impl.inl"
+
+namespace math_backend::float3::dispatch
+{
+    // Install SSE41 backend
+    void install_sse41()
+    {
+        gFloat3.add = float3_add_impl;
+        gFloat3.sub = float3_sub_impl;
+        gFloat3.mul = float3_mul_impl;
+        gFloat3.mul_scalar = float3_mul_scalar_impl;
+        gFloat3.div = float3_div_impl;
+        gFloat3.div_scalar = float3_div_scalar_impl;
+        gFloat3.dot = float3_dot_sse41;
+        gFloat3.length = float3_length_impl;
+        gFloat3.lengthSquared = float3_length_squared_impl;
+        gFloat3.normalize = float3_normalize_impl;
+        gFloat3.normalize_mag = float3_normalize_mag_impl;
+        gFloat3.lerp = float3_lerp_impl;
+        gFloat3.cross = float3_cross_impl;
+    }
+}
diff --git a/Engine/source/math/isa/sse41/float4.cpp b/Engine/source/math/isa/sse41/float4.cpp
index bf4c648ad..1f335f859 100644
--- a/Engine/source/math/isa/sse41/float4.cpp
+++ b/Engine/source/math/isa/sse41/float4.cpp
@@ -43,6 +43,19 @@ namespace
         __m128 dp = _mm_dp_ps(va, vb, 0xF1); // multiply all 4, sum all 4, lowest lane
         return _mm_cvtss_f32(dp);
     }
+
+    inline f32x4 v_cross(f32x4 a, f32x4 b)
+    {
+        f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
+        f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
+
+        f32x4 c = _mm_sub_ps(
+            _mm_mul_ps(a, b_yzx),
+            _mm_mul_ps(a_yzx, b)
+        );
+
+        return
_mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1)); + } } #include "float4_impl.inl" @@ -64,5 +77,6 @@ namespace math_backend::float4::dispatch gFloat4.normalize = float4_normalize_impl; gFloat4.normalize_mag = float4_normalize_mag_impl; gFloat4.lerp = float4_lerp_impl; + gFloat4.cross = float4_cross_impl; } } diff --git a/Engine/source/math/mPoint3.h b/Engine/source/math/mPoint3.h index 6c3b19899..8e329f3a1 100644 --- a/Engine/source/math/mPoint3.h +++ b/Engine/source/math/mPoint3.h @@ -1006,9 +1006,7 @@ inline F64 mDot(const Point3D &p1, const Point3D &p2) inline void mCross(const Point3F &a, const Point3F &b, Point3F *res) { - res->x = (a.y * b.z) - (a.z * b.y); - res->y = (a.z * b.x) - (a.x * b.z); - res->z = (a.x * b.y) - (a.y * b.x); + gFloat3.cross(a, b, *res); } inline void mCross(const Point3D &a, const Point3D &b, Point3D *res) @@ -1021,7 +1019,7 @@ inline void mCross(const Point3D &a, const Point3D &b, Point3D *res) inline Point3F mCross(const Point3F &a, const Point3F &b) { Point3F r; - mCross( a, b, &r ); + gFloat3.cross(a, b, r); return r; } diff --git a/Engine/source/math/public/float3_dispatch.h b/Engine/source/math/public/float3_dispatch.h index 9735c47d6..e4279cb84 100644 --- a/Engine/source/math/public/float3_dispatch.h +++ b/Engine/source/math/public/float3_dispatch.h @@ -21,6 +21,7 @@ namespace math_backend::float3::dispatch void (*normalize)(float*) = nullptr; void (*normalize_mag)(float*, float) = nullptr; void (*lerp)(const float*, const float*, float, float*) = nullptr; + void (*cross)(const float*, const float*, float*) = nullptr; }; // Global dispatch table diff --git a/Engine/source/math/public/float4_dispatch.h b/Engine/source/math/public/float4_dispatch.h index f5214447f..6f26114ce 100644 --- a/Engine/source/math/public/float4_dispatch.h +++ b/Engine/source/math/public/float4_dispatch.h @@ -21,6 +21,7 @@ namespace math_backend::float4::dispatch void (*normalize)(float*) = nullptr; void (*normalize_mag)(float*, float) = nullptr; void 
(*lerp)(const float*, const float*, float, float*) = nullptr;
+      void (*cross)(const float*, const float*, float*) = nullptr;
    };
 
    // Global dispatch table
diff --git a/Engine/source/math/public/math_backend.cpp b/Engine/source/math/public/math_backend.cpp
index 365bdb870..4944319a8 100644
--- a/Engine/source/math/public/math_backend.cpp
+++ b/Engine/source/math/public/math_backend.cpp
@@ -32,15 +32,18 @@ void math_backend::install_from_cpu_flags(uint32_t cpu_flags)
          break;
 
       case backend::avx:
-         //float4::dispatch::install_avx();
+         float4::dispatch::install_avx();
+         float3::dispatch::install_avx();
          break;
 
       case backend::sse41:
          float4::dispatch::install_sse41();
+         float3::dispatch::install_sse41();
          break;
 
       case backend::sse2:
          float4::dispatch::install_sse2();
+         float3::dispatch::install_sse2();
          break;
 #elif defined(__aarch64__) || defined(__ARM_NEON)
       case backend::neon:
@@ -49,6 +52,7 @@ void math_backend::install_from_cpu_flags(uint32_t cpu_flags)
 #endif
       default:
          float4::dispatch::install_scalar();
+         float3::dispatch::install_scalar();
          break;
    }
 }