All float3 and float4 functions and ISAs

Completed all operations for the float3 and float4 functions in the ISA backends and in math_c.
NEON is still to be done, but that will happen on Mac.
marauder2k7 2026-02-27 11:28:51 +00:00
parent 18d0aa0418
commit f0a3251cd3
16 changed files with 593 additions and 90 deletions
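
For context, a minimal sketch of how the new dispatch tables are meant to be consumed (a hypothetical call site, not part of this commit; the header path is taken from the includes below):

    #include "math/public/float4_dispatch.h"

    void example()
    {
        // Backend selection happens once at startup via
        // math_backend::install_from_cpu_flags(); after that, every call
        // goes through the function-pointer table.
        float a[4] = {1.f, 2.f, 3.f, 0.f};
        float b[4] = {4.f, 5.f, 6.f, 0.f};
        float r[4];
        math_backend::float4::dispatch::gFloat4.cross(a, b, r);
        // r == {-3.f, 6.f, -3.f, 0.f}
    }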

View file

@@ -47,7 +47,7 @@ namespace math_backend::float3
{
f32x4 va = v_load3(a);
f32x4 vb = v_load3(b);
f32x4 vr = v_div_fast(va, vb);
f32x4 vr = v_div(va, vb);
v_store3(r, vr);
}
@@ -56,7 +56,7 @@ namespace math_backend::float3
{
f32x4 va = v_load3(a);
f32x4 vs = v_set1(s);
f32x4 vr = v_div_fast(va, vs);
f32x4 vr = v_div(va, vs);
v_store3(r, vr);
}
@@ -112,4 +112,12 @@ namespace math_backend::float3
v_store3(r, vr);
}
inline void float3_cross_impl(const float* a, const float* b, float* r)
{
f32x4 va = v_load3(a);
f32x4 vb = v_load3(b);
f32x4 vcross = v_cross(va, vb);
v_store3(r, vcross);
}
}

View file

@@ -1,61 +0,0 @@
#include "math/public/float4_dispatch.h"
#include "math/mConstants.h"
#include <cmath> // for sqrtf, etc.
namespace math_backend::float4::dispatch
{
void install_scalar()
{
gFloat4.add = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 4; i++) r[i] = a[i] + b[i];
};
gFloat4.sub = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 4; i++) r[i] = a[i] - b[i];
};
gFloat4.mul = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 4; i++) r[i] = a[i] * b[i];
};
gFloat4.mul_scalar = [](const float* a, float s, float* r) {
for (int i = 0; i < 4; i++) r[i] = a[i] * s;
};
gFloat4.div = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 4; i++) r[i] = a[i] / b[i];
};
gFloat4.div_scalar = [](const float* a, float s, float* r) {
float denom = 1.0f / s;
for (int i = 0; i < 4; i++) r[i] = a[i] * denom;
};
gFloat4.dot = [](const float* a, const float* b) {
float sum = 0.f;
for (int i = 0; i < 4; i++) sum += a[i] * b[i];
return sum;
};
gFloat4.length = [](const float* a) {
float sum = 0.f;
for (int i = 0; i < 4; i++) sum += a[i] * a[i];
return std::sqrt(sum);
};
gFloat4.lengthSquared = [](const float* a) {
float sum = 0.f;
for (int i = 0; i < 4; i++) sum += a[i] * a[i];
return (sum);
};
gFloat4.normalize = [](float* a) {
float len = gFloat4.length(a);
if (len > POINT_EPSILON) for (int i = 0; i < 4; i++) a[i] /= len;
};
gFloat4.lerp = [](const float* from, const float* to, float f, float* r) {
for (int i = 0; i < 4; i++) r[i] = from[i] + (to[i] - from[i]) * f;
};
}
}

View file

@@ -111,4 +111,12 @@ namespace math_backend::float4
v_store(r, vr);
}
inline void float4_cross_impl(const float* a, const float* b, float* r)
{
f32x4 va = v_load(a);
f32x4 vb = v_load(b);
f32x4 vcross = v_cross(va, vb);
v_store(r, vcross);
}
} // namespace math_backend::float4
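
Worth noting about float4_cross_impl: lane 3 of the intermediate is a[3]*b[3] - a[3]*b[3] = 0, and the final shuffle keeps that lane in place, so the w component of the result is always zeroed regardless of the input w values. A quick illustrative check (hypothetical test code, not part of the commit):

    float a[4] = {1.f, 2.f, 3.f, 9.f};
    float b[4] = {4.f, 5.f, 6.f, 7.f};
    float r[4];
    float4_cross_impl(a, b, r);
    // expected: r == {-3.f, 6.f, -3.f, 0.f}; r[3] is 0 even though
    // neither input had w == 0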

View file

@@ -0,0 +1,176 @@
#include "math/public/float4_dispatch.h"
#include "math/public/float3_dispatch.h"
#include "math/mConstants.h"
#include <cmath> // for sqrtf, etc.
namespace math_backend::float4::dispatch
{
void install_scalar()
{
gFloat4.add = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 4; i++) r[i] = a[i] + b[i];
};
gFloat4.sub = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 4; i++) r[i] = a[i] - b[i];
};
gFloat4.mul = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 4; i++) r[i] = a[i] * b[i];
};
gFloat4.mul_scalar = [](const float* a, float s, float* r) {
for (int i = 0; i < 4; i++) r[i] = a[i] * s;
};
gFloat4.div = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 4; i++) r[i] = a[i] / b[i];
};
gFloat4.div_scalar = [](const float* a, float s, float* r) {
float denom = 1.0f / s;
for (int i = 0; i < 4; i++) r[i] = a[i] * denom;
};
gFloat4.dot = [](const float* a, const float* b) {
float sum = 0.f;
for (int i = 0; i < 4; i++) sum += a[i] * b[i];
return sum;
};
gFloat4.length = [](const float* a) {
float sum = 0.f;
for (int i = 0; i < 4; i++) sum += a[i] * a[i];
return std::sqrt(sum);
};
gFloat4.lengthSquared = [](const float* a) {
float sum = 0.f;
for (int i = 0; i < 4; i++) sum += a[i] * a[i];
return (sum);
};
gFloat4.normalize = [](float* a) {
float len = gFloat4.length(a);
if (len > POINT_EPSILON)
{
float denom = 1.0f / len;
for (int i = 0; i < 4; i++)
a[i] *= denom;
}
};
gFloat4.normalize_mag = [](float* a, float f) {
float len = gFloat4.length(a);
if (len > POINT_EPSILON)
{
float denom = f / len;
for (int i = 0; i < 4; i++) a[i] *= denom;
}
};
gFloat4.lerp = [](const float* from, const float* to, float f, float* r) {
for (int i = 0; i < 4; i++) r[i] = from[i] + (to[i] - from[i]) * f;
};
gFloat4.cross = [](const float* a, const float* b, float* r) {
const float ax = a[0];
const float ay = a[1];
const float az = a[2];
const float bx = b[0];
const float by = b[1];
const float bz = b[2];
r[0] = ay * bz - az * by;
r[1] = az * bx - ax * bz;
r[2] = ax * by - ay * bx;
};
}
}
namespace math_backend::float3::dispatch
{
void install_scalar()
{
gFloat3.add = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 3; i++) r[i] = a[i] + b[i];
};
gFloat3.sub = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 3; i++) r[i] = a[i] - b[i];
};
gFloat3.mul = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 3; i++) r[i] = a[i] * b[i];
};
gFloat3.mul_scalar = [](const float* a, float s, float* r) {
for (int i = 0; i < 3; i++) r[i] = a[i] * s;
};
gFloat3.div = [](const float* a, const float* b, float* r) {
for (int i = 0; i < 3; i++) r[i] = a[i] / b[i];
};
gFloat3.div_scalar = [](const float* a, float s, float* r) {
float denom = 1.0f / s;
for (int i = 0; i < 3; i++) r[i] = a[i] * denom;
};
gFloat3.dot = [](const float* a, const float* b) {
float sum = 0.f;
for (int i = 0; i < 3; i++) sum += a[i] * b[i];
return sum;
};
gFloat3.length = [](const float* a) {
float sum = 0.f;
for (int i = 0; i < 3; i++) sum += a[i] * a[i];
return std::sqrt(sum);
};
gFloat3.lengthSquared = [](const float* a) {
float sum = 0.f;
for (int i = 0; i < 3; i++) sum += a[i] * a[i];
return (sum);
};
gFloat3.normalize = [](float* a) {
float len = gFloat3.length(a);
if (len > POINT_EPSILON)
{
float denom = 1.0f / len;
for (int i = 0; i < 3; i++) a[i] *= denom;
}
};
gFloat3.normalize_mag = [](float* a, float f) {
float len = gFloat3.length(a);
if (len > POINT_EPSILON)
{
float denom = f / len;
for (int i = 0; i < 3; i++) a[i] *= denom;
}
};
gFloat3.lerp = [](const float* from, const float* to, float f, float* r) {
for (int i = 0; i < 3; i++) r[i] = from[i] + (to[i] - from[i]) * f;
};
gFloat3.cross = [](const float* a, const float* b, float* r) {
const float ax = a[0];
const float ay = a[1];
const float az = a[2];
const float bx = b[0];
const float by = b[1];
const float bz = b[2];
r[0] = ay * bz - az * by;
r[1] = az * bx - ax * bz;
r[2] = ax * by - ay * bx;
};
}
}
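
A small usage sketch of the scalar path (illustrative values; assumes install_scalar() has already run):

    using math_backend::float3::dispatch::gFloat3;

    float v[3] = {3.f, 0.f, 4.f};
    gFloat3.normalize(v);        // v ~= {0.6f, 0.f, 0.8f}

    float zero[3] = {0.f, 0.f, 0.f};
    gFloat3.normalize(zero);     // length <= POINT_EPSILON: left unchanged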

View file

@@ -0,0 +1,98 @@
#include "float3_dispatch.h"
#include <immintrin.h> // AVX/AVX2 intrinsics
namespace
{
typedef __m128 f32x4;
// Load 3 floats into 4-wide SIMD, zero the 4th lane
inline f32x4 v_load3(const float* p) { return _mm_set_ps(0.0f, p[2], p[1], p[0]); }
// Store 3 floats from SIMD register back to memory
inline void v_store3(float* dst, f32x4 v)
{
alignas(16) float tmp[4]; // temp storage
_mm_store_ps(tmp, v); // store all 4 lanes
dst[0] = tmp[0];
dst[1] = tmp[1];
dst[2] = tmp[2];
}
// extract just the first lane.
inline float v_extract0(f32x4 v) { return _mm_cvtss_f32(v); }
// Broadcast a single float across all 4 lanes
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide fast (1/b)
inline f32x4 v_div_fast(f32x4 a, f32x4 b)
{
f32x4 rcp = _mm_rcp_ps(b);
// Optional refinement here
return _mm_mul_ps(a, rcp);
}
// Element-wise divide (to switch back to the precise version, use _mm_div_ps(a, b))
inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
// Horizontal sum of all elements (for dot product, length, etc.)
inline f32x4 v_hadd3(f32x4 a)
{
__m128 t1 = _mm_hadd_ps(a, a); // sums pairs: [a0+a1, a2+a3, ...]
__m128 t2 = _mm_hadd_ps(t1, t1); // sums again: first element = a0+a1+a2+a3
return t2;
}
float float3_dot_avx(const float* a, const float* b)
{
f32x4 va = v_load3(a);
f32x4 vb = v_load3(b);
__m128 dp = _mm_dp_ps(va, vb, 0x71); // multiply lanes 0-2 (0x71), sum into lowest lane
return _mm_cvtss_f32(dp);
}
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(
_mm_mul_ps(a, b_yzx),
_mm_mul_ps(a_yzx, b)
);
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
}
#include "float3_impl.inl"
namespace math_backend::float3::dispatch
{
// Install AVX backend
void install_avx()
{
gFloat3.add = float3_add_impl;
gFloat3.sub = float3_sub_impl;
gFloat3.mul = float3_mul_impl;
gFloat3.mul_scalar = float3_mul_scalar_impl;
gFloat3.div = float3_div_impl;
gFloat3.div_scalar = float3_div_scalar_impl;
gFloat3.dot = float3_dot_avx;
gFloat3.length = float3_length_impl;
gFloat3.lengthSquared = float3_length_squared_impl;
gFloat3.normalize = float3_normalize_impl;
gFloat3.normalize_mag = float3_normalize_mag_impl;
gFloat3.lerp = float3_lerp_impl;
gFloat3.cross = float3_cross_impl;
}
}
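
On the "Optional refinement here" comments: _mm_rcp_ps is only accurate to roughly 12 bits, so v_div_fast trades precision for speed. If that ever matters, one Newton-Raphson step (x' = x * (2 - b*x)) roughly doubles the accuracy. A sketch of what the refinement could look like (not in this commit):

    inline f32x4 v_div_fast_refined(f32x4 a, f32x4 b)
    {
        f32x4 rcp = _mm_rcp_ps(b);                 // ~12-bit estimate of 1/b
        f32x4 two = _mm_set1_ps(2.0f);
        // One Newton-Raphson iteration: rcp = rcp * (2 - b * rcp)
        rcp = _mm_mul_ps(rcp, _mm_sub_ps(two, _mm_mul_ps(b, rcp)));
        return _mm_mul_ps(a, rcp);                 // ~22-23 bits of precision
    }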

View file

@@ -18,8 +18,16 @@ namespace
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide fast (1/b)
inline f32x4 v_div_fast(f32x4 a, f32x4 b)
{
f32x4 rcp = _mm_rcp_ps(b);
// Optional refinement here
return _mm_mul_ps(a, rcp);
}
// Element-wise divide
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
@@ -43,6 +51,19 @@ namespace
__m128 dp = _mm_dp_ps(va, vb, 0xF1); // multiply all 4, sum all 4, lowest lane
return _mm_cvtss_f32(dp);
}
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(
_mm_mul_ps(a, b_yzx),
_mm_mul_ps(a_yzx, b)
);
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
}
#include "float4_impl.inl"
@@ -52,17 +73,18 @@ namespace math_backend::float4::dispatch
// Install AVX backend
void install_avx()
{
gFloat4.add = float4_add_impl;
gFloat4.sub = float4_sub_impl;
gFloat4.mul = float4_mul_impl;
gFloat4.mul_scalar = float4_mul_scalar_impl;
gFloat4.div = float4_div_impl;
gFloat4.div_scalar = float4_div_scalar_impl;
gFloat4.dot = float4_dot_avx;
gFloat4.length = float4_length_impl;
gFloat4.add = float4_add_impl;
gFloat4.sub = float4_sub_impl;
gFloat4.mul = float4_mul_impl;
gFloat4.mul_scalar = float4_mul_scalar_impl;
gFloat4.div = float4_div_impl;
gFloat4.div_scalar = float4_div_scalar_impl;
gFloat4.dot = float4_dot_avx;
gFloat4.length = float4_length_impl;
gFloat4.lengthSquared = float4_length_squared_impl;
gFloat4.normalize = float4_normalize_impl;
gFloat4.normalize = float4_normalize_impl;
gFloat4.normalize_mag = float4_normalize_mag_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.cross = float4_cross_impl;
}
}
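
For reference, why the shared v_cross gets away with three shuffles instead of the naive four:

    // Target:    cross(a,b) = (ay*bz - az*by, az*bx - ax*bz, ax*by - ay*bx)
    // Computed:  c = a * b_yzx - a_yzx * b
    //              = (ax*by - ay*bx, ay*bz - az*by, az*bx - ax*bz, 0)
    // c holds cross(a,b) rotated down one lane, so a single final yzx
    // shuffle of c restores the order. The naive form
    //   a_yzx * b_zxy - a_zxy * b_yzx
    // would need four shuffles.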

View file

@@ -27,9 +27,6 @@ namespace
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide precise
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise divide fast (1/b)
inline f32x4 v_div_fast(f32x4 a, f32x4 b)
{
@@ -38,6 +35,9 @@ namespace
return _mm_mul_ps(a, rcp);
}
// Element-wise divide (to switch back to the precise version, use _mm_div_ps(a, b))
inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
@@ -54,11 +54,24 @@ namespace
float float3_dot_avx(const float* a, const float* b)
{
f32x4 va = _mm_loadu_ps(a);
f32x4 vb = _mm_loadu_ps(b);
__m128 dp = _mm_dp_ps(va, vb, 0x71); // multiply 3 (0x71), sum all 4, lowest lane
f32x4 va = v_load3(a);
f32x4 vb = v_load3(b);
__m128 dp = _mm_dp_ps(va, vb, 0x71); // multiply 3 (0x71), sum 3, lowest lane
return _mm_cvtss_f32(dp);
}
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(
_mm_mul_ps(a, b_yzx),
_mm_mul_ps(a_yzx, b)
);
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
}
#include "float3_impl.inl"
@@ -80,5 +93,6 @@ namespace math_backend::float3::dispatch
gFloat3.normalize = float3_normalize_impl;
gFloat3.normalize_mag = float3_normalize_mag_impl;
gFloat3.lerp = float3_lerp_impl;
gFloat3.cross = float3_cross_impl;
}
}
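
The _mm_dp_ps immediates used in these dot products encode two masks; a short reference (restating the documented semantics, not new commit code):

    // imm8 high nibble: which lanes get multiplied and summed
    // imm8 low nibble:  which result lanes receive the sum
    __m128 dp3 = _mm_dp_ps(va, vb, 0x71); // 0111|0001: lanes 0-2 in, lane 0 out
    __m128 dp4 = _mm_dp_ps(va, vb, 0xF1); // 1111|0001: lanes 0-3 in, lane 0 out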

View file

@@ -18,8 +18,16 @@ namespace
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise divide fast (1/b)
inline f32x4 v_div_fast(f32x4 a, f32x4 b)
{
f32x4 rcp = _mm_rcp_ps(b);
// Optional refinement here
return _mm_mul_ps(a, rcp);
}
// Element-wise divide (to switch back to the precise version, use _mm_div_ps(a, b))
inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
@@ -43,6 +51,19 @@ namespace
__m128 dp = _mm_dp_ps(va, vb, 0xF1); // multiply all 4, sum all 4, lowest lane
return _mm_cvtss_f32(dp);
}
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(
_mm_mul_ps(a, b_yzx),
_mm_mul_ps(a_yzx, b)
);
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
}
#include "float4_impl.inl"
@@ -64,5 +85,6 @@ namespace math_backend::float4::dispatch
gFloat4.normalize = float4_normalize_impl;
gFloat4.normalize_mag = float4_normalize_mag_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.cross = float4_cross_impl;
}
}

View file

@@ -0,0 +1,92 @@
#include "float3_dispatch.h"
#include <emmintrin.h> // SSE2 intrinsics
namespace
{
typedef __m128 f32x4;
// Load 3 floats into 4-wide SIMD, zero the 4th lane
inline f32x4 v_load3(const float* p) { return _mm_set_ps(0.0f, p[2], p[1], p[0]); }
// Store 3 floats from SIMD register back to memory
inline void v_store3(float* dst, f32x4 v)
{
alignas(16) float tmp[4]; // temp storage
_mm_store_ps(tmp, v); // store all 4 lanes
dst[0] = tmp[0];
dst[1] = tmp[1];
dst[2] = tmp[2];
}
// extract just the first lane.
inline float v_extract0(f32x4 v) { return _mm_cvtss_f32(v); }
// Broadcast a single float across all 4 lanes
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide fast (1/b)
inline f32x4 v_div_fast(f32x4 a, f32x4 b)
{
f32x4 rcp = _mm_rcp_ps(b);
// Optional refinement here
return _mm_mul_ps(a, rcp);
}
// Element-wise divide (to switch back to the precise version, use _mm_div_ps(a, b))
inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
// Horizontal sum of all elements (for dot product, length, etc.)
inline f32x4 v_hadd3(f32x4 a)
{
__m128 shuf = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 0, 1)); // swap pairs
__m128 sums = _mm_add_ps(a, shuf); // sums: [a0+a1 a1+a0 a2+a3 a3+a2]
shuf = _mm_shuffle_ps(sums, sums, _MM_SHUFFLE(1, 0, 3, 2)); // move high pair to low
sums = _mm_add_ps(sums, shuf); // total sum in lower float
return sums;
}
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(
_mm_mul_ps(a, b_yzx),
_mm_mul_ps(a_yzx, b)
);
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
}
#include "float3_impl.inl"
namespace math_backend::float3::dispatch
{
// Install SSE2 backend
void install_sse2()
{
gFloat3.add = float3_add_impl;
gFloat3.sub = float3_sub_impl;
gFloat3.mul = float3_mul_impl;
gFloat3.mul_scalar = float3_mul_scalar_impl;
gFloat3.div = float3_div_impl;
gFloat3.div_scalar = float3_div_scalar_impl;
gFloat3.dot = float3_dot_impl;
gFloat3.length = float3_length_impl;
gFloat3.lengthSquared = float3_length_squared_impl;
gFloat3.normalize = float3_normalize_impl;
gFloat3.normalize_mag = float3_normalize_mag_impl;
gFloat3.lerp = float3_lerp_impl;
gFloat3.cross = float3_cross_impl;
}
}
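
SSE2 has no _mm_hadd_ps, hence the two-shuffle reduction above. Tracing v_hadd3 on a = (x, y, z, 0) as produced by v_load3:

    // a    = (x,     y,     z,     0)
    // shuf = (y,     x,     0,     z)       _MM_SHUFFLE(2, 3, 0, 1)
    // sums = (x+y,   x+y,   z,     z)
    // shuf = (z,     z,     x+y,   x+y)     _MM_SHUFFLE(1, 0, 3, 2)
    // sums = (x+y+z, x+y+z, x+y+z, x+y+z)   total in every lane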

View file

@@ -34,9 +34,22 @@ namespace
sums = _mm_add_ps(sums, shuf); // total sum in lower float
return _mm_cvtss_f32(sums);
}
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(
_mm_mul_ps(a, b_yzx),
_mm_mul_ps(a_yzx, b)
);
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
}
#include "../../impl/float4_impl.inl"
#include "float4_impl.inl"
namespace math_backend::float4::dispatch
{
@@ -55,5 +68,6 @@ namespace math_backend::float4::dispatch
gFloat4.normalize = float4_normalize_impl;
gFloat4.normalize_mag = float4_normalize_mag_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.cross = float4_cross_impl;
}
}

View file

@@ -0,0 +1,92 @@
#include "float3_dispatch.h"
#include <smmintrin.h> // SSE41 intrinsics
namespace
{
typedef __m128 f32x4;
// Load 3 floats into 4-wide SIMD, zero the 4th lane
inline f32x4 v_load3(const float* p) { return _mm_set_ps(0.0f, p[2], p[1], p[0]); }
// Store 3 floats from SIMD register back to memory
inline void v_store3(float* dst, f32x4 v)
{
alignas(16) float tmp[4]; // temp storage
_mm_store_ps(tmp, v); // store all 4 lanes
dst[0] = tmp[0];
dst[1] = tmp[1];
dst[2] = tmp[2];
}
// extract just the first lane.
inline float v_extract0(f32x4 v) { return _mm_cvtss_f32(v); }
// Broadcast a single float across all 4 lanes
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
// Horizontal sum of all 4 elements (for dot product, length, etc.)
inline f32x4 v_hadd3(f32x4 a)
{
__m128 t1 = _mm_hadd_ps(a, a); // sums pairs: [a0+a1, a2+a3, ...]
__m128 t2 = _mm_hadd_ps(t1, t1); // sums again: first element = a0+a1+a2+a3
return t2; // total sum in the first element
}
// specialized dot product for SSE4.1
float float3_dot_sse41(const float* a, const float* b)
{
f32x4 va = v_load3(a); // avoid reading one float past a 3-element array
f32x4 vb = v_load3(b);
__m128 dp = _mm_dp_ps(va, vb, 0x71); // multiply lanes 0-2 (0x71), sum into lowest lane
return _mm_cvtss_f32(dp);
}
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(
_mm_mul_ps(a, b_yzx),
_mm_mul_ps(a_yzx, b)
);
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
}
#include "float3_impl.inl"
namespace math_backend::float3::dispatch
{
// Install SSE41 backend
void install_sse41()
{
gFloat3.add = float3_add_impl;
gFloat3.sub = float3_sub_impl;
gFloat3.mul = float3_mul_impl;
gFloat3.mul_scalar = float3_mul_scalar_impl;
gFloat3.div = float3_div_impl;
gFloat3.div_scalar = float3_div_scalar_impl;
gFloat3.dot = float3_dot_sse41;
gFloat3.length = float3_length_impl;
gFloat3.lengthSquared = float3_length_squared_impl;
gFloat3.normalize = float3_normalize_impl;
gFloat3.normalize_mag = float3_normalize_mag_impl;
gFloat3.lerp = float3_lerp_impl;
gFloat3.cross = float3_cross_impl;
}
}
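
A note on v_load3 versus _mm_loadu_ps for float3 inputs: the unaligned load reads 16 bytes, i.e. one float past the end of a 3-element array, which can fault if the array happens to end a mapped page. The set-based load touches only the three declared floats:

    // risky for a bare float[3] at the end of a page:
    //   f32x4 v = _mm_loadu_ps(p);                       // reads p[0..3]
    // safe, and zeroes lane 3 as a bonus:
    //   f32x4 v = _mm_set_ps(0.0f, p[2], p[1], p[0]);    // reads p[0..2]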

View file

@@ -43,6 +43,19 @@ namespace
__m128 dp = _mm_dp_ps(va, vb, 0xF1); // multiply all 4, sum all 4, lowest lane
return _mm_cvtss_f32(dp);
}
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(
_mm_mul_ps(a, b_yzx),
_mm_mul_ps(a_yzx, b)
);
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
}
#include "float4_impl.inl"
@@ -64,5 +77,6 @@ namespace math_backend::float4::dispatch
gFloat4.normalize = float4_normalize_impl;
gFloat4.normalize_mag = float4_normalize_mag_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.cross = float4_cross_impl;
}
}

View file

@@ -1006,9 +1006,7 @@ inline F64 mDot(const Point3D &p1, const Point3D &p2)
inline void mCross(const Point3F &a, const Point3F &b, Point3F *res)
{
res->x = (a.y * b.z) - (a.z * b.y);
res->y = (a.z * b.x) - (a.x * b.z);
res->z = (a.x * b.y) - (a.y * b.x);
gFloat3.cross(a, b, *res);
}
inline void mCross(const Point3D &a, const Point3D &b, Point3D *res)
@@ -1021,7 +1019,7 @@ inline void mCross(const Point3D &a, const Point3D &b, Point3D *res)
inline Point3F mCross(const Point3F &a, const Point3F &b)
{
Point3F r;
mCross( a, b, &r );
gFloat3.cross(a, b, r);
return r;
}

View file

@@ -21,6 +21,7 @@ namespace math_backend::float3::dispatch
void (*normalize)(float*) = nullptr;
void (*normalize_mag)(float*, float) = nullptr;
void (*lerp)(const float*, const float*, float, float*) = nullptr;
void (*cross)(const float*, const float*, float*) = nullptr;
};
// Global dispatch table

View file

@@ -21,6 +21,7 @@ namespace math_backend::float4::dispatch
void (*normalize)(float*) = nullptr;
void (*normalize_mag)(float*, float) = nullptr;
void (*lerp)(const float*, const float*, float, float*) = nullptr;
void (*cross)(const float*, const float*, float*) = nullptr;
};
// Global dispatch table

View file

@@ -5,10 +5,10 @@ math_backend::backend math_backend::choose_backend(U32 cpu_flags)
{
#if defined(__x86_64__) || defined(_M_X64) || defined(_M_IX86)
if (cpu_flags & CPU_PROP_AVX2) return backend::avx2;
/*if (cpu_flags & CPU_PROP_AVX2) return backend::avx2;
if (cpu_flags & CPU_PROP_AVX) return backend::avx;
if (cpu_flags & CPU_PROP_SSE4_1) return backend::sse41;
if (cpu_flags & CPU_PROP_SSE2) return backend::sse2;
if (cpu_flags & CPU_PROP_SSE2) return backend::sse2;*/
#elif defined(__aarch64__) || defined(__ARM_NEON)
@@ -32,15 +32,18 @@ void math_backend::install_from_cpu_flags(uint32_t cpu_flags)
break;
case backend::avx:
//float4::dispatch::install_avx();
float4::dispatch::install_avx();
float3::dispatch::install_avx();
break;
case backend::sse41:
float4::dispatch::install_sse41();
float3::dispatch::install_sse41();
break;
case backend::sse2:
float4::dispatch::install_sse2();
float3::dispatch::install_sse2();
break;
#elif defined(__aarch64__) || defined(__ARM_NEON)
case backend::neon:
@@ -49,6 +52,7 @@ void math_backend::install_from_cpu_flags(uint32_t cpu_flags)
#endif
default:
float4::dispatch::install_scalar();
float3::dispatch::install_scalar();
break;
}
}
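
Finally, a hedged sketch of the intended startup call (the feature-detection helper named here is an assumption, not part of the commit):

    // Detect CPU features once at engine init, then install a backend.
    U32 flags = Platform::getCPUFlags();   // hypothetical feature query
    math_backend::install_from_cpu_flags(flags);
    // With the AVX/SSE checks currently commented out in choose_backend,
    // anything short of AVX2 falls through to the scalar tables.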