ISA backends for float3 and float4 - cleanup history squash

working for both neon32 and neon64

Update math_backend.cpp

further sse simd additions

avx2 float3 added
added normalize_magnitude
added fast divide to float3; may copy to float4

move static spheremesh to drawSphere (initialize on first use) so the platform has a chance to load the math backend
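A rough sketch of the first-use pattern (SphereMesh/buildSphereMesh are illustrative names; the render code isn't part of this diff):

void drawSphere()
{
    // constructed on the first call, by which point the platform
    // has already installed the math backend
    static SphereMesh sSphereMesh = buildSphereMesh();
    sSphereMesh.render();
}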

all float3 and float4 functions and isas

completed all float3 and float4 function variants in the ISA backends and math_c.
neon still to be done, but that will happen on mac.

Update math_backend.cpp

mac isa neon update

added float3
restructured the classes to look more like the final version of the x86 classes

changes required for linux

Update build-macos-clang.yml

Update build-macos-clang.yml

Revert "Update build-macos-clang.yml"

This reverts commit 29dfc567f4.

Revert "Update build-macos-clang.yml"

This reverts commit 2abad2b4ca.

Update CMakeLists.txt

fix mac's stupid build

remove god-awful rolling average from frame time tracker

use intrinsic headers instead

each ISA implementation now uses a header for that ISA's intrinsic functions; these are then used in the impl files. This will make it easier for matrix functions when those are implemented.
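Roughly, each shared *_impl.inl compiles against whatever v_* helpers the including ISA header defined. A minimal sketch of one such impl (the signature is a guess; the real .inl files aren't shown in this diff):

inline void float3_add_impl(float* out, const float* a, const float* b)
{
    // v_load3_vec / v_add / v_store3 come from the ISA header included just before this file
    v_store3(out, v_add(v_load3_vec(a), v_load3_vec(b)));
}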

fixed comment saying 256 when it should be 512 for avx512

consolidated initializers for function tables

Update neon_intrinsics.h

fixes for some neon intrinsics; no idea if this is the best way to do these, but they work at least

v_cross is especially messy at the moment; we basically just do it as a C math function. Need to look into getting this done correctly.
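For later: a lane-shuffle sketch that avoids round-tripping every component through scalar code; untested, and not what this commit ships (vextq_f32 rotates, vsetq_lane_f32 patches lane 2 back to x):

inline float32x4_t v_cross_sketch(float32x4_t a, float32x4_t b)
{
    // yzx rotation: vextq_f32(v, v, 1) = (y, z, w, x); overwrite lane 2 with x
    float32x4_t a_yzx = vsetq_lane_f32(vgetq_lane_f32(a, 0), vextq_f32(a, a, 1), 2);
    float32x4_t b_yzx = vsetq_lane_f32(vgetq_lane_f32(b, 0), vextq_f32(b, b, 1), 2);
    float32x4_t c = vsubq_f32(vmulq_f32(a, b_yzx), vmulq_f32(a_yzx, b));
    return vsetq_lane_f32(vgetq_lane_f32(c, 0), vextq_f32(c, c, 1), 2); // yzx of c; w lane is not zeroed
}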
marauder2k7 2026-02-26 16:45:13 +00:00
parent 73ed502ac9
commit 67f12311d4
36 changed files with 1481 additions and 419 deletions

View file

@@ -0,0 +1,140 @@
#pragma once
#include <immintrin.h> // AVX/AVX2 intrinsics
namespace
{
typedef __m128 f32x4;
//------------------------------------------------------
// Load / Store
//------------------------------------------------------
// Load 4 floats from memory into a SIMD register
inline f32x4 v_load(const float* p) { return _mm_loadu_ps(p); }
inline void v_store(float* dst, f32x4 v) { _mm_storeu_ps(dst, v); }
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
inline f32x4 v_zero() { return _mm_setzero_ps(); }
inline float v_extract0(f32x4 v) { return _mm_cvtss_f32(v); }
//------------------------------------------------------
// Mask helpers
//------------------------------------------------------
inline f32x4 v_mask_xyz() { return _mm_blend_ps(_mm_set1_ps(0.0f), _mm_set1_ps(1.0f), 0b0111); }
inline f32x4 v_preserve_w(f32x4 newv, f32x4 original)
{
return _mm_blend_ps(newv, original, 0b1000);
}
//------------------------------------------------------
// Float3 helpers (safe loading into 4 lanes)
//------------------------------------------------------
inline f32x4 v_load3_vec(const float* p) // w = 0
{
return _mm_set_ps(0.0f, p[2], p[1], p[0]);
}
inline f32x4 v_load3_pos(const float* p) // w = 1
{
return _mm_set_ps(1.0f, p[2], p[1], p[0]);
}
inline void v_store3(float* dst, f32x4 v)
{
alignas(16) float tmp[4]; // temp storage
_mm_store_ps(tmp, v); // store all 4 lanes
dst[0] = tmp[0];
dst[1] = tmp[1];
dst[2] = tmp[2];
}
//------------------------------------------------------
// Simple Arithmetic
//------------------------------------------------------
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide
inline f32x4 v_div_exact(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
//------------------------------------------------------
// Fast recip
//------------------------------------------------------
// Fast recip 1/b
inline f32x4 v_rcp_nr(f32x4 b)
{
f32x4 r = _mm_rcp_ps(b);
f32x4 two = _mm_set1_ps(2.0f);
return _mm_mul_ps(r, _mm_sub_ps(two, _mm_mul_ps(b, r)));
}
// Fast divide: a * (1/b) using the NR reciprocal above
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_mul_ps(a, v_rcp_nr(b)); }
inline f32x4 v_rsqrt_nr(f32x4 x)
{
f32x4 r = _mm_rsqrt_ps(x);
f32x4 half = _mm_set1_ps(0.5f);
f32x4 three = _mm_set1_ps(3.0f);
r = _mm_mul_ps(r, _mm_sub_ps(three, _mm_mul_ps(_mm_mul_ps(x, r), r)));
return _mm_mul_ps(r, half);
}
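// Both refinements above are single Newton-Raphson steps:
//   rcp:   r' = r * (2 - b*r)        roughly doubles the correct bits of _mm_rcp_ps (~12 -> ~23)
//   rsqrt: r' = (r/2) * (3 - x*r*r)  same idea for _mm_rsqrt_ps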
//------------------------------------------------------
// Vector intrinsic functions
//------------------------------------------------------
// full dot4
inline f32x4 v_dot4(f32x4 a, f32x4 b)
{
return _mm_dp_ps(a, b, 0xFF); // multiply all 4 lanes, broadcast the sum to every lane
}
// dot3 (ignores w)
inline f32x4 v_dot3(f32x4 a, f32x4 b)
{
return _mm_dp_ps(a, b, 0x7F); // multiply x,y,z only; broadcast so v_normalize3 can scale every lane
}
// cross product xyz only.
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(_mm_mul_ps(a, b_yzx), _mm_mul_ps(a_yzx, b));
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
inline f32x4 v_normalize3(f32x4 v)
{
f32x4 inv = v_rsqrt_nr(v_dot3(v, v));
return _mm_mul_ps(v, inv);
}
// adds all 4 lanes together.
inline f32x4 v_hadd4(f32x4 a)
{
// sum all 4 lanes with two horizontal adds
__m128 sum = _mm_hadd_ps(a, a);
return _mm_hadd_ps(sum, sum);
}
}
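To make the conventions concrete, a hypothetical caller (not part of this commit) might use the helpers like this:

float normalize3_demo(float* v3)
{
    f32x4 v = v_load3_vec(v3);        // (x, y, z, 0)
    f32x4 n = v_normalize3(v);        // approximate inverse-sqrt normalize
    v_store3(v3, n);                  // writes x, y, z only
    return v_extract0(v_dot3(n, n));  // ~1.0f for a non-degenerate input
}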

View file

@@ -0,0 +1,26 @@
#include "avx_intrinsics.h"
#include "float3_dispatch.h"
#include <immintrin.h> // AVX/AVX2 intrinsics
#include "float3_impl.inl"
namespace math_backend::float3::dispatch
{
// Install AVX backend
void install_avx()
{
gFloat3.add = float3_add_impl;
gFloat3.sub = float3_sub_impl;
gFloat3.mul = float3_mul_impl;
gFloat3.mul_scalar = float3_mul_scalar_impl;
gFloat3.div = float3_div_impl;
gFloat3.div_scalar = float3_div_scalar_impl;
gFloat3.dot = float3_dot_impl;
gFloat3.length = float3_length_impl;
gFloat3.lengthSquared = float3_length_squared_impl;
gFloat3.normalize = float3_normalize_impl;
gFloat3.normalize_mag = float3_normalize_mag_impl;
gFloat3.lerp = float3_lerp_impl;
gFloat3.cross = float3_cross_impl;
}
}
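The gFloat3 table itself isn't in this diff; judging by the assignments above, it is a struct of function pointers along these lines (every signature here is a guess):

struct Float3Funcs
{
    void  (*add)(float* out, const float* a, const float* b);
    void  (*sub)(float* out, const float* a, const float* b);
    void  (*mul)(float* out, const float* a, const float* b);
    void  (*mul_scalar)(float* out, const float* a, float s);
    void  (*div)(float* out, const float* a, const float* b);
    void  (*div_scalar)(float* out, const float* a, float s);
    float (*dot)(const float* a, const float* b);
    float (*length)(const float* a);
    float (*lengthSquared)(const float* a);
    void  (*normalize)(float* out, const float* a);
    float (*normalize_mag)(float* out, const float* a);
    void  (*lerp)(float* out, const float* a, const float* b, float t);
    void  (*cross)(float* out, const float* a, const float* b);
};
extern Float3Funcs gFloat3;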

View file

@@ -1,49 +1,5 @@
#include "avx_intrinsics.h"
#include "float4_dispatch.h"
#include <immintrin.h> // AVX/AVX2 intrinsics
namespace
{
typedef __m128 f32x4;
// Load 4 floats from memory into a SIMD register
inline f32x4 v_load(const float* p) { return _mm_loadu_ps(p); }
// Store 4 floats from SIMD register back to memory
inline void v_store(float* dst, f32x4 v) { _mm_storeu_ps(dst, v); }
// Broadcast a single float across all 4 lanes
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
// Horizontal sum of all 4 elements (for dot product, length, etc.)
inline float v_hadd4(f32x4 a)
{
__m128 t1 = _mm_hadd_ps(a, a); // sums pairs: [a0+a1, a2+a3, ...]
__m128 t2 = _mm_hadd_ps(t1, t1); // sums again: first element = a0+a1+a2+a3
return _mm_cvtss_f32(t2); // extract first element
}
// specialized dot product for AVX
float float4_dot_avx(const float* a, const float* b)
{
f32x4 va = _mm_loadu_ps(a);
f32x4 vb = _mm_loadu_ps(b);
__m128 dp = _mm_dp_ps(va, vb, 0xF1); // multiply all 4, sum all 4, lowest lane
return _mm_cvtss_f32(dp);
}
}
#include "float4_impl.inl"
@@ -52,16 +8,18 @@ namespace math_backend::float4::dispatch
// Install AVX backend
void install_avx()
{
gFloat4.add = float4_add_impl;
gFloat4.sub = float4_sub_impl;
gFloat4.mul = float4_mul_impl;
gFloat4.mul_scalar = float4_mul_scalar_impl;
gFloat4.div = float4_div_impl;
gFloat4.div_scalar = float4_div_scalar_impl;
gFloat4.dot = float4_dot_avx;
gFloat4.length = float4_length_impl;
gFloat4.add = float4_add_impl;
gFloat4.sub = float4_sub_impl;
gFloat4.mul = float4_mul_impl;
gFloat4.mul_scalar = float4_mul_scalar_impl;
gFloat4.div = float4_div_impl;
gFloat4.div_scalar = float4_div_scalar_impl;
gFloat4.dot = float4_dot_impl;
gFloat4.length = float4_length_impl;
gFloat4.lengthSquared = float4_length_squared_impl;
gFloat4.normalize = float4_normalize_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.normalize = float4_normalize_impl;
gFloat4.normalize_mag = float4_normalize_mag_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.cross = float4_cross_impl;
}
}

View file

@@ -0,0 +1,140 @@
#pragma once
#include <immintrin.h> // AVX/AVX2 intrinsics
namespace
{
typedef __m128 f32x4;
//------------------------------------------------------
// Load / Store
//------------------------------------------------------
// Load 4 floats from memory into a SIMD register
inline f32x4 v_load(const float* p) { return _mm_loadu_ps(p); }
inline void v_store(float* dst, f32x4 v) { _mm_storeu_ps(dst, v); }
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
inline f32x4 v_zero() { return _mm_setzero_ps(); }
inline float v_extract0(f32x4 v) { return _mm_cvtss_f32(v); }
//------------------------------------------------------
// Mask helpers
//------------------------------------------------------
inline f32x4 v_mask_xyz() { return _mm_blend_ps(_mm_set1_ps(0.0f), _mm_set1_ps(1.0f), 0b0111); }
inline f32x4 v_preserve_w(f32x4 newv, f32x4 original)
{
return _mm_blend_ps(newv, original, 0b1000);
}
//------------------------------------------------------
// Float3 helpers (safe loading into 4 lanes)
//------------------------------------------------------
inline f32x4 v_load3_vec(const float* p) // w = 0
{
return _mm_set_ps(0.0f, p[2], p[1], p[0]);
}
inline f32x4 v_load3_pos(const float* p) // w = 1
{
return _mm_set_ps(1.0f, p[2], p[1], p[0]);
}
inline void v_store3(float* dst, f32x4 v)
{
alignas(16) float tmp[4]; // temp storage
_mm_store_ps(tmp, v); // store all 4 lanes
dst[0] = tmp[0];
dst[1] = tmp[1];
dst[2] = tmp[2];
}
//------------------------------------------------------
// Simple Arithmetic
//------------------------------------------------------
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide
inline f32x4 v_div_exact(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
//------------------------------------------------------
// Fast recip
//------------------------------------------------------
// Fast recip 1/b
inline f32x4 v_rcp_nr(f32x4 b)
{
f32x4 r = _mm_rcp_ps(b);
f32x4 two = _mm_set1_ps(2.0f);
return _mm_mul_ps(r, _mm_sub_ps(two, _mm_mul_ps(b, r)));
}
// Fast divide: a * (1/b) using the NR reciprocal above
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_mul_ps(a, v_rcp_nr(b)); }
inline f32x4 v_rsqrt_nr(f32x4 x)
{
f32x4 r = _mm_rsqrt_ps(x);
f32x4 half = _mm_set1_ps(0.5f);
f32x4 three = _mm_set1_ps(3.0f);
r = _mm_mul_ps(r, _mm_sub_ps(three, _mm_mul_ps(_mm_mul_ps(x, r), r)));
return _mm_mul_ps(r, half);
}
//------------------------------------------------------
// Vector intrinsic functions
//------------------------------------------------------
// full dot4
inline f32x4 v_dot4(f32x4 a, f32x4 b)
{
return _mm_dp_ps(a, b, 0xFF); // multiply all 4 lanes, broadcast the sum to every lane
}
// dot3 (ignores w)
inline f32x4 v_dot3(f32x4 a, f32x4 b)
{
return _mm_dp_ps(a, b, 0x7F); // multiply x,y,z only; broadcast so v_normalize3 can scale every lane
}
// cross product xyz only.
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(_mm_mul_ps(a, b_yzx), _mm_mul_ps(a_yzx, b));
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
inline f32x4 v_normalize3(f32x4 v)
{
f32x4 inv = v_rsqrt_nr(v_dot3(v, v));
return _mm_mul_ps(v, inv);
}
// adds all 4 lanes together.
inline f32x4 v_hadd4(f32x4 a)
{
// sum all 4 lanes with two horizontal adds
__m128 sum = _mm_hadd_ps(a, a);
return _mm_hadd_ps(sum, sum);
}
}

View file

@@ -0,0 +1,26 @@
#include "avx2_intrinsics.h"
#include "float3_dispatch.h"
#include <immintrin.h> // AVX/AVX2 intrinsics
#include "float3_impl.inl"
namespace math_backend::float3::dispatch
{
// Install AVX2 backend
void install_avx2()
{
gFloat3.add = float3_add_impl;
gFloat3.sub = float3_sub_impl;
gFloat3.mul = float3_mul_impl;
gFloat3.mul_scalar = float3_mul_scalar_impl;
gFloat3.div = float3_div_impl;
gFloat3.div_scalar = float3_div_scalar_impl;
gFloat3.dot = float3_dot_impl;
gFloat3.length = float3_length_impl;
gFloat3.lengthSquared = float3_length_squared_impl;
gFloat3.normalize = float3_normalize_impl;
gFloat3.normalize_mag = float3_normalize_mag_impl;
gFloat3.lerp = float3_lerp_impl;
gFloat3.cross = float3_cross_impl;
}
}

View file

@@ -1,49 +1,5 @@
#include "avx2_intrinsics.h"
#include "float4_dispatch.h"
#include <immintrin.h> // AVX/AVX2 intrinsics
namespace
{
typedef __m128 f32x4;
// Load 4 floats from memory into a SIMD register
inline f32x4 v_load(const float* p) { return _mm_loadu_ps(p); }
// Store 4 floats from SIMD register back to memory
inline void v_store(float* dst, f32x4 v) { _mm_storeu_ps(dst, v); }
// Broadcast a single float across all 4 lanes
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
// Horizontal sum of all 4 elements (for dot product, length, etc.)
inline float v_hadd4(f32x4 a)
{
__m128 t1 = _mm_hadd_ps(a, a); // sums pairs: [a0+a1, a2+a3, ...]
__m128 t2 = _mm_hadd_ps(t1, t1); // sums again: first element = a0+a1+a2+a3
return _mm_cvtss_f32(t2); // extract first element
}
// specialized dot product for AVX
float float4_dot_avx(const float* a, const float* b)
{
f32x4 va = _mm_loadu_ps(a);
f32x4 vb = _mm_loadu_ps(b);
__m128 dp = _mm_dp_ps(va, vb, 0xF1); // multiply all 4, sum all 4, lowest lane
return _mm_cvtss_f32(dp);
}
}
#include "float4_impl.inl"
@@ -58,10 +14,12 @@ namespace math_backend::float4::dispatch
gFloat4.mul_scalar = float4_mul_scalar_impl;
gFloat4.div = float4_div_impl;
gFloat4.div_scalar = float4_div_scalar_impl;
gFloat4.dot = float4_dot_avx;
gFloat4.dot = float4_dot_impl;
gFloat4.length = float4_length_impl;
gFloat4.lengthSquared = float4_length_squared_impl;
gFloat4.normalize = float4_normalize_impl;
gFloat4.normalize_mag = float4_normalize_mag_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.cross = float4_cross_impl;
}
}

View file

@@ -0,0 +1,25 @@
#include "neon_intrinsics.h"
#include "float3_dispatch.h"
#include "float3_impl.inl"
namespace math_backend::float3::dispatch
{
// Install NEON backend
void install_neon()
{
gFloat3.add = float3_add_impl;
gFloat3.sub = float3_sub_impl;
gFloat3.mul = float3_mul_impl;
gFloat3.mul_scalar = float3_mul_scalar_impl;
gFloat3.div = float3_div_impl;
gFloat3.div_scalar = float3_div_scalar_impl;
gFloat3.dot = float3_dot_impl;
gFloat3.length = float3_length_impl;
gFloat3.lengthSquared = float3_length_squared_impl;
gFloat3.normalize = float3_normalize_impl;
gFloat3.normalize_mag = float3_normalize_mag_impl;
gFloat3.lerp = float3_lerp_impl;
gFloat3.cross = float3_cross_impl;
}
}

View file

@@ -1,50 +1,25 @@
#include "neon_intrinsics.h"
#include "float4_dispatch.h"
#include <arm_neon.h>
namespace
{
typedef float32x4_t f32x4;
inline f32x4 v_load(const float* p) { return vld1q_f32(p); }
inline void v_store(float* dst, f32x4 v) { vst1q_f32(dst, v); }
inline f32x4 v_set1(float s) { return vdupq_n_f32(s); }
inline f32x4 v_mul(f32x4 a, f32x4 b) { return vmulq_f32(a, b); }
inline f32x4 v_add(f32x4 a, f32x4 b) { return vaddq_f32(a, b); }
inline f32x4 v_sub(f32x4 a, f32x4 b) { return vsubq_f32(a, b); }
// AArch64 native divide
inline f32x4 v_div(f32x4 a, f32x4 b)
{
return vdivq_f32(a, b);
}
inline float v_hadd4(f32x4 a)
{
float32x2_t low = vget_low_f32(a);
float32x2_t high = vget_high_f32(a);
float32x2_t sum = vadd_f32(low, high);
sum = vpadd_f32(sum, sum);
return vget_lane_f32(sum, 0);
}
}
#include "../../impl/float4_impl.inl"
#include "float4_impl.inl"
namespace math_backend::float4::dispatch
{
// Install NEON64 backend
void install_neon()
{
gFloat4.add = float4_add_impl;
gFloat4.sub = float4_sub_impl;
gFloat4.mul = float4_mul_impl;
gFloat4.mul_scalar = float4_mul_scalar_impl;
gFloat4.div = float4_div_impl;
gFloat4.div_scalar = float4_div_scalar_impl;
gFloat4.dot = float4_dot_impl;
gFloat4.length = float4_length_impl;
gFloat4.add = float4_add_impl;
gFloat4.sub = float4_sub_impl;
gFloat4.mul = float4_mul_impl;
gFloat4.mul_scalar = float4_mul_scalar_impl;
gFloat4.div = float4_div_impl;
gFloat4.div_scalar = float4_div_scalar_impl;
gFloat4.dot = float4_dot_impl;
gFloat4.length = float4_length_impl;
gFloat4.lengthSquared = float4_length_squared_impl;
gFloat4.normalize = float4_normalize_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.normalize = float4_normalize_impl;
gFloat4.normalize_mag = float4_normalize_mag_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.cross = float4_cross_impl;
}
}

View file

@@ -0,0 +1,130 @@
#pragma once
#include <arm_neon.h>
namespace
{
typedef float32x4_t f32x4;
//------------------------------------------------------
// Load / Store
//------------------------------------------------------
inline f32x4 v_load(const float* p) { return vld1q_f32(p); }
inline void v_store(float* dst, f32x4 v) { vst1q_f32(dst, v); }
inline f32x4 v_set1(float s) { return vdupq_n_f32(s); }
inline f32x4 v_zero() { return vdupq_n_f32(0.0f); }
inline float v_extract0(f32x4 v) { return vgetq_lane_f32(v, 0); }
//------------------------------------------------------
// Mask helpers
//------------------------------------------------------
inline f32x4 v_mask_xyz()
{
// equivalent to [1,1,1,0]
float32x4_t mask = {1.0f, 1.0f, 1.0f, 0.0f};
return mask;
}
inline f32x4 v_preserve_w(f32x4 newv, f32x4 original)
{
// bit-select needs an all-ones bit mask in w; reinterpreting 1.0f would only select the bits of 0x3F800000
uint32x4_t mask = {0u, 0u, 0u, 0xFFFFFFFFu};
return vbslq_f32(mask, original, newv);
}
//------------------------------------------------------
// Float3 helpers
//------------------------------------------------------
inline f32x4 v_load3_vec(const float* p) // w = 0
{
float tmp[4] = { p[0], p[1], p[2], 0.0f };
return vld1q_f32(tmp);
}
inline f32x4 v_load3_pos(const float* p) // w = 1
{
float tmp[4] = { p[0], p[1], p[2], 1.0f };
return vld1q_f32(tmp);
}
inline void v_store3(float* dst, f32x4 v)
{
float tmp[4];
vst1q_f32(tmp, v);
dst[0] = tmp[0];
dst[1] = tmp[1];
dst[2] = tmp[2];
}
//------------------------------------------------------
// Simple Arithmetic
//------------------------------------------------------
inline f32x4 v_mul(f32x4 a, f32x4 b) { return vmulq_f32(a, b); }
inline f32x4 v_div_exact(f32x4 a, f32x4 b) { return vdivq_f32(a, b); } // only NEON64
inline f32x4 v_add(f32x4 a, f32x4 b) { return vaddq_f32(a, b); }
inline f32x4 v_sub(f32x4 a, f32x4 b) { return vsubq_f32(a, b); }
//------------------------------------------------------
// Fast recip
//------------------------------------------------------
inline f32x4 v_rcp_nr(f32x4 b)
{
f32x4 r = vrecpeq_f32(b);
r = vmulq_f32(r, vrecpsq_f32(b, r)); // Newton-Raphson
r = vmulq_f32(r, vrecpsq_f32(b, r));
return r;
}
inline f32x4 v_div(f32x4 a, f32x4 b)
{
return vmulq_f32(a, v_rcp_nr(b));
}
inline f32x4 v_rsqrt_nr(f32x4 x)
{
f32x4 r = vrsqrteq_f32(x);
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(r,r), x)); // refine
r = vmulq_f32(r, vrsqrtsq_f32(vmulq_f32(r,r), x));
return r;
}
//------------------------------------------------------
// Vector intrinsic functions
//------------------------------------------------------
inline f32x4 v_dot4(f32x4 a, f32x4 b)
{
f32x4 mul = vmulq_f32(a, b);
float32x2_t sum2 = vpadd_f32(vget_low_f32(mul), vget_high_f32(mul));
float sum = vget_lane_f32(sum2, 0) + vget_lane_f32(sum2, 1);
return vdupq_n_f32(sum);
}
inline f32x4 v_dot3(f32x4 a, f32x4 b)
{
float32x4_t mask = {1.0f, 1.0f, 1.0f, 0.0f};
f32x4 mul = vmulq_f32(a, b);
mul = vmulq_f32(mul, mask);
float32x2_t sum2 = vpadd_f32(vget_low_f32(mul), vget_high_f32(mul));
float sum = vget_lane_f32(sum2, 0) + vget_lane_f32(sum2, 1);
return vdupq_n_f32(sum);
}
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
float32x4_t a_yzx = { vgetq_lane_f32(a,1), vgetq_lane_f32(a,2), vgetq_lane_f32(a,0), 0 };
float32x4_t b_yzx = { vgetq_lane_f32(b,1), vgetq_lane_f32(b,2), vgetq_lane_f32(b,0), 0 };
float32x4_t c = vsubq_f32(vmulq_f32(a, b_yzx), vmulq_f32(a_yzx, b));
return (float32x4_t){ vgetq_lane_f32(c,1), vgetq_lane_f32(c,2), vgetq_lane_f32(c,0), 0 }; // yzx of c, matching the SSE path
}
inline f32x4 v_normalize3(f32x4 v)
{
f32x4 inv = v_rsqrt_nr(v_dot3(v,v));
return vmulq_f32(v, inv);
}
inline f32x4 v_hadd4(f32x4 a)
{
float32x2_t sum2 = vpadd_f32(vget_low_f32(a), vget_high_f32(a));
float sum = vget_lane_f32(sum2,0) + vget_lane_f32(sum2,1);
return vdupq_n_f32(sum);
}
}
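On AArch64 specifically, the pairwise-add-and-broadcast pattern used by v_hadd4 and the dot products above could collapse to the dedicated horizontal-sum instruction; an untested sketch:

#if defined(__aarch64__)
inline f32x4 v_hadd4_a64(f32x4 a)
{
    return vdupq_n_f32(vaddvq_f32(a)); // vaddvq_f32 sums all 4 lanes (A64 only)
}
#endif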

View file

@@ -0,0 +1,26 @@
#include "sse2_intrinsics.h"
#include "float3_dispatch.h"
#include <emmintrin.h> // SSE2 intrinsics
#include "float3_impl.inl"
namespace math_backend::float3::dispatch
{
// Install SSE2 backend
void install_sse2()
{
gFloat3.add = float3_add_impl;
gFloat3.sub = float3_sub_impl;
gFloat3.mul = float3_mul_impl;
gFloat3.mul_scalar = float3_mul_scalar_impl;
gFloat3.div = float3_div_impl;
gFloat3.div_scalar = float3_div_scalar_impl;
gFloat3.dot = float3_dot_impl;
gFloat3.length = float3_length_impl;
gFloat3.lengthSquared = float3_length_squared_impl;
gFloat3.normalize = float3_normalize_impl;
gFloat3.normalize_mag = float3_normalize_mag_impl;
gFloat3.lerp = float3_lerp_impl;
gFloat3.cross = float3_cross_impl;
}
}

View file

@@ -1,42 +1,8 @@
#include "sse2_intrinsics.h"
#include "float4_dispatch.h"
#include <emmintrin.h> // SSE2 intrinsics
namespace
{
typedef __m128 f32x4;
// Load 4 floats from memory into a SIMD register
inline f32x4 v_load(const float* p) { return _mm_loadu_ps(p); }
#include "float4_impl.inl"
// Store 4 floats from SIMD register back to memory
inline void v_store(float* dst, f32x4 v) { _mm_storeu_ps(dst, v); }
// Broadcast a single float across all 4 lanes
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
// Horizontal sum of all 4 elements (for dot product, length, etc.)
inline float v_hadd4(f32x4 a)
{
__m128 shuf = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 0, 1)); // swap pairs
__m128 sums = _mm_add_ps(a, shuf); // sums: [a0+a1 a1+a0 a2+a3 a3+a2]
shuf = _mm_shuffle_ps(sums, sums, _MM_SHUFFLE(1, 0, 3, 2)); // move high pair to low
sums = _mm_add_ps(sums, shuf); // total sum in lower float
return _mm_cvtss_f32(sums);
}
}
#include "../../impl/float4_impl.inl"
namespace math_backend::float4::dispatch
{
@@ -53,6 +19,8 @@ namespace math_backend::float4::dispatch
gFloat4.length = float4_length_impl;
gFloat4.lengthSquared = float4_length_squared_impl;
gFloat4.normalize = float4_normalize_impl;
gFloat4.normalize_mag = float4_normalize_mag_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.cross = float4_cross_impl;
}
}

View file

@@ -0,0 +1,156 @@
#pragma once
#include <emmintrin.h> // SSE2
#include <xmmintrin.h> // SSE
namespace
{
typedef __m128 f32x4;
//------------------------------------------------------
// Load / Store
//------------------------------------------------------
// Load 4 floats from memory into a SIMD register
inline f32x4 v_load(const float* p) { return _mm_loadu_ps(p); }
inline void v_store(float* dst, f32x4 v) { _mm_storeu_ps(dst, v); }
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
inline f32x4 v_zero() { return _mm_setzero_ps(); }
inline float v_extract0(f32x4 v) { return _mm_cvtss_f32(v); }
//------------------------------------------------------
// Float3 helpers (safe loading into 4 lanes)
//------------------------------------------------------
inline f32x4 v_load3_vec(const float* p) // w = 0
{
return _mm_set_ps(0.0f, p[2], p[1], p[0]);
}
inline f32x4 v_load3_pos(const float* p) // w = 1
{
return _mm_set_ps(1.0f, p[2], p[1], p[0]);
}
inline void v_store3(float* dst, f32x4 v)
{
alignas(16) float tmp[4]; // temp storage
_mm_store_ps(tmp, v); // store all 4 lanes
dst[0] = tmp[0];
dst[1] = tmp[1];
dst[2] = tmp[2];
}
//------------------------------------------------------
// Mask helpers
//------------------------------------------------------
inline f32x4 v_mask_xyz() { return _mm_castsi128_ps(_mm_set_epi32(0, -1, -1, -1)); }
inline f32x4 v_preserve_w(f32x4 newv, f32x4 original)
{
f32x4 mask = _mm_castsi128_ps(_mm_set_epi32(-1, 0, 0, 0));
return _mm_or_ps(_mm_and_ps(mask, original), _mm_andnot_ps(mask, newv));
}
//------------------------------------------------------
// Simple Arithmetic
//------------------------------------------------------
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide
inline f32x4 v_div_exact(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
//------------------------------------------------------
// Fast recip
//------------------------------------------------------
// Fast recip 1/b
inline f32x4 v_rcp_nr(f32x4 b)
{
f32x4 r = _mm_rcp_ps(b);
f32x4 two = _mm_set1_ps(2.0f);
return _mm_mul_ps(r, _mm_sub_ps(two, _mm_mul_ps(b, r)));
}
// Fast divide: a * (1/b) using the NR reciprocal above
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_mul_ps(a, v_rcp_nr(b)); }
inline f32x4 v_rsqrt_nr(f32x4 x)
{
f32x4 r = _mm_rsqrt_ps(x);
f32x4 half = _mm_set1_ps(0.5f);
f32x4 three = _mm_set1_ps(3.0f);
r = _mm_mul_ps(r, _mm_sub_ps(three, _mm_mul_ps(_mm_mul_ps(x, r), r)));
return _mm_mul_ps(r, half);
}
//------------------------------------------------------
// Vector intrinsic functions
//------------------------------------------------------
// full dot4
inline f32x4 v_dot4(f32x4 a, f32x4 b)
{
f32x4 prod = _mm_mul_ps(a, b); // multiply element-wise
f32x4 shuf = _mm_shuffle_ps(prod, prod, _MM_SHUFFLE(2, 3, 0, 1));
prod = _mm_add_ps(prod, shuf);
shuf = _mm_shuffle_ps(prod, prod, _MM_SHUFFLE(1, 0, 3, 2));
prod = _mm_add_ps(prod, shuf);
return prod; // f32x4, all lanes = dot(a,b)
}
// dot3 (ignores w)
inline f32x4 v_dot3(f32x4 a, f32x4 b)
{
f32x4 prod = _mm_mul_ps(a, b);
prod = _mm_and_ps(prod, v_mask_xyz()); // zero w
f32x4 shuf = _mm_shuffle_ps(prod, prod, _MM_SHUFFLE(2, 3, 0, 1));
prod = _mm_add_ps(prod, shuf);
shuf = _mm_shuffle_ps(prod, prod, _MM_SHUFFLE(1, 0, 3, 2));
prod = _mm_add_ps(prod, shuf);
return prod; // f32x4, all lanes = dot(a,b)
}
// cross product xyz only.
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps( _mm_mul_ps(a, b_yzx), _mm_mul_ps(a_yzx, b));
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
inline f32x4 v_normalize3(f32x4 v)
{
f32x4 inv = v_rsqrt_nr(v_dot3(v, v));
return _mm_mul_ps(v, inv);
}
// adds all 4 lanes together.
inline f32x4 v_hadd4(f32x4 a)
{
// sum all 4 lanes in SSE2
__m128 shuf = _mm_shuffle_ps(a, a, _MM_SHUFFLE(2, 3, 0, 1)); // swap pairs
__m128 sums = _mm_add_ps(a, shuf);
shuf = _mm_shuffle_ps(sums, sums, _MM_SHUFFLE(1, 0, 3, 2));
return _mm_add_ps(sums, shuf);
}
}

View file

@@ -0,0 +1,26 @@
#include "sse41_intrinsics.h"
#include "float3_dispatch.h"
#include <smmintrin.h> // SSE41 intrinsics
#include "float3_impl.inl"
namespace math_backend::float3::dispatch
{
// Install SSE41 backend
void install_sse41()
{
gFloat3.add = float3_add_impl;
gFloat3.sub = float3_sub_impl;
gFloat3.mul = float3_mul_impl;
gFloat3.mul_scalar = float3_mul_scalar_impl;
gFloat3.div = float3_div_impl;
gFloat3.div_scalar = float3_div_scalar_impl;
gFloat3.dot = float3_dot_impl;
gFloat3.length = float3_length_impl;
gFloat3.lengthSquared = float3_length_squared_impl;
gFloat3.normalize = float3_normalize_impl;
gFloat3.normalize_mag = float3_normalize_mag_impl;
gFloat3.lerp = float3_lerp_impl;
gFloat3.cross = float3_cross_impl;
}
}

View file

@@ -1,49 +1,5 @@
#include "sse41_intrinsics.h"
#include "float4_dispatch.h"
#include <smmintrin.h> // SSE41 intrinsics
namespace
{
typedef __m128 f32x4;
// Load 4 floats from memory into a SIMD register
inline f32x4 v_load(const float* p) { return _mm_loadu_ps(p); }
// Store 4 floats from SIMD register back to memory
inline void v_store(float* dst, f32x4 v) { _mm_storeu_ps(dst, v); }
// Broadcast a single float across all 4 lanes
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
// Horizontal sum of all 4 elements (for dot product, length, etc.)
inline float v_hadd4(f32x4 a)
{
__m128 t1 = _mm_hadd_ps(a, a); // sums pairs: [a0+a1, a2+a3, ...]
__m128 t2 = _mm_hadd_ps(t1, t1); // sums again: first element = a0+a1+a2+a3
return _mm_cvtss_f32(t2); // extract first element
}
// specialized dot product for SSE4.1
float float4_dot_sse41(const float* a, const float* b)
{
f32x4 va = _mm_loadu_ps(a);
f32x4 vb = _mm_loadu_ps(b);
__m128 dp = _mm_dp_ps(va, vb, 0xF1); // multiply all 4, sum all 4, lowest lane
return _mm_cvtss_f32(dp);
}
}
#include "float4_impl.inl"
@@ -58,10 +14,12 @@ namespace math_backend::float4::dispatch
gFloat4.mul_scalar = float4_mul_scalar_impl;
gFloat4.div = float4_div_impl;
gFloat4.div_scalar = float4_div_scalar_impl;
gFloat4.dot = float4_dot_sse41;
gFloat4.dot = float4_dot_impl;
gFloat4.length = float4_length_impl;
gFloat4.lengthSquared = float4_length_squared_impl;
gFloat4.normalize = float4_normalize_impl;
gFloat4.normalize_mag = float4_normalize_mag_impl;
gFloat4.lerp = float4_lerp_impl;
gFloat4.cross = float4_cross_impl;
}
}

View file

@@ -0,0 +1,140 @@
#pragma once
#include <smmintrin.h> // SSE4.1
namespace
{
typedef __m128 f32x4;
//------------------------------------------------------
// Load / Store
//------------------------------------------------------
// Load 4 floats from memory into a SIMD register
inline f32x4 v_load(const float* p) { return _mm_loadu_ps(p); }
inline void v_store(float* dst, f32x4 v) { _mm_storeu_ps(dst, v); }
inline f32x4 v_set1(float s) { return _mm_set1_ps(s); }
inline f32x4 v_zero() { return _mm_setzero_ps(); }
inline float v_extract0(f32x4 v) { return _mm_cvtss_f32(v); }
//------------------------------------------------------
// Mask helpers
//------------------------------------------------------
inline f32x4 v_mask_xyz() { return _mm_blend_ps(_mm_set1_ps(0.0f), _mm_set1_ps(1.0f), 0b0111); }
inline f32x4 v_preserve_w(f32x4 newv, f32x4 original)
{
return _mm_blend_ps(newv, original, 0b1000);
}
//------------------------------------------------------
// Float3 helpers (safe loading into 4 lanes)
//------------------------------------------------------
inline f32x4 v_load3_vec(const float* p) // w = 0
{
return _mm_set_ps(0.0f, p[2], p[1], p[0]);
}
inline f32x4 v_load3_pos(const float* p) // w = 1
{
return _mm_set_ps(1.0f, p[2], p[1], p[0]);
}
inline void v_store3(float* dst, f32x4 v)
{
alignas(16) float tmp[4]; // temp storage
_mm_store_ps(tmp, v); // store all 4 lanes
dst[0] = tmp[0];
dst[1] = tmp[1];
dst[2] = tmp[2];
}
//------------------------------------------------------
// Simple Arithmetic
//------------------------------------------------------
// Element-wise multiply
inline f32x4 v_mul(f32x4 a, f32x4 b) { return _mm_mul_ps(a, b); }
// Element-wise divide
inline f32x4 v_div_exact(f32x4 a, f32x4 b) { return _mm_div_ps(a, b); }
// Element-wise add
inline f32x4 v_add(f32x4 a, f32x4 b) { return _mm_add_ps(a, b); }
// Element-wise subtract
inline f32x4 v_sub(f32x4 a, f32x4 b) { return _mm_sub_ps(a, b); }
//------------------------------------------------------
// Fast recip
//------------------------------------------------------
// Fast recip 1/b
inline f32x4 v_rcp_nr(f32x4 b)
{
f32x4 r = _mm_rcp_ps(b);
f32x4 two = _mm_set1_ps(2.0f);
return _mm_mul_ps(r, _mm_sub_ps(two, _mm_mul_ps(b, r)));
}
// Fast divide: a * (1/b) using the NR reciprocal above
inline f32x4 v_div(f32x4 a, f32x4 b) { return _mm_mul_ps(a, v_rcp_nr(b)); }
inline f32x4 v_rsqrt_nr(f32x4 x)
{
f32x4 r = _mm_rsqrt_ps(x);
f32x4 half = _mm_set1_ps(0.5f);
f32x4 three = _mm_set1_ps(3.0f);
r = _mm_mul_ps(r, _mm_sub_ps(three, _mm_mul_ps(_mm_mul_ps(x, r), r)));
return _mm_mul_ps(r, half);
}
//------------------------------------------------------
// Vector intrinsic functions
//------------------------------------------------------
// full dot4
inline f32x4 v_dot4(f32x4 a, f32x4 b)
{
return _mm_dp_ps(a, b, 0xFF); // multiply all 4 lanes, broadcast the sum to every lane
}
// dot3 (ignores w)
inline f32x4 v_dot3(f32x4 a, f32x4 b)
{
return _mm_dp_ps(a, b, 0x7F); // multiply x,y,z only; broadcast so v_normalize3 can scale every lane
}
// cross product xyz only.
inline f32x4 v_cross(f32x4 a, f32x4 b)
{
f32x4 a_yzx = _mm_shuffle_ps(a, a, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 b_yzx = _mm_shuffle_ps(b, b, _MM_SHUFFLE(3, 0, 2, 1));
f32x4 c = _mm_sub_ps(_mm_mul_ps(a, b_yzx), _mm_mul_ps(a_yzx, b));
return _mm_shuffle_ps(c, c, _MM_SHUFFLE(3, 0, 2, 1));
}
inline f32x4 v_normalize3(f32x4 v)
{
f32x4 inv = v_rsqrt_nr(v_dot3(v, v));
return _mm_mul_ps(v, inv);
}
// adds all 4 lanes together.
inline f32x4 v_hadd4(f32x4 a)
{
// sum all 4 lanes with two horizontal adds
__m128 sum = _mm_hadd_ps(a, a);
return _mm_hadd_ps(sum, sum);
}
}