// Mirror of https://github.com/TorqueGameEngines/Torque3D.git
// Synced 2026-03-20 04:40:54 +00:00
// Commit note: added float3; restructured the classes to look more like the
// final version of the x86 classes.
#include "float3_dispatch.h"
#include <arm_neon.h> // NEON intrinsics
namespace
{
typedef float32x4_t f32x4;
// Load 3 floats into 4-wide SIMD, zero the 4th lane
|
|
inline f32x4 v_load3(const float* p)
|
|
{
|
|
// Load first 3 floats
|
|
float32x2_t low = vld1_f32(p); // load p[0], p[1]
|
|
float32x2_t high = vld1_dup_f32(p + 2); // load p[2], duplicate to second lane
|
|
return vcombine_f32(low, high); // combine into 128-bit vector
|
|
}
|
|
|
|
// Store 3 floats from SIMD register back to memory
|
|
inline void v_store3(float* dst, f32x4 v)
|
|
{
|
|
vst1_f32(dst, vget_low_f32(v)); // store first 2 floats
|
|
dst[2] = vgetq_lane_f32(v, 2); // store 3rd element
|
|
}
|
|
|
|
// extract just the first lane.
|
|
inline float v_extract0(f32x4 v) { return vgetq_lane_f32(v, 0); }
|
|
|
|
// Broadcast a single float across all 4 lanes
|
|
inline f32x4 v_set1(float s) { return vdupq_n_f32(s); }
|
|
|
|
// Element-wise multiply
|
|
inline f32x4 v_mul(f32x4 a, f32x4 b) { return vmulq_f32(a, b); }
|
|
|
|
// Element-wise divide (fast approximate)
|
|
inline f32x4 v_div_fast(f32x4 a, f32x4 b)
|
|
{
|
|
float32x4_t rcp = vrecpeq_f32(b);
|
|
// Optional refinement for better precision
|
|
rcp = vmulq_f32(vrecpsq_f32(b, rcp), rcp);
|
|
return vmulq_f32(a, rcp);
|
|
}
|
|
|
|
inline f32x4 v_div(f32x4 a, f32x4 b) { return v_div_fast(a, b); }
|
|
|
|
// Element-wise add
|
|
inline f32x4 v_add(f32x4 a, f32x4 b) { return vaddq_f32(a, b); }
|
|
|
|
// Element-wise subtract
|
|
inline f32x4 v_sub(f32x4 a, f32x4 b) { return vsubq_f32(a, b); }
|
|
|
|
// Horizontal sum of all elements (for dot product, length, etc.)
|
|
inline f32x4 v_hadd3(f32x4 a)
|
|
{
|
|
float32x2_t sum_pair = vadd_f32(vget_low_f32(a), vget_high_f32(a)); // sum pairs
|
|
float32x2_t sum = vpadd_f32(sum_pair, sum_pair); // horizontal add
|
|
return vsetq_lane_f32(vget_lane_f32(sum, 0), a, 0); // total sum in lane 0
|
|
}
|
|
|
|
// Cross product
|
|
inline f32x4 v_cross(f32x4 a, f32x4 b)
|
|
{
|
|
// Extract xyz as separate registers
|
|
float32x4_t a_yzx = vextq_f32(a, a, 1); // rotate left: y,z,x,w
|
|
float32x4_t b_yzx = vextq_f32(b, b, 1);
|
|
|
|
float32x4_t mul1 = vmulq_f32(a, b_yzx);
|
|
float32x4_t mul2 = vmulq_f32(a_yzx, b);
|
|
|
|
float32x4_t c = vsubq_f32(mul1, mul2);
|
|
|
|
// Rotate back to x,y,z and keep w from original 'a'
|
|
float32x4_t xyz = vextq_f32(c, c, 3); // x,y,z in lanes 0..2
|
|
float32x4_t result = vsetq_lane_f32(vgetq_lane_f32(a, 3), xyz, 3); // preserve w
|
|
return result;
|
|
}
|
|
}
#include "float3_impl.inl"
namespace math_backend::float3::dispatch
|
|
{
|
|
// Install NEON backend
|
|
void install_neon()
|
|
{
|
|
gFloat3.add = float3_add_impl;
|
|
gFloat3.sub = float3_sub_impl;
|
|
gFloat3.mul = float3_mul_impl;
|
|
gFloat3.mul_scalar = float3_mul_scalar_impl;
|
|
gFloat3.div = float3_div_impl;
|
|
gFloat3.div_scalar = float3_div_scalar_impl;
|
|
gFloat3.dot = float3_dot_impl;
|
|
gFloat3.length = float3_length_impl;
|
|
gFloat3.lengthSquared = float3_length_squared_impl;
|
|
gFloat3.normalize = float3_normalize_impl;
|
|
gFloat3.normalize_mag = float3_normalize_mag_impl;
|
|
gFloat3.lerp = float3_lerp_impl;
|
|
gFloat3.cross = float3_cross_impl;
|
|
}
|
|
}