GPU: Use half width vector types where appropriate

Stenzek
2024-07-11 18:37:51 +10:00
parent d70f5ddb32
commit 59a2309a83
8 changed files with 2614 additions and 220 deletions


@@ -3,7 +3,6 @@
#pragma once
#include "common/assert.h"
#include "common/intrin.h"
#include "common/types.h"
@@ -14,7 +13,740 @@
#define GSVECTOR_HAS_SRLV 1
#endif
class GSVector2;
class GSVector2i;
class GSVector4;
class GSVector4i;
class alignas(16) GSVector2i
{
struct cxpr_init_tag
{
};
static constexpr cxpr_init_tag cxpr_init{};
constexpr GSVector2i(cxpr_init_tag, s32 x, s32 y) : I32{x, y, 0, 0} {}
constexpr GSVector2i(cxpr_init_tag, s16 s0, s16 s1, s16 s2, s16 s3) : I16{s0, s1, s2, s3, 0, 0, 0, 0} {}
constexpr GSVector2i(cxpr_init_tag, s8 b0, s8 b1, s8 b2, s8 b3, s8 b4, s8 b5, s8 b6, s8 b7)
: I8{b0, b1, b2, b3, b4, b5, b6, b7, 0, 0, 0, 0, 0, 0, 0, 0}
{
}
public:
union
{
struct
{
s32 x, y;
};
struct
{
s32 r, g;
};
float F32[4];
s8 I8[16];
s16 I16[8];
s32 I32[4];
s64 I64[2];
u8 U8[16];
u16 U16[8];
u32 U32[4];
u64 U64[2];
__m128i m;
};
GSVector2i() = default;
ALWAYS_INLINE constexpr static GSVector2i cxpr(s32 x, s32 y) { return GSVector2i(cxpr_init, x, y); }
ALWAYS_INLINE constexpr static GSVector2i cxpr(s32 x) { return GSVector2i(cxpr_init, x, x); }
ALWAYS_INLINE constexpr static GSVector2i cxpr16(s16 x) { return GSVector2i(cxpr_init, x, x, x, x); }
ALWAYS_INLINE constexpr static GSVector2i cxpr16(s16 s0, s16 s1, s16 s2, s16 s3)
{
return GSVector2i(cxpr_init, s0, s1, s2, s3);
}
ALWAYS_INLINE constexpr static GSVector2i cxpr8(s8 b0, s8 b1, s8 b2, s8 b3, s8 b4, s8 b5, s8 b6, s8 b7)
{
return GSVector2i(cxpr_init, b0, b1, b2, b3, b4, b5, b6, b7);
}
ALWAYS_INLINE GSVector2i(s32 x, s32 y) { m = _mm_set_epi32(0, 0, y, x); }
ALWAYS_INLINE GSVector2i(s16 s0, s16 s1, s16 s2, s16 s3) { m = _mm_set_epi16(0, 0, 0, 0, s3, s2, s1, s0); }
ALWAYS_INLINE constexpr GSVector2i(s8 b0, s8 b1, s8 b2, s8 b3, s8 b4, s8 b5, s8 b6, s8 b7)
: I8{b0, b1, b2, b3, b4, b5, b6, b7, 0, 0, 0, 0, 0, 0, 0, 0}
{
}
// MSVC has bad codegen for the constexpr version when applied to non-constexpr things (https://godbolt.org/z/h8qbn7),
// so leave the non-constexpr version default
ALWAYS_INLINE explicit GSVector2i(s32 i) { *this = i; }
ALWAYS_INLINE explicit GSVector2i(const GSVector2& v, bool truncate = true);
ALWAYS_INLINE static GSVector2i cast(const GSVector2& v);
ALWAYS_INLINE constexpr explicit GSVector2i(__m128i m) : m(m) {}
ALWAYS_INLINE void operator=(s32 i) { m = _mm_set1_epi32(i); }
ALWAYS_INLINE void operator=(__m128i m_) { m = m_; }
ALWAYS_INLINE operator __m128i() const { return m; }
ALWAYS_INLINE GSVector2i sat_i8(const GSVector2i& min, const GSVector2i& max) const
{
return max_i8(min).min_i8(max);
}
ALWAYS_INLINE GSVector2i sat_i16(const GSVector2i& min, const GSVector2i& max) const
{
return max_i16(min).min_i16(max);
}
ALWAYS_INLINE GSVector2i sat_i32(const GSVector2i& min, const GSVector2i& max) const
{
return max_i32(min).min_i32(max);
}
ALWAYS_INLINE GSVector2i sat_u8(const GSVector2i& min, const GSVector2i& max) const
{
return max_u8(min).min_u8(max);
}
ALWAYS_INLINE GSVector2i sat_u16(const GSVector2i& min, const GSVector2i& max) const
{
return max_u16(min).min_u16(max);
}
ALWAYS_INLINE GSVector2i sat_u32(const GSVector2i& min, const GSVector2i& max) const
{
return max_u32(min).min_u32(max);
}
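// The sat_* helpers clamp every lane into [min, max] by chaining the per-width max/min ops below.
// An illustrative sketch (values are hypothetical, not from this commit):
//   GSVector2i v = GSVector2i::cxpr16(300, -7, 12, 900);
//   GSVector2i c = v.sat_i16(GSVector2i::cxpr16(0), GSVector2i::cxpr16(255));
//   // each used 16-bit lane is now clamped to [0, 255]: {255, 0, 12, 255}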
ALWAYS_INLINE GSVector2i min_i8(const GSVector2i& v) const { return GSVector2i(_mm_min_epi8(m, v)); }
ALWAYS_INLINE GSVector2i max_i8(const GSVector2i& v) const { return GSVector2i(_mm_max_epi8(m, v)); }
ALWAYS_INLINE GSVector2i min_i16(const GSVector2i& v) const { return GSVector2i(_mm_min_epi16(m, v)); }
ALWAYS_INLINE GSVector2i max_i16(const GSVector2i& v) const { return GSVector2i(_mm_max_epi16(m, v)); }
ALWAYS_INLINE GSVector2i min_i32(const GSVector2i& v) const { return GSVector2i(_mm_min_epi32(m, v)); }
ALWAYS_INLINE GSVector2i max_i32(const GSVector2i& v) const { return GSVector2i(_mm_max_epi32(m, v)); }
ALWAYS_INLINE GSVector2i min_u8(const GSVector2i& v) const { return GSVector2i(_mm_min_epu8(m, v)); }
ALWAYS_INLINE GSVector2i max_u8(const GSVector2i& v) const { return GSVector2i(_mm_max_epu8(m, v)); }
ALWAYS_INLINE GSVector2i min_u16(const GSVector2i& v) const { return GSVector2i(_mm_min_epu16(m, v)); }
ALWAYS_INLINE GSVector2i max_u16(const GSVector2i& v) const { return GSVector2i(_mm_max_epu16(m, v)); }
ALWAYS_INLINE GSVector2i min_u32(const GSVector2i& v) const { return GSVector2i(_mm_min_epu32(m, v)); }
ALWAYS_INLINE GSVector2i max_u32(const GSVector2i& v) const { return GSVector2i(_mm_max_epu32(m, v)); }
ALWAYS_INLINE u8 minv_u8() const
{
__m128i vmin = _mm_min_epu8(m, _mm_shuffle_epi32(m, _MM_SHUFFLE(1, 1, 1, 1)));
return static_cast<u8>(std::min(
static_cast<u32>(_mm_extract_epi8(vmin, 0)),
std::min(static_cast<u32>(_mm_extract_epi8(vmin, 1)),
std::min(static_cast<u32>(_mm_extract_epi8(vmin, 2)), static_cast<u32>(_mm_extract_epi8(vmin, 3))))));
}
ALWAYS_INLINE u8 maxv_u8() const
{
__m128i vmax = _mm_max_epu8(m, _mm_shuffle_epi32(m, _MM_SHUFFLE(1, 1, 1, 1)));
return static_cast<u8>(std::max(
static_cast<u32>(_mm_extract_epi8(vmax, 0)),
std::max(static_cast<u32>(_mm_extract_epi8(vmax, 1)),
std::max(static_cast<u32>(_mm_extract_epi8(vmax, 2)), static_cast<u32>(_mm_extract_epi8(vmax, 3))))));
}
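// Both u8 reductions fold the upper 32 bits onto the lower with _MM_SHUFFLE(1, 1, 1, 1) (one
// vector min/max handles bytes 4..7 against bytes 0..3), then finish the last four candidates in
// scalar code. A scalar sketch of what minv_u8 computes for a vector v:
//   u8 r = v.U8[0];
//   for (int i = 1; i < 8; i++)
//     r = std::min(r, v.U8[i]);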
ALWAYS_INLINE u16 minv_u16() const
{
__m128i vmin = _mm_min_epu16(m, _mm_shuffle_epi32(m, _MM_SHUFFLE(1, 1, 1, 1)));
return static_cast<u16>(
std::min(static_cast<u32>(_mm_extract_epi16(vmin, 0)), static_cast<u32>(_mm_extract_epi16(vmin, 1))));
}
ALWAYS_INLINE u16 maxv_u16() const
{
__m128i vmax = _mm_max_epu16(m, _mm_shuffle_epi32(m, _MM_SHUFFLE(1, 1, 1, 1)));
return static_cast<u16>(
std::max<u32>(static_cast<u32>(_mm_extract_epi16(vmax, 0)), static_cast<u32>(_mm_extract_epi16(vmax, 1))));
}
ALWAYS_INLINE s32 minv_s32() const { return std::min<s32>(_mm_extract_epi32(m, 0), _mm_extract_epi32(m, 1)); }
ALWAYS_INLINE u32 minv_u32() const { return std::min<u32>(_mm_extract_epi32(m, 0), _mm_extract_epi32(m, 1)); }
ALWAYS_INLINE s32 maxv_s32() const { return std::max<s32>(_mm_extract_epi32(m, 0), _mm_extract_epi32(m, 1)); }
ALWAYS_INLINE u32 maxv_u32() const { return std::max<u32>(_mm_extract_epi32(m, 0), _mm_extract_epi32(m, 1)); }
ALWAYS_INLINE GSVector2i clamp8() const { return pu16().upl8(); }
ALWAYS_INLINE GSVector2i blend8(const GSVector2i& v, const GSVector2i& mask) const
{
return GSVector2i(_mm_blendv_epi8(m, v, mask));
}
template<s32 mask>
ALWAYS_INLINE GSVector2i blend16(const GSVector2i& v) const
{
return GSVector2i(_mm_blend_epi16(m, v, mask));
}
template<s32 mask>
ALWAYS_INLINE GSVector2i blend32(const GSVector2i& v) const
{
#if defined(__AVX2__)
return GSVector2i(_mm_blend_epi32(m, v.m, mask));
#else
constexpr s32 bit1 = ((mask & 2) * 3) << 1;
constexpr s32 bit0 = (mask & 1) * 3;
return blend16<bit1 | bit0>(v);
#endif
}
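// Without AVX2's _mm_blend_epi32, each 32-bit select bit is widened into a pair of 16-bit select
// bits for _mm_blend_epi16: bit 0 becomes 0b0011 and bit 1 becomes 0b1100. For example,
// blend32<0b10>(v) lowers to blend16<0b1100>(v): high lane from v, low lane from *this.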
ALWAYS_INLINE GSVector2i blend(const GSVector2i& v, const GSVector2i& mask) const
{
return GSVector2i(_mm_or_si128(_mm_andnot_si128(mask, m), _mm_and_si128(mask, v)));
}
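// The variable-mask blend is the classic bitwise select, (m & ~mask) | (v & mask), so the mask
// need not be lane-aligned; unlike blend8, which keys off only the sign bit of each byte, every
// bit of the mask participates here.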
ALWAYS_INLINE GSVector2i mix16(const GSVector2i& v) const { return blend16<0xa>(v); }
ALWAYS_INLINE GSVector2i shuffle8(const GSVector2i& mask) const { return GSVector2i(_mm_shuffle_epi8(m, mask)); }
ALWAYS_INLINE GSVector2i ps16() const { return GSVector2i(_mm_packs_epi16(m, m)); }
ALWAYS_INLINE GSVector2i pu16() const { return GSVector2i(_mm_packus_epi16(m, m)); }
ALWAYS_INLINE GSVector2i ps32() const { return GSVector2i(_mm_packs_epi32(m, m)); }
ALWAYS_INLINE GSVector2i pu32() const { return GSVector2i(_mm_packus_epi32(m, m)); }
ALWAYS_INLINE GSVector2i upl8(const GSVector2i& v) const { return GSVector2i(_mm_unpacklo_epi8(m, v)); }
ALWAYS_INLINE GSVector2i uph8(const GSVector2i& v) const { return GSVector2i(_mm_unpackhi_epi8(m, v)); }
ALWAYS_INLINE GSVector2i upl16(const GSVector2i& v) const { return GSVector2i(_mm_unpacklo_epi16(m, v)); }
ALWAYS_INLINE GSVector2i uph16(const GSVector2i& v) const { return GSVector2i(_mm_unpackhi_epi16(m, v)); }
ALWAYS_INLINE GSVector2i upl32(const GSVector2i& v) const { return GSVector2i(_mm_unpacklo_epi32(m, v)); }
ALWAYS_INLINE GSVector2i uph32(const GSVector2i& v) const { return GSVector2i(_mm_unpackhi_epi32(m, v)); }
ALWAYS_INLINE GSVector2i upl8() const { return GSVector2i(_mm_unpacklo_epi8(m, _mm_setzero_si128())); }
ALWAYS_INLINE GSVector2i uph8() const { return GSVector2i(_mm_unpackhi_epi8(m, _mm_setzero_si128())); }
ALWAYS_INLINE GSVector2i upl16() const { return GSVector2i(_mm_unpacklo_epi16(m, _mm_setzero_si128())); }
ALWAYS_INLINE GSVector2i uph16() const { return GSVector2i(_mm_unpackhi_epi16(m, _mm_setzero_si128())); }
ALWAYS_INLINE GSVector2i upl32() const { return GSVector2i(_mm_unpacklo_epi32(m, _mm_setzero_si128())); }
ALWAYS_INLINE GSVector2i uph32() const { return GSVector2i(_mm_unpackhi_epi32(m, _mm_setzero_si128())); }
#ifdef CPU_ARCH_SSE41
ALWAYS_INLINE GSVector2i i8to16() const { return GSVector2i(_mm_cvtepi8_epi16(m)); }
ALWAYS_INLINE GSVector2i u8to16() const { return GSVector2i(_mm_cvtepu8_epi16(m)); }
#endif
template<s32 i>
ALWAYS_INLINE GSVector2i srl() const
{
return GSVector2i(_mm_srli_si128(m, i));
}
template<s32 i>
ALWAYS_INLINE GSVector2i sll() const
{
return GSVector2i(_mm_slli_si128(m, i));
}
template<s32 i>
ALWAYS_INLINE GSVector2i sll16() const
{
return GSVector2i(_mm_slli_epi16(m, i));
}
ALWAYS_INLINE GSVector2i sll16(s32 i) const { return GSVector2i(_mm_sll_epi16(m, _mm_cvtsi32_si128(i))); }
#ifdef CPU_ARCH_AVX2
ALWAYS_INLINE GSVector2i sllv16(const GSVector2i& v) const { return GSVector2i(_mm_sllv_epi16(m, v.m)); }
#endif
template<s32 i>
ALWAYS_INLINE GSVector2i srl16() const
{
return GSVector2i(_mm_srli_epi16(m, i));
}
ALWAYS_INLINE GSVector2i srl16(s32 i) const { return GSVector2i(_mm_srl_epi16(m, _mm_cvtsi32_si128(i))); }
#ifdef CPU_ARCH_AVX2
ALWAYS_INLINE GSVector2i srlv16(const GSVector2i& v) const { return GSVector2i(_mm_srlv_epi16(m, v.m)); }
#endif
template<s32 i>
ALWAYS_INLINE GSVector2i sra16() const
{
return GSVector2i(_mm_srai_epi16(m, i));
}
ALWAYS_INLINE GSVector2i sra16(s32 i) const { return GSVector2i(_mm_sra_epi16(m, _mm_cvtsi32_si128(i))); }
#ifdef CPU_ARCH_AVX2
ALWAYS_INLINE GSVector2i srav16(const GSVector2i& v) const { return GSVector2i(_mm_srav_epi16(m, v.m)); }
#endif
template<s32 i>
ALWAYS_INLINE GSVector2i sll32() const
{
return GSVector2i(_mm_slli_epi32(m, i));
}
ALWAYS_INLINE GSVector2i sll32(s32 i) const { return GSVector2i(_mm_sll_epi32(m, _mm_cvtsi32_si128(i))); }
#ifdef CPU_ARCH_AVX2
ALWAYS_INLINE GSVector2i sllv32(const GSVector2i& v) const { return GSVector2i(_mm_sllv_epi32(m, v.m)); }
#endif
template<s32 i>
ALWAYS_INLINE GSVector2i srl32() const
{
return GSVector2i(_mm_srli_epi32(m, i));
}
ALWAYS_INLINE GSVector2i srl32(s32 i) const { return GSVector2i(_mm_srl_epi32(m, _mm_cvtsi32_si128(i))); }
#ifdef CPU_ARCH_AVX2
ALWAYS_INLINE GSVector2i srlv32(const GSVector2i& v) const { return GSVector2i(_mm_srlv_epi32(m, v.m)); }
#endif
template<s32 i>
ALWAYS_INLINE GSVector2i sra32() const
{
return GSVector2i(_mm_srai_epi32(m, i));
}
ALWAYS_INLINE GSVector2i sra32(s32 i) const { return GSVector2i(_mm_sra_epi32(m, _mm_cvtsi32_si128(i))); }
#ifdef CPU_ARCH_AVX2
ALWAYS_INLINE GSVector2i srav32(const GSVector2i& v) const { return GSVector2i(_mm_srav_epi32(m, v.m)); }
#endif
ALWAYS_INLINE GSVector2i add8(const GSVector2i& v) const { return GSVector2i(_mm_add_epi8(m, v.m)); }
ALWAYS_INLINE GSVector2i add16(const GSVector2i& v) const { return GSVector2i(_mm_add_epi16(m, v.m)); }
ALWAYS_INLINE GSVector2i add32(const GSVector2i& v) const { return GSVector2i(_mm_add_epi32(m, v.m)); }
ALWAYS_INLINE GSVector2i adds8(const GSVector2i& v) const { return GSVector2i(_mm_adds_epi8(m, v.m)); }
ALWAYS_INLINE GSVector2i adds16(const GSVector2i& v) const { return GSVector2i(_mm_adds_epi16(m, v.m)); }
ALWAYS_INLINE GSVector2i addus8(const GSVector2i& v) const { return GSVector2i(_mm_adds_epu8(m, v.m)); }
ALWAYS_INLINE GSVector2i addus16(const GSVector2i& v) const { return GSVector2i(_mm_adds_epu16(m, v.m)); }
ALWAYS_INLINE GSVector2i sub8(const GSVector2i& v) const { return GSVector2i(_mm_sub_epi8(m, v.m)); }
ALWAYS_INLINE GSVector2i sub16(const GSVector2i& v) const { return GSVector2i(_mm_sub_epi16(m, v.m)); }
ALWAYS_INLINE GSVector2i sub32(const GSVector2i& v) const { return GSVector2i(_mm_sub_epi32(m, v.m)); }
ALWAYS_INLINE GSVector2i subs8(const GSVector2i& v) const { return GSVector2i(_mm_subs_epi8(m, v.m)); }
ALWAYS_INLINE GSVector2i subs16(const GSVector2i& v) const { return GSVector2i(_mm_subs_epi16(m, v.m)); }
ALWAYS_INLINE GSVector2i subus8(const GSVector2i& v) const { return GSVector2i(_mm_subs_epu8(m, v.m)); }
ALWAYS_INLINE GSVector2i subus16(const GSVector2i& v) const { return GSVector2i(_mm_subs_epu16(m, v.m)); }
ALWAYS_INLINE GSVector2i avg8(const GSVector2i& v) const { return GSVector2i(_mm_avg_epu8(m, v.m)); }
ALWAYS_INLINE GSVector2i avg16(const GSVector2i& v) const { return GSVector2i(_mm_avg_epu16(m, v.m)); }
ALWAYS_INLINE GSVector2i mul16l(const GSVector2i& v) const { return GSVector2i(_mm_mullo_epi16(m, v.m)); }
ALWAYS_INLINE GSVector2i mul32l(const GSVector2i& v) const { return GSVector2i(_mm_mullo_epi32(m, v.m)); }
ALWAYS_INLINE bool eq(const GSVector2i& v) const { return eq8(v).alltrue(); }
ALWAYS_INLINE GSVector2i eq8(const GSVector2i& v) const { return GSVector2i(_mm_cmpeq_epi8(m, v.m)); }
ALWAYS_INLINE GSVector2i eq16(const GSVector2i& v) const { return GSVector2i(_mm_cmpeq_epi16(m, v.m)); }
ALWAYS_INLINE GSVector2i eq32(const GSVector2i& v) const { return GSVector2i(_mm_cmpeq_epi32(m, v.m)); }
ALWAYS_INLINE GSVector2i neq8(const GSVector2i& v) const { return ~eq8(v); }
ALWAYS_INLINE GSVector2i neq16(const GSVector2i& v) const { return ~eq16(v); }
ALWAYS_INLINE GSVector2i neq32(const GSVector2i& v) const { return ~eq32(v); }
ALWAYS_INLINE GSVector2i gt8(const GSVector2i& v) const { return GSVector2i(_mm_cmpgt_epi8(m, v.m)); }
ALWAYS_INLINE GSVector2i gt16(const GSVector2i& v) const { return GSVector2i(_mm_cmpgt_epi16(m, v.m)); }
ALWAYS_INLINE GSVector2i gt32(const GSVector2i& v) const { return GSVector2i(_mm_cmpgt_epi32(m, v.m)); }
ALWAYS_INLINE GSVector2i ge8(const GSVector2i& v) const { return ~GSVector2i(_mm_cmplt_epi8(m, v.m)); }
ALWAYS_INLINE GSVector2i ge16(const GSVector2i& v) const { return ~GSVector2i(_mm_cmplt_epi16(m, v.m)); }
ALWAYS_INLINE GSVector2i ge32(const GSVector2i& v) const { return ~GSVector2i(_mm_cmplt_epi32(m, v.m)); }
ALWAYS_INLINE GSVector2i lt8(const GSVector2i& v) const { return GSVector2i(_mm_cmplt_epi8(m, v.m)); }
ALWAYS_INLINE GSVector2i lt16(const GSVector2i& v) const { return GSVector2i(_mm_cmplt_epi16(m, v.m)); }
ALWAYS_INLINE GSVector2i lt32(const GSVector2i& v) const { return GSVector2i(_mm_cmplt_epi32(m, v.m)); }
ALWAYS_INLINE GSVector2i le8(const GSVector2i& v) const { return ~GSVector2i(_mm_cmpgt_epi8(m, v.m)); }
ALWAYS_INLINE GSVector2i le16(const GSVector2i& v) const { return ~GSVector2i(_mm_cmpgt_epi16(m, v.m)); }
ALWAYS_INLINE GSVector2i le32(const GSVector2i& v) const { return ~GSVector2i(_mm_cmpgt_epi32(m, v.m)); }
ALWAYS_INLINE GSVector2i andnot(const GSVector2i& v) const { return GSVector2i(_mm_andnot_si128(v.m, m)); }
ALWAYS_INLINE s32 mask() const { return (_mm_movemask_epi8(m) & 0xff); }
ALWAYS_INLINE bool alltrue() const { return (mask() == 0xff); }
ALWAYS_INLINE bool allfalse() const { return (mask() == 0x00); }
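// mask() keeps only the low 8 movemask bits because a GSVector2i lives in the low 8 bytes of the
// 128-bit register, so alltrue() asks whether every byte of a comparison result (eq8, gt16, ...)
// has its sign bit set. Illustrative check that both 32-bit lanes match:
//   if (a.eq32(b).alltrue()) { /* a == b */ }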
template<s32 i>
ALWAYS_INLINE GSVector2i insert8(s32 a) const
{
return GSVector2i(_mm_insert_epi8(m, a, i));
}
template<s32 i>
ALWAYS_INLINE s32 extract8() const
{
return _mm_extract_epi8(m, i);
}
template<s32 i>
ALWAYS_INLINE GSVector2i insert16(s32 a) const
{
return GSVector2i(_mm_insert_epi16(m, a, i));
}
template<s32 i>
ALWAYS_INLINE s32 extract16() const
{
return _mm_extract_epi16(m, i);
}
template<s32 i>
ALWAYS_INLINE GSVector2i insert32(s32 a) const
{
return GSVector2i(_mm_insert_epi32(m, a, i));
}
template<s32 i>
ALWAYS_INLINE s32 extract32() const
{
if constexpr (i == 0)
return GSVector2i::store(*this);
return _mm_extract_epi32(m, i);
}
ALWAYS_INLINE static GSVector2i load32(const void* p) { return GSVector2i(_mm_loadu_si32(p)); }
ALWAYS_INLINE static GSVector2i load(const void* p) { return GSVector2i(_mm_loadl_epi64((__m128i*)p)); }
ALWAYS_INLINE static GSVector2i load(s32 i) { return GSVector2i(_mm_cvtsi32_si128(i)); }
ALWAYS_INLINE static GSVector2i loadq(s64 i) { return GSVector2i(_mm_cvtsi64_si128(i)); }
ALWAYS_INLINE static void store(void* p, const GSVector2i& v) { _mm_storel_epi64((__m128i*)p, v.m); }
ALWAYS_INLINE static void store32(void* p, const GSVector2i& v) { _mm_storeu_si32(p, v); }
ALWAYS_INLINE static s32 store(const GSVector2i& v) { return _mm_cvtsi128_si32(v.m); }
ALWAYS_INLINE static s64 storeq(const GSVector2i& v) { return _mm_cvtsi128_si64(v.m); }
ALWAYS_INLINE void operator&=(const GSVector2i& v) { m = _mm_and_si128(m, v); }
ALWAYS_INLINE void operator|=(const GSVector2i& v) { m = _mm_or_si128(m, v); }
ALWAYS_INLINE void operator^=(const GSVector2i& v) { m = _mm_xor_si128(m, v); }
ALWAYS_INLINE friend GSVector2i operator&(const GSVector2i& v1, const GSVector2i& v2)
{
return GSVector2i(_mm_and_si128(v1, v2));
}
ALWAYS_INLINE friend GSVector2i operator|(const GSVector2i& v1, const GSVector2i& v2)
{
return GSVector2i(_mm_or_si128(v1, v2));
}
ALWAYS_INLINE friend GSVector2i operator^(const GSVector2i& v1, const GSVector2i& v2)
{
return GSVector2i(_mm_xor_si128(v1, v2));
}
ALWAYS_INLINE friend GSVector2i operator&(const GSVector2i& v, s32 i) { return v & GSVector2i(i); }
ALWAYS_INLINE friend GSVector2i operator|(const GSVector2i& v, s32 i) { return v | GSVector2i(i); }
ALWAYS_INLINE friend GSVector2i operator^(const GSVector2i& v, s32 i) { return v ^ GSVector2i(i); }
ALWAYS_INLINE friend GSVector2i operator~(const GSVector2i& v) { return v ^ v.eq32(v); }
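// There is no SSE NOT instruction: v.eq32(v) compares the vector with itself to synthesize an
// all-ones constant, and XOR against all-ones flips every bit.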
ALWAYS_INLINE static GSVector2i zero() { return GSVector2i(_mm_setzero_si128()); }
ALWAYS_INLINE GSVector2i xy() const { return GSVector2i(m); }
ALWAYS_INLINE GSVector2i xx() const { return GSVector2i(_mm_shuffle_epi32(m, _MM_SHUFFLE(3, 2, 0, 0))); }
ALWAYS_INLINE GSVector2i yx() const { return GSVector2i(_mm_shuffle_epi32(m, _MM_SHUFFLE(3, 2, 0, 1))); }
ALWAYS_INLINE GSVector2i yy() const { return GSVector2i(_mm_shuffle_epi32(m, _MM_SHUFFLE(3, 2, 1, 1))); }
};
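// An illustrative use of the new half-width integer type (values are hypothetical, not from this
// commit): clamping a 2D coordinate against a framebuffer extent without widening to GSVector4i.
//   const GSVector2i pos(-5, 1024);
//   const GSVector2i clamped = pos.sat_i32(GSVector2i::cxpr(0), GSVector2i::cxpr(1023, 511));
//   // clamped.x == 0, clamped.y == 511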
class alignas(16) GSVector2
{
struct cxpr_init_tag
{
};
static constexpr cxpr_init_tag cxpr_init{};
constexpr GSVector2(cxpr_init_tag, float x, float y) : F32{x, y} {}
constexpr GSVector2(cxpr_init_tag, int x, int y) : I32{x, y} {}
public:
union
{
struct
{
float x, y;
};
struct
{
float r, g;
};
float F32[4];
double F64[2];
s8 I8[16];
s16 I16[8];
s32 I32[4];
s64 I64[2];
u8 U8[16];
u16 U16[8];
u32 U32[4];
u64 U64[2];
__m128 m;
};
GSVector2() = default;
constexpr static GSVector2 cxpr(float x, float y) { return GSVector2(cxpr_init, x, y); }
constexpr static GSVector2 cxpr(float x) { return GSVector2(cxpr_init, x, x); }
constexpr static GSVector2 cxpr(int x, int y) { return GSVector2(cxpr_init, x, y); }
constexpr static GSVector2 cxpr(int x) { return GSVector2(cxpr_init, x, x); }
ALWAYS_INLINE GSVector2(float x, float y) { m = _mm_set_ps(0, 0, y, x); }
ALWAYS_INLINE GSVector2(int x, int y)
{
GSVector2i v_(x, y);
m = _mm_cvtepi32_ps(v_.m);
}
ALWAYS_INLINE constexpr explicit GSVector2(__m128 m) : m(m) {}
ALWAYS_INLINE explicit GSVector2(__m128d m) : m(_mm_castpd_ps(m)) {}
ALWAYS_INLINE explicit GSVector2(float f) { *this = f; }
ALWAYS_INLINE explicit GSVector2(int i)
{
#ifdef CPU_ARCH_AVX2
m = _mm_cvtepi32_ps(_mm_broadcastd_epi32(_mm_cvtsi32_si128(i)));
#else
*this = GSVector2(GSVector2i(i));
#endif
}
ALWAYS_INLINE explicit GSVector2(const GSVector2i& v);
ALWAYS_INLINE static GSVector2 cast(const GSVector2i& v);
ALWAYS_INLINE void operator=(float f)
{
#ifdef CPU_ARCH_AVX2
m = _mm_broadcastss_ps(_mm_load_ss(&f));
#else
m = _mm_set1_ps(f);
#endif
}
ALWAYS_INLINE void operator=(__m128 m_) { this->m = m_; }
ALWAYS_INLINE operator __m128() const { return m; }
ALWAYS_INLINE GSVector2 abs() const { return *this & cast(GSVector2i::cxpr(0x7fffffff)); }
ALWAYS_INLINE GSVector2 neg() const { return *this ^ cast(GSVector2i::cxpr(0x80000000)); }
ALWAYS_INLINE GSVector2 rcp() const { return GSVector2(_mm_rcp_ps(m)); }
ALWAYS_INLINE GSVector2 rcpnr() const
{
GSVector2 v_ = rcp();
return (v_ + v_) - (v_ * v_) * *this;
}
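// rcpnr refines the ~12-bit _mm_rcp_ps estimate x0 with one Newton-Raphson step for 1/a:
//   x1 = x0 * (2 - a * x0) = (x0 + x0) - (x0 * x0) * a
// which is exactly the expression above and roughly doubles the number of accurate bits.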
ALWAYS_INLINE GSVector2 floor() const
{
return GSVector2(_mm_round_ps(m, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
}
ALWAYS_INLINE GSVector2 ceil() const { return GSVector2(_mm_round_ps(m, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)); }
ALWAYS_INLINE GSVector2 sat(const GSVector2& min, const GSVector2& max) const
{
return GSVector2(_mm_min_ps(_mm_max_ps(m, min), max));
}
ALWAYS_INLINE GSVector2 sat(const float scale = 255) const { return sat(zero(), GSVector2(scale)); }
ALWAYS_INLINE GSVector2 clamp(const float scale = 255) const { return min(GSVector2(scale)); }
ALWAYS_INLINE GSVector2 min(const GSVector2& v) const { return GSVector2(_mm_min_ps(m, v)); }
ALWAYS_INLINE GSVector2 max(const GSVector2& v) const { return GSVector2(_mm_max_ps(m, v)); }
template<int mask>
ALWAYS_INLINE GSVector2 blend32(const GSVector2& v) const
{
return GSVector2(_mm_blend_ps(m, v, mask));
}
ALWAYS_INLINE GSVector2 blend32(const GSVector2& v, const GSVector2& mask) const
{
return GSVector2(_mm_blendv_ps(m, v, mask));
}
ALWAYS_INLINE GSVector2 andnot(const GSVector2& v) const { return GSVector2(_mm_andnot_ps(v.m, m)); }
ALWAYS_INLINE int mask() const { return (_mm_movemask_ps(m) & 0x3); }
ALWAYS_INLINE bool alltrue() const { return (mask() == 0x3); }
ALWAYS_INLINE bool allfalse() const { return (mask() == 0x0); }
ALWAYS_INLINE GSVector2 replace_nan(const GSVector2& v) const { return v.blend32(*this, *this == *this); }
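// replace_nan leans on the IEEE rule that NaN != NaN: (*this == *this) produces all-ones in every
// non-NaN lane, so the blend keeps *this where it is a number and substitutes v in NaN lanes.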
template<int src, int dst>
ALWAYS_INLINE GSVector2 insert32(const GSVector2& v) const
{
if constexpr (src == dst)
return GSVector2(_mm_blend_ps(m, v.m, 1 << src));
else
return GSVector2(_mm_insert_ps(m, v.m, _MM_MK_INSERTPS_NDX(src, dst, 0)));
}
template<int i>
ALWAYS_INLINE int extract32() const
{
return _mm_extract_ps(m, i);
}
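// Note that _mm_extract_ps returns the raw bit pattern of the selected lane as an int rather than
// a converted value; callers wanting the float must bit-cast the result back.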
ALWAYS_INLINE static GSVector2 zero() { return GSVector2(_mm_setzero_ps()); }
ALWAYS_INLINE static GSVector2 xffffffff() { return zero() == zero(); }
ALWAYS_INLINE static GSVector2 load(const void* p) { return GSVector2(_mm_castpd_ps(_mm_load_sd((double*)p))); }
ALWAYS_INLINE static GSVector2 load(float f) { return GSVector2(_mm_load_ss(&f)); }
ALWAYS_INLINE static void store(void* p, const GSVector2& v) { _mm_store_sd((double*)p, _mm_castps_pd(v.m)); }
ALWAYS_INLINE GSVector2 operator-() const { return neg(); }
ALWAYS_INLINE void operator+=(const GSVector2& v_) { m = _mm_add_ps(m, v_); }
ALWAYS_INLINE void operator-=(const GSVector2& v_) { m = _mm_sub_ps(m, v_); }
ALWAYS_INLINE void operator*=(const GSVector2& v_) { m = _mm_mul_ps(m, v_); }
ALWAYS_INLINE void operator/=(const GSVector2& v_) { m = _mm_div_ps(m, v_); }
ALWAYS_INLINE void operator+=(float f) { *this += GSVector2(f); }
ALWAYS_INLINE void operator-=(float f) { *this -= GSVector2(f); }
ALWAYS_INLINE void operator*=(float f) { *this *= GSVector2(f); }
ALWAYS_INLINE void operator/=(float f) { *this /= GSVector2(f); }
ALWAYS_INLINE void operator&=(const GSVector2& v_) { m = _mm_and_ps(m, v_); }
ALWAYS_INLINE void operator|=(const GSVector2& v_) { m = _mm_or_ps(m, v_); }
ALWAYS_INLINE void operator^=(const GSVector2& v_) { m = _mm_xor_ps(m, v_); }
ALWAYS_INLINE friend GSVector2 operator+(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_add_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator-(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_sub_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator*(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_mul_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator/(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_div_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator+(const GSVector2& v, float f) { return v + GSVector2(f); }
ALWAYS_INLINE friend GSVector2 operator-(const GSVector2& v, float f) { return v - GSVector2(f); }
ALWAYS_INLINE friend GSVector2 operator*(const GSVector2& v, float f) { return v * GSVector2(f); }
ALWAYS_INLINE friend GSVector2 operator/(const GSVector2& v, float f) { return v / GSVector2(f); }
ALWAYS_INLINE friend GSVector2 operator&(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_and_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator|(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_or_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator^(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_xor_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator==(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_cmpeq_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator!=(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_cmpneq_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator>(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_cmpgt_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator<(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_cmplt_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator>=(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_cmpge_ps(v1, v2));
}
ALWAYS_INLINE friend GSVector2 operator<=(const GSVector2& v1, const GSVector2& v2)
{
return GSVector2(_mm_cmple_ps(v1, v2));
}
ALWAYS_INLINE GSVector2 xy() const { return *this; }
ALWAYS_INLINE GSVector2 xx() const { return GSVector2(_mm_shuffle_ps(m, m, _MM_SHUFFLE(3, 2, 0, 0))); }
ALWAYS_INLINE GSVector2 yx() const { return GSVector2(_mm_shuffle_ps(m, m, _MM_SHUFFLE(3, 2, 0, 1))); }
ALWAYS_INLINE GSVector2 yy() const { return GSVector2(_mm_shuffle_ps(m, m, _MM_SHUFFLE(3, 2, 1, 1))); }
};
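// An illustrative sketch for the float half-width type (values are hypothetical): scaling UV
// coordinates and flooring them in one register before converting to integers.
//   const GSVector2 uv = GSVector2(0.75f, 0.4f) * GSVector2(256.0f);
//   const GSVector2i texel = GSVector2i(uv.floor()); // (192, 102)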
class alignas(16) GSVector4i
{
@@ -100,21 +832,20 @@ public:
{
}
ALWAYS_INLINE GSVector4i(const GSVector4i& v) { m = v.m; }
ALWAYS_INLINE explicit GSVector4i(const GSVector2i& v) { m = v.m; }
// MSVC has bad codegen for the constexpr version when applied to non-constexpr things (https://godbolt.org/z/h8qbn7),
// so leave the non-constexpr version default
ALWAYS_INLINE explicit GSVector4i(s32 i) { *this = i; }
ALWAYS_INLINE explicit GSVector4i(const GSVector2& v, bool truncate = true);
ALWAYS_INLINE explicit GSVector4i(const GSVector4& v, bool truncate = true);
ALWAYS_INLINE static GSVector4i cast(const GSVector4& v);
ALWAYS_INLINE constexpr explicit GSVector4i(__m128i m) : m(m) {}
ALWAYS_INLINE void operator=(const GSVector4i& v) { m = v.m; }
ALWAYS_INLINE void operator=(s32 i) { m = _mm_set1_epi32(i); }
ALWAYS_INLINE void operator=(__m128i m_) { m = m_; }
@@ -141,7 +872,6 @@ public:
ALWAYS_INLINE bool rintersects(const GSVector4i& v) const { return !rintersect(v).rempty(); }
ALWAYS_INLINE bool rcontains(const GSVector4i& v) const { return rintersect(v).eq(v); }
//
ALWAYS_INLINE u32 rgba32() const
@@ -685,7 +1415,10 @@ public:
return GSVector4i(_mm_castps_si128(_mm_loadh_pi(_mm_setzero_ps(), (__m64*)p)));
}
ALWAYS_INLINE static GSVector4i loadh(const GSVector2i& v)
{
return GSVector4i(_mm_unpacklo_epi64(_mm_setzero_si128(), v.m));
}
template<bool aligned>
ALWAYS_INLINE static GSVector4i load(const void* p)
@@ -755,19 +1488,9 @@ public:
ALWAYS_INLINE GSVector4i xyxy(const GSVector4i& v) const { return upl64(v); }
ALWAYS_INLINE GSVector2i xy() const { return GSVector2i(m); }
ALWAYS_INLINE GSVector2i zw() const { return GSVector2i(_mm_shuffle_epi32(m, _MM_SHUFFLE(3, 2, 3, 2))); }
// clang-format off
@@ -874,9 +1597,12 @@ public:
m = _mm_cvtepi32_ps(_mm_unpacklo_epi32(_mm_cvtsi32_si128(x), _mm_cvtsi32_si128(y)));
}
ALWAYS_INLINE explicit GSVector4(const GSVector2& v) : m(v.m) {}
ALWAYS_INLINE explicit GSVector4(const GSVector2i& v)
: m(_mm_castpd_ps(_mm_unpacklo_pd(_mm_castps_pd(_mm_cvtepi32_ps(v.m)), _mm_setzero_pd())))
{
}
ALWAYS_INLINE constexpr explicit GSVector4(__m128 m) : m(m) {}
@@ -916,19 +1642,6 @@ public:
ALWAYS_INLINE operator __m128() const { return m; }
/// Makes Clang think that the whole vector is needed, preventing it from changing shuffles around because it thinks
/// we don't need the whole vector. Useful for e.g. preventing clang from optimizing shuffles that remove
/// possibly-denormal garbage data from vectors before computing with them
ALWAYS_INLINE GSVector4 noopt()
{
// Note: Clang is currently the only compiler that attempts to optimize vector intrinsics, if that changes in the
// future the implementation should be updated
#ifdef __clang__
__asm__("" : "+x"(m)::);
#endif
return *this;
}
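// The empty asm statement's "+x" constraint marks the XMM register as read/write, so Clang must
// assume it changed and cannot reorder or delete the shuffles feeding it. The pattern in
// isolation, as a sketch:
//   __m128 keep(__m128 v) { __asm__("" : "+x"(v)::); return v; }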
u32 rgba32() const { return GSVector4i(*this).rgba32(); }
ALWAYS_INLINE static GSVector4 rgba32(u32 rgba) { return GSVector4(GSVector4i::load((int)rgba).u8to32()); }
@@ -948,7 +1661,10 @@ public:
return (v_ + v_) - (v_ * v_) * *this;
}
ALWAYS_INLINE GSVector4 floor() const
{
return GSVector4(_mm_round_ps(m, _MM_FROUND_TO_NEG_INF | _MM_FROUND_NO_EXC));
}
ALWAYS_INLINE GSVector4 ceil() const { return GSVector4(_mm_round_ps(m, _MM_FROUND_TO_POS_INF | _MM_FROUND_NO_EXC)); }
@@ -1289,6 +2005,26 @@ public:
}
};
ALWAYS_INLINE GSVector2i::GSVector2i(const GSVector2& v, bool truncate)
{
m = truncate ? _mm_cvttps_epi32(v) : _mm_cvtps_epi32(v);
}
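// The truncate flag picks between the two SSE conversions: _mm_cvttps_epi32 always rounds toward
// zero (matching a C-style float->int cast), while _mm_cvtps_epi32 follows the current MXCSR
// rounding mode, round-to-nearest-even by default; e.g. 2.7f becomes 2 truncated, 3 otherwise.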
ALWAYS_INLINE GSVector2::GSVector2(const GSVector2i& v)
{
m = _mm_cvtepi32_ps(v);
}
ALWAYS_INLINE GSVector2i GSVector2i::cast(const GSVector2& v)
{
return GSVector2i(_mm_castps_si128(v.m));
}
ALWAYS_INLINE GSVector2 GSVector2::cast(const GSVector2i& v)
{
return GSVector2(_mm_castsi128_ps(v.m));
}
ALWAYS_INLINE GSVector4i::GSVector4i(const GSVector4& v, bool truncate)
{
m = truncate ? _mm_cvttps_epi32(v) : _mm_cvtps_epi32(v);