added vmul() and vsignum()

This commit is contained in:
Ahmet Inan 2020-07-12 22:44:46 +02:00
commit 48303e7f58
4 changed files with 263 additions and 0 deletions

60
avx2.hh
View file

@ -455,6 +455,22 @@ inline SIMD<uint16_t, 16> vqsub(SIMD<uint16_t, 16> a, SIMD<uint16_t, 16> b)
return tmp;
}
template <>
inline SIMD<float, 8> vmul(SIMD<float, 8> a, SIMD<float, 8> b)
{
	// Lane-wise single-precision product via the AVX packed multiply.
	SIMD<float, 8> product;
	product.m = _mm256_mul_ps(a.m, b.m);
	return product;
}
template <>
inline SIMD<double, 4> vmul(SIMD<double, 4> a, SIMD<double, 4> b)
{
	// Lane-wise double-precision product via the AVX packed multiply.
	SIMD<double, 4> product;
	product.m = _mm256_mul_pd(a.m, b.m);
	return product;
}
template <>
inline SIMD<float, 8> vabs(SIMD<float, 8> a)
{
@ -495,6 +511,50 @@ inline SIMD<int32_t, 8> vqabs(SIMD<int32_t, 8> a)
return tmp;
}
template <>
inline SIMD<float, 8> vsignum(SIMD<float, 8> a)
{
	// signum(a): -1.f, 0.f or +1.f per lane, branch-free:
	//   set1(-0.f) & a       -> isolates each lane's sign bit
	//   set1(1.f)  | sign    -> +-1.f carrying a's sign
	//   andnot(a == 0, ...)  -> forces lanes comparing equal to zero to 0.f
	// NOTE(review): the compare is ordered (_CMP_EQ_OQ), so a NaN lane fails
	// it and comes out as +-1.f (taking the NaN's sign bit), not 0.f.
	SIMD<float, 8> tmp;
	tmp.m = _mm256_andnot_ps(
		_mm256_cmp_ps(a.m, _mm256_setzero_ps(), _CMP_EQ_OQ),
		_mm256_or_ps(_mm256_set1_ps(1.f), _mm256_and_ps(_mm256_set1_ps(-0.f), a.m)));
	return tmp;
}
template <>
inline SIMD<double, 4> vsignum(SIMD<double, 4> a)
{
	// signum(a): -1., 0. or +1. per lane, branch-free:
	//   set1(-0.) & a        -> isolates each lane's sign bit
	//   set1(1.)  | sign     -> +-1. carrying a's sign
	//   andnot(a == 0, ...)  -> forces lanes comparing equal to zero to 0.
	// NOTE(review): ordered compare (_CMP_EQ_OQ) means NaN lanes come out
	// as +-1. (per the NaN's sign bit), not 0.
	SIMD<double, 4> tmp;
	tmp.m = _mm256_andnot_pd(
		_mm256_cmp_pd(a.m, _mm256_setzero_pd(), _CMP_EQ_OQ),
		_mm256_or_pd(_mm256_set1_pd(1.), _mm256_and_pd(_mm256_set1_pd(-0.), a.m)));
	return tmp;
}
template <>
inline SIMD<int8_t, 32> vsignum(SIMD<int8_t, 32> a)
{
	// PSIGNB with a constant 1: each lane becomes -1, 0 or +1 per a's sign.
	SIMD<int8_t, 32> sgn;
	const __m256i one = _mm256_set1_epi8(1);
	sgn.m = _mm256_sign_epi8(one, a.m);
	return sgn;
}
template <>
inline SIMD<int16_t, 16> vsignum(SIMD<int16_t, 16> a)
{
	// PSIGNW with a constant 1: each lane becomes -1, 0 or +1 per a's sign.
	SIMD<int16_t, 16> sgn;
	const __m256i one = _mm256_set1_epi16(1);
	sgn.m = _mm256_sign_epi16(one, a.m);
	return sgn;
}
template <>
inline SIMD<int32_t, 8> vsignum(SIMD<int32_t, 8> a)
{
	// PSIGND with a constant 1: each lane becomes -1, 0 or +1 per a's sign.
	SIMD<int32_t, 8> sgn;
	const __m256i one = _mm256_set1_epi32(1);
	sgn.m = _mm256_sign_epi32(one, a.m);
	return sgn;
}
template <>
inline SIMD<float, 8> vsign(SIMD<float, 8> a, SIMD<float, 8> b)
{

35
neon.hh
View file

@ -444,6 +444,22 @@ inline SIMD<uint16_t, 8> vqsub(SIMD<uint16_t, 8> a, SIMD<uint16_t, 8> b)
return tmp;
}
template <>
inline SIMD<float, 4> vmul(SIMD<float, 4> a, SIMD<float, 4> b)
{
	// Lane-wise single-precision product via the NEON vector multiply.
	SIMD<float, 4> product;
	product.m = vmulq_f32(a.m, b.m);
	return product;
}
template <>
inline SIMD<int8_t, 16> vmul(SIMD<int8_t, 16> a, SIMD<int8_t, 16> b)
{
	// Lane-wise signed 8-bit product (low eight bits, as with scalar int8_t).
	SIMD<int8_t, 16> product;
	product.m = vmulq_s8(a.m, b.m);
	return product;
}
template <>
inline SIMD<float, 4> vabs(SIMD<float, 4> a)
{
@ -468,6 +484,25 @@ inline SIMD<int16_t, 8> vqabs(SIMD<int16_t, 8> a)
return tmp;
}
template <>
inline SIMD<float, 4> vsignum(SIMD<float, 4> a)
{
	// signum(a): -1.f, 0.f or +1.f per lane, built with integer bit ops:
	//   (bits of -0.f) & a   -> isolates each lane's sign bit
	//   (bits of 1.f) ^ sign -> +-1.f (XOR acts as OR here: bit sets are disjoint)
	//   BIC with (a == 0.f)  -> clears lanes whose value compares equal to zero
	// NOTE(review): NaN lanes fail the == compare and come out as +-1.f.
	SIMD<float, 4> tmp;
	tmp.m = (float32x4_t)vbicq_u32(
		veorq_u32((uint32x4_t)vdupq_n_f32(1.f), vandq_u32((uint32x4_t)vdupq_n_f32(-0.f), (uint32x4_t)a.m)),
		vceqq_f32(a.m, vdupq_n_f32(0.f)));
	return tmp;
}
template <>
inline SIMD<int8_t, 16> vsignum(SIMD<int8_t, 16> a)
{
	// signum(a) per lane: (0 > a) yields an all-ones mask (reads back as -1
	// in int8_t) for negative lanes; (a > 0) & 1 yields 1 for positive
	// lanes; OR merges the two disjoint masks, leaving zero lanes at 0.
	SIMD<int8_t, 16> tmp;
	tmp.m = (int8x16_t)vorrq_u8(vcgtq_s8(vdupq_n_s8(0), a.m),
		vandq_u8(vcgtq_s8(a.m, vdupq_n_s8(0)), (uint8x16_t)vdupq_n_s8(1)));
	return tmp;
}
template <>
inline SIMD<float, 4> vsign(SIMD<float, 4> a, SIMD<float, 4> b)
{

108
simd.hh
View file

@ -1226,6 +1226,114 @@ static inline SIMD<uint16_t, WIDTH> vqsub(SIMD<uint16_t, WIDTH> a, SIMD<uint16_t
return tmp;
}
template <int WIDTH>
static inline SIMD<float, WIDTH> vmul(SIMD<float, WIDTH> a, SIMD<float, WIDTH> b)
{
	// Element-wise product of two float vectors (portable fallback).
	SIMD<float, WIDTH> product;
	for (int n = 0; n != WIDTH; ++n)
		product.v[n] = a.v[n] * b.v[n];
	return product;
}
template <int WIDTH>
static inline SIMD<double, WIDTH> vmul(SIMD<double, WIDTH> a, SIMD<double, WIDTH> b)
{
	// Element-wise product of two double vectors (portable fallback).
	SIMD<double, WIDTH> product;
	for (int n = 0; n != WIDTH; ++n)
		product.v[n] = a.v[n] * b.v[n];
	return product;
}
template <int WIDTH>
static inline SIMD<int8_t, WIDTH> vmul(SIMD<int8_t, WIDTH> a, SIMD<int8_t, WIDTH> b)
{
	// Element-wise product of two int8_t vectors (portable fallback).
	SIMD<int8_t, WIDTH> product;
	for (int n = 0; n != WIDTH; ++n)
		product.v[n] = a.v[n] * b.v[n];
	return product;
}
template <int WIDTH>
static inline SIMD<int16_t, WIDTH> vmul(SIMD<int16_t, WIDTH> a, SIMD<int16_t, WIDTH> b)
{
	// Element-wise product of two int16_t vectors (portable fallback).
	SIMD<int16_t, WIDTH> product;
	for (int n = 0; n != WIDTH; ++n)
		product.v[n] = a.v[n] * b.v[n];
	return product;
}
template <int WIDTH>
static inline SIMD<int32_t, WIDTH> vmul(SIMD<int32_t, WIDTH> a, SIMD<int32_t, WIDTH> b)
{
	// Element-wise product of two int32_t vectors (portable fallback).
	SIMD<int32_t, WIDTH> product;
	for (int n = 0; n != WIDTH; ++n)
		product.v[n] = a.v[n] * b.v[n];
	return product;
}
template <int WIDTH>
static inline SIMD<int64_t, WIDTH> vmul(SIMD<int64_t, WIDTH> a, SIMD<int64_t, WIDTH> b)
{
	// Element-wise product of two int64_t vectors (portable fallback).
	SIMD<int64_t, WIDTH> product;
	for (int n = 0; n != WIDTH; ++n)
		product.v[n] = a.v[n] * b.v[n];
	return product;
}
template <int WIDTH>
static inline SIMD<float, WIDTH> vsignum(SIMD<float, WIDTH> a)
{
	// Per-lane signum: +1.f, -1.f or 0.f (NaN lanes fail both compares -> 0.f).
	SIMD<float, WIDTH> sgn;
	for (int n = 0; n != WIDTH; ++n)
		sgn.v[n] = a.v[n] > 0.f ? 1.f : a.v[n] < 0.f ? -1.f : 0.f;
	return sgn;
}
template <int WIDTH>
static inline SIMD<double, WIDTH> vsignum(SIMD<double, WIDTH> a)
{
	// Per-lane signum: +1., -1. or 0. (NaN lanes fail both compares -> 0.).
	SIMD<double, WIDTH> sgn;
	for (int n = 0; n != WIDTH; ++n)
		sgn.v[n] = a.v[n] > 0. ? 1. : a.v[n] < 0. ? -1. : 0.;
	return sgn;
}
template <int WIDTH>
static inline SIMD<int8_t, WIDTH> vsignum(SIMD<int8_t, WIDTH> a)
{
	// Per-lane signum: +1, -1 or 0.
	SIMD<int8_t, WIDTH> sgn;
	for (int n = 0; n != WIDTH; ++n)
		sgn.v[n] = a.v[n] > 0 ? 1 : a.v[n] < 0 ? -1 : 0;
	return sgn;
}
template <int WIDTH>
static inline SIMD<int16_t, WIDTH> vsignum(SIMD<int16_t, WIDTH> a)
{
	// Per-lane signum: +1, -1 or 0.
	SIMD<int16_t, WIDTH> sgn;
	for (int n = 0; n != WIDTH; ++n)
		sgn.v[n] = a.v[n] > 0 ? 1 : a.v[n] < 0 ? -1 : 0;
	return sgn;
}
template <int WIDTH>
static inline SIMD<int32_t, WIDTH> vsignum(SIMD<int32_t, WIDTH> a)
{
	// Per-lane signum: +1, -1 or 0.
	SIMD<int32_t, WIDTH> sgn;
	for (int n = 0; n != WIDTH; ++n)
		sgn.v[n] = a.v[n] > 0 ? 1 : a.v[n] < 0 ? -1 : 0;
	return sgn;
}
template <int WIDTH>
static inline SIMD<int64_t, WIDTH> vsignum(SIMD<int64_t, WIDTH> a)
{
	// Per-lane signum: +1, -1 or 0.
	SIMD<int64_t, WIDTH> sgn;
	for (int n = 0; n != WIDTH; ++n)
		sgn.v[n] = a.v[n] > 0 ? 1 : a.v[n] < 0 ? -1 : 0;
	return sgn;
}
template <int WIDTH>
static inline SIMD<float, WIDTH> vsign(SIMD<float, WIDTH> a, SIMD<float, WIDTH> b)
{

View file

@ -455,6 +455,22 @@ inline SIMD<uint16_t, 8> vqsub(SIMD<uint16_t, 8> a, SIMD<uint16_t, 8> b)
return tmp;
}
template <>
inline SIMD<float, 4> vmul(SIMD<float, 4> a, SIMD<float, 4> b)
{
	// Lane-wise single-precision product via the SSE packed multiply.
	SIMD<float, 4> product;
	product.m = _mm_mul_ps(a.m, b.m);
	return product;
}
template <>
inline SIMD<double, 2> vmul(SIMD<double, 2> a, SIMD<double, 2> b)
{
	// Lane-wise double-precision product via the SSE2 packed multiply.
	SIMD<double, 2> product;
	product.m = _mm_mul_pd(a.m, b.m);
	return product;
}
template <>
inline SIMD<float, 4> vabs(SIMD<float, 4> a)
{
@ -495,6 +511,50 @@ inline SIMD<int32_t, 4> vqabs(SIMD<int32_t, 4> a)
return tmp;
}
template <>
inline SIMD<float, 4> vsignum(SIMD<float, 4> a)
{
	// signum(a): -1.f, 0.f or +1.f per lane, branch-free:
	//   set1(-0.f) & a       -> isolates each lane's sign bit
	//   set1(1.f)  | sign    -> +-1.f carrying a's sign
	//   andnot(a == 0, ...)  -> forces lanes comparing equal to zero to 0.f
	// NOTE(review): _mm_cmpeq_ps is an ordered compare, so NaN lanes fail it
	// and come out as +-1.f (taking the NaN's sign bit), not 0.f.
	SIMD<float, 4> tmp;
	tmp.m = _mm_andnot_ps(
		_mm_cmpeq_ps(a.m, _mm_setzero_ps()),
		_mm_or_ps(_mm_set1_ps(1.f), _mm_and_ps(_mm_set1_ps(-0.f), a.m)));
	return tmp;
}
template <>
inline SIMD<double, 2> vsignum(SIMD<double, 2> a)
{
	// signum(a): -1., 0. or +1. per lane, branch-free:
	//   set1(-0.) & a        -> isolates each lane's sign bit
	//   set1(1.)  | sign     -> +-1. carrying a's sign
	//   andnot(a == 0, ...)  -> forces lanes comparing equal to zero to 0.
	// NOTE(review): _mm_cmpeq_pd is an ordered compare, so NaN lanes come
	// out as +-1. (per the NaN's sign bit), not 0.
	SIMD<double, 2> tmp;
	tmp.m = _mm_andnot_pd(
		_mm_cmpeq_pd(a.m, _mm_setzero_pd()),
		_mm_or_pd(_mm_set1_pd(1.), _mm_and_pd(_mm_set1_pd(-0.), a.m)));
	return tmp;
}
template <>
inline SIMD<int8_t, 16> vsignum(SIMD<int8_t, 16> a)
{
	// PSIGNB with a constant 1: each lane becomes -1, 0 or +1 per a's sign.
	SIMD<int8_t, 16> sgn;
	const __m128i one = _mm_set1_epi8(1);
	sgn.m = _mm_sign_epi8(one, a.m);
	return sgn;
}
template <>
inline SIMD<int16_t, 8> vsignum(SIMD<int16_t, 8> a)
{
	// PSIGNW with a constant 1: each lane becomes -1, 0 or +1 per a's sign.
	SIMD<int16_t, 8> sgn;
	const __m128i one = _mm_set1_epi16(1);
	sgn.m = _mm_sign_epi16(one, a.m);
	return sgn;
}
template <>
inline SIMD<int32_t, 4> vsignum(SIMD<int32_t, 4> a)
{
	// PSIGND with a constant 1: each lane becomes -1, 0 or +1 per a's sign.
	SIMD<int32_t, 4> sgn;
	const __m128i one = _mm_set1_epi32(1);
	sgn.m = _mm_sign_epi32(one, a.m);
	return sgn;
}
template <>
inline SIMD<float, 4> vsign(SIMD<float, 4> a, SIMD<float, 4> b)
{