Fix intrinsics for AVX2
Signed-off-by: Rafael Ravedutti <rafaelravedutti@gmail.com>

parent 4090f43095
commit d4b34e1fa4
@@ -156,6 +156,10 @@ double computeForceLJ_2xnn(Parameter *param, Atom *atom, Neighbor *neighbor, Sta
#pragma omp parallel for
for(int ci = 0; ci < atom->Nclusters_local; ci++) {
    int ci_cj0 = CJ0_FROM_CI(ci);
#if CLUSTER_M > CLUSTER_N
    int ci_cj1 = CJ1_FROM_CI(ci);
#endif
    int ci_vec_base = CI_VECTOR_BASE_INDEX(ci);
    MD_FLOAT *ci_x = &atom->cl_x[ci_vec_base];
    MD_FLOAT *ci_f = &atom->cl_f[ci_vec_base];
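
Note: when CLUSTER_M > CLUSTER_N, each i-cluster spans two j-clusters, hence the extra ci_cj1 index. A plausible shape for these mapping macros, assuming CLUSTER_M == 2 * CLUSTER_N (the real definitions live elsewhere in the tree):

    /* Hypothetical: i-cluster ci covers j-clusters 2*ci and 2*ci + 1. */
    #define CJ0_FROM_CI(ci)  ((ci) << 1)
    #define CJ1_FROM_CI(ci)  (((ci) << 1) | 0x1)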
@@ -400,20 +404,6 @@ double computeForceLJ_4xn(Parameter *param, Atom *atom, Neighbor *neighbor, Stat
    simd_incr_reduced_sum(&ci_f[CL_X_OFFSET], fix0, fix1, fix2, fix3);
    simd_incr_reduced_sum(&ci_f[CL_Y_OFFSET], fiy0, fiy1, fiy2, fiy3);
    simd_incr_reduced_sum(&ci_f[CL_Z_OFFSET], fiz0, fiz1, fiz2, fiz3);
-   /*
-   ci_f[CL_X_OFFSET + 0] = simd_h_reduce_sum(fix0);
-   ci_f[CL_X_OFFSET + 1] = simd_h_reduce_sum(fix1);
-   ci_f[CL_X_OFFSET + 2] = simd_h_reduce_sum(fix2);
-   ci_f[CL_X_OFFSET + 3] = simd_h_reduce_sum(fix3);
-   ci_f[CL_Y_OFFSET + 0] = simd_h_reduce_sum(fiy0);
-   ci_f[CL_Y_OFFSET + 1] = simd_h_reduce_sum(fiy1);
-   ci_f[CL_Y_OFFSET + 2] = simd_h_reduce_sum(fiy2);
-   ci_f[CL_Y_OFFSET + 3] = simd_h_reduce_sum(fiy3);
-   ci_f[CL_Z_OFFSET + 0] = simd_h_reduce_sum(fiz0);
-   ci_f[CL_Z_OFFSET + 1] = simd_h_reduce_sum(fiz1);
-   ci_f[CL_Z_OFFSET + 2] = simd_h_reduce_sum(fiz2);
-   ci_f[CL_Z_OFFSET + 3] = simd_h_reduce_sum(fiz3);
-   */

    addStat(stats->calculated_forces, 1);
    addStat(stats->num_neighs, numneighs);
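
Each simd_incr_reduced_sum() call transposes and reduces four per-lane force accumulators, adds the four sums into the cluster force array, and returns the grand total, replacing the twelve scalar simd_h_reduce_sum() stores that had been left commented out. A scalar model of that contract for the 4-wide double case (hypothetical validation helper, not part of the commit):

    /* m[i] += horizontal sum of v[i]; returns the grand total. */
    static double scalar_incr_reduced_sum(double *m, const double v[4][4]) {
        double total = 0.0;
        for(int i = 0; i < 4; i++) {
            double s = v[i][0] + v[i][1] + v[i][2] + v[i][3];
            m[i] += s;
            total += s;
        }
        return total;
    }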
@@ -21,8 +21,6 @@
 * =======================================================================================
 */

#include <stdlib.h>
#include <string.h>
#include <immintrin.h>
#include <zmmintrin.h>

@@ -42,7 +40,6 @@ static inline MD_SIMD_MASK simd_mask_cond_lt(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) {
static inline MD_SIMD_MASK simd_mask_from_u32(unsigned int a) { return _cvtu32_mask8(a); }
static inline unsigned int simd_mask_to_u32(MD_SIMD_MASK a) { return _cvtmask8_u32(a); }
static inline MD_SIMD_FLOAT simd_load(MD_FLOAT *p) { return _mm512_load_pd(p); }

static inline MD_FLOAT simd_h_reduce_sum(MD_SIMD_FLOAT a) {
    MD_SIMD_FLOAT x = _mm512_add_pd(a, _mm512_shuffle_f64x2(a, a, 0xee));
    x = _mm512_add_pd(x, _mm512_shuffle_f64x2(x, x, 0x11));
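
Each _mm512_shuffle_f64x2() add folds the 512-bit vector in half across its 128-bit lanes. The hunk ends mid-function; a complete horizontal sum in the same folding style might finish as below (the last two steps are an assumption, as they fall outside the diff context):

    static inline double h_reduce_sum_sketch(__m512d a) {
        __m512d x = _mm512_add_pd(a, _mm512_shuffle_f64x2(a, a, 0xee)); // fold upper 256 bits onto lower
        x = _mm512_add_pd(x, _mm512_shuffle_f64x2(x, x, 0x11));         // fold lane 1 onto lane 0
        x = _mm512_add_pd(x, _mm512_permute_pd(x, 0x55));               // swap odd/even elements in each lane
        return _mm512_cvtsd_f64(x);                                     // total now sits in element 0
    }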
@@ -29,17 +29,56 @@
#define MD_SIMD_FLOAT __m256d

#ifdef NO_AVX2
-#define MD_SIMD_MASK __m256d
+# define MD_SIMD_MASK __m256d
#else
-#define MD_SIMD_MASK __mmask8
+# define MD_SIMD_MASK __mmask8
#endif

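On the NO_AVX2 path there are no AVX-512VL opmask registers, so MD_SIMD_MASK falls back to a full __m256d and masked operations must be emulated with bitwise selects. A minimal sketch of a masked add on that path (an assumption; the actual NO_AVX2 definitions are outside this hunk):

    /* m is an all-ones/all-zeros lane mask as produced by _mm256_cmp_pd(). */
    static inline __m256d masked_add_no_avx2(__m256d a, __m256d b, __m256d m) {
        return _mm256_add_pd(a, _mm256_and_pd(b, m)); // zero masked-off lanes of b, then add
    }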
-static inline MD_SIMD_FLOAT simd_broadcast(double scalar) { return _mm256_set1_pd(scalar); }
+static inline MD_SIMD_FLOAT simd_broadcast(MD_FLOAT scalar) { return _mm256_set1_pd(scalar); }
static inline MD_SIMD_FLOAT simd_zero() { return _mm256_set1_pd(0.0); }
static inline MD_SIMD_FLOAT simd_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_add_pd(a, b); }
static inline MD_SIMD_FLOAT simd_sub(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_sub_pd(a, b); }
static inline MD_SIMD_FLOAT simd_mul(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_mul_pd(a, b); }
static inline MD_SIMD_FLOAT simd_load(MD_FLOAT *p) { return _mm256_load_pd(p); }

static inline MD_SIMD_FLOAT simd_load_h_duplicate(const MD_FLOAT *m) {
    MD_SIMD_FLOAT ret;
    fprintf(stderr, "simd_load_h_duplicate(): Not implemented for AVX/AVX2 with double precision!");
    exit(-1);
    return ret;
}

static inline MD_SIMD_FLOAT simd_load_h_dual(const MD_FLOAT *m) {
    MD_SIMD_FLOAT ret;
    fprintf(stderr, "simd_load_h_dual(): Not implemented for AVX/AVX2 with double precision!");
    exit(-1);
    return ret;
}

static inline MD_FLOAT simd_h_dual_incr_reduced_sum(MD_FLOAT *m, MD_SIMD_FLOAT v0, MD_SIMD_FLOAT v1) {
    fprintf(stderr, "simd_h_dual_incr_reduced_sum(): Not implemented for AVX/AVX2 with double precision!");
    exit(-1);
    return 0.0;
}

static inline MD_FLOAT simd_incr_reduced_sum(MD_FLOAT *m, MD_SIMD_FLOAT v0, MD_SIMD_FLOAT v1, MD_SIMD_FLOAT v2, MD_SIMD_FLOAT v3) {
    __m256d t0, t1, t2;
    __m128d a0, a1;

    t0 = _mm256_hadd_pd(v0, v1);
    t1 = _mm256_hadd_pd(v2, v3);
    t2 = _mm256_permute2f128_pd(t0, t1, 0x21);
    t0 = _mm256_add_pd(t0, t2);
    t1 = _mm256_add_pd(t1, t2);
    t0 = _mm256_blend_pd(t0, t1, 0b1100);
    t1 = _mm256_add_pd(t0, _mm256_load_pd(m));
    _mm256_store_pd(m, t1);

    t0 = _mm256_add_pd(t0, _mm256_permute_pd(t0, 0b0101));
    a0 = _mm256_castpd256_pd128(t0);
    a1 = _mm256_extractf128_pd(t0, 0x1);
    a0 = _mm_add_sd(a0, a1);
    return *((MD_FLOAT *) &a0);
}

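The reduction above is a 4x4 transpose-and-sum: the two _mm256_hadd_pd() calls form pairwise sums, the cross-lane permute and adds complete the per-vector sums, and the blend gathers them into one register before the increment and store. A worked trace on hypothetical inputs (illustration only):

    /* With v0 = {1,2,3,4}, v1 = {10,20,30,40}, v2 = {100,...}, v3 = {1000,...}:
     *   t0 = hadd(v0,v1)              -> {  3,  30,   7,   70}
     *   t1 = hadd(v2,v3)              -> {300,3000, 700, 7000}
     *   t2 = permute2f128(t0,t1,0x21) -> {  7,  70, 300, 3000}  // t0 high | t1 low
     *   t0 += t2; t1 += t2; blend     -> { 10, 100,1000,10000}  // per-vector sums
     * m[0..3] are incremented by these sums and 11110 is returned. */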
#ifdef NO_AVX2

@@ -21,36 +21,22 @@
 * =======================================================================================
 */

#include <stdlib.h>
#include <string.h>
#include <immintrin.h>
#include <zmmintrin.h>

#define MD_SIMD_FLOAT __m256

#ifdef NO_AVX2
#define MD_SIMD_MASK __m256
#else
#define MD_SIMD_MASK __mmask8
#endif

-static inline MD_SIMD_FLOAT simd_broadcast(float scalar) { return _mm256_set1_ps(scalar); }
+static inline MD_SIMD_FLOAT simd_broadcast(MD_FLOAT scalar) { return _mm256_set1_ps(scalar); }
static inline MD_SIMD_FLOAT simd_zero() { return _mm256_set1_ps(0.0); }
static inline MD_SIMD_FLOAT simd_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_add_ps(a, b); }
static inline MD_SIMD_FLOAT simd_sub(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_sub_ps(a, b); }
static inline MD_SIMD_FLOAT simd_mul(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_mul_ps(a, b); }
static inline MD_SIMD_FLOAT simd_load(MD_FLOAT *p) { return _mm256_load_ps(p); }

#ifdef NO_AVX2

#error "AVX intrinsics with single-precision not implemented!"

#else // AVX2

static inline MD_SIMD_FLOAT simd_reciprocal(MD_SIMD_FLOAT a) { return _mm256_rcp14_ps(a); }
static inline MD_SIMD_FLOAT simd_fma(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_FLOAT c) { return _mm256_fmadd_ps(a, b, c); }
static inline MD_SIMD_FLOAT simd_masked_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_MASK m) { return _mm256_mask_add_ps(a, m, a, b); }
-static inline MD_SIMD_MASK simd_mask_cond_lt(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_cmp_pd_mask(a, b, _CMP_LT_OQ); }
+static inline MD_SIMD_MASK simd_mask_cond_lt(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_cmp_ps_mask(a, b, _CMP_LT_OQ); }
static inline MD_SIMD_MASK simd_mask_and(MD_SIMD_MASK a, MD_SIMD_MASK b) { return _kand_mask8(a, b); }
static inline MD_SIMD_MASK simd_mask_from_u32(unsigned int a) { return _cvtu32_mask8(a); }
static inline unsigned int simd_mask_to_u32(MD_SIMD_MASK a) { return _cvtmask8_u32(a); }
@@ -62,4 +48,41 @@ static inline MD_FLOAT simd_h_reduce_sum(MD_SIMD_FLOAT a) {
    return *((MD_FLOAT *) &t0);
}

#endif
static inline MD_FLOAT simd_incr_reduced_sum(MD_FLOAT *m, MD_SIMD_FLOAT v0, MD_SIMD_FLOAT v1, MD_SIMD_FLOAT v2, MD_SIMD_FLOAT v3) {
    __m128 t0, t2;
    v0 = _mm256_hadd_ps(v0, v1);
    v2 = _mm256_hadd_ps(v2, v3);
    v0 = _mm256_hadd_ps(v0, v2);
    t0 = _mm_add_ps(_mm256_castps256_ps128(v0), _mm256_extractf128_ps(v0, 0x1));
    t2 = _mm_add_ps(t0, _mm_load_ps(m));
    _mm_store_ps(m, t2);

    t0 = _mm_add_ps(t0, _mm_permute_ps(t0, _MM_SHUFFLE(1, 0, 3, 2)));
    t0 = _mm_add_ss(t0, _mm_permute_ps(t0, _MM_SHUFFLE(0, 3, 2, 1)));
    return *((MD_FLOAT *) &t0);
}

static inline MD_SIMD_FLOAT simd_load_h_duplicate(const MD_FLOAT *m) {
    return _mm256_broadcast_ps((const __m128 *)(m));
}

static inline MD_SIMD_FLOAT simd_load_h_dual(const MD_FLOAT *m) {
    __m128 t0, t1;
    t0 = _mm_broadcast_ss(m);
    t1 = _mm_broadcast_ss(m + 1);
    return _mm256_insertf128_ps(_mm256_castps128_ps256(t0), t1, 0x1);
}

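simd_load_h_dual() fills the low 128-bit half with m[0] and the high half with m[1], matching the half-vector layout the 2xNN kernel uses for pairs of j-atoms. Usage sketch with hypothetical values:

    float m2[2] = {1.0f, 2.0f};
    __m256 v = simd_load_h_dual(m2); // lanes: {1,1,1,1, 2,2,2,2}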

static inline MD_FLOAT simd_h_dual_incr_reduced_sum(MD_FLOAT *m, MD_SIMD_FLOAT v0, MD_SIMD_FLOAT v1) {
    __m128 t0, t1;
    v0 = _mm256_hadd_ps(v0, v1);
    t0 = _mm256_extractf128_ps(v0, 0x1);
    t0 = _mm_hadd_ps(_mm256_castps256_ps128(v0), t0);
    t0 = _mm_permute_ps(t0, _MM_SHUFFLE(3, 1, 2, 0));
    t1 = _mm_add_ps(t0, _mm_load_ps(m));
    _mm_store_ps(m, t1);

    t0 = _mm_add_ps(t0, _mm_permute_ps(t0, _MM_SHUFFLE(1, 0, 3, 2)));
    t0 = _mm_add_ss(t0, _mm_permute_ps(t0, _MM_SHUFFLE(0, 3, 2, 1)));
    return *((MD_FLOAT *) &t0);
}
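
The dual variant treats each 8-wide input as two independent 4-lane halves and increments four accumulators: m[0] and m[1] with the halves of v0, m[2] and m[3] with the halves of v1, returning the overall total. A scalar model of that contract (hypothetical validation helper, not from the commit):

    static float scalar_h_dual_incr_reduced_sum(float *m, const float v0[8], const float v1[8]) {
        float s[4] = {0.0f, 0.0f, 0.0f, 0.0f};
        for(int k = 0; k < 4; k++) {
            s[0] += v0[k];     s[1] += v0[4 + k]; // low/high halves of v0
            s[2] += v1[k];     s[3] += v1[4 + k]; // low/high halves of v1
        }
        float total = 0.0f;
        for(int i = 0; i < 4; i++) { m[i] += s[i]; total += s[i]; }
        return total;
    }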