Add version with AVX2 intrinsics for GROMACS scheme

Signed-off-by: Rafael Ravedutti <rafaelravedutti@gmail.com>
Rafael Ravedutti 2022-02-04 17:52:48 +01:00
parent 34ce407f18
commit cdb1d5b9f1
3 changed files with 70 additions and 11 deletions
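Note: this change adds a 4-wide (AVX2, __m256d) variant of the SIMD layer next to the existing 8-wide (AVX-512, __m512d) one. With 8 lanes, one register holds two 4-atom j-clusters, so the neighbor loop consumes two clusters per iteration and loads them with simd_load2; with 4 lanes it holds a single j-cluster loaded with simd_load. The scalar sketch below is an illustration only (pack_j_lanes is a hypothetical helper, not part of the commit) and assumes the non-AOS cluster layout implied by simd_load, i.e. x, y and z blocks of CLUSTER_DIM_M doubles per cluster:

#include <stdio.h>

#define CLUSTER_DIM_M 4

/* Scalar model of the lane packing done by simd_load / simd_load2 (assumption:
 * positions stored per cluster as x[0..3], y[0..3], z[0..3]). */
static void pack_j_lanes(const double *cj0, const double *cj1, int d,
                         double *lanes, int vector_width) {
    for(int l = 0; l < CLUSTER_DIM_M; l++) {
        lanes[l] = cj0[d * CLUSTER_DIM_M + l];          /* lanes 0..3: first j-cluster */
    }
    if(vector_width == 8) {                             /* simd_load2: second j-cluster */
        for(int l = 0; l < CLUSTER_DIM_M; l++) {
            lanes[4 + l] = cj1[d * CLUSTER_DIM_M + l];  /* lanes 4..7 */
        }
    }
}

int main(void) {
    double cj0[12], cj1[12], lanes[8] = {0};
    for(int i = 0; i < 12; i++) { cj0[i] = i; cj1[i] = 100 + i; }
    pack_j_lanes(cj0, cj1, 0, lanes, 8);                /* d = 0 selects the x coordinates */
    for(int l = 0; l < 8; l++) { printf("%g ", lanes[l]); }
    printf("\n");
    return 0;
}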

View File

@@ -155,14 +155,21 @@ double computeForceLJ_4xn(Parameter *param, Atom *atom, Neighbor *neighbor, Stat
for(int k = 0; k < numneighs; k += unroll_j) {
int cj0 = neighs[k + 0];
- int cj1 = neighs[k + 1];
unsigned int cond0 = (unsigned int)(ci == cj0);
- unsigned int cond1 = (unsigned int)(ci == cj1);
MD_FLOAT *cj0_ptr = cluster_pos_ptr(cj0);
+ #if VECTOR_WIDTH == 8
+ int cj1 = neighs[k + 1];
+ unsigned int cond1 = (unsigned int)(ci == cj1);
MD_FLOAT *cj1_ptr = cluster_pos_ptr(cj1);
- MD_SIMD_FLOAT xj_tmp = simd_gather2(cj0_ptr, cj1_ptr, 0);
- MD_SIMD_FLOAT yj_tmp = simd_gather2(cj0_ptr, cj1_ptr, 1);
- MD_SIMD_FLOAT zj_tmp = simd_gather2(cj0_ptr, cj1_ptr, 2);
+ MD_SIMD_FLOAT xj_tmp = simd_load2(cj0_ptr, cj1_ptr, 0);
+ MD_SIMD_FLOAT yj_tmp = simd_load2(cj0_ptr, cj1_ptr, 1);
+ MD_SIMD_FLOAT zj_tmp = simd_load2(cj0_ptr, cj1_ptr, 2);
+ #else
+ MD_SIMD_FLOAT xj_tmp = simd_load(cj0_ptr, 0);
+ MD_SIMD_FLOAT yj_tmp = simd_load(cj0_ptr, 1);
+ MD_SIMD_FLOAT zj_tmp = simd_load(cj0_ptr, 2);
+ #endif
MD_SIMD_FLOAT delx0 = simd_sub(xi0_tmp, xj_tmp);
MD_SIMD_FLOAT dely0 = simd_sub(yi0_tmp, yj_tmp);
@@ -177,10 +184,17 @@ double computeForceLJ_4xn(Parameter *param, Atom *atom, Neighbor *neighbor, Stat
MD_SIMD_FLOAT dely3 = simd_sub(yi3_tmp, yj_tmp);
MD_SIMD_FLOAT delz3 = simd_sub(zi3_tmp, zj_tmp);
+ #if VECTOR_WIDTH == 8
MD_SIMD_MASK excl_mask0 = simd_mask_from_u32((unsigned int)(0xff - 0x1 * cond0 - 0x10 * cond1));
MD_SIMD_MASK excl_mask1 = simd_mask_from_u32((unsigned int)(0xff - 0x2 * cond0 - 0x20 * cond1));
MD_SIMD_MASK excl_mask2 = simd_mask_from_u32((unsigned int)(0xff - 0x4 * cond0 - 0x40 * cond1));
MD_SIMD_MASK excl_mask3 = simd_mask_from_u32((unsigned int)(0xff - 0x8 * cond0 - 0x80 * cond1));
+ #else
+ MD_SIMD_MASK excl_mask0 = simd_mask_from_u32((unsigned int)(0xf - 0x1 * cond0));
+ MD_SIMD_MASK excl_mask1 = simd_mask_from_u32((unsigned int)(0xf - 0x2 * cond0));
+ MD_SIMD_MASK excl_mask2 = simd_mask_from_u32((unsigned int)(0xf - 0x4 * cond0));
+ MD_SIMD_MASK excl_mask3 = simd_mask_from_u32((unsigned int)(0xf - 0x8 * cond0));
+ #endif
MD_SIMD_FLOAT rsq0 = simd_fma(delx0, delx0, simd_fma(dely0, dely0, simd_mul(delz0, delz0)));
MD_SIMD_FLOAT rsq1 = simd_fma(delx1, delx1, simd_fma(dely1, dely1, simd_mul(delz1, delz1)));

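For reference, the exclusion masks above clear the diagonal lane for each i-row so an atom is never paired with itself when the i-cluster coincides with a j-cluster: bit b of the mask enables j-lane b, cond0 clears bit i in the lower (cj0) half, and on the 8-wide path cond1 additionally clears bit 4+i in the upper (cj1) half. A small standalone check (not part of the commit) that prints the resulting mask values:

#include <stdio.h>

int main(void) {
    unsigned int cond0 = 1, cond1 = 0;  /* example: ci == cj0, ci != cj1 */
    for(int i = 0; i < 4; i++) {
        unsigned int m8 = 0xff - (0x1 << i) * cond0 - (0x10 << i) * cond1; /* VECTOR_WIDTH == 8 */
        unsigned int m4 = 0xf  - (0x1 << i) * cond0;                       /* VECTOR_WIDTH == 4 */
        printf("i-row %d: 8-wide mask 0x%02x, 4-wide mask 0x%x\n", i, m8, m4);
    }
    return 0;
}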
View File

@@ -26,7 +26,7 @@
#define __ATOM_H_
#define CLUSTER_DIM_M 4
- #define CLUSTER_DIM_N 8
+ #define CLUSTER_DIM_N VECTOR_WIDTH
typedef struct {
int bin;

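Tying CLUSTER_DIM_N to VECTOR_WIDTH makes the j-cluster size follow the SIMD width (4 doubles with AVX2, 8 with AVX-512). Since only those two widths have kernel paths, a compile-time guard along these lines could catch misconfiguration early (a sketch, not in the commit):

#ifndef VECTOR_WIDTH
#   error "VECTOR_WIDTH must be defined (4 for AVX2, 8 for AVX-512 with double precision)"
#elif VECTOR_WIDTH != 4 && VECTOR_WIDTH != 8
#   error "Unsupported VECTOR_WIDTH: only 4 and 8 are handled by the SIMD layer"
#endif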
View File

@@ -25,11 +25,14 @@
#include <immintrin.h>
#include <zmmintrin.h>
- #define MD_SIMD_FLOAT __m512d
- #define MD_SIMD_MASK __mmask8
#define SIMD_PRINT_REAL(a) simd_print_real(#a, a);
#define SIMD_PRINT_MASK(a) simd_print_mask(#a, a);
+ #if VECTOR_WIDTH == 8 // AVX512
+ #define MD_SIMD_FLOAT __m512d
+ #define MD_SIMD_MASK __mmask8
static inline MD_SIMD_FLOAT simd_broadcast(double scalar) { return _mm512_set1_pd(scalar); }
static inline MD_SIMD_FLOAT simd_zero() { return _mm512_set1_pd(0.0); }
static inline MD_SIMD_FLOAT simd_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm512_add_pd(a, b); }
@@ -43,7 +46,7 @@ static inline MD_SIMD_MASK simd_mask_to_u32(unsigned int a) { return _cvtmask8_u
static inline MD_SIMD_MASK simd_mask_and(MD_SIMD_MASK a, MD_SIMD_MASK b) { return _kand_mask8(a, b); }
static inline MD_SIMD_MASK simd_mask_cond_lt(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm512_cmp_pd_mask(a, b, _CMP_LT_OQ); }
- static MD_SIMD_FLOAT simd_gather2(MD_FLOAT *c0, MD_FLOAT *c1, int d) {
+ static MD_SIMD_FLOAT simd_load2(MD_FLOAT *c0, MD_FLOAT *c1, int d) {
MD_SIMD_FLOAT x;
#ifdef CLUSTER_AOS
__m256i aos_gather_vindex = _mm256_set_epi32(9, 6, 3, 0, 9, 6, 3, 0);
@@ -64,12 +67,54 @@ static inline MD_FLOAT simd_horizontal_sum(MD_SIMD_FLOAT a) {
return *((double *) &x);
}
+ #else // AVX2
+ #define MD_SIMD_FLOAT __m256d
+ #define MD_SIMD_MASK __mmask8
+ static inline MD_SIMD_FLOAT simd_broadcast(double scalar) { return _mm256_set1_pd(scalar); }
+ static inline MD_SIMD_FLOAT simd_zero() { return _mm256_set1_pd(0.0); }
+ static inline MD_SIMD_FLOAT simd_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_add_pd(a, b); }
+ static inline MD_SIMD_FLOAT simd_sub(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_sub_pd(a, b); }
+ static inline MD_SIMD_FLOAT simd_mul(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_mul_pd(a, b); }
+ static inline MD_SIMD_FLOAT simd_fma(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_FLOAT c) { return _mm256_fmadd_pd(a, b, c); }
+ static inline MD_SIMD_FLOAT simd_reciprocal(MD_SIMD_FLOAT a) { return _mm256_rcp14_pd(a); }
+ static inline MD_SIMD_FLOAT simd_masked_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_MASK m) { return _mm256_mask_add_pd(a, m, a, b); }
+ static inline MD_SIMD_MASK simd_mask_from_u32(unsigned int a) { return _cvtu32_mask8(a); }
+ static inline unsigned int simd_mask_to_u32(MD_SIMD_MASK a) { return _cvtmask8_u32(a); }
+ static inline MD_SIMD_MASK simd_mask_and(MD_SIMD_MASK a, MD_SIMD_MASK b) { return _kand_mask8(a, b); }
+ static inline MD_SIMD_MASK simd_mask_cond_lt(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_cmp_pd_mask(a, b, _CMP_LT_OQ); }
+ static MD_SIMD_FLOAT simd_load(MD_FLOAT *c0, int d) {
+ MD_SIMD_FLOAT x;
+ #ifdef CLUSTER_AOS
+ __m128i aos_gather_vindex = _mm_set_epi32(9, 6, 3, 0);
+ __m128i vindex = _mm_add_epi32(aos_gather_vindex, _mm_set1_epi32(d));
+ x = _mm256_i32gather_pd(c0, vindex, sizeof(double));
+ #else
+ x = _mm256_load_pd(&c0[d * CLUSTER_DIM_M]);
+ #endif
+ return x;
+ }
+ static inline MD_FLOAT simd_horizontal_sum(MD_SIMD_FLOAT a) {
+ __m128d a0, a1;
+ // test with shuffle & add as an alternative to hadd later
+ a = _mm256_hadd_pd(a, a);
+ a0 = _mm256_castpd256_pd128(a);
+ a1 = _mm256_extractf128_pd(a, 0x1);
+ a0 = _mm_add_sd(a0, a1);
+ return *((double *) &a0);
+ }
+ #endif
static inline void simd_print_real(const char *ref, MD_SIMD_FLOAT a) {
- double x[8];
+ double x[VECTOR_WIDTH];
memcpy(x, &a, sizeof(x));
fprintf(stdout, "%s: ", ref);
- for(int i = 0; i < 8; i++) {
+ for(int i = 0; i < VECTOR_WIDTH; i++) {
fprintf(stdout, "%f ", x[i]);
}
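Regarding the "shuffle & add" note in simd_horizontal_sum: one common way to sum a __m256d without _mm256_hadd_pd is sketched below. This is only an illustration of that idea (hsum256_pd is a hypothetical name, not code from this commit):

#include <immintrin.h>

/* Horizontal sum of a __m256d without hadd: add the two 128-bit halves,
 * then fold the upper lane of the result onto the lower one. */
static inline double hsum256_pd(__m256d a) {
    __m128d lo = _mm256_castpd256_pd128(a);      /* lanes 0,1 */
    __m128d hi = _mm256_extractf128_pd(a, 0x1);  /* lanes 2,3 */
    __m128d s  = _mm_add_pd(lo, hi);             /* (0+2, 1+3) */
    __m128d sh = _mm_unpackhi_pd(s, s);          /* move upper lane down */
    return _mm_cvtsd_f64(_mm_add_sd(s, sh));     /* (0+2) + (1+3) */
}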