Reorganize SIMD files and split AVX and AVX2
Signed-off-by: Rafael Ravedutti <rafaelravedutti@gmail.com>
parent f293cec960
commit cd1fbfb3c8
Makefile (20 changed lines)
@@ -65,20 +65,24 @@ ifneq ($(VECTOR_WIDTH),)
 DEFINES += -DVECTOR_WIDTH=$(VECTOR_WIDTH)
 endif

-ifeq ($(strip $(MASK_REGISTERS)),true)
-DEFINES += -DMASK_REGISTERS
+ifeq ($(strip $(__SIMD_KERNEL__)),true)
+DEFINES += -D__SIMD_KERNEL__
 endif

-ifeq ($(strip $(SIMD_KERNEL_AVAILABLE)),true)
-DEFINES += -DSIMD_KERNEL_AVAILABLE
+ifeq ($(strip $(__SSE__)),true)
+DEFINES += -D__ISA_SSE__
 endif

-ifeq ($(strip $(NO_AVX2)),true)
-DEFINES += -DNO_AVX2
+ifeq ($(strip $(__ISA_AVX__)),true)
+DEFINES += -D__ISA_AVX__
 endif

-ifeq ($(strip $(AVX512)),true)
-DEFINES += -DAVX512
+ifeq ($(strip $(__ISA_AVX2__)),true)
+DEFINES += -D__ISA_AVX2__
 endif

+ifeq ($(strip $(__ISA_AVX512__)),true)
+DEFINES += -D__ISA_AVX512__
+endif
+
 ifeq ($(strip $(ENABLE_OMP_SIMD)),true)
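These flags only matter downstream, where the C sources branch on the resulting preprocessor defines. As a minimal sketch of how a -DVECTOR_WIDTH=N define is typically consumed (not project code; pad_to_vector and its fallback value are assumptions for illustration):

    /* Hypothetical sketch: consuming the -DVECTOR_WIDTH=N define the
     * Makefile above exports. Not part of this commit. */
    #include <stdio.h>

    #ifndef VECTOR_WIDTH
    #define VECTOR_WIDTH 4 /* assumed fallback for this sketch */
    #endif

    /* Round an element count up to a whole number of SIMD vectors, so a
     * vector loop never needs a scalar remainder pass. */
    static int pad_to_vector(int n) {
        return ((n + VECTOR_WIDTH - 1) / VECTOR_WIDTH) * VECTOR_WIDTH;
    }

    int main(void) {
        printf("10 elements padded to %d\n", pad_to_vector(10));
        return 0;
    }
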
@@ -4,10 +4,14 @@
  * Use of this source code is governed by a LGPL-3.0
  * license that can be found in the LICENSE file.
  */
 #ifndef __SIMD_H__
 #define __SIMD_H__

 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <immintrin.h>
+
+#ifndef NO_ZMM_INTRIN
+# include <zmmintrin.h>
+#endif

@@ -20,17 +24,27 @@
 # define CLUSTER_N 1
 #endif

-#ifdef AVX512
+#if defined(__ISA_AVX512__)
 # if PRECISION == 2
 #   include "simd/avx512_double.h"
 # else
 #   include "simd/avx512_float.h"
 # endif
-#else
+#endif
+
+#if defined(__ISA_AVX2__)
 # if PRECISION == 2
-#   include "simd/avx_avx2_double.h"
+#   include "simd/avx2_double.h"
 # else
-#   include "simd/avx_avx2_float.h"
+#   include "simd/avx2_float.h"
 # endif
 #endif
+
+#if defined(__ISA_AVX__)
+# if PRECISION == 2
+#   include "simd/avx_double.h"
+# else
+#   include "simd/avx_float.h"
+# endif
+#endif
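This dispatch guarantees that exactly one backend header is included per build. A standalone sketch of the same one-backend-per-build pattern, keyed off the compiler's own feature macros rather than the Makefile-provided __ISA_*__ defines (backend names and lane counts here are illustrative, not MD-Bench code):

    /* Standalone sketch: pick one SIMD backend per translation unit.
     * Compile with e.g. -mavx2 or -mavx512f and run. */
    #include <stdio.h>

    #if defined(__AVX512F__)
    #   define BACKEND "avx512"
    #   define LANES_DOUBLE 8
    #elif defined(__AVX2__)
    #   define BACKEND "avx2"
    #   define LANES_DOUBLE 4
    #elif defined(__AVX__)
    #   define BACKEND "avx"
    #   define LANES_DOUBLE 4
    #else
    #   define BACKEND "scalar"
    #   define LANES_DOUBLE 1
    #endif

    int main(void) {
        printf("selected backend: %s (%d doubles per vector)\n", BACKEND, LANES_DOUBLE);
        return 0;
    }
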
@@ -50,3 +64,5 @@ static inline void simd_print_real(const char *ref, MD_SIMD_FLOAT a) {
 }

+static inline void simd_print_mask(const char *ref, MD_SIMD_MASK a) { fprintf(stdout, "%s: %x\n", ref, simd_mask_to_u32(a)); }
+
 #endif // __SIMD_H__

@@ -10,12 +10,7 @@
 #define MD_SIMD_FLOAT __m256d
 #define MD_SIMD_INT __m128i

-#ifdef MASK_REGISTERS
-# define MD_SIMD_MASK __mmask8
-#else
-# define MD_SIMD_MASK __m256d
-#endif
+#define MD_SIMD_MASK __m256d

 static inline MD_SIMD_FLOAT simd_broadcast(MD_FLOAT scalar) { return _mm256_set1_pd(scalar); }
 static inline MD_SIMD_FLOAT simd_zero() { return _mm256_set1_pd(0.0); }
@@ -26,20 +21,20 @@ static inline MD_SIMD_FLOAT simd_load(MD_FLOAT *p) { return _mm256_load_pd(p); }
 static inline void simd_store(MD_FLOAT *p, MD_SIMD_FLOAT a) { _mm256_store_pd(p, a); }
 static inline MD_SIMD_FLOAT simd_load_h_duplicate(const MD_FLOAT *m) {
     MD_SIMD_FLOAT ret;
-    fprintf(stderr, "simd_load_h_duplicate(): Not implemented for AVX/AVX2 with double precision!");
+    fprintf(stderr, "simd_load_h_duplicate(): Not implemented for AVX2 with double precision!");
     exit(-1);
     return ret;
 }

 static inline MD_SIMD_FLOAT simd_load_h_dual(const MD_FLOAT *m) {
     MD_SIMD_FLOAT ret;
-    fprintf(stderr, "simd_load_h_dual(): Not implemented for AVX/AVX2 with double precision!");
+    fprintf(stderr, "simd_load_h_dual(): Not implemented for AVX2 with double precision!");
     exit(-1);
     return ret;
 }

 static inline MD_FLOAT simd_h_dual_incr_reduced_sum(MD_FLOAT *m, MD_SIMD_FLOAT v0, MD_SIMD_FLOAT v1) {
-    fprintf(stderr, "simd_h_dual_incr_reduced_sum(): Not implemented for AVX/AVX2 with double precision!");
+    fprintf(stderr, "simd_h_dual_incr_reduced_sum(): Not implemented for AVX2 with double precision!");
     exit(-1);
     return 0.0;
 }
@@ -64,11 +59,9 @@ static inline MD_FLOAT simd_incr_reduced_sum(MD_FLOAT *m, MD_SIMD_FLOAT v0, MD_S
     return *((MD_FLOAT *) &a0);
 }

-#ifdef NO_AVX2
-
 static inline MD_SIMD_FLOAT select_by_mask(MD_SIMD_FLOAT a, MD_SIMD_MASK m) { return _mm256_and_pd(a, m); }
-static inline MD_SIMD_FLOAT simd_reciprocal(MD_SIMD_FLOAT a) { return _mm256_cvtps_pd(_mm_rcp_ps(_mm256_cvtpd_ps(a))); }
-static inline MD_SIMD_FLOAT simd_fma(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_FLOAT c) { return simd_add(simd_mul(a, b), c); }
+static inline MD_SIMD_FLOAT simd_reciprocal(MD_SIMD_FLOAT a) { return _mm256_rcp14_pd(a); }
+static inline MD_SIMD_FLOAT simd_fma(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_FLOAT c) { return _mm256_fmadd_pd(a, b, c); }
 static inline MD_SIMD_FLOAT simd_masked_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_MASK m) { return simd_add(a, _mm256_and_pd(b, m)); }
 static inline MD_SIMD_MASK simd_mask_cond_lt(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_cmp_pd(a, b, _CMP_LT_OQ); }
 static inline MD_SIMD_MASK simd_mask_int_cond_lt(MD_SIMD_INT a, MD_SIMD_INT b) { return _mm256_cvtepi32_pd(_mm_cmplt_epi32(a, b)); }
@@ -81,26 +74,6 @@ static inline MD_SIMD_MASK simd_mask_from_u32(unsigned int a) {
 }
 // TODO: Implement this, although it is just required for debugging
 static inline int simd_mask_to_u32(MD_SIMD_MASK a) { return 0; }
-static inline MD_FLOAT simd_h_reduce_sum(MD_SIMD_FLOAT a) {
-    __m128d a0, a1;
-    a = _mm256_add_pd(a, _mm256_permute_pd(a, 0b0101));
-    a0 = _mm256_castpd256_pd128(a);
-    a1 = _mm256_extractf128_pd(a, 0x1);
-    a0 = _mm_add_sd(a0, a1);
-    return *((MD_FLOAT *) &a0);
-}
-
-#else // AVX2
-
-static inline MD_SIMD_FLOAT select_by_mask(MD_SIMD_FLOAT a, MD_SIMD_MASK m) { return _mm256_mask_mov_pd(_mm256_setzero_pd(), m, a); }
-static inline MD_SIMD_FLOAT simd_reciprocal(MD_SIMD_FLOAT a) { return _mm256_rcp14_pd(a); }
-static inline MD_SIMD_FLOAT simd_fma(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_FLOAT c) { return _mm256_fmadd_pd(a, b, c); }
-static inline MD_SIMD_FLOAT simd_masked_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_MASK m) { return _mm256_mask_add_pd(a, m, a, b); }
-static inline MD_SIMD_MASK simd_mask_cond_lt(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_cmp_pd_mask(a, b, _CMP_LT_OQ); }
-static inline MD_SIMD_MASK simd_mask_int_cond_lt(MD_SIMD_INT a, MD_SIMD_INT b) { return _mm_cmplt_epi32_mask(a, b); }
-static inline MD_SIMD_MASK simd_mask_and(MD_SIMD_MASK a, MD_SIMD_MASK b) { return _kand_mask8(a, b); }
-static inline MD_SIMD_MASK simd_mask_from_u32(unsigned int a) { return _cvtu32_mask8(a); }
-static inline unsigned int simd_mask_to_u32(MD_SIMD_MASK a) { return _cvtmask8_u32(a); }
 static inline MD_FLOAT simd_h_reduce_sum(MD_SIMD_FLOAT a) {
     __m128d a0, a1;
     // test with shuffle & add as an alternative to hadd later
@@ -111,10 +84,8 @@ static inline MD_FLOAT simd_h_reduce_sum(MD_SIMD_FLOAT a) {
     return *((MD_FLOAT *) &a0);
 }

-#endif
-
 static inline void simd_h_decr3(MD_FLOAT *m, MD_SIMD_FLOAT a0, MD_SIMD_FLOAT a1, MD_SIMD_FLOAT a2) {
-    fprintf(stderr, "simd_h_decr3(): Not implemented for AVX/AVX2 with double precision!");
+    fprintf(stderr, "simd_h_decr3(): Not implemented for AVX2 with double precision!");
     exit(-1);
 }
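Note the mask strategy after the split: the AVX2 double-precision backend keeps MD_SIMD_MASK as a plain __m256d bit pattern rather than an __mmask8 register, so comparisons double as masks. A self-contained sketch of that emulation style, in the spirit of simd_mask_cond_lt()/simd_masked_add() above (assumes an AVX-capable CPU; not project code):

    /* Sketch: __m256d-as-mask emulation. Compile with -mavx and run. */
    #include <stdio.h>
    #include <immintrin.h>

    int main(void) {
        __m256d a = _mm256_set_pd(4.0, 3.0, 2.0, 1.0); /* lanes: 1 2 3 4 */
        __m256d b = _mm256_set1_pd(2.5);
        /* Compare yields all-ones/all-zeros lanes, usable as a bitmask. */
        __m256d lt = _mm256_cmp_pd(a, b, _CMP_LT_OQ);
        /* Masked add: zero out lanes of b where the mask is false, then add. */
        __m256d r = _mm256_add_pd(a, _mm256_and_pd(b, lt));

        double out[4];
        _mm256_storeu_pd(out, r);
        for (int i = 0; i < 4; i++)
            printf("lane %d: %g\n", i, out[i]); /* expect 3.5 4.5 3 4 */
        return 0;
    }
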

common/includes/simd/avx_double.h (new file, 99 lines)
@@ -0,0 +1,99 @@
/*
 * Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
 * All rights reserved. This file is part of MD-Bench.
 * Use of this source code is governed by a LGPL-3.0
 * license that can be found in the LICENSE file.
 */
#include <stdlib.h>
#include <string.h>
#include <immintrin.h>

#define MD_SIMD_FLOAT __m256d
#define MD_SIMD_INT __m128i
#define MD_SIMD_MASK __m256d

static inline MD_SIMD_FLOAT simd_broadcast(MD_FLOAT scalar) { return _mm256_set1_pd(scalar); }
static inline MD_SIMD_FLOAT simd_zero() { return _mm256_set1_pd(0.0); }
static inline MD_SIMD_FLOAT simd_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_add_pd(a, b); }
static inline MD_SIMD_FLOAT simd_sub(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_sub_pd(a, b); }
static inline MD_SIMD_FLOAT simd_mul(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_mul_pd(a, b); }
static inline MD_SIMD_FLOAT simd_load(MD_FLOAT *p) { return _mm256_load_pd(p); }
static inline void simd_store(MD_FLOAT *p, MD_SIMD_FLOAT a) { _mm256_store_pd(p, a); }
static inline MD_SIMD_FLOAT simd_load_h_duplicate(const MD_FLOAT *m) {
    MD_SIMD_FLOAT ret;
    fprintf(stderr, "simd_load_h_duplicate(): Not implemented for AVX with double precision!");
    exit(-1);
    return ret;
}

static inline MD_SIMD_FLOAT simd_load_h_dual(const MD_FLOAT *m) {
    MD_SIMD_FLOAT ret;
    fprintf(stderr, "simd_load_h_dual(): Not implemented for AVX with double precision!");
    exit(-1);
    return ret;
}

static inline MD_FLOAT simd_h_dual_incr_reduced_sum(MD_FLOAT *m, MD_SIMD_FLOAT v0, MD_SIMD_FLOAT v1) {
    fprintf(stderr, "simd_h_dual_incr_reduced_sum(): Not implemented for AVX with double precision!");
    exit(-1);
    return 0.0;
}

static inline MD_FLOAT simd_incr_reduced_sum(MD_FLOAT *m, MD_SIMD_FLOAT v0, MD_SIMD_FLOAT v1, MD_SIMD_FLOAT v2, MD_SIMD_FLOAT v3) {
    __m256d t0, t1, t2;
    __m128d a0, a1;

    t0 = _mm256_hadd_pd(v0, v1);
    t1 = _mm256_hadd_pd(v2, v3);
    t2 = _mm256_permute2f128_pd(t0, t1, 0x21);
    t0 = _mm256_add_pd(t0, t2);
    t1 = _mm256_add_pd(t1, t2);
    t0 = _mm256_blend_pd(t0, t1, 0b1100);
    t1 = _mm256_add_pd(t0, _mm256_load_pd(m));
    _mm256_store_pd(m, t1);

    t0 = _mm256_add_pd(t0, _mm256_permute_pd(t0, 0b0101));
    a0 = _mm256_castpd256_pd128(t0);
    a1 = _mm256_extractf128_pd(t0, 0x1);
    a0 = _mm_add_sd(a0, a1);
    return *((MD_FLOAT *) &a0);
}

static inline MD_SIMD_FLOAT select_by_mask(MD_SIMD_FLOAT a, MD_SIMD_MASK m) { return _mm256_and_pd(a, m); }
static inline MD_SIMD_FLOAT simd_reciprocal(MD_SIMD_FLOAT a) { return _mm256_cvtps_pd(_mm_rcp_ps(_mm256_cvtpd_ps(a))); }
static inline MD_SIMD_FLOAT simd_fma(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_FLOAT c) { return simd_add(simd_mul(a, b), c); }
static inline MD_SIMD_FLOAT simd_masked_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_MASK m) { return simd_add(a, _mm256_and_pd(b, m)); }
static inline MD_SIMD_MASK simd_mask_cond_lt(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_cmp_pd(a, b, _CMP_LT_OQ); }
static inline MD_SIMD_MASK simd_mask_int_cond_lt(MD_SIMD_INT a, MD_SIMD_INT b) { return _mm256_cvtepi32_pd(_mm_cmplt_epi32(a, b)); }
static inline MD_SIMD_MASK simd_mask_and(MD_SIMD_MASK a, MD_SIMD_MASK b) { return _mm256_and_pd(a, b); }
// TODO: Initialize all diagonal cases and just select the proper one (all bits set or diagonal) based on cond0
static inline MD_SIMD_MASK simd_mask_from_u32(unsigned int a) {
    const unsigned long long int all = 0xFFFFFFFFFFFFFFFF;
    const unsigned long long int none = 0x0;
    return _mm256_castsi256_pd(_mm256_set_epi64x((a & 0x8) ? all : none, (a & 0x4) ? all : none, (a & 0x2) ? all : none, (a & 0x1) ? all : none));
}
// TODO: Implement this, although it is just required for debugging
static inline int simd_mask_to_u32(MD_SIMD_MASK a) { return 0; }
static inline MD_FLOAT simd_h_reduce_sum(MD_SIMD_FLOAT a) {
    __m128d a0, a1;
    a = _mm256_add_pd(a, _mm256_permute_pd(a, 0b0101));
    a0 = _mm256_castpd256_pd128(a);
    a1 = _mm256_extractf128_pd(a, 0x1);
    a0 = _mm_add_sd(a0, a1);
    return *((MD_FLOAT *) &a0);
}

static inline void simd_h_decr3(MD_FLOAT *m, MD_SIMD_FLOAT a0, MD_SIMD_FLOAT a1, MD_SIMD_FLOAT a2) {
    fprintf(stderr, "simd_h_decr3(): Not implemented for AVX with double precision!");
    exit(-1);
}

// Functions used in LAMMPS kernel
static inline MD_SIMD_FLOAT simd_gather(MD_SIMD_INT vidx, const MD_FLOAT *m, int s) { return _mm256_i32gather_pd(m, vidx, s); }
static inline MD_SIMD_INT simd_int_broadcast(int scalar) { return _mm_set1_epi32(scalar); }
static inline MD_SIMD_INT simd_int_zero() { return _mm_setzero_si128(); }
static inline MD_SIMD_INT simd_int_seq() { return _mm_set_epi32(3, 2, 1, 0); }
static inline MD_SIMD_INT simd_int_load(const int *m) { return _mm_load_si128((__m128i const *) m); }
static inline MD_SIMD_INT simd_int_add(MD_SIMD_INT a, MD_SIMD_INT b) { return _mm_add_epi32(a, b); }
static inline MD_SIMD_INT simd_int_mul(MD_SIMD_INT a, MD_SIMD_INT b) { return _mm_mul_epi32(a, b); }
static inline MD_SIMD_INT simd_int_mask_load(const int *m, MD_SIMD_MASK k) { return simd_int_load(m) & _mm256_cvtpd_epi32(k); }
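All reductions in this new AVX backend funnel through the same permute/extract/add_sd tail. A standalone check of that horizontal-sum sequence against a scalar sum (assumes an AVX-capable CPU; hsum_pd is an illustrative name, not project code):

    /* Sketch: the horizontal-sum tail used by simd_h_reduce_sum() above,
     * checked against a scalar sum. Compile with -mavx and run. */
    #include <stdio.h>
    #include <immintrin.h>

    static double hsum_pd(__m256d v) {
        /* Swap neighbors within each 128-bit half and add: every lane now
         * holds the sum of its pair. */
        v = _mm256_add_pd(v, _mm256_permute_pd(v, 0b0101));
        __m128d lo = _mm256_castpd256_pd128(v);
        __m128d hi = _mm256_extractf128_pd(v, 0x1);
        /* Add the two halves; lane 0 holds the full sum. */
        __m128d s = _mm_add_sd(lo, hi);
        return _mm_cvtsd_f64(s);
    }

    int main(void) {
        double x[4] = { 1.0, 2.0, 3.0, 4.0 };
        __m256d v = _mm256_loadu_pd(x);
        printf("simd: %g, scalar: %g\n", hsum_pd(v), x[0] + x[1] + x[2] + x[3]); /* both 10 */
        return 0;
    }
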
common/includes/simd/avx_float.h (new file, 84 lines)
@@ -0,0 +1,84 @@
/*
 * Copyright (C) 2022 NHR@FAU, University Erlangen-Nuremberg.
 * All rights reserved. This file is part of MD-Bench.
 * Use of this source code is governed by a LGPL-3.0
 * license that can be found in the LICENSE file.
 */
#include <immintrin.h>
#include <zmmintrin.h>

#define MD_SIMD_FLOAT __m256
#define MD_SIMD_MASK __mmask8

static inline MD_SIMD_FLOAT simd_broadcast(MD_FLOAT scalar) { return _mm256_set1_ps(scalar); }
static inline MD_SIMD_FLOAT simd_zero() { return _mm256_set1_ps(0.0); }
static inline MD_SIMD_FLOAT simd_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_add_ps(a, b); }
static inline MD_SIMD_FLOAT simd_sub(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_sub_ps(a, b); }
static inline MD_SIMD_FLOAT simd_mul(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_mul_ps(a, b); }
static inline MD_SIMD_FLOAT simd_load(MD_FLOAT *p) { return _mm256_load_ps(p); }
static inline void simd_store(MD_FLOAT *p, MD_SIMD_FLOAT a) { _mm256_store_ps(p, a); }
static inline MD_SIMD_FLOAT select_by_mask(MD_SIMD_FLOAT a, MD_SIMD_MASK m) { return _mm256_mask_mov_ps(_mm256_setzero_ps(), m, a); }
static inline MD_SIMD_FLOAT simd_reciprocal(MD_SIMD_FLOAT a) { return _mm256_rcp14_ps(a); }
static inline MD_SIMD_FLOAT simd_fma(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_FLOAT c) { return _mm256_fmadd_ps(a, b, c); }
static inline MD_SIMD_FLOAT simd_masked_add(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b, MD_SIMD_MASK m) { return _mm256_mask_add_ps(a, m, a, b); }
static inline MD_SIMD_MASK simd_mask_cond_lt(MD_SIMD_FLOAT a, MD_SIMD_FLOAT b) { return _mm256_cmp_ps_mask(a, b, _CMP_LT_OQ); }
static inline MD_SIMD_MASK simd_mask_and(MD_SIMD_MASK a, MD_SIMD_MASK b) { return _kand_mask8(a, b); }
static inline MD_SIMD_MASK simd_mask_from_u32(unsigned int a) { return _cvtu32_mask8(a); }
static inline unsigned int simd_mask_to_u32(MD_SIMD_MASK a) { return _cvtmask8_u32(a); }
static inline MD_FLOAT simd_h_reduce_sum(MD_SIMD_FLOAT a) {
    __m128 t0;
    t0 = _mm_add_ps(_mm256_castps256_ps128(a), _mm256_extractf128_ps(a, 0x1));
    t0 = _mm_add_ps(t0, _mm_permute_ps(t0, _MM_SHUFFLE(1, 0, 3, 2)));
    t0 = _mm_add_ss(t0, _mm_permute_ps(t0, _MM_SHUFFLE(0, 3, 2, 1)));
    return *((MD_FLOAT *) &t0);
}

static inline MD_FLOAT simd_incr_reduced_sum(MD_FLOAT *m, MD_SIMD_FLOAT v0, MD_SIMD_FLOAT v1, MD_SIMD_FLOAT v2, MD_SIMD_FLOAT v3) {
    __m128 t0, t2;
    v0 = _mm256_hadd_ps(v0, v1);
    v2 = _mm256_hadd_ps(v2, v3);
    v0 = _mm256_hadd_ps(v0, v2);
    t0 = _mm_add_ps(_mm256_castps256_ps128(v0), _mm256_extractf128_ps(v0, 0x1));
    t2 = _mm_add_ps(t0, _mm_load_ps(m));
    _mm_store_ps(m, t2);

    t0 = _mm_add_ps(t0, _mm_permute_ps(t0, _MM_SHUFFLE(1, 0, 3, 2)));
    t0 = _mm_add_ss(t0, _mm_permute_ps(t0, _MM_SHUFFLE(0, 3, 2, 1)));
    return *((MD_FLOAT *) &t0);
}

static inline MD_SIMD_FLOAT simd_load_h_duplicate(const MD_FLOAT *m) {
    return _mm256_broadcast_ps((const __m128 *)(m));
}

static inline MD_SIMD_FLOAT simd_load_h_dual(const MD_FLOAT *m) {
    __m128 t0, t1;
    t0 = _mm_broadcast_ss(m);
    t1 = _mm_broadcast_ss(m + 1);
    return _mm256_insertf128_ps(_mm256_castps128_ps256(t0), t1, 0x1);
}

static inline MD_FLOAT simd_h_dual_incr_reduced_sum(MD_FLOAT *m, MD_SIMD_FLOAT v0, MD_SIMD_FLOAT v1) {
    __m128 t0, t1;
    v0 = _mm256_hadd_ps(v0, v1);
    t0 = _mm256_extractf128_ps(v0, 0x1);
    t0 = _mm_hadd_ps(_mm256_castps256_ps128(v0), t0);
    t0 = _mm_permute_ps(t0, _MM_SHUFFLE(3, 1, 2, 0));
    t1 = _mm_add_ps(t0, _mm_load_ps(m));
    _mm_store_ps(m, t1);

    t0 = _mm_add_ps(t0, _mm_permute_ps(t0, _MM_SHUFFLE(1, 0, 3, 2)));
    t0 = _mm_add_ss(t0, _mm_permute_ps(t0, _MM_SHUFFLE(0, 3, 2, 1)));
    return *((MD_FLOAT *) &t0);
}

inline void simd_h_decr(MD_FLOAT *m, MD_SIMD_FLOAT a) {
    __m128 asum = _mm_add_ps(_mm256_castps256_ps128(a), _mm256_extractf128_ps(a, 0x1));
    _mm_store_ps(m, _mm_sub_ps(_mm_load_ps(m), asum));
}

static inline void simd_h_decr3(MD_FLOAT *m, MD_SIMD_FLOAT a0, MD_SIMD_FLOAT a1, MD_SIMD_FLOAT a2) {
    simd_h_decr(m, a0);
    simd_h_decr(m + CLUSTER_N, a1);
    simd_h_decr(m + CLUSTER_N * 2, a2);
}
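In single precision a 256-bit register holds two 4-float cluster halves, which is why this file implements the h_* (half-register) operations for real instead of stubbing them out. A standalone sketch of the broadcast trick behind simd_load_h_duplicate() (assumes an AVX-capable CPU; not project code):

    /* Sketch: duplicate one 4-float cluster into both 128-bit halves of a
     * __m256, as simd_load_h_duplicate() above does. Compile with -mavx. */
    #include <stdio.h>
    #include <immintrin.h>

    int main(void) {
        float cluster[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
        /* vbroadcastf128: load 128 bits and replicate into both halves. */
        __m256 v = _mm256_broadcast_ps((const __m128 *) cluster);

        float out[8];
        _mm256_storeu_ps(out, v);
        for (int i = 0; i < 8; i++)
            printf("%g ", out[i]); /* prints: 1 2 3 4 1 2 3 4 */
        printf("\n");
        return 0;
    }
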
@@ -2,9 +2,6 @@
 TAG ?= ICC
 # Instruction set (SSE/AVX/AVX2/AVX512)
 ISA ?= AVX512
-# Use mask registers (AVX512 must be available in the target CPU)
-# This is always true when ISA is set to AVX512
-MASK_REGISTERS ?= true
 # Optimization scheme (lammps/gromacs/clusters_per_bin)
 OPT_SCHEME ?= lammps
 # Enable likwid (true or false)

@@ -1,20 +1,23 @@
 ifeq ($(strip $(ISA)), SSE)
-    _VECTOR_WIDTH=2
+    __ISA_SSE__=true
+    __SIMD_WIDTH_DBL__=2
 else ifeq ($(strip $(ISA)), AVX)
-    # Vector width is 4 but AVX2 instruction set is not supported
-    NO_AVX2=true
-    _VECTOR_WIDTH=4
+    __ISA_AVX__=true
+    __SIMD_WIDTH_DBL__=4
 else ifeq ($(strip $(ISA)), AVX2)
-    _VECTOR_WIDTH=4
+    #SIMD_KERNEL_AVAILABLE=true
+    __ISA_AVX2__=true
+    #__SIMD_KERNEL__=true
+    __SIMD_WIDTH_DBL__=4
 else ifeq ($(strip $(ISA)), AVX512)
-    AVX512=true
     SIMD_KERNEL_AVAILABLE=true
-    _VECTOR_WIDTH=8
+    __ISA_AVX512__=true
+    __SIMD_KERNEL__=true
+    __SIMD_WIDTH_DBL__=8
 endif

 # SIMD width is specified in double-precision, hence it may
 # need to be adjusted for single-precision
 ifeq ($(strip $(DATA_TYPE)), SP)
-    VECTOR_WIDTH=$(shell echo $$(( $(_VECTOR_WIDTH) * 2 )))
+    VECTOR_WIDTH=$(shell echo $$(( $(__SIMD_WIDTH_DBL__) * 2 )))
 else
-    VECTOR_WIDTH=$(_VECTOR_WIDTH)
+    VECTOR_WIDTH=$(__SIMD_WIDTH_DBL__)
 endif
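The widths are stated in double-precision lanes and doubled for SP because the register size is fixed: halving the element size doubles the lane count. A quick check of that relationship (assumes 256-bit AVX registers; not project code):

    /* Sketch: SP lane count is exactly 2x the DP lane count for a fixed
     * register width. Compile with -mavx and run. */
    #include <stdio.h>
    #include <immintrin.h>

    int main(void) {
        int lanes_dp = (int)(sizeof(__m256d) / sizeof(double)); /* 4 */
        int lanes_sp = (int)(sizeof(__m256) / sizeof(float));   /* 8 */
        printf("DP lanes: %d, SP lanes: %d\n", lanes_dp, lanes_sp);
        return 0;
    }
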
@@ -14,7 +14,7 @@
 #include <stats.h>
 #include <timing.h>

-#ifdef SIMD_KERNEL_AVAILABLE
+#ifdef __SIMD_KERNEL__
 #include <simd.h>
 #endif

@@ -191,7 +191,7 @@ double computeForceLJFullNeigh_simd(Parameter *param, Atom *atom, Neighbor *neig
     double S = getTimeStamp();
     LIKWID_MARKER_START("force");

-#ifndef SIMD_KERNEL_AVAILABLE
+#ifndef __SIMD_KERNEL__
     fprintf(stderr, "Error: SIMD kernel not implemented for specified instruction set!");
     exit(-1);
 #else
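With the rename, builds without a SIMD kernel still compile and link; requesting the SIMD path then traps at run time instead. A minimal sketch of that guard pattern in isolation (function name and message are illustrative, not the project's code):

    /* Sketch: the compile-time guard used above. The SIMD entry point
     * always links, but aborts unless the build enabled the kernel
     * (e.g. with -D__SIMD_KERNEL__). */
    #include <stdio.h>
    #include <stdlib.h>

    double compute_force_simd(void) {
    #ifndef __SIMD_KERNEL__
        fprintf(stderr, "Error: SIMD kernel not implemented for specified instruction set!\n");
        exit(EXIT_FAILURE);
    #else
        /* ... vectorized force loop would run here ... */
        return 0.0;
    #endif
    }

    int main(void) {
        printf("force result: %g\n", compute_force_simd());
        return 0;
    }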