Only malloc once at the beginning plus measurement csv

parent 134e3f4b78
commit 0ea0587442

src/force.cu: 53 changed lines
@@ -98,7 +98,11 @@ __global__ void calc_force(
 
 extern "C" {
 
+bool initialized = false;
+Atom c_atom;
+
 double computeForce(
+    bool reneighbourHappenend,
     Parameter *param,
     Atom *atom,
     Neighbor *neighbor
@@ -125,12 +129,11 @@ double computeForce(
     const char *num_threads_env = getenv("NUM_THREADS");
     int num_threads = 0;
     if(num_threads_env == nullptr)
-        num_threads = 2;
+        num_threads = 32;
     else {
         num_threads = atoi(num_threads_env);
     }
 
-    Atom c_atom;
     c_atom.Natoms = atom->Natoms;
     c_atom.Nlocal = atom->Nlocal;
     c_atom.Nghost = atom->Nghost;
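
Note on the block-size change above: the default goes from 2 to 32 threads per block, i.e. exactly one warp, matching the comment further down that the block size should be a multiple of 32. A minimal sketch of how this value feeds the launch configuration (the helper name blocksFor is illustrative, not from the code; the ceiling division is equivalent to the ceil() call used below):

#include <cstdlib>

// Sketch: block size from NUM_THREADS (default: one warp), plus the number
// of blocks needed so that every one of the nlocal atoms gets a thread.
static int blocksFor(int nlocal, int *threads_per_block) {
    const char *env = getenv("NUM_THREADS");
    *threads_per_block = (env != nullptr) ? atoi(env) : 32;
    return (nlocal + *threads_per_block - 1) / *threads_per_block; // integer ceil
}

With Nlocal = 1000 and the default of 32, this yields 32 blocks: 31 full ones and one partially filled.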
@@ -140,67 +143,55 @@ double computeForce(
     /*
     int nDevices;
     cudaGetDeviceCount(&nDevices);
+    size_t free, total;
     for(int i = 0; i < nDevices; ++i) {
+        cudaMemGetInfo( &free, &total );
         cudaDeviceProp prop;
         cudaGetDeviceProperties(&prop, i);
-        printf("DEVICE %d/%d NAME: %s\r\n", i + 1, nDevices, prop.name);
+        printf("DEVICE %d/%d NAME: %s\r\n with %ld MB/%ld MB memory used", i + 1, nDevices, prop.name, free / 1024 / 1024, total / 1024 / 1024);
     }
+    */
 
     // Choose GPU where you want to execute code on
     // It is possible to execute the same kernel on multiple GPUs but you have to copy the data multiple times
     // Executing `cudaSetDevice(N)` before cudaMalloc / cudaMemcpy / calc_force <<< >>> will set the GPU accordingly
-    */
 
     // HINT: Run with cuda-memcheck ./MDBench-NVCC in case of error
     // HINT: Only works for data layout = AOS!!!
 
+    if(!initialized) {
     checkCUDAError( "c_atom.x malloc", cudaMalloc((void**)&(c_atom.x), sizeof(MD_FLOAT) * atom->Nmax * 3) );
-    checkCUDAError( "c_atom.x memcpy", cudaMemcpy(c_atom.x, atom->x, sizeof(MD_FLOAT) * atom->Nmax * 3, cudaMemcpyHostToDevice) );
 
     checkCUDAError( "c_atom.fx malloc", cudaMalloc((void**)&(c_atom.fx), sizeof(MD_FLOAT) * Nlocal) );
-    checkCUDAError( "c_atom.fx memcpy", cudaMemcpy(c_atom.fx, fx, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
 
     checkCUDAError( "c_atom.fy malloc", cudaMalloc((void**)&(c_atom.fy), sizeof(MD_FLOAT) * Nlocal) );
-    checkCUDAError( "c_atom.fy memcpy", cudaMemcpy(c_atom.fy, fy, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
 
     checkCUDAError( "c_atom.fz malloc", cudaMalloc((void**)&(c_atom.fz), sizeof(MD_FLOAT) * Nlocal) );
-    checkCUDAError( "c_atom.fz memcpy", cudaMemcpy(c_atom.fz, fz, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
 
     checkCUDAError( "c_atom.type malloc", cudaMalloc((void**)&(c_atom.type), sizeof(int) * atom->Nmax) );
-    checkCUDAError( "c_atom.type memcpy", cudaMemcpy(c_atom.type, atom->type, sizeof(int) * atom->Nmax, cudaMemcpyHostToDevice) );
 
     checkCUDAError( "c_atom.epsilon malloc", cudaMalloc((void**)&(c_atom.epsilon), sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes) );
-    checkCUDAError( "c_atom.epsilon memcpy", cudaMemcpy(c_atom.epsilon, atom->epsilon, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
 
     checkCUDAError( "c_atom.sigma6 malloc", cudaMalloc((void**)&(c_atom.sigma6), sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes) );
-    checkCUDAError( "c_atom.sigma6 memcpy", cudaMemcpy(c_atom.sigma6, atom->sigma6, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
 
     checkCUDAError( "c_atom.cutforcesq malloc", cudaMalloc((void**)&(c_atom.cutforcesq), sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes) );
 
+        initialized = true;
+    }
 
+    checkCUDAError( "c_atom.x memcpy", cudaMemcpy(c_atom.x, atom->x, sizeof(MD_FLOAT) * atom->Nmax * 3, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.fx memcpy", cudaMemcpy(c_atom.fx, fx, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.fy memcpy", cudaMemcpy(c_atom.fy, fy, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.fz memcpy", cudaMemcpy(c_atom.fz, fz, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.type memcpy", cudaMemcpy(c_atom.type, atom->type, sizeof(int) * atom->Nmax, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.epsilon memcpy", cudaMemcpy(c_atom.epsilon, atom->epsilon, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.sigma6 memcpy", cudaMemcpy(c_atom.sigma6, atom->sigma6, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
     checkCUDAError( "c_atom.cutforcesq memcpy", cudaMemcpy(c_atom.cutforcesq, atom->cutforcesq, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
 
-    // double start_memory_bandwidth = getTimeStamp();
 
     int *c_neighs;
     checkCUDAError( "c_neighs malloc", cudaMalloc((void**)&c_neighs, sizeof(int) * Nlocal * neighbor->maxneighs) );
     checkCUDAError( "c_neighs memcpy", cudaMemcpy(c_neighs, neighbor->neighbors, sizeof(int) * Nlocal * neighbor->maxneighs, cudaMemcpyHostToDevice) );
 
-    /*
-    double end_memory_bandwidth = getTimeStamp();
-    double memory_bandwith_time = (end_memory_bandwidth - start_memory_bandwidth);
-    const unsigned long bytes = sizeof(int) * Nlocal * neighbor->maxneighs;
-    const double gb_per_second = ((double)bytes / memory_bandwith_time) / 1024.0 / 1024.0 / 1024.0;
-    printf("Data transfer of %lu bytes took %fs => %f GB/s\r\n", bytes, memory_bandwith_time, gb_per_second);
-    */
 
     int *c_neigh_numneigh;
     checkCUDAError( "c_neigh_numneigh malloc", cudaMalloc((void**)&c_neigh_numneigh, sizeof(int) * Nlocal) );
     checkCUDAError( "c_neigh_numneigh memcpy", cudaMemcpy(c_neigh_numneigh, neighbor->numneigh, sizeof(int) * Nlocal, cudaMemcpyHostToDevice) );
 
     const int num_threads_per_block = num_threads; // this should be multiple of 32 as operations are performed at the level of warps
     const int num_blocks = ceil((float)Nlocal / (float)num_threads_per_block);
-    // printf("Distribution size: %d\r\n%d Blocks with each %d threads\r\n", Nlocal, num_blocks, num_threads_per_block);
 
     double S = getTimeStamp();
     LIKWID_MARKER_START("force");
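
This hunk is the heart of the commit: the cudaMalloc calls now run only on the first invocation, guarded by the file-scope initialized flag, while the host-to-device copies (positions change every timestep) still run on every call. A self-contained sketch of the pattern, reduced to a single buffer; uploadPositions is an illustrative name, and MD_FLOAT and checkCUDAError mirror the code above:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

typedef double MD_FLOAT;            // matches MD_FLOAT in a double-precision build

// Minimal error check in the spirit of the checkCUDAError used above.
static void checkCUDAError(const char *msg, cudaError_t err) {
    if(err != cudaSuccess) {
        fprintf(stderr, "%s: %s\n", msg, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

static bool initialized = false;    // persists across calls, like the file-scope flag above
static MD_FLOAT *d_x = nullptr;     // device buffer, allocated exactly once

void uploadPositions(const MD_FLOAT *x, int nmax) {
    if(!initialized) {
        // First call only: the allocation size (Nmax * 3) is stable across
        // timesteps, so the buffer can be reused by every subsequent call.
        checkCUDAError("d_x malloc", cudaMalloc((void**)&d_x, sizeof(MD_FLOAT) * nmax * 3));
        initialized = true;
    }
    // Every call: positions change each timestep and must be re-uploaded.
    checkCUDAError("d_x memcpy", cudaMemcpy(d_x, x, sizeof(MD_FLOAT) * nmax * 3, cudaMemcpyHostToDevice));
}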
@@ -215,12 +206,14 @@ double computeForce(
     cudaMemcpy(atom->fy, c_atom.fy, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
     cudaMemcpy(atom->fz, c_atom.fz, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
 
+    /*
     cudaFree(c_atom.x);
     cudaFree(c_atom.fx); cudaFree(c_atom.fy); cudaFree(c_atom.fz);
     cudaFree(c_atom.type);
     cudaFree(c_atom.epsilon);
     cudaFree(c_atom.sigma6);
     cudaFree(c_atom.cutforcesq);
+    */
 
     cudaFree(c_neighs); cudaFree(c_neigh_numneigh);
 
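One thing the hunks above do not yet change: c_neighs and c_neigh_numneigh are still allocated, copied, and freed on every call, and the new reneighbourHappenend parameter is accepted but never consulted. A hypothetical follow-up sketch (not what this commit does) of how the flag could gate those transfers, reusing Neighbor and checkCUDAError from this file and assuming Nlocal and neighbor->maxneighs stay within the initially allocated capacity:

// Hypothetical: persist the neighbor-list buffers on the device and refresh
// them only when reneighbouring actually rebuilt the lists on the host.
static int *c_neighs = nullptr;
static int *c_neigh_numneigh = nullptr;

void uploadNeighbors(Neighbor *neighbor, int Nlocal, bool reneighbourHappenend) {
    if(c_neighs == nullptr) {
        checkCUDAError( "c_neighs malloc", cudaMalloc((void**)&c_neighs, sizeof(int) * Nlocal * neighbor->maxneighs) );
        checkCUDAError( "c_neigh_numneigh malloc", cudaMalloc((void**)&c_neigh_numneigh, sizeof(int) * Nlocal) );
        reneighbourHappenend = true; // force the initial upload
    }
    if(reneighbourHappenend) {
        // The lists only change when reneighbour() ran; skip the copies otherwise.
        checkCUDAError( "c_neighs memcpy", cudaMemcpy(c_neighs, neighbor->neighbors, sizeof(int) * Nlocal * neighbor->maxneighs, cudaMemcpyHostToDevice) );
        checkCUDAError( "c_neigh_numneigh memcpy", cudaMemcpy(c_neigh_numneigh, neighbor->numneigh, sizeof(int) * Nlocal, cudaMemcpyHostToDevice) );
    }
}
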
src/main.c: 15 changed lines
@@ -23,6 +23,7 @@
 #include <stdlib.h>
 #include <stdio.h>
 #include <string.h>
+#include <stdbool.h>
 #include <unistd.h>
 #include <limits.h>
 #include <math.h>
@@ -44,7 +45,7 @@
 
 #define HLINE "----------------------------------------------------------------------------\n"
 
-extern double computeForce(Parameter*, Atom*, Neighbor*);
+extern double computeForce(bool, Parameter*, Atom*, Neighbor*);
 extern double computeForceTracing(Parameter*, Atom*, Neighbor*, Stats*, int, int);
 extern double computeForceEam(Eam* eam, Parameter*, Atom *atom, Neighbor *neighbor, Stats *stats, int first_exec, int timestep);
 
@@ -262,7 +263,7 @@ int main(int argc, char** argv)
 #if defined(MEM_TRACER) || defined(INDEX_TRACER) || defined(COMPUTE_STATS)
         computeForceTracing(&param, &atom, &neighbor, &stats, 1, 0);
 #else
-        computeForce(&param, &atom, &neighbor);
+        computeForce(true, &param, &atom, &neighbor);
 #endif
     }
 
@@ -277,10 +278,12 @@ int main(int argc, char** argv)
     for(int n = 0; n < param.ntimes; n++) {
         initialIntegrate(&param, &atom);
 
-        if((n + 1) % param.every) {
-            updatePbc(&atom, &param);
-        } else {
+        const bool doReneighbour = (n + 1) % param.every == 0;
+
+        if(doReneighbour) {
             timer[NEIGH] += reneighbour(&param, &atom, &neighbor);
+        } else {
+            updatePbc(&atom, &param);
         }
 
         if(param.force_field == FF_EAM) {
@@ -289,7 +292,7 @@ int main(int argc, char** argv)
 #if defined(MEM_TRACER) || defined(INDEX_TRACER) || defined(COMPUTE_STATS)
             timer[FORCE] += computeForceTracing(&param, &atom, &neighbor, &stats, 0, n + 1);
 #else
-            timer[FORCE] += computeForce(&param, &atom, &neighbor);
+            timer[FORCE] += computeForce(doReneighbour, &param, &atom, &neighbor);
 #endif
         }
         finalIntegrate(&param, &atom);
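
The loop restructuring in main.c is behaviour-preserving: the old `if((n + 1) % param.every)` took its else branch, i.e. reneighboured, exactly when the remainder was zero, which is the new doReneighbour condition; the flag is additionally forwarded to computeForce so the CUDA side can tell reneighbouring timesteps apart. A small standalone check of the condition (every = 20 is just an example value):

#include <stdbool.h>
#include <stdio.h>

int main(void) {
    const int every = 20;                            // example value for param.every
    for(int n = 0; n < 60; n++) {
        const bool doReneighbour = (n + 1) % every == 0;
        if(doReneighbour) {
            // Timesteps 20, 40, 60 rebuild the neighbor lists, matching the
            // old `else` branch of `if((n + 1) % param.every)`.
            printf("reneighbour at timestep %d\n", n + 1);
        }
    }
    return 0;
}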