Removed debug prints, only zero atom forces instead of copying them, added measurements

parent 8009b54113
commit bf1ae3d013

src/force.cu (56 changed lines)
@@ -111,26 +111,14 @@ double computeForce(
 )
 {
     int Nlocal = atom->Nlocal;
-    MD_FLOAT* fx = atom->fx;
-    MD_FLOAT* fy = atom->fy;
-    MD_FLOAT* fz = atom->fz;
 #ifndef EXPLICIT_TYPES
     MD_FLOAT cutforcesq = param->cutforce * param->cutforce;
     MD_FLOAT sigma6 = param->sigma6;
     MD_FLOAT epsilon = param->epsilon;
 #endif
-    printf("-1\r\n");
-
     cudaProfilerStart();
 
-    printf("0\r\n");
-
-    for(int i = 0; i < Nlocal; i++) {
-        fx[i] = 0.0;
-        fy[i] = 0.0;
-        fz[i] = 0.0;
-    }
 
     const char *num_threads_env = getenv("NUM_THREADS");
     int num_threads = 0;
     if(num_threads_env == nullptr)
@@ -145,8 +133,6 @@ double computeForce(
     c_atom.Nmax = atom->Nmax;
     c_atom.ntypes = atom->ntypes;
 
-    printf("0.1\r\n");
-
     /*
     int nDevices;
     cudaGetDeviceCount(&nDevices);
@@ -167,8 +153,6 @@ double computeForce(
     // HINT: Run with cuda-memcheck ./MDBench-NVCC in case of error
     // HINT: Only works for data layout = AOS!!!
 
-    printf("1\r\n");
-
     if(!initialized) {
         checkCUDAError( "c_atom.x malloc", cudaMalloc((void**)&(c_atom.x), sizeof(MD_FLOAT) * atom->Nmax * 3) );
         checkCUDAError( "c_atom.fx malloc", cudaMalloc((void**)&(c_atom.fx), sizeof(MD_FLOAT) * Nlocal) );
@@ -183,27 +167,18 @@ double computeForce(
         checkCUDAError( "c_neigh_numneigh malloc", cudaMalloc((void**)&c_neigh_numneigh, sizeof(int) * Nlocal) );
     }
 
-    printf("2\r\n");
+    checkCUDAError( "c_atom.fx memset", cudaMemset(c_atom.fx, 0, sizeof(MD_FLOAT) * Nlocal) );
+    checkCUDAError( "c_atom.fy memset", cudaMemset(c_atom.fy, 0, sizeof(MD_FLOAT) * Nlocal) );
+    checkCUDAError( "c_atom.fz memset", cudaMemset(c_atom.fz, 0, sizeof(MD_FLOAT) * Nlocal) );
 
-    if(reneighbourHappenend || !initialized) {
-    checkCUDAError( "c_atom.x memcpy", cudaMemcpy(c_atom.x, atom->x, sizeof(MD_FLOAT) * atom->Nmax * 3, cudaMemcpyHostToDevice) );
-    checkCUDAError( "c_atom.fx memcpy", cudaMemcpy(c_atom.fx, fx, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
-    checkCUDAError( "c_atom.fy memcpy", cudaMemcpy(c_atom.fy, fy, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
-    checkCUDAError( "c_atom.fz memcpy", cudaMemcpy(c_atom.fz, fz, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
-    checkCUDAError( "c_atom.type memcpy", cudaMemcpy(c_atom.type, atom->type, sizeof(int) * atom->Nmax, cudaMemcpyHostToDevice) );
-    checkCUDAError( "c_atom.epsilon memcpy", cudaMemcpy(c_atom.epsilon, atom->epsilon, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
-    checkCUDAError( "c_atom.sigma6 memcpy", cudaMemcpy(c_atom.sigma6, atom->sigma6, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
-    checkCUDAError( "c_atom.cutforcesq memcpy", cudaMemcpy(c_atom.cutforcesq, atom->cutforcesq, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.x memcpy", cudaMemcpy(c_atom.x, atom->x, sizeof(MD_FLOAT) * atom->Nmax * 3, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.type memcpy", cudaMemcpy(c_atom.type, atom->type, sizeof(int) * atom->Nmax, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.epsilon memcpy", cudaMemcpy(c_atom.epsilon, atom->epsilon, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.sigma6 memcpy", cudaMemcpy(c_atom.sigma6, atom->sigma6, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
+    checkCUDAError( "c_atom.cutforcesq memcpy", cudaMemcpy(c_atom.cutforcesq, atom->cutforcesq, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
 
     checkCUDAError( "c_neigh_numneigh memcpy", cudaMemcpy(c_neigh_numneigh, neighbor->numneigh, sizeof(int) * Nlocal, cudaMemcpyHostToDevice) );
     checkCUDAError( "c_neighs memcpy", cudaMemcpy(c_neighs, neighbor->neighbors, sizeof(int) * Nlocal * neighbor->maxneighs, cudaMemcpyHostToDevice) );
-    }
-
-    printf("3\r\n");
-
-    printf("4\r\n");
-
-    printf("5\r\n");
 
     const int num_threads_per_block = num_threads; // this should be multiple of 32 as operations are performed at the level of warps
     const int num_blocks = ceil((float)Nlocal / (float)num_threads_per_block);
@@ -216,16 +191,11 @@ double computeForce(
     checkCUDAError( "PeekAtLastError", cudaPeekAtLastError() );
     checkCUDAError( "DeviceSync", cudaDeviceSynchronize() );
 
-    printf("6\r\n");
 
     // copy results in c_atom.fx/fy/fz to atom->fx/fy/fz
-    if(reneighbourHappenend) {
-    cudaMemcpy(atom->fx, c_atom.fx, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
-    cudaMemcpy(atom->fy, c_atom.fy, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
-    cudaMemcpy(atom->fz, c_atom.fz, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
-    }
-
-    printf("7\r\n");
+    cudaMemcpy(atom->fx, c_atom.fx, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
+    cudaMemcpy(atom->fy, c_atom.fy, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
+    cudaMemcpy(atom->fz, c_atom.fz, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
 
     /*
     cudaFree(c_atom.x);
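The core of the force.cu change above is replacing host-side zeroing plus a host-to-device copy of the force arrays with an in-place cudaMemset on the device buffers. A minimal standalone sketch of that pattern follows; the buffer name d_fx and the size N are placeholders for illustration, not taken from this repository:

/* zero_forces_sketch.cu -- illustrative only; compile with nvcc. */
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>

typedef double MD_FLOAT;
#define N 1024

static void check(const char *msg, cudaError_t err)
{
    if(err != cudaSuccess) {
        fprintf(stderr, "%s: %s\n", msg, cudaGetErrorString(err));
        exit(1);
    }
}

int main()
{
    MD_FLOAT *d_fx;
    check("malloc", cudaMalloc((void**)&d_fx, sizeof(MD_FLOAT) * N));

    /* Old pattern: zero on the host, then pay for a transfer every step:
       for(int i = 0; i < N; i++) fx[i] = 0.0;
       cudaMemcpy(d_fx, fx, sizeof(MD_FLOAT) * N, cudaMemcpyHostToDevice); */

    /* New pattern: clear the device buffer in place. cudaMemset writes a
       byte pattern, and an all-zero byte pattern is exactly 0.0 in
       IEEE-754, so this is a valid way to zero a float/double array. */
    check("memset", cudaMemset(d_fx, 0, sizeof(MD_FLOAT) * N));

    cudaFree(d_fx);
    return 0;
}

This saves one host-to-device transfer per force call; only the device-to-host copy-back of the computed forces remains.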
src/main.c (13 changed lines)

@@ -111,20 +111,13 @@ double reneighbour(
 {
     double S, E;
 
-    printf("10.1\r\n");
-
     S = getTimeStamp();
     LIKWID_MARKER_START("reneighbour");
-    printf("10.2\r\n");
     updateAtomsPbc(atom, param);
-    printf("10.3\r\n");
     setupPbc(atom, param);
-    printf("10.4\r\n");
     updatePbc(atom, param);
-    printf("10.5\r\n");
     //sortAtom(atom);
     buildNeighbor(atom, neighbor);
-    printf("10.6\r\n");
     LIKWID_MARKER_STOP("reneighbour");
     E = getTimeStamp();
 
@@ -288,18 +281,12 @@ int main(int argc, char** argv)
         const bool doReneighbour = (n + 1) % param.every == 0;
         const bool doesReneighbourNextRound = (n + 2) % param.every == 0;
 
-        printf("Run %d does reneighbour: %d\r\n", n, doReneighbour);
-
-        printf("10\r\n");
-
         if(doReneighbour) {
             timer[NEIGH] += reneighbour(&param, &atom, &neighbor);
         } else {
             updatePbc(&atom, &param);
         }
 
-        printf("11\r\n");
-
         if(param.force_field == FF_EAM) {
             timer[FORCE] += computeForceEam(&eam, &param, &atom, &neighbor, &stats, 0, n + 1);
         } else {
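The loop in main() above decides each step whether to rebuild the neighbor list, and reneighbour() brackets that work with getTimeStamp() to accumulate the cost into timer[NEIGH]. A self-contained sketch of the same cadence and timer accumulation; get_time and the value of every are stand-ins for the repository's getTimeStamp() and param.every:

/* cadence_sketch.c -- standalone illustration of the measurement pattern. */
#include <stdio.h>
#include <time.h>

static double get_time(void)   /* hypothetical stand-in for getTimeStamp() */
{
    struct timespec ts;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    return ts.tv_sec + 1e-9 * ts.tv_nsec;
}

int main(void)
{
    const int every = 20;      /* placeholder: rebuild every 20 steps */
    double t_neigh = 0.0;

    for(int n = 0; n < 100; n++) {
        /* same cadence test as main(): the list is rebuilt on steps
           19, 39, 59, ... i.e. whenever (n + 1) % every == 0 */
        const int doReneighbour = (n + 1) % every == 0;

        if(doReneighbour) {
            double S = get_time();
            /* ... updateAtomsPbc / setupPbc / buildNeighbor work here ... */
            double E = get_time();
            t_neigh += E - S;  /* accumulate like timer[NEIGH] */
        }
    }

    printf("time spent reneighbouring: %f s\n", t_neigh);
    return 0;
}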
@@ -172,8 +172,6 @@ void buildNeighbor(Atom *atom, Neighbor *neighbor)
 {
     int nall = atom->Nlocal + atom->Nghost;
 
-    printf("nall: %d, nmax: %d\r\n", nall, nmax);
-
     /* extend atom arrays if necessary */
     if(nall > nmax) {
         nmax = nall;
@@ -185,14 +183,10 @@ void buildNeighbor(Atom *atom, Neighbor *neighbor)
         // neighbor->neighbors = (int*) malloc(nmax * neighbor->maxneighs * sizeof(int*));
     }
 
-    printf("10.5.1\r\n");
-
     /* bin local & ghost atoms */
     binatoms(atom);
     int resize = 1;
 
-    printf("10.5.2\r\n");
-
     /* loop over each atom, storing neighbors */
     while(resize) {
         int new_maxneighs = neighbor->maxneighs;
@@ -322,14 +316,9 @@ int coord2bin(MD_FLOAT xin, MD_FLOAT yin, MD_FLOAT zin)
 
 void binatoms(Atom *atom)
 {
-    printf("10.5.1.1\r\n");
     int nall = atom->Nlocal + atom->Nghost;
     int resize = 1;
 
-    printf("10.5.1.2\r\n");
-
-    printf("nall: %d, atom->Nmax: %d\r\n", nall, atom->Nmax);
-
     while(resize > 0) {
         resize = 0;
 
@@ -337,8 +326,6 @@ void binatoms(Atom *atom)
             bincount[i] = 0;
         }
 
-        printf("10.5.1.3\r\n");
-
         for(int i = 0; i < nall; i++) {
             MD_FLOAT x = atom_x(i);
             MD_FLOAT y = atom_y(i);
@@ -353,15 +340,11 @@ void binatoms(Atom *atom)
             }
         }
 
-        printf("10.5.1.4\r\n");
-
         if(resize) {
             free(bins);
             atoms_per_bin *= 2;
             bins = (int*) malloc(mbins * atoms_per_bin * sizeof(int));
         }
-
-        printf("10.5.1.5\r\n");
     }
 }
 
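binatoms() above keeps a grow-and-retry loop: if any bin overflows atoms_per_bin, it doubles the capacity, reallocates bins, and redoes the binning from scratch. A minimal standalone model of that loop; the bin count, atom count, and the i % mbins binning rule are made up for illustration and stand in for coord2bin():

/* bin_resize_sketch.c -- toy model of the grow-and-retry binning loop. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const int mbins = 8;     /* number of bins (placeholder) */
    const int nall  = 100;   /* atoms to distribute (placeholder) */
    int atoms_per_bin = 2;   /* deliberately too small to force retries */
    int *bincount = calloc(mbins, sizeof(int));
    int *bins = malloc(mbins * atoms_per_bin * sizeof(int));

    int resize = 1;
    while(resize > 0) {
        resize = 0;
        for(int i = 0; i < mbins; i++) bincount[i] = 0;

        for(int i = 0; i < nall; i++) {
            int b = i % mbins;               /* stand-in for coord2bin() */
            if(bincount[b] < atoms_per_bin) {
                bins[b * atoms_per_bin + bincount[b]++] = i;
            } else {
                resize = 1;                  /* overflow: grow and redo */
            }
        }

        if(resize) {
            free(bins);
            atoms_per_bin *= 2;              /* double, exactly as above */
            bins = malloc(mbins * atoms_per_bin * sizeof(int));
        }
    }

    printf("settled at atoms_per_bin = %d\n", atoms_per_bin);
    free(bins);
    free(bincount);
    return 0;
}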