Removed debug prints, only zero atom forces instead of copying them, added measurements
parent 8009b54113
commit bf1ae3d013
src/force.cu (38 changed lines)
@@ -111,26 +111,14 @@ double computeForce(
 )
 {
     int Nlocal = atom->Nlocal;
     MD_FLOAT* fx = atom->fx;
     MD_FLOAT* fy = atom->fy;
     MD_FLOAT* fz = atom->fz;
 #ifndef EXPLICIT_TYPES
     MD_FLOAT cutforcesq = param->cutforce * param->cutforce;
     MD_FLOAT sigma6 = param->sigma6;
     MD_FLOAT epsilon = param->epsilon;
 #endif
-    printf("-1\r\n");
-
-    cudaProfilerStart();
-
-    printf("0\r\n");
-
-    for(int i = 0; i < Nlocal; i++) {
-        fx[i] = 0.0;
-        fy[i] = 0.0;
-        fz[i] = 0.0;
-    }
-
     const char *num_threads_env = getenv("NUM_THREADS");
     int num_threads = 0;
     if(num_threads_env == nullptr)
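The removed host loop pairs with the cudaMemset calls added further down in this file: the per-step force accumulators only need to exist on the GPU, so zeroing them in device memory replaces a host loop plus three host-to-device copies. A minimal sketch of the two variants, assuming MD_FLOAT is double and using hypothetical d_fx/d_fy/d_fz device buffers of length Nlocal:

    #include <cuda_runtime.h>

    // Variant A (old behaviour in this diff): zero on the host, then pay for three host-to-device copies.
    static void zeroForcesViaCopy(double* fx, double* fy, double* fz,
                                  double* d_fx, double* d_fy, double* d_fz, int Nlocal) {
        for(int i = 0; i < Nlocal; i++) { fx[i] = 0.0; fy[i] = 0.0; fz[i] = 0.0; }
        cudaMemcpy(d_fx, fx, sizeof(double) * Nlocal, cudaMemcpyHostToDevice);
        cudaMemcpy(d_fy, fy, sizeof(double) * Nlocal, cudaMemcpyHostToDevice);
        cudaMemcpy(d_fz, fz, sizeof(double) * Nlocal, cudaMemcpyHostToDevice);
    }

    // Variant B (new behaviour): zero directly in device memory. All-zero bits are +0.0 in IEEE 754,
    // so cudaMemset with 0 is a valid way to clear a floating-point accumulator.
    static void zeroForcesOnDevice(double* d_fx, double* d_fy, double* d_fz, int Nlocal) {
        cudaMemset(d_fx, 0, sizeof(double) * Nlocal);
        cudaMemset(d_fy, 0, sizeof(double) * Nlocal);
        cudaMemset(d_fz, 0, sizeof(double) * Nlocal);
    }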
@@ -145,8 +133,6 @@ double computeForce(
     c_atom.Nmax = atom->Nmax;
     c_atom.ntypes = atom->ntypes;

-    printf("0.1\r\n");
-
     /*
     int nDevices;
     cudaGetDeviceCount(&nDevices);
@@ -167,8 +153,6 @@ double computeForce(
     // HINT: Run with cuda-memcheck ./MDBench-NVCC in case of error
     // HINT: Only works for data layout = AOS!!!

-    printf("1\r\n");
-
     if(!initialized) {
         checkCUDAError( "c_atom.x malloc", cudaMalloc((void**)&(c_atom.x), sizeof(MD_FLOAT) * atom->Nmax * 3) );
         checkCUDAError( "c_atom.fx malloc", cudaMalloc((void**)&(c_atom.fx), sizeof(MD_FLOAT) * Nlocal) );
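Every CUDA call in this file is wrapped in checkCUDAError, which pairs a human-readable label with the returned cudaError_t. The helper itself is defined elsewhere in the project and is not part of this diff; a plausible minimal sketch of such a wrapper (an assumption, not the project's actual implementation):

    #include <cstdio>
    #include <cstdlib>
    #include <cuda_runtime.h>

    // Hypothetical reconstruction of a checkCUDAError-style helper:
    // report the failing call's label together with the CUDA error string, then abort.
    static void checkCUDAErrorSketch(const char* label, cudaError_t err) {
        if(err != cudaSuccess) {
            fprintf(stderr, "CUDA error at '%s': %s\n", label, cudaGetErrorString(err));
            exit(EXIT_FAILURE);
        }
    }

    // Usage mirrors the calls in the hunk above, e.g.:
    //   checkCUDAErrorSketch("some buffer malloc", cudaMalloc((void**)&ptr, bytes));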
@@ -183,13 +167,11 @@ double computeForce(
         checkCUDAError( "c_neigh_numneigh malloc", cudaMalloc((void**)&c_neigh_numneigh, sizeof(int) * Nlocal) );
     }
-
-    printf("2\r\n");
+    checkCUDAError( "c_atom.fx memset", cudaMemset(c_atom.fx, 0, sizeof(MD_FLOAT) * Nlocal) );
+    checkCUDAError( "c_atom.fy memset", cudaMemset(c_atom.fy, 0, sizeof(MD_FLOAT) * Nlocal) );
+    checkCUDAError( "c_atom.fz memset", cudaMemset(c_atom.fz, 0, sizeof(MD_FLOAT) * Nlocal) );

     if(reneighbourHappenend || !initialized) {
         checkCUDAError( "c_atom.x memcpy", cudaMemcpy(c_atom.x, atom->x, sizeof(MD_FLOAT) * atom->Nmax * 3, cudaMemcpyHostToDevice) );
-        checkCUDAError( "c_atom.fx memcpy", cudaMemcpy(c_atom.fx, fx, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
-        checkCUDAError( "c_atom.fy memcpy", cudaMemcpy(c_atom.fy, fy, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
-        checkCUDAError( "c_atom.fz memcpy", cudaMemcpy(c_atom.fz, fz, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyHostToDevice) );
         checkCUDAError( "c_atom.type memcpy", cudaMemcpy(c_atom.type, atom->type, sizeof(int) * atom->Nmax, cudaMemcpyHostToDevice) );
         checkCUDAError( "c_atom.epsilon memcpy", cudaMemcpy(c_atom.epsilon, atom->epsilon, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
         checkCUDAError( "c_atom.sigma6 memcpy", cudaMemcpy(c_atom.sigma6, atom->sigma6, sizeof(MD_FLOAT) * atom->ntypes * atom->ntypes, cudaMemcpyHostToDevice) );
@@ -197,13 +179,6 @@ double computeForce(

         checkCUDAError( "c_neigh_numneigh memcpy", cudaMemcpy(c_neigh_numneigh, neighbor->numneigh, sizeof(int) * Nlocal, cudaMemcpyHostToDevice) );
         checkCUDAError( "c_neighs memcpy", cudaMemcpy(c_neighs, neighbor->neighbors, sizeof(int) * Nlocal * neighbor->maxneighs, cudaMemcpyHostToDevice) );
     }
-
-    printf("3\r\n");
-
-    printf("4\r\n");
-
-    printf("5\r\n");
-
     const int num_threads_per_block = num_threads; // this should be multiple of 32 as operations are performed at the level of warps
     const int num_blocks = ceil((float)Nlocal / (float)num_threads_per_block);
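The launch configuration assigns one thread per local atom: num_threads_per_block comes from the NUM_THREADS environment variable read earlier and should be a multiple of the 32-thread warp size, and the block count is the ceiling of Nlocal over the block size. A self-contained sketch of the same pattern with a hypothetical stub kernel:

    #include <cuda_runtime.h>

    // Hypothetical stand-in for the force kernel: one thread per local atom.
    __global__ void force_kernel_stub(double* d_fx, int Nlocal) {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if(i >= Nlocal) return;   // the last block may be only partially filled
        d_fx[i] = 0.0;            // a real kernel would accumulate Lennard-Jones forces here
    }

    void launch_stub(double* d_fx, int Nlocal, int num_threads_per_block) {
        // Integer ceiling division: enough blocks so that blocks * threads >= Nlocal,
        // equivalent to the ceil((float)Nlocal / num_threads_per_block) used above.
        const int num_blocks = (Nlocal + num_threads_per_block - 1) / num_threads_per_block;
        force_kernel_stub<<<num_blocks, num_threads_per_block>>>(d_fx, Nlocal);
    }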
@@ -216,16 +191,11 @@ double computeForce(
     checkCUDAError( "PeekAtLastError", cudaPeekAtLastError() );
     checkCUDAError( "DeviceSync", cudaDeviceSynchronize() );
-
-    printf("6\r\n");
-
     // copy results in c_atom.fx/fy/fz to atom->fx/fy/fz
     if(reneighbourHappenend) {
+
         cudaMemcpy(atom->fx, c_atom.fx, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
         cudaMemcpy(atom->fy, c_atom.fy, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
         cudaMemcpy(atom->fz, c_atom.fz, sizeof(MD_FLOAT) * Nlocal, cudaMemcpyDeviceToHost);
     }
-
-    printf("7\r\n");
-
     /*
     cudaFree(c_atom.x);
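The commit title also mentions added measurements. The code above already synchronizes with cudaDeviceSynchronize() before results are read back, which is the prerequisite for host-side timing of the kernel; an alternative, GPU-side way to time just the kernel region uses CUDA events. This is a generic sketch of that technique, not necessarily what this commit adds, and launch_force is a hypothetical name:

    #include <cuda_runtime.h>

    // Time an asynchronous kernel launch with CUDA events; returns elapsed GPU time in milliseconds.
    float timeKernelRegion(void (*launch_force)(void)) {
        cudaEvent_t start, stop;
        cudaEventCreate(&start);
        cudaEventCreate(&stop);

        cudaEventRecord(start);
        launch_force();                 // kernel launch is asynchronous with respect to the host
        cudaEventRecord(stop);
        cudaEventSynchronize(stop);     // wait until the kernel and the stop event have completed

        float ms = 0.0f;
        cudaEventElapsedTime(&ms, start, stop);

        cudaEventDestroy(start);
        cudaEventDestroy(stop);
        return ms;
    }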
src/main.c (13 changed lines)
@@ -111,20 +111,13 @@ double reneighbour(
 {
     double S, E;

-    printf("10.1\r\n");
-
     S = getTimeStamp();
     LIKWID_MARKER_START("reneighbour");
-    printf("10.2\r\n");
     updateAtomsPbc(atom, param);
-    printf("10.3\r\n");
     setupPbc(atom, param);
-    printf("10.4\r\n");
     updatePbc(atom, param);
-    printf("10.5\r\n");
     //sortAtom(atom);
     buildNeighbor(atom, neighbor);
-    printf("10.6\r\n");
     LIKWID_MARKER_STOP("reneighbour");
     E = getTimeStamp();

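reneighbour() is bracketed by getTimeStamp() calls and a LIKWID marker region, and the elapsed time is presumably returned to the caller, which accumulates it into timer[NEIGH] in the next hunk. getTimeStamp() itself is not shown in this diff; a minimal sketch of a monotonic wall-clock timestamp in seconds, which is one common way to implement such a helper:

    #include <time.h>

    // Hypothetical sketch of a getTimeStamp()-style helper: monotonic wall clock in seconds.
    static double getTimeStampSketch(void) {
        struct timespec ts;
        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (double)ts.tv_sec + (double)ts.tv_nsec * 1.0e-9;
    }

    // Usage, mirroring the pattern above:
    //   double S = getTimeStampSketch();  /* ...work... */  double E = getTimeStampSketch();  /* return E - S; */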
@@ -288,18 +281,12 @@ int main(int argc, char** argv)
         const bool doReneighbour = (n + 1) % param.every == 0;
         const bool doesReneighbourNextRound = (n + 2) % param.every == 0;

-        printf("Run %d does reneighbour: %d\r\n", n, doReneighbour);
-
-        printf("10\r\n");
-
         if(doReneighbour) {
             timer[NEIGH] += reneighbour(&param, &atom, &neighbor);
         } else {
             updatePbc(&atom, &param);
         }

-        printf("11\r\n");
-
         if(param.force_field == FF_EAM) {
             timer[FORCE] += computeForceEam(&eam, &param, &atom, &neighbor, &stats, 0, n + 1);
         } else {
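doReneighbour fires on every param.every-th iteration and doesReneighbourNextRound looks one step ahead; its use lies outside the shown hunk. A tiny sketch of the schedule, with every = 20 chosen purely for illustration:

    #include <stdio.h>

    int main(void) {
        const int every = 20;                                // illustrative value only
        for(int n = 0; n < 45; n++) {
            const int doReneighbour = (n + 1) % every == 0;  // true at n = 19, 39, ...
            const int nextRound     = (n + 2) % every == 0;  // true one step earlier: n = 18, 38, ...
            if(doReneighbour || nextRound)
                printf("n=%d doReneighbour=%d doesReneighbourNextRound=%d\n", n, doReneighbour, nextRound);
        }
        return 0;
    }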
@@ -172,8 +172,6 @@ void buildNeighbor(Atom *atom, Neighbor *neighbor)
 {
     int nall = atom->Nlocal + atom->Nghost;

-    printf("nall: %d, nmax: %d\r\n", nall, nmax);
-
     /* extend atom arrays if necessary */
     if(nall > nmax) {
         nmax = nall;
@@ -185,14 +183,10 @@ void buildNeighbor(Atom *atom, Neighbor *neighbor)
         // neighbor->neighbors = (int*) malloc(nmax * neighbor->maxneighs * sizeof(int*));
     }

-    printf("10.5.1\r\n");
-
     /* bin local & ghost atoms */
     binatoms(atom);
     int resize = 1;

-    printf("10.5.2\r\n");
-
     /* loop over each atom, storing neighbors */
     while(resize) {
         int new_maxneighs = neighbor->maxneighs;
@@ -322,14 +316,9 @@ int coord2bin(MD_FLOAT xin, MD_FLOAT yin, MD_FLOAT zin)

 void binatoms(Atom *atom)
 {
-    printf("10.5.1.1\r\n");
     int nall = atom->Nlocal + atom->Nghost;
     int resize = 1;

-    printf("10.5.1.2\r\n");
-
-    printf("nall: %d, atom->Nmax: %d\r\n", nall, atom->Nmax);
-
     while(resize > 0) {
         resize = 0;

@@ -337,8 +326,6 @@ void binatoms(Atom *atom)
             bincount[i] = 0;
         }

-        printf("10.5.1.3\r\n");
-
         for(int i = 0; i < nall; i++) {
             MD_FLOAT x = atom_x(i);
             MD_FLOAT y = atom_y(i);
@@ -353,15 +340,11 @@ void binatoms(Atom *atom)
             }
         }

-        printf("10.5.1.4\r\n");
-
         if(resize) {
             free(bins);
             atoms_per_bin *= 2;
             bins = (int*) malloc(mbins * atoms_per_bin * sizeof(int));
         }
-
-        printf("10.5.1.5\r\n");
     }
 }
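binatoms() uses a grow-and-retry scheme, and buildNeighbor() above follows the same idea for maxneighs: run the pass with the current capacity, and if anything overflows, enlarge and repeat. A compact, self-contained sketch of the bin version, with a hypothetical item_bin lookup standing in for coord2bin() and a fixed bin count for illustration:

    #include <stdlib.h>

    // Hypothetical, self-contained version of the doubling retry loop in binatoms().
    // item_bin[i] gives the precomputed bin index of item i (stands in for coord2bin()).
    void bin_items(const int* item_bin, int nitems, int nbins) {
        int atoms_per_bin = 8;                                    // illustrative starting capacity
        int* bins = (int*) malloc(nbins * atoms_per_bin * sizeof(int));
        int* bincount = (int*) malloc(nbins * sizeof(int));
        int resize = 1;

        while(resize > 0) {
            resize = 0;
            for(int b = 0; b < nbins; b++) bincount[b] = 0;
            for(int i = 0; i < nitems; i++) {
                int b = item_bin[i];
                if(bincount[b] < atoms_per_bin) bins[b * atoms_per_bin + bincount[b]++] = i;
                else resize = 1;                                  // bin overflowed, retry with bigger bins
            }
            if(resize) {                                          // double the per-bin capacity and redo the pass
                free(bins);
                atoms_per_bin *= 2;
                bins = (int*) malloc(nbins * atoms_per_bin * sizeof(int));
            }
        }
        free(bins);
        free(bincount);
    }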