Debug 2D MPI version. Does not yet work from v2 upwards.

Jan Eitzinger 2024-02-11 21:47:53 +01:00
parent 6769c7acf0
commit 2d759b106a
15 changed files with 406 additions and 461 deletions

View File

@ -48,6 +48,8 @@ clean:
distclean: clean
$(info ===> DIST CLEAN)
@rm -f $(TARGET)
@rm -f *.dat
@rm -f *.png
info:
$(info $(CFLAGS))

View File

@ -2,11 +2,12 @@
TAG ?= CLANG
ENABLE_MPI ?= true
ENABLE_OPENMP ?= false
COMM_TYPE ?= v3
COMM_TYPE ?= v2
#Feature options
OPTIONS += -DARRAY_ALIGNMENT=64
#OPTIONS += -DVERBOSE
OPTIONS += -DVERBOSE
# OPTIONS += -DTEST
#OPTIONS += -DVERBOSE_AFFINITY
#OPTIONS += -DVERBOSE_DATASIZE
#OPTIONS += -DVERBOSE_TIMER
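COMM_TYPE presumably selects which of the comm-v*.c variants below is compiled in (v1: 1D row decomposition, v2: 2D decomposition with explicit point-to-point halo exchange, v3: 2D decomposition with a neighbourhood collective). Dropping the default from v3 back to v2 and switching on VERBOSE fits the commit message: the v2 path is the one currently being debugged.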

View File

@ -15,7 +15,7 @@ bcRight 1 #
gx 0.0 # Body forces (e.g. gravity)
gy 0.0 #
re 10.0 # Reynolds number
re 100.0 # Reynolds number
u_init 0.0 # initial value for velocity in x-direction
v_init 0.0 # initial value for velocity in y-direction
@ -26,13 +26,13 @@ p_init 0.0 # initial value for pressure
xlength 1.0 # domain size in x-direction
ylength 1.0 # domain size in y-direction
imax 100 # number of interior cells in x-direction
jmax 100 # number of interior cells in y-direction
imax 80 # number of interior cells in x-direction
jmax 80 # number of interior cells in y-direction
# Time Data:
# ---------
te 5.0 # final time
te 10.0 # final time
dt 0.02 # time stepsize
tau 0.5 # safety factor for time stepsize control (<0 constant delt)
@ -41,6 +41,6 @@ tau 0.5 # safety factor for time stepsize control (<0 constant delt)
itermax 1000 # maximal number of pressure iteration in one time step
eps 0.001 # stopping tolerance for pressure iteration
omg 1.7 # relaxation parameter for SOR iteration
omg 1.9 # relaxation parameter for SOR iteration
gamma 0.9 # upwind differencing factor gamma
#===============================================================================
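The relaxation factor change from 1.7 to 1.9 is consistent with SOR theory. For the 2D Poisson model problem with n interior cells per direction, the asymptotically optimal factor is omega_opt = 2 / (1 + sin(pi / (n + 1))); with imax = jmax = 80 this gives 2 / (1 + sin(pi / 81)) ≈ 1.93, so 1.9 sits close to the optimum while 1.7 converges markedly slower. (A standard model-problem result; the true optimum for this solver and boundary setup may differ slightly.)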

View File

@ -4,18 +4,12 @@
* Use of this source code is governed by a MIT style
* license that can be found in the LICENSE file.
*/
#include <stdio.h>
#include <stdlib.h>
#include "comm.h"
#if defined(_MPI)
#ifdef _MPI
// subroutines local to this module
static int sizeOfRank(int rank, int size, int N)
{
return N / size + ((N % size > rank) ? 1 : 0);
}
static int sum(int* sizes, int position)
{
int sum = 0;
@ -49,20 +43,9 @@ static void gatherArray(
#endif // defined _MPI
// exported subroutines
void commReduction(double* v, int op)
{
#if defined(_MPI)
if (op == MAX) {
MPI_Allreduce(MPI_IN_PLACE, v, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
} else if (op == SUM) {
MPI_Allreduce(MPI_IN_PLACE, v, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
}
#endif
}
int commIsBoundary(Comm* c, int direction)
{
#if defined(_MPI)
#ifdef _MPI
switch (direction) {
case LEFT:
return 1;
@ -84,7 +67,7 @@ int commIsBoundary(Comm* c, int direction)
void commExchange(Comm* c, double* grid)
{
#if defined(_MPI)
#ifdef _MPI
MPI_Request requests[4] = { MPI_REQUEST_NULL,
MPI_REQUEST_NULL,
MPI_REQUEST_NULL,
@ -116,7 +99,7 @@ void commExchange(Comm* c, double* grid)
void commShift(Comm* c, double* f, double* g)
{
#if defined(_MPI)
#ifdef _MPI
MPI_Request requests[2] = { MPI_REQUEST_NULL, MPI_REQUEST_NULL };
/* shift G */
@ -143,7 +126,6 @@ void commShift(Comm* c, double* f, double* g)
#endif
}
// TODO Merge with seq
void commCollectResult(Comm* c,
double* ug,
double* vg,
@ -154,6 +136,7 @@ void commCollectResult(Comm* c,
int jmax,
int imax)
{
#ifdef _MPI
int *rcvCounts, *displs;
int cnt = c->jmaxLocal * (imax + 2);
@ -183,49 +166,12 @@ void commCollectResult(Comm* c,
gatherArray(c, cnt, rcvCounts, displs, p, pg);
gatherArray(c, cnt, rcvCounts, displs, u, ug);
gatherArray(c, cnt, rcvCounts, displs, v, vg);
}
void commPrintConfig(Comm* c)
{
#if defined(_MPI)
fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD);
if (commIsMaster(c)) {
printf("Communication setup:\n");
}
for (int i = 0; i < c->size; i++) {
if (i == c->rank) {
printf("\tRank %d of %d\n", c->rank, c->size);
printf("\tNeighbours (bottom, top, left, right): %d %d, %d, %d\n",
c->neighbours[BOTTOM],
c->neighbours[TOP],
c->neighbours[LEFT],
c->neighbours[RIGHT]);
printf("\tCoordinates (j,i) %d %d\n", c->coords[JDIM], c->coords[IDIM]);
printf("\tLocal domain size (j,i) %dx%d\n", c->jmaxLocal, c->imaxLocal);
fflush(stdout);
}
}
MPI_Barrier(MPI_COMM_WORLD);
#endif
}
void commInit(Comm* c, int argc, char** argv)
{
#if defined(_MPI)
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &(c->rank));
MPI_Comm_size(MPI_COMM_WORLD, &(c->size));
#else
c->rank = 0;
c->size = 1;
#endif
}
void commPartition(Comm* c, int jmax, int imax)
{
#if defined(_MPI)
#ifdef _MPI
c->imaxLocal = imax;
c->jmaxLocal = sizeOfRank(c->rank, c->size, jmax);
#else
@ -233,15 +179,3 @@ void commPartition(Comm* c, int jmax, int imax)
c->jmaxLocal = jmax;
#endif
}
void commFinalize(Comm* c)
{
#if defined(_MPI)
for (int i = 0; i < NDIRS; i++) {
MPI_Type_free(&c->sbufferTypes[i]);
MPI_Type_free(&c->rbufferTypes[i]);
}
MPI_Finalize();
#endif
}
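For reference, the scheme this file implements is a plain 1D row decomposition: imaxLocal = imax everywhere, only jmax is split, and commExchange moves one contiguous ghost row up and one down. A minimal self-contained sketch of that pattern (illustrative names, not the solver's exact API; row j of the padded row-major array starts at grid[j * (imax + 2)]):

#include <mpi.h>

/* Sketch: 1D halo exchange of one ghost row with each vertical neighbour.
 * bottom/top are ranks or MPI_PROC_NULL, in which case the calls are no-ops,
 * so boundary ranks need no special casing. */
static void exchangeGhostRows(double* grid, int imax, int jmaxLocal,
                              int bottom, int top, MPI_Comm comm)
{
    int stride = imax + 2;
    MPI_Request reqs[4];

    /* receive into the two ghost rows (j = 0 and j = jmaxLocal + 1) */
    MPI_Irecv(grid + 1, imax, MPI_DOUBLE, bottom, 0, comm, &reqs[0]);
    MPI_Irecv(grid + (jmaxLocal + 1) * stride + 1, imax, MPI_DOUBLE, top, 0, comm, &reqs[1]);

    /* send the first and last interior rows (j = 1 and j = jmaxLocal) */
    MPI_Isend(grid + stride + 1, imax, MPI_DOUBLE, bottom, 0, comm, &reqs[2]);
    MPI_Isend(grid + jmaxLocal * stride + 1, imax, MPI_DOUBLE, top, 0, comm, &reqs[3]);

    MPI_Waitall(4, reqs, MPI_STATUSES_IGNORE);
}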

View File

@ -4,39 +4,25 @@
* Use of this source code is governed by a MIT style
* license that can be found in the LICENSE file.
*/
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include "comm.h"
#if defined(_MPI)
#ifdef _MPI
// subroutines local to this module
static int sizeOfRank(int rank, int size, int N)
static int sum(int* sizes, int position)
{
return N / size + ((N % size > rank) ? 1 : 0);
int sum = 0;
for (int i = 0; i < position; i++) {
sum += sizes[i];
}
return sum;
}
static void setupCommunication(Comm* c, int direction, int layer)
{
MPI_Datatype type;
size_t dblsize = sizeof(double);
int imaxLocal = c->imaxLocal;
int jmaxLocal = c->jmaxLocal;
int sizes[NDIMS];
int subSizes[NDIMS];
int starts[NDIMS];
int offset = 0;
}
static void assembleResult(Comm* c,
double* src,
double* dst,
int imaxLocal[],
int jmaxLocal[],
int offset[],
int jmax,
int imax)
static void assembleResult(Comm* c, double* src, double* dst, int jmax, int imax)
{
MPI_Request* requests;
int numRequests = 1;
@ -49,11 +35,27 @@ static void assembleResult(Comm* c,
requests = (MPI_Request*)malloc(numRequests * sizeof(MPI_Request));
/* all ranks send their bulk array */
/* all ranks send their bulk array, but including external boundary layer */
MPI_Datatype bulkType;
int oldSizes[NDIMS] = { c->jmaxLocal + 2, c->imaxLocal + 2 };
int newSizes[NDIMS] = { c->jmaxLocal, c->imaxLocal };
int oldSizes[NDIMS] = { c->imaxLocal + 2, c->jmaxLocal + 2 };
int newSizes[NDIMS] = { c->imaxLocal, c->jmaxLocal };
int starts[NDIMS] = { 1, 1 };
if (commIsBoundary(c, LEFT)) {
newSizes[IDIM] += 1;
starts[IDIM] = 0;
}
if (commIsBoundary(c, RIGHT)) {
newSizes[IDIM] += 1;
}
if (commIsBoundary(c, BOTTOM)) {
newSizes[JDIM] += 1;
starts[JDIM] = 0;
}
if (commIsBoundary(c, TOP)) {
newSizes[JDIM] += 1;
}
MPI_Type_create_subarray(NDIMS,
oldSizes,
newSizes,
@ -62,16 +64,23 @@ static void assembleResult(Comm* c,
MPI_DOUBLE,
&bulkType);
MPI_Type_commit(&bulkType);
MPI_Isend(src, 1, bulkType, 0, 0, c->comm, &requests[0]);
int newSizesI[c->size];
int newSizesJ[c->size];
MPI_Gather(&newSizes[IDIM], 1, MPI_INT, newSizesI, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Gather(&newSizes[JDIM], 1, MPI_INT, newSizesJ, 1, MPI_INT, 0, MPI_COMM_WORLD);
/* rank 0 assembles the subdomains */
if (c->rank == 0) {
for (int i = 0; i < c->size; i++) {
MPI_Datatype domainType;
int oldSizes[NDIMS] = { jmax, imax };
int newSizes[NDIMS] = { jmaxLocal[i], imaxLocal[i] };
int starts[NDIMS] = { offset[i * NDIMS + JDIM], offset[i * NDIMS + IDIM] };
int oldSizes[NDIMS] = { imax + 2, jmax + 2 };
int newSizes[NDIMS] = { newSizesI[i], newSizesJ[i] };
int coords[NDIMS];
MPI_Cart_coords(c->comm, i, NDIMS, coords);
int starts[NDIMS] = { sum(newSizesI, coords[IDIM]),
sum(newSizesJ, coords[JDIM]) };
MPI_Type_create_subarray(NDIMS,
oldSizes,
newSizes,
@ -87,34 +96,12 @@ static void assembleResult(Comm* c,
MPI_Waitall(numRequests, requests, MPI_STATUSES_IGNORE);
}
static int sum(int* sizes, int position)
{
int sum = 0;
for (int i = 0; i < position; i++) {
sum += sizes[i];
}
return sum;
}
#endif // defined _MPI
// exported subroutines
void commReduction(double* v, int op)
{
#if defined(_MPI)
if (op == MAX) {
MPI_Allreduce(MPI_IN_PLACE, v, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
} else if (op == SUM) {
MPI_Allreduce(MPI_IN_PLACE, v, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
}
#endif
}
int commIsBoundary(Comm* c, int direction)
{
#if defined(_MPI)
#ifdef _MPI
switch (direction) {
case LEFT:
return c->coords[IDIM] == 0;
@ -136,63 +123,51 @@ int commIsBoundary(Comm* c, int direction)
void commExchange(Comm* c, double* grid)
{
#if defined(_MPI)
double* buf[8];
#ifdef _MPI
double* sbuf[NDIRS];
double* rbuf[NDIRS];
MPI_Request requests[8];
for (int i = 0; i < 8; i++)
requests[i] = MPI_REQUEST_NULL;
buf[0] = grid + 1; // recv bottom
buf[1] = grid + (c->imaxLocal + 2) + 1; // send bottom
buf[2] = grid + (c->jmaxLocal + 1) * (c->imaxLocal + 2) + 1; // recv top
buf[3] = grid + (c->jmaxLocal) * (c->imaxLocal + 2) + 1; // send top
buf[4] = grid + (c->imaxLocal + 2); // recv left
buf[5] = grid + (c->imaxLocal + 2) + 1; // send left
buf[6] = grid + (c->imaxLocal + 2) + (c->imaxLocal + 1); // recv right
buf[7] = grid + (c->imaxLocal + 2) + (c->imaxLocal); // send right
rbuf[LEFT] = grid + (c->imaxLocal + 2);
rbuf[RIGHT] = grid + (c->imaxLocal + 2) + (c->imaxLocal + 1);
rbuf[BOTTOM] = grid + 1;
rbuf[TOP] = grid + (c->jmaxLocal + 1) * (c->imaxLocal + 2) + 1;
sbuf[LEFT] = grid + (c->imaxLocal + 2) + 1;
sbuf[RIGHT] = grid + (c->imaxLocal + 2) + (c->imaxLocal);
sbuf[BOTTOM] = grid + (c->imaxLocal + 2) + 1;
sbuf[TOP] = grid + (c->jmaxLocal) * (c->imaxLocal + 2) + 1;
// buf[0] = grid + (c->imaxLocal + 2); // recv left
// buf[1] = grid + (c->imaxLocal + 2) + 1; // send left
// buf[2] = grid + (c->imaxLocal + 2) + (c->imaxLocal + 1); // recv right
// buf[3] = grid + (c->imaxLocal + 2) + (c->imaxLocal); // send right
// buf[4] = grid + 1; // recv bottom
// buf[5] = grid + (c->imaxLocal + 2) + 1; // send bottom
// buf[6] = grid + (c->jmaxLocal + 1) * (c->imaxLocal + 2) + 1; // recv top
// buf[7] = grid + (c->jmaxLocal) * (c->imaxLocal + 2) + 1; // send top
for (int i = 2; i < 4; i++) {
for (int i = 0; i < NDIRS; i++) {
int tag = 0;
if (c->neighbours[i] != MPI_PROC_NULL) {
// printf("DEBUG: Rank %d - SendRecv with %d\n", c->rank, c->neighbours[i]);
tag = c->neighbours[i];
}
/* exchange ghost cells with bottom/top neighbor */
MPI_Irecv(buf[i * 2],
MPI_Irecv(rbuf[i],
1,
c->rbufferTypes[0],
c->bufferTypes[i],
c->neighbours[i],
tag,
c->comm,
&requests[i * 2]);
MPI_Isend(buf[(i * 2) + 1],
MPI_Isend(sbuf[i],
1,
c->rbufferTypes[0],
c->bufferTypes[i],
c->neighbours[i],
c->rank,
c->comm,
&requests[i * 2 + 1]);
}
for (int i = 0; i < 2; i++) {
int tag = 0;
if (c->neighbours[i] != MPI_PROC_NULL) {
tag = c->neighbours[i];
}
/* exchange ghost cells with left/right neighbor */
MPI_Irecv(buf[i * 2 + 4],
1,
c->sbufferTypes[0],
c->neighbours[i],
tag,
c->comm,
&requests[i * 2 + 4]);
MPI_Isend(buf[i * 2 + 5],
1,
c->sbufferTypes[0],
c->neighbours[i],
c->rank,
c->comm,
&requests[(i * 2) + 5]);
}
MPI_Waitall(8, requests, MPI_STATUSES_IGNORE);
#endif
@ -200,7 +175,7 @@ void commExchange(Comm* c, double* grid)
void commShift(Comm* c, double* f, double* g)
{
#if defined(_MPI)
#ifdef _MPI
MPI_Request requests[4] = { MPI_REQUEST_NULL,
MPI_REQUEST_NULL,
MPI_REQUEST_NULL,
@ -211,7 +186,7 @@ void commShift(Comm* c, double* f, double* g)
/* receive ghost cells from bottom neighbor */
MPI_Irecv(buf,
1,
c->rbufferTypes[0],
c->bufferTypes[BOTTOM],
c->neighbours[BOTTOM],
0,
c->comm,
@ -219,16 +194,28 @@ void commShift(Comm* c, double* f, double* g)
buf = g + (c->jmaxLocal) * (c->imaxLocal + 2) + 1;
/* send ghost cells to top neighbor */
MPI_Isend(buf, 1, c->rbufferTypes[0], c->neighbours[TOP], 0, c->comm, &requests[1]);
MPI_Isend(buf, 1, c->bufferTypes[TOP], c->neighbours[TOP], 0, c->comm, &requests[1]);
/* shift F */
buf = f + (c->imaxLocal + 2);
/* receive ghost cells from left neighbor */
MPI_Irecv(buf, 1, c->sbufferTypes[0], c->neighbours[LEFT], 1, c->comm, &requests[2]);
MPI_Irecv(buf,
1,
c->bufferTypes[LEFT],
c->neighbours[LEFT],
1,
c->comm,
&requests[2]);
buf = f + (c->imaxLocal + 2) + (c->imaxLocal);
buf = f + (c->imaxLocal + 2) + (c->imaxLocal + 1);
/* send ghost cells to right neighbor */
MPI_Isend(buf, 1, c->sbufferTypes[0], c->neighbours[RIGHT], 1, c->comm, &requests[3]);
MPI_Isend(buf,
1,
c->bufferTypes[RIGHT],
c->neighbours[RIGHT],
1,
c->comm,
&requests[3]);
MPI_Waitall(4, requests, MPI_STATUSES_IGNORE);
#endif
@ -244,6 +231,8 @@ void commCollectResult(Comm* c,
int jmax,
int imax)
{
#ifdef _MPI
int offset[c->size * NDIMS];
int imaxLocal[c->size];
int jmaxLocal[c->size];
@ -270,56 +259,19 @@ void commCollectResult(Comm* c,
}
/* collect P */
assembleResult(c, p, pg, imaxLocal, jmaxLocal, offset, jmax, imax);
assembleResult(c, p, pg, jmax, imax);
/* collect U */
assembleResult(c, u, ug, imaxLocal, jmaxLocal, offset, jmax, imax);
assembleResult(c, u, ug, jmax, imax);
/* collect V */
assembleResult(c, v, vg, imaxLocal, jmaxLocal, offset, jmax, imax);
}
void commPrintConfig(Comm* c)
{
#if defined(_MPI)
fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD);
if (commIsMaster(c)) {
printf("Communication setup:\n");
}
for (int i = 0; i < c->size; i++) {
if (i == c->rank) {
printf("\tRank %d of %d\n", c->rank, c->size);
printf("\tNeighbours (bottom, top, left, right): %d %d, %d, %d\n",
c->neighbours[BOTTOM],
c->neighbours[TOP],
c->neighbours[LEFT],
c->neighbours[RIGHT]);
printf("\tCoordinates (j,i) %d %d\n", c->coords[JDIM], c->coords[IDIM]);
printf("\tLocal domain size (j,i) %dx%d\n", c->jmaxLocal, c->imaxLocal);
fflush(stdout);
}
}
MPI_Barrier(MPI_COMM_WORLD);
#endif
}
void commInit(Comm* c, int argc, char** argv)
{
#if defined(_MPI)
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &(c->rank));
MPI_Comm_size(MPI_COMM_WORLD, &(c->size));
#else
c->rank = 0;
c->size = 1;
assembleResult(c, v, vg, jmax, imax);
#endif
}
void commPartition(Comm* c, int jmax, int imax)
{
#if defined(_MPI)
#ifdef _MPI
int dims[NDIMS] = { 0, 0 };
int periods[NDIMS] = { 0, 0 };
MPI_Dims_create(c->size, NDIMS, dims);
@ -331,25 +283,20 @@ void commPartition(Comm* c, int jmax, int imax)
c->imaxLocal = sizeOfRank(c->rank, dims[IDIM], imax);
c->jmaxLocal = sizeOfRank(c->rank, dims[JDIM], jmax);
MPI_Type_contiguous(c->imaxLocal, MPI_DOUBLE, &c->rbufferTypes[0]);
MPI_Type_commit(&c->rbufferTypes[0]);
MPI_Datatype jBufferType;
MPI_Type_contiguous(c->imaxLocal, MPI_DOUBLE, &jBufferType);
MPI_Type_commit(&jBufferType);
MPI_Type_vector(c->jmaxLocal, 1, c->imaxLocal + 2, MPI_DOUBLE, &c->sbufferTypes[0]);
MPI_Type_commit(&c->sbufferTypes[0]);
MPI_Datatype iBufferType;
MPI_Type_vector(c->jmaxLocal, 1, c->imaxLocal + 2, MPI_DOUBLE, &iBufferType);
MPI_Type_commit(&iBufferType);
c->bufferTypes[LEFT] = iBufferType;
c->bufferTypes[RIGHT] = iBufferType;
c->bufferTypes[BOTTOM] = jBufferType;
c->bufferTypes[TOP] = jBufferType;
#else
c->imaxLocal = imax;
c->jmaxLocal = jmax;
#endif
}
void commFinalize(Comm* c)
{
#if defined(_MPI)
for (int i = 0; i < NDIRS; i++) {
MPI_Type_free(&c->sbufferTypes[i]);
MPI_Type_free(&c->rbufferTypes[i]);
}
MPI_Finalize();
#endif
}
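The main structural change in this version: the per-direction send and receive subarray types (built by the deleted setupCommunication) are replaced by one reusable datatype per direction, stored in bufferTypes[NDIRS]. That works because the ghost layer and the adjacent interior layer have identical shape; only the start address differs, which commExchange now encodes in the sbuf/rbuf pointers. The column type for LEFT/RIGHT is the interesting one: in row-major storage a column is jmaxLocal doubles spaced one padded row apart, which is exactly what MPI_Type_vector expresses. A standalone sketch of the two type constructions (illustrative names):

#include <mpi.h>

/* Sketch: halo datatypes for a row-major (imaxLocal+2) x (jmaxLocal+2) block.
 * Rows are contiguous; columns are strided by the padded row length. */
static void makeHaloTypes(int imaxLocal, int jmaxLocal,
                          MPI_Datatype* rowType, MPI_Datatype* colType)
{
    /* one interior row: imaxLocal contiguous doubles (BOTTOM/TOP halos) */
    MPI_Type_contiguous(imaxLocal, MPI_DOUBLE, rowType);
    MPI_Type_commit(rowType);

    /* one interior column: jmaxLocal doubles with stride imaxLocal + 2
     * (LEFT/RIGHT halos) */
    MPI_Type_vector(jmaxLocal, 1, imaxLocal + 2, MPI_DOUBLE, colType);
    MPI_Type_commit(colType);
}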

View File

@ -10,82 +10,20 @@
#include "comm.h"
#if defined(_MPI)
#ifdef _MPI
// subroutines local to this module
static int sizeOfRank(int rank, int size, int N)
static int sum(int* sizes, int position)
{
return N / size + ((N % size > rank) ? 1 : 0);
int sum = 0;
for (int i = 0; i < position; i++) {
sum += sizes[i];
}
return sum;
}
static void setupCommunication(Comm* c, int direction, int layer)
{
MPI_Datatype type;
size_t dblsize = sizeof(double);
int imaxLocal = c->imaxLocal;
int jmaxLocal = c->jmaxLocal;
int sizes[NDIMS];
int subSizes[NDIMS];
int starts[NDIMS];
int offset = 0;
sizes[IDIM] = imaxLocal + 2;
sizes[JDIM] = jmaxLocal + 2;
if (layer == HALO) {
offset = 1;
}
switch (direction) {
case LEFT:
subSizes[IDIM] = 1;
subSizes[JDIM] = jmaxLocal;
starts[IDIM] = 1 - offset;
starts[JDIM] = 1;
break;
case RIGHT:
subSizes[IDIM] = 1;
subSizes[JDIM] = jmaxLocal;
starts[IDIM] = imaxLocal + offset;
starts[JDIM] = 1;
break;
case BOTTOM:
subSizes[IDIM] = imaxLocal;
subSizes[JDIM] = 1;
starts[IDIM] = 1;
starts[JDIM] = 1 - offset;
break;
case TOP:
subSizes[IDIM] = imaxLocal;
subSizes[JDIM] = 1;
starts[IDIM] = 1;
starts[JDIM] = jmaxLocal + offset;
break;
}
MPI_Type_create_subarray(NDIMS,
sizes,
subSizes,
starts,
MPI_ORDER_C,
MPI_DOUBLE,
&type);
MPI_Type_commit(&type);
if (layer == HALO) {
c->rbufferTypes[direction] = type;
} else if (layer == BULK) {
c->sbufferTypes[direction] = type;
}
}
static void assembleResult(Comm* c,
double* src,
double* dst,
int imaxLocal[],
int jmaxLocal[],
int offset[],
int jmax,
int imax)
static void assembleResult(Comm* c, double* src, double* dst, int jmax, int imax)
{
MPI_Request* requests;
int numRequests = 1;
@ -98,11 +36,27 @@ static void assembleResult(Comm* c,
requests = (MPI_Request*)malloc(numRequests * sizeof(MPI_Request));
/* all ranks send their bulk array */
/* all ranks send their bulk array, but including external boundary layer */
MPI_Datatype bulkType;
int oldSizes[NDIMS] = { c->jmaxLocal + 2, c->imaxLocal + 2 };
int newSizes[NDIMS] = { c->jmaxLocal, c->imaxLocal };
int starts[NDIMS] = { 1, 1 };
if (commIsBoundary(c, LEFT)) {
newSizes[IDIM] += 1;
starts[IDIM] = 0;
}
if (commIsBoundary(c, RIGHT)) {
newSizes[IDIM] += 1;
}
if (commIsBoundary(c, BOTTOM)) {
newSizes[JDIM] += 1;
starts[JDIM] = 0;
}
if (commIsBoundary(c, TOP)) {
newSizes[JDIM] += 1;
}
MPI_Type_create_subarray(NDIMS,
oldSizes,
newSizes,
@ -111,16 +65,23 @@ static void assembleResult(Comm* c,
MPI_DOUBLE,
&bulkType);
MPI_Type_commit(&bulkType);
MPI_Isend(src, 1, bulkType, 0, 0, c->comm, &requests[0]);
int newSizesI[c->size];
int newSizesJ[c->size];
MPI_Gather(&newSizes[IDIM], 1, MPI_INT, newSizesI, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Gather(&newSizes[JDIM], 1, MPI_INT, newSizesJ, 1, MPI_INT, 0, MPI_COMM_WORLD);
/* rank 0 assembles the subdomains */
if (c->rank == 0) {
for (int i = 0; i < c->size; i++) {
MPI_Datatype domainType;
int oldSizes[NDIMS] = { jmax, imax };
int newSizes[NDIMS] = { jmaxLocal[i], imaxLocal[i] };
int starts[NDIMS] = { offset[i * NDIMS + JDIM], offset[i * NDIMS + IDIM] };
int oldSizes[NDIMS] = { jmax + 2, imax + 2 };
int newSizes[NDIMS] = { newSizesJ[i], newSizesI[i] };
int coords[NDIMS];
MPI_Cart_coords(c->comm, i, NDIMS, coords);
int starts[NDIMS] = { sum(newSizesJ, coords[JDIM]),
sum(newSizesI, coords[IDIM]) };
MPI_Type_create_subarray(NDIMS,
oldSizes,
newSizes,
@ -136,34 +97,12 @@ static void assembleResult(Comm* c,
MPI_Waitall(numRequests, requests, MPI_STATUSES_IGNORE);
}
static int sum(int* sizes, int position)
{
int sum = 0;
for (int i = 0; i < position; i++) {
sum += sizes[i];
}
return sum;
}
#endif // defined _MPI
// exported subroutines
void commReduction(double* v, int op)
{
#if defined(_MPI)
if (op == MAX) {
MPI_Allreduce(MPI_IN_PLACE, v, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
} else if (op == SUM) {
MPI_Allreduce(MPI_IN_PLACE, v, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
}
#endif
}
int commIsBoundary(Comm* c, int direction)
{
#if defined(_MPI)
#ifdef _MPI
switch (direction) {
case LEFT:
return c->coords[IDIM] == 0;
@ -185,25 +124,24 @@ int commIsBoundary(Comm* c, int direction)
void commExchange(Comm* c, double* grid)
{
#if defined(_MPI)
#ifdef _MPI
int counts[NDIRS] = { 1, 1, 1, 1 };
MPI_Aint displs[NDIRS] = { 0, 0, 0, 0 };
MPI_Neighbor_alltoallw(grid,
counts,
displs,
c->sbufferTypes,
c->sdispls,
c->bufferTypes,
grid,
counts,
displs,
c->rbufferTypes,
c->rdispls,
c->bufferTypes,
c->comm);
#endif
}
void commShift(Comm* c, double* f, double* g)
{
#if defined(_MPI)
#ifdef _MPI
MPI_Request requests[4] = { MPI_REQUEST_NULL,
MPI_REQUEST_NULL,
MPI_REQUEST_NULL,
@ -211,25 +149,35 @@ void commShift(Comm* c, double* f, double* g)
/* shift G */
/* receive ghost cells from bottom neighbor */
MPI_Irecv(g,
double* buf = g + 1;
MPI_Irecv(buf,
1,
c->rbufferTypes[BOTTOM],
c->bufferTypes[BOTTOM],
c->neighbours[BOTTOM],
0,
c->comm,
&requests[0]);
/* send ghost cells to top neighbor */
MPI_Isend(g, 1, c->sbufferTypes[TOP], c->neighbours[TOP], 0, c->comm, &requests[1]);
buf = g + (c->jmaxLocal) * (c->imaxLocal + 2) + 1;
MPI_Isend(buf, 1, c->bufferTypes[TOP], c->neighbours[TOP], 0, c->comm, &requests[1]);
/* shift F */
/* receive ghost cells from left neighbor */
MPI_Irecv(f, 1, c->rbufferTypes[LEFT], c->neighbours[LEFT], 1, c->comm, &requests[2]);
buf = f + (c->imaxLocal + 2);
MPI_Irecv(buf,
1,
c->bufferTypes[LEFT],
c->neighbours[LEFT],
1,
c->comm,
&requests[2]);
/* send ghost cells to right neighbor */
MPI_Isend(f,
buf = f + (c->imaxLocal + 2) + (c->imaxLocal);
MPI_Isend(buf,
1,
c->sbufferTypes[RIGHT],
c->bufferTypes[RIGHT],
c->neighbours[RIGHT],
1,
c->comm,
@ -239,7 +187,6 @@ void commShift(Comm* c, double* f, double* g)
#endif
}
// TODO Merge with seq
void commCollectResult(Comm* c,
double* ug,
double* vg,
@ -250,6 +197,8 @@ void commCollectResult(Comm* c,
int jmax,
int imax)
{
#ifdef _MPI
int offset[c->size * NDIMS];
int imaxLocal[c->size];
int jmaxLocal[c->size];
@ -276,56 +225,19 @@ void commCollectResult(Comm* c,
}
/* collect P */
assembleResult(c, p, pg, imaxLocal, jmaxLocal, offset, jmax, imax);
assembleResult(c, p, pg, jmax, imax);
/* collect U */
assembleResult(c, u, ug, imaxLocal, jmaxLocal, offset, jmax, imax);
assembleResult(c, u, ug, jmax, imax);
/* collect V */
assembleResult(c, v, vg, imaxLocal, jmaxLocal, offset, jmax, imax);
}
void commPrintConfig(Comm* c)
{
#if defined(_MPI)
fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD);
if (commIsMaster(c)) {
printf("Communication setup:\n");
}
for (int i = 0; i < c->size; i++) {
if (i == c->rank) {
printf("\tRank %d of %d\n", c->rank, c->size);
printf("\tNeighbours (bottom, top, left, right): %d %d, %d, %d\n",
c->neighbours[BOTTOM],
c->neighbours[TOP],
c->neighbours[LEFT],
c->neighbours[RIGHT]);
printf("\tCoordinates (j,i) %d %d\n", c->coords[JDIM], c->coords[IDIM]);
printf("\tLocal domain size (j,i) %dx%d\n", c->jmaxLocal, c->imaxLocal);
fflush(stdout);
}
}
MPI_Barrier(MPI_COMM_WORLD);
#endif
}
void commInit(Comm* c, int argc, char** argv)
{
#if defined(_MPI)
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &(c->rank));
MPI_Comm_size(MPI_COMM_WORLD, &(c->size));
#else
c->rank = 0;
c->size = 1;
assembleResult(c, v, vg, jmax, imax);
#endif
}
void commPartition(Comm* c, int jmax, int imax)
{
#if defined(_MPI)
#ifdef _MPI
int dims[NDIMS] = { 0, 0 };
int periods[NDIMS] = { 0, 0 };
MPI_Dims_create(c->size, NDIMS, dims);
@ -337,29 +249,35 @@ void commPartition(Comm* c, int jmax, int imax)
c->imaxLocal = sizeOfRank(c->rank, dims[IDIM], imax);
c->jmaxLocal = sizeOfRank(c->rank, dims[JDIM], jmax);
// setup buffer types for communication
setupCommunication(c, LEFT, BULK);
setupCommunication(c, LEFT, HALO);
setupCommunication(c, RIGHT, BULK);
setupCommunication(c, RIGHT, HALO);
setupCommunication(c, BOTTOM, BULK);
setupCommunication(c, BOTTOM, HALO);
setupCommunication(c, TOP, BULK);
setupCommunication(c, TOP, HALO);
MPI_Datatype jBufferType;
MPI_Type_contiguous(c->imaxLocal, MPI_DOUBLE, &jBufferType);
MPI_Type_commit(&jBufferType);
MPI_Datatype iBufferType;
MPI_Type_vector(c->jmaxLocal, 1, c->imaxLocal + 2, MPI_DOUBLE, &iBufferType);
MPI_Type_commit(&iBufferType);
// in the order of the dimensions i->0, j->1
// first negative direction, then positive direction
size_t dblsize = sizeof(double);
int imaxLocal = c->imaxLocal;
int jmaxLocal = c->jmaxLocal;
c->bufferTypes[LEFT] = iBufferType;
c->bufferTypes[RIGHT] = iBufferType;
c->bufferTypes[BOTTOM] = jBufferType;
c->bufferTypes[TOP] = jBufferType;
c->sdispls[LEFT] = ((imaxLocal + 2) + 1) * dblsize; // send left
c->sdispls[RIGHT] = ((imaxLocal + 2) + imaxLocal) * dblsize; // send right
c->sdispls[BOTTOM] = ((imaxLocal + 2) + 1) * dblsize; // send bottom
c->sdispls[TOP] = (jmaxLocal * (imaxLocal + 2) + 1) * dblsize; // send top
c->rdispls[LEFT] = (imaxLocal + 2) * dblsize; // recv left
c->rdispls[RIGHT] = ((imaxLocal + 2) + (imaxLocal + 1)) * dblsize; // recv right
c->rdispls[BOTTOM] = 1 * dblsize; // recv bottom
c->rdispls[TOP] = ((jmaxLocal + 1) * (imaxLocal + 2) + 1) * dblsize; // recv top
#else
c->imaxLocal = imax;
c->jmaxLocal = jmax;
#endif
}
void commFinalize(Comm* c)
{
#if defined(_MPI)
for (int i = 0; i < NDIRS; i++) {
MPI_Type_free(&c->sbufferTypes[i]);
MPI_Type_free(&c->rbufferTypes[i]);
}
MPI_Finalize();
#endif
}
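v3 drops point-to-point entirely: a single MPI_Neighbor_alltoallw on the Cartesian communicator moves all four halos at once. Since the same base pointer (grid) is passed for both send and receive, the per-neighbour offsets travel in the MPI_Aint displacement arrays, in bytes (hence the sizeof(double) scaling above), and the array order must follow the topology's fixed neighbour order: for each dimension, the negative direction first, then the positive. That is presumably also why this commit swaps the IDIM/JDIM enum values in comm.h, so that dimension 0 is i and LEFT/RIGHT/BOTTOM/TOP line up with the neighbour order. A sketch of the resulting call (illustrative wrapper; cartComm must carry a Cartesian topology from MPI_Cart_create):

#include <mpi.h>

/* Sketch: one neighbourhood collective replaces four send/recv pairs.
 * sdispls/rdispls are byte offsets from grid; types holds one datatype per
 * direction; entries for MPI_PROC_NULL neighbours are ignored. Passing grid
 * for both arguments mirrors the source; the displacements select disjoint
 * interior (send) and ghost (receive) regions. */
static void exchangeAllHalos(double* grid,
                             MPI_Aint sdispls[4], MPI_Aint rdispls[4],
                             MPI_Datatype types[4], MPI_Comm cartComm)
{
    int counts[4] = { 1, 1, 1, 1 };
    MPI_Neighbor_alltoallw(grid, counts, sdispls, types,
                           grid, counts, rdispls, types, cartComm);
}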

View File

@ -0,0 +1,123 @@
/*
* Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
* All rights reserved. This file is part of nusif-solver.
* Use of this source code is governed by a MIT style
* license that can be found in the LICENSE file.
*/
#include <stdio.h>
#include <stdlib.h>
#include "comm.h"
// subroutines local to this module
int sizeOfRank(int rank, int size, int N)
{
return N / size + ((N % size > rank) ? 1 : 0);
}
void commReduction(double* v, int op)
{
#ifdef _MPI
if (op == MAX) {
MPI_Allreduce(MPI_IN_PLACE, v, 1, MPI_DOUBLE, MPI_MAX, MPI_COMM_WORLD);
} else if (op == SUM) {
MPI_Allreduce(MPI_IN_PLACE, v, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
}
#endif
}
void commPrintConfig(Comm* c)
{
#ifdef _MPI
fflush(stdout);
MPI_Barrier(MPI_COMM_WORLD);
if (commIsMaster(c)) {
printf("Communication setup:\n");
}
for (int i = 0; i < c->size; i++) {
if (i == c->rank) {
printf("\tRank %d of %d\n", c->rank, c->size);
printf("\tNeighbours (bottom, top, left, right): %d %d, %d, %d\n",
c->neighbours[BOTTOM],
c->neighbours[TOP],
c->neighbours[LEFT],
c->neighbours[RIGHT]);
printf("\tCoordinates (i,j) %d %d\n", c->coords[IDIM], c->coords[JDIM]);
printf("\tLocal domain size (i,j) %dx%d\n", c->imaxLocal, c->jmaxLocal);
fflush(stdout);
}
}
MPI_Barrier(MPI_COMM_WORLD);
#endif
}
void commInit(Comm* c, int argc, char** argv)
{
#ifdef _MPI
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &(c->rank));
MPI_Comm_size(MPI_COMM_WORLD, &(c->size));
#else
c->rank = 0;
c->size = 1;
#endif
}
void commTestInit(Comm* c, double* p, double* f, double* g)
{
int imax = c->imaxLocal;
int jmax = c->jmaxLocal;
int rank = c->rank;
for (int j = 0; j < jmax + 2; j++) {
for (int i = 0; i < imax + 2; i++) {
p[j * (imax + 2) + i] = rank;
f[j * (imax + 2) + i] = rank;
g[j * (imax + 2) + i] = rank;
}
}
}
static void testWriteFile(char* filename, double* grid, int imax, int jmax)
{
FILE* fp = fopen(filename, "w");
if (fp == NULL) {
printf("Error!\n");
exit(EXIT_FAILURE);
}
for (int j = 0; j < jmax + 2; j++) {
for (int i = 0; i < imax + 2; i++) {
fprintf(fp, "%f ", grid[j * (imax + 2) + i]);
}
fprintf(fp, "\n");
}
fclose(fp);
}
void commTestWrite(Comm* c, double* p, double* f, double* g)
{
int imax = c->imaxLocal;
int jmax = c->jmaxLocal;
int rank = c->rank;
char filename[30];
snprintf(filename, 30, "ptest-%d.dat", rank);
testWriteFile(filename, p, imax, jmax);
snprintf(filename, 30, "ftest-%d.dat", rank);
testWriteFile(filename, f, imax, jmax);
snprintf(filename, 30, "gtest-%d.dat", rank);
testWriteFile(filename, g, imax, jmax);
}
void commFinalize(Comm* c)
{
#ifdef _MPI
MPI_Finalize();
#endif
}
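The shared sizeOfRank implements the usual balanced block distribution: every rank gets N / size cells and the first N % size ranks get one extra. For example, jmax = 80 on size = 3 ranks gives 80 / 3 = 26 remainder 2, so ranks 0 and 1 own 27 rows and rank 2 owns 26 (27 + 27 + 26 = 80).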

View File

@ -11,7 +11,7 @@
#endif
enum direction { LEFT = 0, RIGHT, BOTTOM, TOP, NDIRS };
enum dimension { JDIM = 0, IDIM, NDIMS };
enum dimension { IDIM = 0, JDIM, NDIMS };
enum layer { HALO = 0, BULK };
enum op { MAX = 0, SUM };
@ -20,15 +20,19 @@ typedef struct {
int size;
#if defined(_MPI)
MPI_Comm comm;
MPI_Datatype sbufferTypes[NDIRS];
MPI_Datatype rbufferTypes[NDIRS];
MPI_Datatype bufferTypes[NDIRS];
MPI_Aint sdispls[NDIRS];
MPI_Aint rdispls[NDIRS];
#endif
int neighbours[NDIRS];
int coords[NDIMS], dims[NDIMS];
int imaxLocal, jmaxLocal;
} Comm;
extern int sizeOfRank(int rank, int size, int N);
extern void commInit(Comm* c, int argc, char** argv);
extern void commTestInit(Comm* c, double* p, double* f, double* g);
extern void commTestWrite(Comm* c, double* p, double* f, double* g);
extern void commFinalize(Comm* c);
extern void commPartition(Comm* c, int jmax, int imax);
extern void commPrintConfig(Comm*);

View File

@ -15,6 +15,26 @@
#include "solver.h"
#include "timing.h"
static void writeResults(Solver* s)
{
#ifdef _MPI
size_t bytesize = (s->imax + 2) * (s->jmax + 2) * sizeof(double);
double* ug = allocate(64, bytesize);
double* vg = allocate(64, bytesize);
double* pg = allocate(64, bytesize);
commCollectResult(&s->comm, ug, vg, pg, s->u, s->v, s->p, s->jmax, s->imax);
writeResult(s, ug, vg, pg);
free(ug);
free(vg);
free(pg);
#else
writeResult(s, s->u, s->v, s->p);
#endif
}
int main(int argc, char** argv)
{
int rank;
@ -35,7 +55,18 @@ int main(int argc, char** argv)
if (commIsMaster(&s.comm)) {
printParameter(&p);
}
initSolver(&s, &p);
#ifdef TEST
commPrintConfig(&s.comm);
commTestInit(&s.comm, s.p, s.f, s.g);
commExchange(&s.comm, s.p);
commShift(&s.comm, s.f, s.g);
commTestWrite(&s.comm, s.p, s.f, s.g);
writeResults(&s);
commFinalize(&s.comm);
exit(EXIT_SUCCESS);
#endif
#ifndef VERBOSE
initProgress(s.te);
#endif
@ -70,15 +101,8 @@ int main(int argc, char** argv)
if (commIsMaster(&s.comm)) {
printf("Solution took %.2fs\n", timeStop - timeStart);
}
size_t bytesize = s.imax * s.jmax * sizeof(double);
double* ug = allocate(64, bytesize);
double* vg = allocate(64, bytesize);
double* pg = allocate(64, bytesize);
commCollectResult(&s.comm, ug, vg, pg, s.u, s.v, s.p, s.jmax, s.imax);
writeResult(&s, ug, vg, pg);
writeResults(&s);
commFinalize(&s.comm);
return EXIT_SUCCESS;
}
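With TEST defined (see the commented-out OPTIONS line in config.mk), the run becomes a pure communication smoke test: commTestInit fills every cell of p, f and g with the owning rank, one commExchange and one commShift are performed, and commTestWrite dumps per-rank ptest-/ftest-/gtest-<rank>.dat files. After a correct exchange each ghost layer should hold the neighbouring rank's number while the interior still holds the rank's own, so off-by-one errors in the buffer offsets show up directly in the .dat files (which the extended distclean target above now also removes).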

View File

@ -7,30 +7,24 @@
#include <math.h>
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "progress.h"
static double _end;
static int _current;
static int _rank = -1;
void initProgress(double end)
{
MPI_Comm_rank(MPI_COMM_WORLD, &_rank);
_end = end;
_current = 0;
if (_rank == 0) {
printf("[ ]");
fflush(stdout);
}
}
void printProgress(double current)
{
if (_rank == 0) {
int new = (int)rint((current / _end) * 10.0);
if (new > _current) {
@ -48,13 +42,10 @@ void printProgress(double current)
printf("\r[%s]", progress);
}
fflush(stdout);
}
}
void stopProgress()
{
if (_rank == 0) {
printf("\n");
fflush(stdout);
}
}

View File

@ -511,9 +511,9 @@ void writeResult(Solver* s, double* u, double* v, double* p)
exit(EXIT_FAILURE);
}
for (int j = 1; j < jmax; j++) {
for (int j = 1; j <= jmax; j++) {
y = (double)(j - 0.5) * dy;
for (int i = 1; i < imax; i++) {
for (int i = 1; i <= imax; i++) {
x = (double)(i - 0.5) * dx;
fprintf(fp, "%.2f %.2f %f\n", x, y, p[j * (imax) + i]);
}
@ -529,14 +529,14 @@ void writeResult(Solver* s, double* u, double* v, double* p)
exit(EXIT_FAILURE);
}
for (int j = 1; j < jmax; j++) {
for (int j = 1; j <= jmax; j++) {
y = dy * (j - 0.5);
for (int i = 1; i < imax; i++) {
for (int i = 1; i <= imax; i++) {
x = dx * (i - 0.5);
double vel_u = (u[j * (imax) + i] + u[j * (imax) + (i - 1)]) / 2.0;
double vel_v = (v[j * (imax) + i] + v[(j - 1) * (imax) + i]) / 2.0;
double len = sqrt((vel_u * vel_u) + (vel_v * vel_v));
fprintf(fp, "%.2f %.2f %f %f %f\n", x, y, vel_u, vel_v, len);
double velU = (u[j * (imax + 2) + i] + u[j * (imax + 2) + (i - 1)]) / 2.0;
double velV = (v[j * (imax + 2) + i] + v[(j - 1) * (imax + 2) + i]) / 2.0;
double len = sqrt((velU * velU) + (velV * velV));
fprintf(fp, "%.2f %.2f %f %f %f\n", x, y, velU, velV, len);
}
}
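All four fixes in this hunk follow from one layout rule: the collected result arrays carry ghost layers, so they are (imax + 2) x (jmax + 2) and interior cell (i, j) with 1 <= i <= imax lives at j * (imax + 2) + i; the old code iterated to < imax and indexed with stride imax. Note that the pressure loop above still indexes p with stride imax; if pg is allocated like ug/vg (see writeResults in main.c), that stride presumably needs the same + 2 fix, which may be part of why the commit message flags the solver as not yet working. A tiny hypothetical helper, not in the repo, that would centralise the rule:

/* Hypothetical accessor: row-major array with one ghost layer on each
 * side, padded row width imax + 2. */
static inline double cellAt(const double* grid, int i, int j, int imax)
{
    return grid[j * (imax + 2) + i];
}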

Binary image file changed (not shown; 9.1 KiB before).

View File

@ -47,6 +47,8 @@ clean:
distclean: clean
$(info ===> DIST CLEAN)
@rm -f $(TARGET)
@rm -f *.dat
@rm -f *.png
info:
$(info $(CFLAGS))

View File

@ -15,7 +15,7 @@ bcRight 1 #
gx 0.0 # Body forces (e.g. gravity)
gy 0.0 #
re 10.0 # Reynolds number
re 100.0 # Reynolds number
u_init 0.0 # initial value for velocity in x-direction
v_init 0.0 # initial value for velocity in y-direction

View File

@ -6,7 +6,6 @@
*/
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "progress.h"