/*
 * Copyright (C) NHR@FAU, University Erlangen-Nuremberg.
 * All rights reserved. This file is part of nusif-solver.
 * Use of this source code is governed by a MIT style
 * license that can be found in the LICENSE file.
 */
#include <stdio.h>
#include <stdlib.h>

#include "comm.h"

#ifdef _MPI
// subroutines local to this module

/* prefix sum: total of sizes[0..position-1] */
static int sum(int* sizes, int position)
{
    int sum = 0;

    for (int i = 0; i < position; i++) {
        sum += sizes[i];
    }

    return sum;
}

/* Gather the distributed array src (local extents imaxLocal x jmaxLocal plus
 * ghost layers) into the global array dst (imax x jmax plus boundary layer)
 * on rank 0. */
static void assembleResult(Comm* c, double* src, double* dst, int imax, int jmax)
{
    int numRequests = 1;

    if (c->rank == 0) {
        numRequests = c->size + 1;
    }

    MPI_Request* requests = (MPI_Request*)malloc(numRequests * sizeof(MPI_Request));

    /* all ranks send their bulk array, including the external boundary layer */
    MPI_Datatype bulkType;
    int oldSizes[NDIMS] = { c->jmaxLocal + 2, c->imaxLocal + 2 };
    int newSizes[NDIMS] = { c->jmaxLocal, c->imaxLocal };
    int starts[NDIMS]   = { 1, 1 };

    /* ranks on the domain boundary also send their physical boundary layer */
    if (commIsBoundary(c, LEFT)) {
        newSizes[IDIM] += 1;
        starts[IDIM] = 0;
    }
    if (commIsBoundary(c, RIGHT)) {
        newSizes[IDIM] += 1;
    }
    if (commIsBoundary(c, BOTTOM)) {
        newSizes[JDIM] += 1;
        starts[JDIM] = 0;
    }
    if (commIsBoundary(c, TOP)) {
        newSizes[JDIM] += 1;
    }

    MPI_Type_create_subarray(NDIMS,
        oldSizes,
        newSizes,
        starts,
        MPI_ORDER_C,
        MPI_DOUBLE,
        &bulkType);
    MPI_Type_commit(&bulkType);
    MPI_Isend(src, 1, bulkType, 0, 0, c->comm, &requests[0]);

    /* rank 0 needs the subdomain extents of all ranks to compute the target
     * offsets. Ranks in c->comm coincide with MPI_COMM_WORLD because
     * MPI_Cart_create was called with reorder = 0. */
    int newSizesI[c->size];
    int newSizesJ[c->size];
    MPI_Gather(&newSizes[IDIM], 1, MPI_INT, newSizesI, 1, MPI_INT, 0, c->comm);
    MPI_Gather(&newSizes[JDIM], 1, MPI_INT, newSizesJ, 1, MPI_INT, 0, c->comm);

    /* rank 0 assembles the subdomains */
    if (c->rank == 0) {
        for (int i = 0; i < c->size; i++) {
            MPI_Datatype domainType;
            int oldSizes[NDIMS] = { jmax + 2, imax + 2 };
            int newSizes[NDIMS] = { newSizesJ[i], newSizesI[i] };
            int coords[NDIMS];
            MPI_Cart_coords(c->comm, i, NDIMS, coords);
            int starts[NDIMS] = { sum(newSizesJ, coords[JDIM]),
                sum(newSizesI, coords[IDIM]) };

            printf("Rank: %d, Coords(i,j): %d %d, Size(i,j): %d %d, "
                   "Target Size(i,j): %d %d, Starts(i,j): %d %d\n",
                i,
                coords[IDIM],
                coords[JDIM],
                oldSizes[IDIM],
                oldSizes[JDIM],
                newSizes[IDIM],
                newSizes[JDIM],
                starts[IDIM],
                starts[JDIM]);

            MPI_Type_create_subarray(NDIMS,
                oldSizes,
                newSizes,
                starts,
                MPI_ORDER_C,
                MPI_DOUBLE,
                &domainType);
            MPI_Type_commit(&domainType);
            MPI_Irecv(dst, 1, domainType, i, 0, c->comm, &requests[i + 1]);
            /* MPI defers the deallocation until the pending receive completes */
            MPI_Type_free(&domainType);
        }
    }

    MPI_Waitall(numRequests, requests, MPI_STATUSES_IGNORE);
    MPI_Type_free(&bulkType);
    free(requests);
}
#endif // defined _MPI

// exported subroutines

/* return true if this rank touches the physical domain boundary in the given
 * direction; without MPI every direction is a boundary */
int commIsBoundary(Comm* c, int direction)
{
#ifdef _MPI
    switch (direction) {
    case LEFT:
        return c->coords[IDIM] == 0;
    case RIGHT:
        return c->coords[IDIM] == (c->dims[IDIM] - 1);
    case BOTTOM:
        return c->coords[JDIM] == 0;
    case TOP:
        return c->coords[JDIM] == (c->dims[JDIM] - 1);
    }
#endif
    return 1;
}

/* exchange the ghost layers of grid with all four neighbours */
void commExchange(Comm* c, double* grid)
{
#ifdef _MPI
    double* sbuf[NDIRS];
    double* rbuf[NDIRS];
    MPI_Request requests[8];
    for (int i = 0; i < 8; i++)
        requests[i] = MPI_REQUEST_NULL;

    /* receive into the ghost layers ... */
    rbuf[LEFT]   = grid + (c->imaxLocal + 2);
    rbuf[RIGHT]  = grid + (c->imaxLocal + 2) + (c->imaxLocal + 1);
    rbuf[BOTTOM] = grid + 1;
    rbuf[TOP]    = grid + (c->jmaxLocal + 1) * (c->imaxLocal + 2) + 1;

    /* ... and send the outermost interior layers */
    sbuf[LEFT]   = grid + (c->imaxLocal + 2) + 1;
    sbuf[RIGHT]  = grid + (c->imaxLocal + 2) + (c->imaxLocal);
    sbuf[BOTTOM] = grid + (c->imaxLocal + 2) + 1;
    sbuf[TOP]    = grid + (c->jmaxLocal) * (c->imaxLocal + 2) + 1;

    for (int i = 0; i < NDIRS; i++) {
        /* receives are tagged with the sender's rank, sends with our own
         * rank, so the pairs match up; for MPI_PROC_NULL neighbours the tag
         * is irrelevant */
        int tag = 0;
        if (c->neighbours[i] != MPI_PROC_NULL) {
            tag = c->neighbours[i];
        }
        MPI_Irecv(rbuf[i],
            1,
            c->bufferTypes[i],
            c->neighbours[i],
            tag,
            c->comm,
            &requests[i * 2]);
        MPI_Isend(sbuf[i],
            1,
            c->bufferTypes[i],
            c->neighbours[i],
            c->rank,
            c->comm,
            &requests[i * 2 + 1]);
    }

    MPI_Waitall(8, requests, MPI_STATUSES_IGNORE);
#endif
}
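/*
 * Worked example for the ghost-layer indexing above (illustration only, not
 * used by the solver): with imaxLocal = 4 and jmaxLocal = 3 the local array
 * is row-major with padded row length imaxLocal + 2 = 6, so element (j,i)
 * lives at offset j*6 + i. The buffer offsets then are:
 *
 *   rbuf[LEFT]   =  6   (j=1, i=0: left ghost column)
 *   sbuf[LEFT]   =  7   (j=1, i=1: first interior column)
 *   rbuf[RIGHT]  = 11   (j=1, i=5: right ghost column)
 *   sbuf[RIGHT]  = 10   (j=1, i=4: last interior column)
 *   rbuf[BOTTOM] =  1   (j=0, i=1: bottom ghost row)
 *   sbuf[BOTTOM] =  7   (j=1, i=1: first interior row)
 *   rbuf[TOP]    = 25   (j=4, i=1: top ghost row)
 *   sbuf[TOP]    = 19   (j=3, i=1: last interior row)
 *
 * Columns are strided (bufferTypes[LEFT]/[RIGHT], an MPI_Type_vector); rows
 * are contiguous (bufferTypes[BOTTOM]/[TOP], an MPI_Type_contiguous). Both
 * are set up in commPartition below.
 */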
/* shift the staggered intermediate arrays F and G: G needs the ghost row
 * from the bottom neighbour, F the ghost column from the left neighbour */
void commShift(Comm* c, double* f, double* g)
{
#ifdef _MPI
    MPI_Request requests[4] = { MPI_REQUEST_NULL,
        MPI_REQUEST_NULL,
        MPI_REQUEST_NULL,
        MPI_REQUEST_NULL };

    /* shift G */
    /* receive ghost cells from bottom neighbour */
    double* buf = g + 1;
    MPI_Irecv(buf, 1, c->bufferTypes[BOTTOM], c->neighbours[BOTTOM], 0, c->comm, &requests[0]);

    /* send ghost cells to top neighbour */
    buf = g + (c->jmaxLocal) * (c->imaxLocal + 2) + 1;
    MPI_Isend(buf, 1, c->bufferTypes[TOP], c->neighbours[TOP], 0, c->comm, &requests[1]);

    /* shift F */
    /* receive ghost cells from left neighbour */
    buf = f + (c->imaxLocal + 2);
    MPI_Irecv(buf, 1, c->bufferTypes[LEFT], c->neighbours[LEFT], 1, c->comm, &requests[2]);

    /* send ghost cells to right neighbour */
    buf = f + (c->imaxLocal + 2) + (c->imaxLocal);
    MPI_Isend(buf, 1, c->bufferTypes[RIGHT], c->neighbours[RIGHT], 1, c->comm, &requests[3]);

    MPI_Waitall(4, requests, MPI_STATUSES_IGNORE);
#endif
}

/* gather the distributed u, v and p arrays into the global arrays ug, vg
 * and pg on rank 0 */
void commCollectResult(Comm* c,
    double* ug,
    double* vg,
    double* pg,
    double* u,
    double* v,
    double* p,
    int imax,
    int jmax)
{
#ifdef _MPI
    /* collect P */
    assembleResult(c, p, pg, imax, jmax);

    /* collect U */
    assembleResult(c, u, ug, imax, jmax);

    /* collect V */
    assembleResult(c, v, vg, imax, jmax);
#endif
}

/* set up the 2D Cartesian process grid, the neighbourhood, the local extents
 * and the MPI datatypes used for the ghost layer exchange */
void commPartition(Comm* c, int jmax, int imax)
{
#ifdef _MPI
    int dims[NDIMS]    = { 0, 0 };
    int periods[NDIMS] = { 0, 0 };
    MPI_Dims_create(c->size, NDIMS, dims);
    MPI_Cart_create(MPI_COMM_WORLD, NDIMS, dims, periods, 0, &c->comm);
    MPI_Cart_shift(c->comm, IDIM, 1, &c->neighbours[LEFT], &c->neighbours[RIGHT]);
    MPI_Cart_shift(c->comm, JDIM, 1, &c->neighbours[BOTTOM], &c->neighbours[TOP]);
    MPI_Cart_get(c->comm, NDIMS, c->dims, periods, c->coords);

    /* the local extent depends on the rank's coordinate in the process grid,
     * not on its global rank */
    c->imaxLocal = sizeOfRank(c->coords[IDIM], dims[IDIM], imax);
    c->jmaxLocal = sizeOfRank(c->coords[JDIM], dims[JDIM], jmax);

    /* one row of the local array, used for the bottom/top ghost layers */
    MPI_Datatype jBufferType;
    MPI_Type_contiguous(c->imaxLocal, MPI_DOUBLE, &jBufferType);
    MPI_Type_commit(&jBufferType);

    /* one column of the local array (stride = padded row length), used for
     * the left/right ghost layers */
    MPI_Datatype iBufferType;
    MPI_Type_vector(c->jmaxLocal, 1, c->imaxLocal + 2, MPI_DOUBLE, &iBufferType);
    MPI_Type_commit(&iBufferType);

    c->bufferTypes[LEFT]   = iBufferType;
    c->bufferTypes[RIGHT]  = iBufferType;
    c->bufferTypes[BOTTOM] = jBufferType;
    c->bufferTypes[TOP]    = jBufferType;
#else
    c->imaxLocal = imax;
    c->jmaxLocal = jmax;
#endif
}
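/*
 * Usage sketch (illustration only; the driver variables argc, argv, jmax,
 * imax and the solver arrays u, v, p, f, g, ug, vg, pg are assumed from the
 * surrounding solver code and are not part of this module). A driver built
 * on this module might look like:
 *
 *   Comm comm;
 *   MPI_Init(&argc, &argv);
 *   MPI_Comm_rank(MPI_COMM_WORLD, &comm.rank);
 *   MPI_Comm_size(MPI_COMM_WORLD, &comm.size);
 *   commPartition(&comm, jmax, imax);    // 2D domain decomposition
 *
 *   // per time step:
 *   //   commShift(&comm, f, g);         // make F/G ghost layers consistent
 *   //   commExchange(&comm, p);         // pressure halo exchange per SOR sweep
 *
 *   commCollectResult(&comm, ug, vg, pg, u, v, p, imax, jmax); // gather on rank 0
 *   MPI_Finalize();
 */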