Skip to content

Commit

Permalink
Merge pull request #814 from pierre-guillou/mpi
Browse files Browse the repository at this point in the history
Triangulation: Improve MPI support
  • Loading branch information
julien-tierny authored Jul 27, 2022
2 parents 4d35236 + a07c201 commit a3b0d63
Show file tree
Hide file tree
Showing 21 changed files with 1,382 additions and 822 deletions.
364 changes: 260 additions & 104 deletions core/base/abstractTriangulation/AbstractTriangulation.h

Large diffs are not rendered by default.

72 changes: 10 additions & 62 deletions core/base/common/MPIUtils.h
Original file line number Diff line number Diff line change
Expand Up @@ -9,14 +9,19 @@

#include <BaseClass.h>
#include <Timer.h>

#include <algorithm>
#include <array>
#include <limits>
#include <unordered_map>
#include <unordered_set>
#include <vector>

#if TTK_ENABLE_MPI

// disable the MPI C++ API
#define OMPI_SKIP_MPICXX 1

#include <mpi.h>

namespace ttk {
Expand Down Expand Up @@ -84,64 +89,6 @@ namespace ttk {
return elapsedTime;
};

/**
 * @brief Gather vectors on a specific rank
 *
 * @param[out] dst Container storing vectors copied from other ranks
 * @param[in] src Data to send to \ref destRank and to store in \ref dst
 * @param[in] destRank Destination process identifier
 * @return 0 in case of success
 */
template <typename T>
int gatherVectors(std::vector<std::vector<T>> &dst,
                  const std::vector<T> &src,
                  const int destRank) {

  if(!ttk::isRunningWithMPI()) {
    return -1;
  }

  // deduce the MPI datatype from a default-constructed value:
  // dereferencing src[0] would be undefined behavior when src is empty
  const T dummy{};
  const auto mpiType{ttk::getMPIType(dummy)};

  // src sizes (gathered on destRank)
  std::vector<unsigned long> vecSizes{};

  if(ttk::MPIrank_ == destRank) {
    vecSizes.resize(MPIsize_);
    dst.resize(MPIsize_);
  }

  const unsigned long localSize = src.size();
  // gather src sizes on destRank
  MPI_Gather(&localSize, 1, MPI_UNSIGNED_LONG, vecSizes.data(), 1,
             MPI_UNSIGNED_LONG, destRank, ttk::MPIcomm_);

  if(ttk::MPIrank_ == destRank) {
    // allocate dst with vecSizes
    for(int i = 0; i < ttk::MPIsize_; ++i) {
      if(i == destRank) {
        continue;
      }
      dst[i].resize(vecSizes[i]);
    }

    for(int i = 0; i < ttk::MPIsize_; ++i) {
      if(i == destRank) {
        continue;
      }
      // receive src content from other ranks
      MPI_Recv(dst[i].data(), dst[i].size(), mpiType, i, MPI_ANY_TAG,
               ttk::MPIcomm_, MPI_STATUS_IGNORE);
    }
    // src is a const reference: std::move() on it would silently degrade
    // to a copy anyway, so perform an explicit copy assignment
    dst[destRank] = src;

  } else {
    // send src content to destRank
    MPI_Send(src.data(), src.size(), mpiType, destRank, 0, ttk::MPIcomm_);
  }

  return 0;
}

/**
* @brief Request all ghost cell scalar data from one rank from their owning
* ranks and get the data
Expand All @@ -163,12 +110,13 @@ namespace ttk {
template <typename DT, typename IT>
int getGhostCellScalars(DT *scalarArray,
const int *const rankArray,
const IT *const globalIds,
const LongSimplexId *const globalIds,
const std::unordered_map<IT, IT> &gidToLidMap,
const std::unordered_set<int> &neighbors,
const int rankToSend,
const IT nVerts,
MPI_Comm communicator) {

if(!ttk::isRunningWithMPI()) {
return -1;
}
Expand Down Expand Up @@ -368,7 +316,7 @@ namespace ttk {
template <typename DT, typename IT>
int exchangeGhostCells(DT *scalarArray,
const int *const rankArray,
const IT *const globalIds,
const LongSimplexId *const globalIds,
const std::unordered_map<IT, IT> &gidToLidMap,
const IT nVerts,
MPI_Comm communicator) {
Expand Down Expand Up @@ -407,7 +355,7 @@ namespace ttk {
* @param[in] nVertices number of vertices in the arrays
*/
void inline produceRankArray(std::vector<int> &rankArray,
long int *globalIds,
LongSimplexId *globalIds,
unsigned char *ghostCells,
int nVertices,
double *boundingBox) {
Expand Down Expand Up @@ -696,7 +644,7 @@ namespace ttk {
std::unordered_map<IT, IT> &gidToLidMap,
const size_t nVerts,
const DT *const scalars,
const IT *const globalIds,
const LongSimplexId *const globalIds,
const int *const rankArray) {
for(size_t i = 0; i < nVerts; i++) {
IT globalId = globalIds[i];
Expand Down
33 changes: 0 additions & 33 deletions core/base/compactTriangulation/CompactTriangulation.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -2512,36 +2512,3 @@ int CompactTriangulation::getBoundaryCells(ImplicitCluster *const nodePtr,

return 0;
}
#if TTK_ENABLE_MPI
int CompactTriangulation::preconditionDistributedVertices() {
if(this->hasPreconditionedDistributedVertices_) {
return 0;
}
if(!isRunningWithMPI()) {
return -1;
}
if(this->globalIdsArray_ == nullptr) {
this->printWrn("Missing global identifiers array!");
return -2;
}

// allocate memory
this->vertexLidToGid_.resize(this->vertexNumber_, -1);
this->vertexGidToLid_.reserve(this->vertexNumber_);

for(SimplexId i = 0; i < this->vertexNumber_; ++i) {
this->vertexLidToGid_[i] = this->globalIdsArray_[i];
this->vertexGidToLid_[this->globalIdsArray_[i]] = i;
}

if(MPIrank_ == 0) {
this->printMsg("Domain contains "
+ std::to_string(this->getNumberOfVerticesInternal())
+ " vertices");
}

this->hasPreconditionedDistributedVertices_ = true;

return 0;
}
#endif // TTK_ENABLE_MPI
26 changes: 0 additions & 26 deletions core/base/compactTriangulation/CompactTriangulation.h
Original file line number Diff line number Diff line change
Expand Up @@ -1327,32 +1327,6 @@ namespace ttk {
return (exnode->boundaryVertices_)[localVertexId];
}

#ifdef TTK_ENABLE_MPI
int preconditionDistributedVertices() override;
/// Get the global identifier of the vertex with local id \p ltid
/// (-1 if \p ltid is out of range, in non-KAMIKAZE builds).
inline SimplexId TTK_TRIANGULATION_INTERNAL(getVertexGlobalId)(
  const SimplexId &ltid) const override {
#ifndef TTK_ENABLE_KAMIKAZE
  const bool inRange
    = ltid >= 0 && ltid < this->getNumberOfVerticesInternal();
  if(!inRange) {
    return -1;
  }
#endif // TTK_ENABLE_KAMIKAZE
  return this->vertexLidToGid_[ltid];
}
/// Get a pointer to the global-to-local vertex identifiers map
/// (filled by preconditionDistributedVertices).
inline const std::unordered_map<SimplexId, SimplexId> *
TTK_TRIANGULATION_INTERNAL(getVertexGlobalIdMap)() const override {
return &this->vertexGidToLid_;
}
/// Get the local identifier of the vertex with global id \p gtid
/// (-1 if \p gtid is not a local vertex, in non-KAMIKAZE builds).
inline SimplexId TTK_TRIANGULATION_INTERNAL(getVertexLocalId)(
  const SimplexId &gtid) const override {
#ifndef TTK_ENABLE_KAMIKAZE
  // single hash lookup instead of find() followed by at()
  const auto it = this->vertexGidToLid_.find(gtid);
  if(it == this->vertexGidToLid_.end()) {
    return -1;
  }
  return it->second;
#else
  // KAMIKAZE builds keep at(): throws on a missing key, as before
  return this->vertexGidToLid_.at(gtid);
#endif // TTK_ENABLE_KAMIKAZE
}
#endif // TTK_ENABLE_MPI

inline int preconditionBoundaryEdgesInternal() override {
return 0;
}
Expand Down
Loading

0 comments on commit a3b0d63

Please sign in to comment.