Commit
[arcane,std] In 'MetisGraphGather' and 'MetisWrapper', use 'IParallelMng' instead of 'MPI_Comm'.
grospelliergilles committed Oct 6, 2024
1 parent f9d8097 commit c0b5539
Showing 4 changed files with 155 additions and 143 deletions.
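A rough sketch of what this change means for callers (hypothetical caller code, not taken from the diff; only the MetisGraphGather method names and parameters come from the files below):

    // Before this commit the caller had to supply the MPI communicator itself
    // (old signature, shown commented out):
    //   MetisGraphGather gatherer;
    //   gatherer.gatherGraph(need_part, String(), mpi_comm, vtxdist, ncon, my_graph, graph);

    // After this commit the class is constructed from an IParallelMng and
    // derives the communicator, ranks and master I/O rank internally:
    MetisGraphGather gatherer(parallel_mng);   // IParallelMng* of the subdomain (assumed available)
    gatherer.gatherGraph(need_part, vtxdist, ncon, my_graph, graph);
    gatherer.scatterPart(vtxdist, part, my_part);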
184 changes: 103 additions & 81 deletions arcane/src/arcane/std/MetisGraphGather.cc
@@ -14,54 +14,78 @@
#include "arcane/std/internal/MetisGraphGather.h"

#include "arcane/utils/CheckedConvert.h"
#include "arcane/utils/FatalErrorException.h"

#include "arcane/core/IParallelMng.h"

#include <algorithm>

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

namespace
{
MPI_Comm _getMPICommunicator(Arcane::IParallelMng* pm)
{
return static_cast<MPI_Comm>(pm->communicator());
}
} // namespace

namespace Arcane
{

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

MetisGraphGather::
MetisGraphGather(IParallelMng* pm)
: TraceAccessor(pm->traceMng())
, m_parallel_mng(pm)
, m_my_rank(pm->commRank())
, m_nb_rank(pm->commSize())
{}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

namespace
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

template <class SourceType, class TargetType> void MetisGraphGather::
_convertVector(const int size, ConstArrayView<SourceType> src, ArrayView<TargetType> dest)
{
template <class SourceType, class TargetType>
void convertVector(const int size, ConstArrayView<SourceType> src, ArrayView<TargetType> dest)
{
for (int i = 0; i < size; ++i) {
dest[i] = static_cast<TargetType>(src[i]);
}
if (size > src.size())
ARCANE_FATAL("Source size is too small size={0} src_size={1}", size, src.size());

if (size > dest.size())
ARCANE_FATAL("Target size is too small size={0} target_size={1}", size, dest.size());

for (int i = 0; i < size; ++i) {
dest[i] = static_cast<TargetType>(src[i]);
}
}
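// Usage sketch (hypothetical, not part of the diff): the helper copies 'size'
// elements with one static_cast per element, typically between the METIS idx_t
// arrays and the int buffers handed to the MPI_* calls below, and now fails
// fast when a view is too small:
//
//   UniqueArray<idx_t> src(n);      // e.g. my_graph.adjncy
//   UniqueArray<int> dest(n);       // buffer passed to MPI_Gatherv as MPI_INT
//   _convertVector(n, src.constView(), dest.view());     // copies n values
//   _convertVector(n + 1, src.constView(), dest.view()); // raises ARCANE_FATAL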

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

void MetisGraphGather::
gatherGraph(const bool need_part, const String&, MPI_Comm comm,
ConstArrayView<idx_t> vtxdist, const int ncon, MetisGraphView my_graph,
gatherGraph(const bool need_part, ConstArrayView<idx_t> vtxdist, const int ncon, MetisGraphView my_graph,
MetisGraph& graph)
{
int my_rank = -1;
int nb_rank = -1;
int io_rank = 0;

MPI_Comm_rank(comm, &my_rank);
MPI_Comm_size(comm, &nb_rank);

MPI_Comm comm = _getMPICommunicator(m_parallel_mng);
info() << "Metis: gather graph";

const Int32 io_rank = m_parallel_mng->masterIORank();
const Int32 nb_rank = m_nb_rank;
const bool is_master_io = m_parallel_mng->isMasterIO();
// buffers

UniqueArray<int> my_buffer; // input buffer for the MPI routines
UniqueArray<int> buffer; // output buffer for the MPI routines
UniqueArray<int> offset; // offsets for the gather operations

// number of vertices of the complete graph
if (my_rank == io_rank) {
graph.nb_vertices = CheckedConvert::toInt32(vtxdist[nb_rank]);

if (m_my_rank == io_rank) {
graph.nb_vertices = CheckedConvert::toInt32(vtxdist[m_nb_rank]);
graph.have_vsize = my_graph.have_vsize; // on suppose que tous les processeurs ont la meme valeur
graph.have_adjwgt = my_graph.have_adjwgt; // on suppose que tous les processeurs ont la meme valeur
} else {
@@ -73,7 +97,7 @@ gatherGraph(const bool need_part, const String&, MPI_Comm comm,
// retrieve the dimensions describing how the graph is distributed over the processors

my_buffer.resize(2);
if (my_rank == io_rank) {
if (m_my_rank == io_rank) {
offset.resize(nb_rank);
buffer.resize(2 * nb_rank);
}
@@ -87,7 +111,7 @@ gatherGraph(const bool need_part, const String&, MPI_Comm comm,
UniqueArray<int> nb_vertices_per_rank(nb_rank);
UniqueArray<int> adjncy_size_per_rank(nb_rank);
UniqueArray<int> nb_vertice_cons_per_rank(nb_rank);
if (my_rank == io_rank) {
if (is_master_io) {
for (int rank = 0; rank < nb_rank; ++rank) {
nb_vertices_per_rank[rank] = buffer[2 * rank];
adjncy_size_per_rank[rank] = buffer[2 * rank + 1];
@@ -101,52 +125,52 @@ gatherGraph(const bool need_part, const String&, MPI_Comm comm,
int max_buffer_size = my_graph.nb_vertices * ncon;
max_buffer_size = std::max(my_graph.adjncy.size(), max_buffer_size);
my_buffer.resize(max_buffer_size);
if (my_rank == io_rank) {
if (is_master_io) {
max_buffer_size = graph.nb_vertices * ncon;
max_buffer_size = std::max(adjcny_size, max_buffer_size);
buffer.resize(max_buffer_size);
}

// Retrieve the adjacency list.

if (my_rank == io_rank) {
if (is_master_io) {
offset[0] = 0;
for (int rank = 1; rank < nb_rank; ++rank) {
offset[rank] = offset[rank - 1] + adjncy_size_per_rank[rank - 1];
}
graph.adjncy.resize(adjcny_size);
}

convertVector(my_graph.adjncy.size(), my_graph.adjncy.constView(), my_buffer.view());
_convertVector(my_graph.adjncy.size(), my_graph.adjncy.constView(), my_buffer.view());

MPI_Gatherv(my_buffer.data(), my_graph.adjncy.size(), MPI_INT,
buffer.data(), adjncy_size_per_rank.data(),
offset.data(), MPI_INT, io_rank, comm);
convertVector(adjcny_size, buffer.constView(), graph.adjncy.view());

_convertVector(adjcny_size, buffer.constView(), graph.adjncy.view());

// Retrieve the list of edge weights of the graph.

if (my_graph.have_adjwgt) {
if (my_rank == io_rank) {
if (is_master_io) {
graph.adjwgt.resize(adjcny_size);
}
convertVector(my_graph.adjwgt.size(), my_graph.adjwgt.constView(), my_buffer.view());

_convertVector(my_graph.adjwgt.size(), my_graph.adjwgt.constView(), my_buffer.view());

MPI_Gatherv(my_buffer.data(), my_graph.adjwgt.size(), MPI_INT,
buffer.data(), adjncy_size_per_rank.data(),
offset.data(), MPI_INT, io_rank, comm);
convertVector(adjcny_size, buffer.constView(), graph.adjwgt.view());

_convertVector(adjcny_size, buffer.constView(), graph.adjwgt.view());
}
// Retrieve the list of indexes into the adjacency list.
// At this stage, the retrieved list is not correct, except for the last index.
// Indeed, the per-processor indexes are not the same as the indexes
// in the complete adjacency list.

if (my_rank == io_rank) {
if (is_master_io) {
graph.xadj.resize(graph.nb_vertices + 1);
graph.xadj[graph.nb_vertices] = graph.adjncy.size();
offset[0] = 0;
@@ -155,17 +179,17 @@ gatherGraph(const bool need_part, const String&, MPI_Comm comm,
}
}

convertVector(my_graph.nb_vertices, my_graph.xadj.constView(), my_buffer.view());
_convertVector(my_graph.nb_vertices, my_graph.xadj.constView(), my_buffer.view());

MPI_Gatherv(my_buffer.data(), my_graph.nb_vertices, MPI_INT,
buffer.data(), nb_vertices_per_rank.data(),
offset.data(), MPI_INT, io_rank, comm);

convertVector(graph.nb_vertices, buffer.constView(), graph.xadj.view());
_convertVector(graph.nb_vertices, buffer.constView(), graph.xadj.view());

// Index correction

if (my_rank == io_rank) {
if (is_master_io) {
int start_adjncy_index = 0;
for (int rank = 1; rank < nb_rank; ++rank) {
start_adjncy_index += adjncy_size_per_rank[rank-1];
@@ -178,71 +202,69 @@ gatherGraph(const bool need_part, const String&, MPI_Comm comm,
}
}

// Retrieve the list of "memory" weights on the graph vertices.

if (my_graph.have_vsize) {
if (my_rank == io_rank) {
if (is_master_io) {
graph.vsize.resize(graph.nb_vertices);
}

convertVector(my_graph.nb_vertices, my_graph.vsize.constView(), my_buffer.view());
_convertVector(my_graph.nb_vertices, my_graph.vsize.constView(), my_buffer.view());

MPI_Gatherv(my_buffer.data(), my_graph.nb_vertices, MPI_INT,
buffer.data(), nb_vertices_per_rank.data(),
offset.data(), MPI_INT, io_rank, comm);
convertVector(graph.nb_vertices, buffer.constView(), graph.vsize.view());

_convertVector(graph.nb_vertices, buffer.constView(), graph.vsize.view());
}

// Retrieve the list of sub-domain numbers from a previous partitioning

if (my_rank == io_rank) {
if (is_master_io) {
graph.part.resize(graph.nb_vertices);
}

if (need_part) {

convertVector(my_graph.nb_vertices, my_graph.part.constView(), my_buffer.view());
_convertVector(my_graph.nb_vertices, my_graph.part.constView(), my_buffer.view());

MPI_Gatherv(my_buffer.data(), my_graph.nb_vertices, MPI_INT,
buffer.data(), nb_vertices_per_rank.data(),
offset.data(), MPI_INT, io_rank, comm);
convertVector(graph.nb_vertices, buffer.constView(), graph.part.view());

_convertVector(graph.nb_vertices, buffer.constView(), graph.part.view());
}

// Retrieve the list of vertex weights of the graph. There can be several per vertex (ncon).

if (my_rank == io_rank) {
if (is_master_io) {
graph.vwgt.resize(graph.nb_vertices * ncon);
for (auto& x : offset) {
x *= ncon;
}
}
convertVector(my_graph.nb_vertices * ncon, my_graph.vwgt.constView(), my_buffer.view());

_convertVector(my_graph.nb_vertices * ncon, my_graph.vwgt.constView(), my_buffer.view());

MPI_Gatherv(my_buffer.data(), my_graph.nb_vertices * ncon, MPI_INT,
buffer.data(), nb_vertice_cons_per_rank.data(),
offset.data(), MPI_INT, io_rank, comm);
convertVector(graph.nb_vertices * ncon, buffer.constView(), graph.vwgt.view());

_convertVector(graph.nb_vertices * ncon, buffer.constView(), graph.vwgt.view());
}
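// Note (not part of the diff): only the master I/O rank receives the fully
// assembled graph; a caller would typically guard its use, e.g.:
//
//   MetisGraph full_graph;
//   gatherer.gatherGraph(need_part, vtxdist, ncon, my_graph, full_graph);
//   if (parallel_mng->isMasterIO()) {
//     // run the serial METIS partitioning on full_graph here (assumed usage)
//   }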

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

void MetisGraphGather::
scatterPart(MPI_Comm comm, ConstArrayView<idx_t> vtxdist, ConstArrayView<idx_t> part,
ArrayView<idx_t> my_part)
scatterPart(ConstArrayView<idx_t> vtxdist, ConstArrayView<idx_t> part, ArrayView<idx_t> my_part)
{
int my_rank = -1;
int nb_rank = -1;
MPI_Comm comm = _getMPICommunicator(m_parallel_mng);

const Int32 nb_rank = m_nb_rank;
const bool is_master_io = m_parallel_mng->isMasterIO();
int io_rank = 0;

MPI_Comm_rank(comm, &my_rank);
MPI_Comm_size(comm, &nb_rank);

UniqueArray<Int32> nb_vertices(nb_rank);
UniqueArray<Int32> displ(nb_rank);

@@ -254,17 +276,17 @@ scatterPart(MPI_Comm comm, ConstArrayView<idx_t> vtxdist, ConstArrayView<idx_t>
UniqueArray<int> send_buffer;
UniqueArray<int> recv_buffer(my_part.size());

if (my_rank == io_rank) {
if (is_master_io) {
send_buffer.resize(part.size());
}
convertVector(send_buffer.size(), part, send_buffer.view());

_convertVector(send_buffer.size(), part, send_buffer.view());

MPI_Scatterv(send_buffer.data(), nb_vertices.data(),
displ.data(), MPI_INT, recv_buffer.data(),
my_part.size(), MPI_INT, io_rank, comm);
convertVector(recv_buffer.size(), recv_buffer.constView(), my_part);

_convertVector(recv_buffer.size(), recv_buffer.constView(), my_part);
}
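// Usage sketch (hypothetical, not part of the diff): the master I/O rank owns
// the global 'part' array laid out according to 'vtxdist'; every rank receives
// its own slice back:
//
//   Int32 nb_my_vertices = CheckedConvert::toInt32(vtxdist[my_rank + 1] - vtxdist[my_rank]);
//   UniqueArray<idx_t> my_part(nb_my_vertices);
//   gatherer.scatterPart(vtxdist, part, my_part.view());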

/*---------------------------------------------------------------------------*/
