Commit

[arcane,std] Remove compilation warnings in 'MetisGraphGather' and 'MetisWrapper'.

These are essentially implicit conversions from 'idx_t' to 'Int32'.
grospelliergilles committed Oct 3, 2024
1 parent 41974b9 commit 7638617
Showing 4 changed files with 69 additions and 107 deletions.
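For context: the warnings being removed come from implicit narrowing of ParMetis' 'idx_t' (commonly a 64-bit integer) into Arcane's 'Int32', and the fix routes each narrowing through 'CheckedConvert::toInt32'. The sketch below is a minimal standalone approximation of that checked-narrowing pattern, assuming a 64-bit 'idx_t'; the helper name 'checked_to_int32' and the exception type are illustrative only, not Arcane's actual implementation.

#include <cstdint>
#include <limits>
#include <stdexcept>

using idx_like = std::int64_t; // stand-in for a ParMetis idx_t built with 64-bit indices

// Explicit, range-checked narrowing: silences conversion warnings without
// hiding genuine overflow (illustrative only, not Arcane's CheckedConvert).
inline std::int32_t checked_to_int32(idx_like v)
{
  if (v < std::numeric_limits<std::int32_t>::min() ||
      v > std::numeric_limits<std::int32_t>::max())
    throw std::overflow_error("value does not fit in Int32");
  return static_cast<std::int32_t>(v);
}

int main()
{
  idx_like vtxdist_last = 123456; // e.g. vtxdist[nb_rank], the global vertex count
  std::int32_t nb_vertices = checked_to_int32(vtxdist_last);
  return nb_vertices == 123456 ? 0 : 1;
}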
44 changes: 25 additions & 19 deletions arcane/src/arcane/std/MetisGraphGather.cc
@@ -1,17 +1,20 @@
// -*- tab-width: 2; indent-tabs-mode: nil; coding: utf-8-with-signature -*-
//-----------------------------------------------------------------------------
// Copyright 2000-2023 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
// Copyright 2000-2024 CEA (www.cea.fr) IFPEN (www.ifpenergiesnouvelles.com)
// See the top-level COPYRIGHT file for details.
// SPDX-License-Identifier: Apache-2.0
//-----------------------------------------------------------------------------
/*---------------------------------------------------------------------------*/
/* MetisGraphGather.h (C) 2000-2023 */
/* MetisGraphGather.cc (C) 2000-2024 */
/* */
/* Regroupement de graphes de 'Parmetis'. */
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

#include "arcane/utils/CheckedConvert.h"

#include "arcane/std/MetisGraphGather.h"

#include <algorithm>

/*---------------------------------------------------------------------------*/
@@ -25,20 +28,21 @@ namespace Arcane

namespace
{
template <class src_type, class dest_type>
void convertVector(const int size, ConstArrayView<src_type> src, ArrayView<dest_type> dest)
{
for (int i = 0; i < size; ++i) {
dest[i] = src[i];
template <class SourceType, class TargetType>
void convertVector(const int size, ConstArrayView<SourceType> src, ArrayView<TargetType> dest)
{
for (int i = 0; i < size; ++i) {
dest[i] = static_cast<TargetType>(src[i]);
}
}
}
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

void MetisGraphGather::
gatherGraph(const bool need_part, const String&, MPI_Comm comm,
ConstArrayView<idx_t> vtxdist, const idx_t ncon, MetisGraphView my_graph,
ConstArrayView<idx_t> vtxdist, const int ncon, MetisGraphView my_graph,
MetisGraph& graph)
{
int my_rank = -1;
@@ -57,16 +61,16 @@ gatherGraph(const bool need_part, const String&, MPI_Comm comm,
// nombre de sommets du graph complet

if (my_rank == io_rank) {
graph.nb_vertices = vtxdist[nb_rank];
graph.nb_vertices = CheckedConvert::toInt32(vtxdist[nb_rank]);
graph.have_vsize = my_graph.have_vsize; // on suppose que tous les processeurs ont la meme valeur
graph.have_adjwgt = my_graph.have_adjwgt; // on suppose que tous les processeurs ont la meme valeur
} else {
graph.nb_vertices = 0;
graph.have_vsize = false;
graph.have_adjwgt = false;
}
// recupere les dimensions caracterisant la repartition du graph sur les processeurs

// récupère les dimensions caractérisant la répartition du graphe sur les processeurs

my_buffer.resize(2);
if (my_rank == io_rank) {
@@ -166,7 +170,9 @@ gatherGraph(const bool need_part, const String&, MPI_Comm comm,
for (int rank = 1; rank < nb_rank; ++rank) {
start_adjncy_index += adjncy_size_per_rank[rank-1];
//std::cerr << "rank " << rank << " offset " << start_adjncy_index << " vtxdist[rank] " << vtxdist[rank] << " vtxdist[rank+1] " << vtxdist[rank+1] << std::endl;
for (int ixadj = vtxdist[rank]; ixadj < vtxdist[rank+1]; ++ixadj) {
Int32 vtxdist_rank = CheckedConvert::toInt32(vtxdist[rank]);
Int32 vtxdist_rank_plus_one = CheckedConvert::toInt32(vtxdist[rank + 1]);
for (Int32 ixadj = vtxdist_rank; ixadj < vtxdist_rank_plus_one; ++ixadj) {
graph.xadj[ixadj] += start_adjncy_index;
}
}
@@ -236,13 +242,13 @@ scatterPart(MPI_Comm comm, ConstArrayView<idx_t> vtxdist, ConstArrayView<idx_t>

MPI_Comm_rank(comm, &my_rank);
MPI_Comm_size(comm, &nb_rank);
UniqueArray<int> nb_vertices(nb_rank);
UniqueArray<int> displ(nb_rank);

UniqueArray<Int32> nb_vertices(nb_rank);
UniqueArray<Int32> displ(nb_rank);

for (int rank = 0; rank < nb_rank; ++rank) {
displ[rank] = vtxdist[rank];
nb_vertices[rank] = vtxdist[rank+1] - vtxdist[rank];
displ[rank] = CheckedConvert::toInt32(vtxdist[rank]);
nb_vertices[rank] = CheckedConvert::toInt32(vtxdist[rank + 1] - vtxdist[rank]);
}

UniqueArray<int> send_buffer;
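The reworked 'convertVector' helper above spells out the narrowing with a 'static_cast', the usual way to mark an integer conversion as intentional when copying between buffers of different widths. Below is a self-contained sketch of the same idea, with plain pointers standing in for Arcane's 'ConstArrayView'/'ArrayView' (an assumption made purely to keep the example standalone):

#include <cstdint>
#include <cstddef>

// Copy a buffer element by element with an explicit cast, mirroring the
// shape of convertVector; plain pointers replace the Arcane array views.
template <class SourceType, class TargetType>
void convert_buffer(std::size_t size, const SourceType* src, TargetType* dest)
{
  for (std::size_t i = 0; i < size; ++i)
    dest[i] = static_cast<TargetType>(src[i]); // intentional narrowing, no warning
}

int main()
{
  std::int64_t src[3] = {10, 20, 30};
  std::int32_t dest[3] = {};
  convert_buffer(3, src, dest);
  return dest[2] == 30 ? 0 : 1;
}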
6 changes: 4 additions & 2 deletions arcane/src/arcane/std/MetisGraphGather.h
@@ -17,6 +17,7 @@
#include "arcane/utils/Array.h"
#include "arcane/utils/ArrayView.h"
#include "arcane/utils/String.h"

#include "arcane/std/MetisGraph.h"

#include <parmetis.h>
@@ -34,12 +35,13 @@ namespace Arcane
class MetisGraphGather
{
public:

/*!
* \brief Effectue un regroupement du graphe ParMetis "my_graph" sur le processeur de
* rang 0 dans le communicateur "comm". Le graph resultat est "graph".
* rang 0 dans le communicateur "comm". Le graph résultat est "graph".
*/
void gatherGraph(const bool need_part, const String& comm_name, MPI_Comm comm,
ConstArrayView<idx_t> vtxdist, const idx_t ncon, MetisGraphView my_graph,
ConstArrayView<idx_t> vtxdist, const int ncon, MetisGraphView my_graph,
MetisGraph& graph);

/*!
110 changes: 24 additions & 86 deletions arcane/src/arcane/std/MetisWrapper.cc
@@ -8,10 +8,11 @@
/* MetisWrapper.cc (C) 2000-2024 */
/* */
/* Wrapper autour des appels de Parmetis. */
/* Calcule une somme de contrôle globale des entrées/sorties Metis. */
/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

#include "arcane/utils/CheckedConvert.h"

#include "arcane/core/IParallelMng.h"

#include "arcane/std/MetisGraph.h"
@@ -27,24 +28,12 @@
namespace Arcane
{

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

using MetisCall = std::function<int(MPI_Comm& comm, MetisGraphView graph,
ArrayView<idx_t> vtxdist)>;

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

namespace
{

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*!
* \brief Appelle Metis sans regroupement de graph.
*/
int
int MetisWrapper::
_callMetis(MPI_Comm comm, ArrayView<idx_t> vtxdist, MetisGraphView my_graph,
MetisCall& metis)
{
@@ -56,8 +45,8 @@ _callMetis(MPI_Comm comm, ArrayView<idx_t> vtxdist, MetisGraphView my_graph,
/*!
* \brief Appelle Metis en regroupant le graph sur 2 processeurs.
*/
int
_callMetisWith2Processors(const idx_t ncon, const bool need_part, MPI_Comm comm,
int MetisWrapper::
_callMetisWith2Processors(const Int32 ncon, const bool need_part, MPI_Comm comm,
ConstArrayView<idx_t> vtxdist, MetisGraphView my_graph,
MetisCall& metis)
{
@@ -129,56 +118,6 @@ _callMetisWith2Processors(const idx_t ncon, const bool need_part, MPI_Comm comm,
return ierr;
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
/*!
* \brief Appelle Metis en regroupant le graph sur 1 seul processeur.
*
* \warning Cette méthode n'est pas compatible avec la routine AdaptiveRepart de ParMetis qui
* est buggee lorsqu'il n'y a qu'un seul processeur.
*/
int
_callMetisWith1Processor(const idx_t ncon, const bool need_part, MPI_Comm comm,
ConstArrayView<idx_t> vtxdist, MetisGraphView my_graph,
MetisCall& metis)
{
int my_rank = -1;
int nb_rank = -1;

MPI_Comm_rank(comm, &my_rank);
MPI_Comm_size(comm, &nb_rank);

MetisGraph metis_graph;
MetisGraphGather metis_gather;

metis_gather.gatherGraph(need_part, "maincomm", comm, vtxdist, ncon,
my_graph, metis_graph);

MPI_Comm metis_comm = MPI_COMM_SELF;

UniqueArray<idx_t> metis_vtxdist(2);
metis_vtxdist[0] = 0;
metis_vtxdist[1] = vtxdist[vtxdist.size() - 1];

int ierr = 0;

if (my_rank == 0) {
MetisGraphView metis_graph_view(metis_graph);
ierr = metis(metis_comm, metis_graph_view, metis_vtxdist);
}

MPI_Bcast(&ierr, 1, MPI_INT, 0, comm);

metis_gather.scatterPart(comm, vtxdist, metis_graph.part, my_graph.part);

return ierr;
}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

}

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

@@ -247,12 +186,13 @@ callPartKway(IParallelMng* pm, const bool print_digest, const bool gather,
MetisGraphView my_graph;

ArrayView<idx_t> offset(nb_rank + 1, vtxdist);
my_graph.nb_vertices = offset[my_rank+1] - offset[my_rank];
my_graph.nb_vertices = CheckedConvert::toInt32(offset[my_rank + 1] - offset[my_rank]);
my_graph.xadj = ArrayView<idx_t>(my_graph.nb_vertices + 1, xadj);
idx_t adjncy_size = my_graph.xadj[my_graph.nb_vertices];
my_graph.adjncy = ArrayView<idx_t>(adjncy_size, adjncy);
my_graph.vwgt = ArrayView<idx_t>(my_graph.nb_vertices * (*ncon), vwgt);
my_graph.adjwgt = ArrayView<idx_t>(adjncy_size, adjwgt);
const Int32 adjacency_size = CheckedConvert::toInt32(my_graph.xadj[my_graph.nb_vertices]);
const Int32 nb_con = CheckedConvert::toInt32(*ncon);
my_graph.adjncy = ArrayView<idx_t>(adjacency_size, adjncy);
my_graph.vwgt = ArrayView<idx_t>(CheckedConvert::multiply(my_graph.nb_vertices, nb_con), vwgt);
my_graph.adjwgt = ArrayView<idx_t>(adjacency_size, adjwgt);
my_graph.part = ArrayView<idx_t>(my_graph.nb_vertices, part);
my_graph.have_vsize = false;
my_graph.have_adjwgt = true;
@@ -267,24 +207,21 @@ callPartKway(IParallelMng* pm, const bool print_digest, const bool gather,
}

if (gather && nb_rank > 2) {
// tm->info() << "Partionnement metis avec regroupement sur 1 processeur";
// ierr = callMetisWith1Processor(*ncon, false, *comm, offset, my_graph, partkway);

// Normalement c'est plus rapide ...
tm->info() << "Partionnement metis : regroupement " << nb_rank << " -> 2 processeurs";
ierr = _callMetisWith2Processors(*ncon, false, *comm, offset, my_graph, partkway);
tm->info() << "Partitioning metis : re-grouping " << nb_rank << " -> 2 rank";
ierr = _callMetisWith2Processors(nb_con, false, *comm, offset, my_graph, partkway);
}
else {
tm->info() << "Partionnement metis : nb processeurs = " << nb_rank;
tm->info() << "Partitioning metis : nb rank = " << nb_rank;
ierr = _callMetis(*comm, offset, my_graph, (nb_rank==1) ? partkway_seq : partkway);
}
tm->info() << "End Partionnement metis";

tm->info() << "End Partitioning metis";
if (print_digest){
MetisGraphDigest d(pm);
String digest = d.computeOutputDigest(my_graph, edgecut);
if (my_rank == 0) {
tm->info() << "signature des sorties Metis = " << digest;
tm->info() << "hash for Metis output = " << digest;
}
}

Expand Down Expand Up @@ -348,13 +285,14 @@ callAdaptiveRepart(IParallelMng* pm, const bool print_digest, const bool gather,


ArrayView<idx_t> offset(nb_rank + 1, vtxdist);
my_graph.nb_vertices = offset[my_rank+1] - offset[my_rank];
my_graph.nb_vertices = CheckedConvert::toInt32(offset[my_rank + 1] - offset[my_rank]);
my_graph.xadj = ArrayView<idx_t>(my_graph.nb_vertices + 1, xadj);
idx_t adjncy_size = my_graph.xadj[my_graph.nb_vertices];
my_graph.adjncy = ArrayView<idx_t>(adjncy_size, adjncy);
my_graph.vwgt = ArrayView<idx_t>(my_graph.nb_vertices * (*ncon), vwgt);
const Int32 adjacency_size = CheckedConvert::toInt32(my_graph.xadj[my_graph.nb_vertices]);
const Int32 nb_con = CheckedConvert::toInt32(*ncon);
my_graph.adjncy = ArrayView<idx_t>(adjacency_size, adjncy);
my_graph.vwgt = ArrayView<idx_t>(CheckedConvert::multiply(my_graph.nb_vertices, nb_con), vwgt);
my_graph.vsize = ArrayView<idx_t>(my_graph.nb_vertices, vsize);
my_graph.adjwgt = ArrayView<idx_t>(adjncy_size, adjwgt);
my_graph.adjwgt = ArrayView<idx_t>(adjacency_size, adjwgt);
my_graph.part = ArrayView<idx_t>(my_graph.nb_vertices, part);
my_graph.have_vsize = true;
my_graph.have_adjwgt = true;
@@ -371,7 +309,7 @@ callAdaptiveRepart(IParallelMng* pm, const bool print_digest, const bool gather,

if (gather && nb_rank > 2) {
tm->info() << "Partionnement metis : regroupement " << nb_rank << " -> 2 processeurs";
ierr = _callMetisWith2Processors(*ncon, true, *comm, offset, my_graph, repart_func);
ierr = _callMetisWith2Processors(nb_con, true, *comm, offset, my_graph, repart_func);
}
else {
tm->info() << "Partionnement metis : nb processeurs = " << nb_rank;
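In 'callPartKway' and 'callAdaptiveRepart' the vertex-weight extent 'nb_vertices * ncon' now goes through 'CheckedConvert::multiply', so an oversized product is reported instead of silently wrapping before the 'ArrayView' is built. The snippet below is an illustrative overflow-checked multiply for two 'Int32' extents, not Arcane's implementation:

#include <cstdint>
#include <limits>
#include <stdexcept>

// Multiply two Int32 extents via a 64-bit intermediate and reject results
// that do not fit back into Int32 (illustrative stand-in for CheckedConvert::multiply).
inline std::int32_t checked_multiply_int32(std::int32_t a, std::int32_t b)
{
  const std::int64_t product = static_cast<std::int64_t>(a) * static_cast<std::int64_t>(b);
  if (product > std::numeric_limits<std::int32_t>::max() ||
      product < std::numeric_limits<std::int32_t>::min())
    throw std::overflow_error("Int32 multiplication overflows");
  return static_cast<std::int32_t>(product);
}

int main()
{
  const std::int32_t nb_vertices = 200000;
  const std::int32_t nb_con = 4; // number of vertex weights per vertex
  return checked_multiply_int32(nb_vertices, nb_con) == 800000 ? 0 : 1;
}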
16 changes: 16 additions & 0 deletions arcane/src/arcane/std/MetisWrapper.h
@@ -18,12 +18,14 @@

#include <parmetis.h>
#include <mpi.h>
#include <functional>

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/

namespace Arcane
{
class MetisGraphView;

/*---------------------------------------------------------------------------*/
/*---------------------------------------------------------------------------*/
@@ -32,7 +34,13 @@ namespace Arcane
*/
class MetisWrapper
{
private:

using MetisCall = std::function<int(MPI_Comm& comm, MetisGraphView graph,
ArrayView<idx_t> vtxdist)>;

public:

/*!
* \brief Simple wrapper autour de la routine ParMetis "ParMETIS_V3_PartKway".
*
@@ -59,6 +67,14 @@ class MetisWrapper
idx_t *vsize, idx_t *adjwgt, idx_t *wgtflag, idx_t *numflag, idx_t *ncon,
idx_t *nparts, real_t *tpwgts, real_t *ubvec, real_t *ipc2redist,
idx_t *options, idx_t *edgecut, idx_t *part, MPI_Comm *comm);

private:

int _callMetis(MPI_Comm comm, ArrayView<idx_t> vtxdist, MetisGraphView my_graph,
MetisCall& metis);
int _callMetisWith2Processors(const Int32 ncon, const bool need_part, MPI_Comm comm,
ConstArrayView<idx_t> vtxdist, MetisGraphView my_graph,
MetisCall& metis);
};

/*---------------------------------------------------------------------------*/
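The header change keeps the 'MetisCall' alias (a 'std::function' taking the communicator, a graph view and the 'vtxdist' array) private to 'MetisWrapper', so the same '_callMetis' / '_callMetisWith2Processors' helpers can be handed either the parallel or the sequential ParMetis entry point. A minimal sketch of that dispatch pattern, with placeholder types standing in for the Arcane and MPI ones (nothing below is the actual Arcane code):

#include <functional>
#include <iostream>

// Placeholder stand-ins for MPI_Comm, MetisGraphView and ArrayView<idx_t>.
struct FakeComm {};
struct FakeGraphView {};
using FakeVtxDist = const long*;

using MetisCall = std::function<int(FakeComm&, FakeGraphView, FakeVtxDist)>;

// Mirrors the shape of MetisWrapper::_callMetis: forward to whichever
// ParMetis routine the caller wrapped in the std::function.
int callMetis(FakeComm& comm, FakeVtxDist vtxdist, FakeGraphView graph, MetisCall& metis)
{
  return metis(comm, graph, vtxdist);
}

int main()
{
  MetisCall partkway = [](FakeComm&, FakeGraphView, FakeVtxDist) {
    std::cout << "would call ParMETIS_V3_PartKway here\n";
    return 0; // in the real code, the ParMetis return value
  };
  FakeComm comm;
  FakeGraphView graph;
  const long vtxdist[] = {0, 10, 20};
  return callMetis(comm, vtxdist, graph, partkway);
}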
