Fix build warnings reported by gcc on rzansel. #1099

Merged 4 commits on Aug 9, 2021
Changes from all commits
126 changes: 32 additions & 94 deletions src/VendorChecks/test/tstParmetis.cc
@@ -3,8 +3,7 @@
* \file VendorChecks/test/tstParmetis.cc
* \date Monday, May 16, 2016, 16:30 pm
* \brief Attempt to link to libparmetis and run a simple problem.
 * \note Copyright (C) 2016-2021, Triad National Security, LLC., All rights reserved. */
//------------------------------------------------------------------------------------------------//

#include "c4/ParallelUnitTest.hh"
@@ -41,117 +40,56 @@ void test_parmetis(rtt_c4::ParallelUnitTest &ut) {
idx_t wgtflag = 0;
// C-style numbering that starts from 0.
idx_t numflag = 0;
// This is used to specify the number of weights that each vertex has. It is also the number of
// balance constraints that must be satisfied.
idx_t ncon = 1;
// This is used to specify the number of sub-domains that are desired. Note that the number of
// subdomains is independent of the number of processors that call this routine.
idx_t nparts = 3;
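// Here nparts = 3 happens to match the three MPI ranks this test uses, but it need not.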
constexpr auto one = static_cast<real_t>(1);
// An array of size ncon * nparts that is used to specify the fraction of vertex weight that
// should be distributed to each sub-domain for each balance constraint. If all of the
// sub-domains are to be of the same size for every vertex weight, then each of the
// ncon * nparts elements should be set to a value of 1/nparts.
std::vector<real_t> tpwgts(ncon * nparts, (one / static_cast<real_t>(nparts)));
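// For example: with ncon = 1 and nparts = 3, tpwgts holds three entries of 1/3, i.e., an
// even split of vertex weight across the three sub-domains.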
// An array of size ncon that is used to specify the imbalance tolerance for each vertex weight,
// with 1 being perfect balance and nparts being perfect imbalance. A value of 1.05 for each of
// the ncon weights is recommended.
#if REALTYPEWIDTH == 64
real_t ubvec = 1.05;
#else
real_t ubvec = 1.05f;
#endif
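// metis.h sets REALTYPEWIDTH and typedefs real_t to double (when 64) or float (otherwise),
// so the literal above is written to match real_t exactly and avoid an implicit conversion.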
// This is an array of integers that is used to pass additional parameters for the routine.
std::vector<idx_t> options(4, 0);
// Upon successful completion, the number of edges that are cut by the partitioning is written to
// this parameter.
idx_t edgecut(0);

MPI_Comm_dup(MPI_COMM_WORLD, &rtt_c4::communicator);

// This is an array of size equal to the number of locally-stored vertices. Upon successful
// completion the partition vector of the locally-stored vertices is written to this array.
Check(MPI_PROC_ID < INT_MAX);
std::vector<idx_t> part(5, static_cast<idx_t>(MPI_PROC_ID));

// This array describes how the vertices of the graph are distributed among the processors. Its
// contents are identical for every processor.
std::vector<idx_t> vtxdist = {0, 5, 10, 15};
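// Here vtxdist = {0, 5, 10, 15}: rank p owns global vertices [vtxdist[p], vtxdist[p+1]),
// i.e., five of the fifteen vertices per rank across three ranks.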

// Dependent on each processor
if (MPI_PROC_ID == 0) {
adjncy.resize(13);

xadj = {0, 2, 5, 8, 11, 13};
adjncy = {1, 5, 0, 2, 6, 1, 3, 7, 2, 4, 8, 3, 9};
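// CSR layout: local vertex i has neighbors adjncy[xadj[i]] through adjncy[xadj[i+1] - 1].
// For example, vertex 0 on this rank has xadj[1] - xadj[0] = 2 neighbors: vertices 1 and 5.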
} else if (MPI_PROC_ID == 1) {
adjncy.resize(18);

xadj = {0, 3, 7, 11, 15, 18};
adjncy = {0, 6, 10, 1, 5, 7, 11, 2, 6, 8, 12, 3, 7, 9, 13, 4, 8, 14};
} else if (MPI_PROC_ID == 2) {
adjncy.resize(13);

xadj = {0, 2, 5, 8, 11, 13};
adjncy = {5, 11, 6, 10, 12, 7, 11, 13, 8, 12, 14, 9, 13};
}
if (MPI_PROC_ID == 0)
cout << "parmetis initialized." << '\n';
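
// A sketch (not part of this diff) of the ParMETIS_V3_PartKway call that consumes the
// arguments prepared above; the file's actual call sits below the fold of this diff, so
// treat argument details as illustrative. With wgtflag == 0, the vertex- and edge-weight
// arrays may be passed as nullptr.
int result = ParMETIS_V3_PartKway(vtxdist.data(), xadj.data(), adjncy.data(),
                                  /*vwgt=*/nullptr, /*adjwgt=*/nullptr, &wgtflag, &numflag,
                                  &ncon, &nparts, tpwgts.data(), &ubvec, options.data(),
                                  &edgecut, part.data(), &rtt_c4::communicator);
if (result == METIS_OK && MPI_PROC_ID == 0)
  cout << "edgecut = " << edgecut << '\n';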