From f7163f7a8c7125667f63e03f86f263daf59c07b2 Mon Sep 17 00:00:00 2001
From: Sean McBride
Date: Mon, 17 Jul 2023 21:14:24 -0400
Subject: [PATCH] H5_CHECK_OVERFLOW

---
 src/H5Dmpio.c | 10 +++++-----
 src/H5Smpio.c | 10 +++++-----
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 611908cb412..a3cbfa33acf 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -2903,7 +2903,7 @@ H5D__obtain_mpio_mode(H5D_io_info_t *io_info, H5D_dset_io_info_t *di, uint8_t as
     } /* end while */
 
     /* Gather all the information */
-    H5_CHECK_OVERFLOW(total_chunks, size_t, int)
+    H5_CHECK_OVERFLOW(total_chunks, size_t, int);
     if (MPI_SUCCESS != (mpi_code = MPI_Gather(io_mode_info, (int)total_chunks, MPI_BYTE, recv_io_mode_info,
                                               (int)total_chunks, MPI_BYTE, root, comm)))
         HMPI_GOTO_ERROR(FAIL, "MPI_Gather failed", mpi_code)
@@ -3865,7 +3865,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
              * future, this may become a problem and derived datatypes
              * will need to be used.
              */
-            H5_CHECK_OVERFLOW(mod_data_size, size_t, int)
+            H5_CHECK_OVERFLOW(mod_data_size, size_t, int);
 
             /* Send modification data to new owner */
             if (MPI_SUCCESS !=
@@ -3905,8 +3905,8 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
             HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL,
                         "too many shared chunks in parallel filtered write operation")
 
-        H5_CHECK_OVERFLOW(num_send_requests, size_t, int)
-        H5_CHECK_OVERFLOW(num_msgs_incoming, size_t, int)
+        H5_CHECK_OVERFLOW(num_send_requests, size_t, int);
+        H5_CHECK_OVERFLOW(num_msgs_incoming, size_t, int);
 
         /*
          * Allocate receive buffer and MPI_Request arrays for non-blocking
@@ -3942,7 +3942,7 @@ H5D__mpio_share_chunk_modification_data(H5D_filtered_collective_io_info_t *chunk
             if (MPI_SUCCESS != (mpi_code = MPI_Get_elements_x(&status, MPI_BYTE, &msg_size)))
                 HMPI_GOTO_ERROR(FAIL, "MPI_Get_elements_x failed", mpi_code)
 
-            H5_CHECK_OVERFLOW(msg_size, MPI_Count, int)
+            H5_CHECK_OVERFLOW(msg_size, MPI_Count, int);
 #else
             int msg_size = 0;
 
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index f304743e196..c99f90609fe 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -695,7 +695,7 @@ H5S__mpio_reg_hyper_type(H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
         fprintf(H5DEBUG(S), "%s: Flattened selection\n", __func__);
 #endif
         for (u = 0; u < rank; ++u) {
-            H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t)
+            H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t);
             d[u].start = (hssize_t)diminfo[u].start + sel_iter->u.hyp.sel_off[u];
             d[u].strid = diminfo[u].stride;
             d[u].block = diminfo[u].block;
@@ -729,7 +729,7 @@ H5S__mpio_reg_hyper_type(H5S_t *space, size_t elmt_size, MPI_Datatype *new_type,
         fprintf(H5DEBUG(S), "%s: Non-flattened selection\n", __func__);
 #endif
         for (u = 0; u < rank; ++u) {
-            H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t)
+            H5_CHECK_OVERFLOW(diminfo[u].start, hsize_t, hssize_t);
             d[u].start = (hssize_t)diminfo[u].start + space->select.offset[u];
             d[u].strid = diminfo[u].stride;
             d[u].block = diminfo[u].block;
@@ -1178,7 +1178,7 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down, size_t e
                 /* Store displacement & block length */
                 disp[outercount] = (MPI_Aint)elmt_size * (MPI_Aint)span->low;
 
-                H5_CHECK_OVERFLOW(nelmts, hsize_t, int)
+                H5_CHECK_OVERFLOW(nelmts, hsize_t, int);
                 blocklen[outercount] = (int)nelmts;
 
                 if (bigio_count < (hsize_t)blocklen[outercount])
@@ -1287,7 +1287,7 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down, size_t e
                 nelmts = (span->high - span->low) + 1;
 
                 /* Build the MPI datatype for this node */
-                H5_CHECK_OVERFLOW(nelmts, hsize_t, int)
+                H5_CHECK_OVERFLOW(nelmts, hsize_t, int);
                 if (MPI_SUCCESS != (mpi_code = MPI_Type_create_hvector((int)nelmts, 1, stride, down_type,
                                                                        &inner_type[outercount])))
                     HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hvector failed", mpi_code)
@@ -1297,7 +1297,7 @@ H5S__obtain_datatype(H5S_hyper_span_info_t *spans, const hsize_t *down, size_t e
         } /* end while */
 
         /* Building the whole vector datatype */
-        H5_CHECK_OVERFLOW(outercount, size_t, int)
+        H5_CHECK_OVERFLOW(outercount, size_t, int);
         if (MPI_SUCCESS != (mpi_code = MPI_Type_create_struct((int)outercount, blocklen, disp, inner_type,
                                                               &spans->op_info[op_info_i].u.down_type)))
             HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_struct failed", mpi_code)
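Note on the change: every hunk above makes the same mechanical edit, adding a trailing semicolon to H5_CHECK_OVERFLOW invocations so each call site parses as an ordinary statement. For context, the sketch below shows the general cast-and-assert shape such an overflow-check macro typically has. It is an illustrative assumption, not HDF5's actual definition (the real macro lives in H5private.h), and the name CHECK_OVERFLOW_SKETCH is made up for this example.

/* Illustrative sketch only -- not the real H5_CHECK_OVERFLOW.
 * Idea: cast the value to the narrower destination type, cast it back,
 * and assert that nothing was lost, so that narrowing casts such as
 * (int)total_chunks are verified in debug builds and cost nothing in
 * release builds. */
#include <assert.h>

#ifndef NDEBUG
#define CHECK_OVERFLOW_SKETCH(var, vartype, casttype)                          \
    do {                                                                       \
        casttype _chk_tmp = (casttype)(var);                                   \
        assert((var) == (vartype)_chk_tmp);                                    \
    } while (0)
#else
#define CHECK_OVERFLOW_SKETCH(var, vartype, casttype) ((void)0)
#endif

/* Usage mirroring a call site from the patch (hypothetical variable): */
/*     size_t total_chunks = 42;                                        */
/*     CHECK_OVERFLOW_SKETCH(total_chunks, size_t, int);                */

With a do { ... } while (0) or ((void)0) expansion, the trailing semicolon the patch adds is what lets each invocation stand as a single statement (for example inside an unbraced if/else), which is why every call site now ends with one.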