Commit
Merge branch 'fix.meshComm.integerOverflow' into 'master.dev'
[fix.meshComm.integerOverflow] Use MPI_STRUCT to avoid integer overflow in StartCommunicateMeshReadin

See merge request piclas/piclas!933
pnizenkov committed Apr 8, 2024
2 parents 31d376e + ba8211f commit 321b924
Showing 2 changed files with 34 additions and 9 deletions.
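
Background on the fix: the count and displacement arguments of MPI_ALLGATHERV/MPI_IALLGATHERV are default 4-byte integers, so scaled values such as ELEMINFOSIZE*recvcountElem, (SIDEINFOSIZE+1)*displsSide or 3*displsNode overflow once they exceed HUGE of a 4-byte INTEGER, i.e. 2147483647. For instance, for a purely hexahedral mesh with 8 corner nodes stored per element and 3 coordinates per node, the node-coordinate counts pass that limit at around 90 million elements. Wrapping the data of one element, side or node into a derived MPI datatype keeps the counts and displacements at the number of entities instead of the number of scalars. The following is a minimal, self-contained sketch of that pattern, not PICLas code: the value of ELEMINFOSIZE, the toy partition and the program name are purely illustrative.

! Standalone sketch (illustrative, not part of the commit): gather an integer array
! "ElemInfo" across ranks with MPI_ALLGATHERV, using a derived datatype that bundles
! the ELEMINFOSIZE integers of one element so counts/displacements stay per-element.
PROGRAM struct_gather_sketch
USE mpi
IMPLICIT NONE
INTEGER,PARAMETER    :: ELEMINFOSIZE = 6          ! illustrative value
INTEGER              :: iError,myRank,nProcs,iProc,iElem
INTEGER              :: nGlobalElems,nLocalElems,offsetElem
INTEGER              :: MPI_STRUCT_ELEM
INTEGER              :: MPI_LENGTH(1),MPI_TYPE(1)
INTEGER(KIND=MPI_ADDRESS_KIND) :: MPI_DISPLACEMENT(1)
INTEGER,ALLOCATABLE  :: recvcountElem(:),displsElem(:)
INTEGER,ALLOCATABLE  :: ElemInfo(:,:)

CALL MPI_INIT(iError)
CALL MPI_COMM_RANK(MPI_COMM_WORLD,myRank,iError)
CALL MPI_COMM_SIZE(MPI_COMM_WORLD,nProcs,iError)

! Toy partition: every rank owns nLocalElems consecutive elements
nLocalElems  = 5
nGlobalElems = nLocalElems*nProcs
offsetElem   = nLocalElems*myRank

ALLOCATE(recvcountElem(0:nProcs-1),displsElem(0:nProcs-1))
DO iProc = 0,nProcs-1
  recvcountElem(iProc) = nLocalElems       ! counted in elements, not integers
  displsElem(iProc)    = nLocalElems*iProc ! offset in elements, not integers
END DO

! With MPI_IN_PLACE every rank only fills its own slice before the gather
ALLOCATE(ElemInfo(ELEMINFOSIZE,nGlobalElems))
ElemInfo = 0
DO iElem = offsetElem+1,offsetElem+nLocalElems
  ElemInfo(:,iElem) = iElem
END DO

! One "element" = ELEMINFOSIZE contiguous integers; using it as the receive datatype
! avoids the ELEMINFOSIZE*count products that can exceed 2147483647 for huge meshes
MPI_LENGTH       = ELEMINFOSIZE
MPI_DISPLACEMENT = 0
MPI_TYPE         = MPI_INTEGER
CALL MPI_TYPE_CREATE_STRUCT(1,MPI_LENGTH,MPI_DISPLACEMENT,MPI_TYPE,MPI_STRUCT_ELEM,iError)
CALL MPI_TYPE_COMMIT(MPI_STRUCT_ELEM,iError)

CALL MPI_ALLGATHERV(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,ElemInfo,recvcountElem &
                   ,displsElem,MPI_STRUCT_ELEM,MPI_COMM_WORLD,iError)

IF (myRank.EQ.0) WRITE(*,*) 'ElemInfo(1,:) =',ElemInfo(1,:)

CALL MPI_TYPE_FREE(MPI_STRUCT_ELEM,iError)
DEALLOCATE(ElemInfo,recvcountElem,displsElem)
CALL MPI_FINALIZE(iError)
END PROGRAM struct_gather_sketch

Built with any MPI Fortran wrapper (e.g. mpif90 sketch.f90) and run on a few ranks, each rank ends up with the complete ElemInfo array while every count handed to MPI stays at the element level, which is exactly what the diff below does for ElemInfo_Shared, SideInfo_Shared and NodeCoords_Shared.
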
1 change: 1 addition & 0 deletions src/mpi/mpi_shared_vars.f90
@@ -56,6 +56,7 @@ MODULE MOD_MPI_Shared_Vars
 INTEGER,ALLOCATABLE:: displsElem(:),recvcountElem(:)
 INTEGER,ALLOCATABLE:: displsSide(:),recvcountSide(:)
 INTEGER,ALLOCATABLE:: displsNode(:),recvcountNode(:)
+INTEGER :: MPI_STRUCT_ELEM,MPI_STRUCT_SIDE,MPI_STRUCT_NODE
 #endif /*USE_MPI*/

 ! Surface sampling
42 changes: 33 additions & 9 deletions src/particles/particle_mesh/particle_mesh_readin.f90
@@ -145,6 +145,8 @@ SUBROUTINE StartCommunicateMeshReadin()
 #if USE_MPI
 INTEGER :: iProc
 INTEGER :: offsetNodeID!,nNodeIDs
+INTEGER :: MPI_LENGTH(1),MPI_TYPE(1)
+INTEGER(KIND=MPI_ADDRESS_KIND) :: MPI_DISPLACEMENT(1)
 #endif /*USE_MPI*/
 !===================================================================================================================================

@@ -260,14 +262,33 @@ SUBROUTINE StartCommunicateMeshReadin()

 ! Gather mesh information in a non-blocking way
 ALLOCATE(MPI_COMM_LEADERS_REQUEST(1:4))
-CALL MPI_IALLGATHERV(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,ElemInfo_Shared,ELEMINFOSIZE *recvcountElem &
-,ELEMINFOSIZE*displsElem ,MPI_INTEGER ,MPI_COMM_LEADERS_SHARED,MPI_COMM_LEADERS_REQUEST(1),IERROR)
-CALL MPI_IALLGATHERV(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,SideInfo_Shared,(SIDEINFOSIZE+1)*recvcountSide &
-,(SIDEINFOSIZE+1)*displsSide ,MPI_INTEGER ,MPI_COMM_LEADERS_SHARED,MPI_COMM_LEADERS_REQUEST(2),IERROR)
+! ElemInfo_Shared
+MPI_LENGTH = ELEMINFOSIZE
+MPI_DISPLACEMENT = 0 ! 0*SIZEOF(MPI_SIZE)
+MPI_TYPE = MPI_INTEGER
+CALL MPI_TYPE_CREATE_STRUCT(1,MPI_LENGTH,MPI_DISPLACEMENT,MPI_TYPE,MPI_STRUCT_ELEM,iError)
+CALL MPI_TYPE_COMMIT(MPI_STRUCT_ELEM,iError)
+CALL MPI_IALLGATHERV(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,ElemInfo_Shared,recvcountElem &
+,displsElem ,MPI_STRUCT_ELEM ,MPI_COMM_LEADERS_SHARED,MPI_COMM_LEADERS_REQUEST(1),IERROR)
+! SideInfo_Shared
+MPI_LENGTH = SIDEINFOSIZE+1
+MPI_DISPLACEMENT = 0 ! 0*SIZEOF(MPI_SIZE)
+MPI_TYPE = MPI_INTEGER
+CALL MPI_TYPE_CREATE_STRUCT(1,MPI_LENGTH,MPI_DISPLACEMENT,MPI_TYPE,MPI_STRUCT_SIDE,iError)
+CALL MPI_TYPE_COMMIT(MPI_STRUCT_SIDE,iError)
+CALL MPI_IALLGATHERV(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,SideInfo_Shared, recvcountSide &
+,displsSide ,MPI_STRUCT_SIDE ,MPI_COMM_LEADERS_SHARED,MPI_COMM_LEADERS_REQUEST(2),IERROR)
+! NodeInfo_Shared
 CALL MPI_IALLGATHERV(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,NodeInfo_Shared, recvcountNode &
-,displsNode ,MPI_INTEGER ,MPI_COMM_LEADERS_SHARED,MPI_COMM_LEADERS_REQUEST(3),IERROR)
-CALL MPI_IALLGATHERV(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,NodeCoords_Shared,3 *recvcountNode &
-,3*displsNode ,MPI_DOUBLE_PRECISION,MPI_COMM_LEADERS_SHARED,MPI_COMM_LEADERS_REQUEST(4),IERROR)
+,displsNode ,MPI_INTEGER ,MPI_COMM_LEADERS_SHARED,MPI_COMM_LEADERS_REQUEST(3),IERROR)
+! NodeCoords_Shared
+MPI_LENGTH = 3
+MPI_DISPLACEMENT = 0 ! 0*SIZEOF(MPI_SIZE)
+MPI_TYPE = MPI_DOUBLE_PRECISION
+CALL MPI_TYPE_CREATE_STRUCT(1,MPI_LENGTH,MPI_DISPLACEMENT,MPI_TYPE,MPI_STRUCT_NODE,iError)
+CALL MPI_TYPE_COMMIT(MPI_STRUCT_NODE,iError)
+CALL MPI_IALLGATHERV(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,NodeCoords_Shared,recvcountNode &
+,displsNode ,MPI_STRUCT_NODE ,MPI_COMM_LEADERS_SHARED,MPI_COMM_LEADERS_REQUEST(4),IERROR)
 END IF
 #endif /*USE_MPI*/

@@ -397,8 +418,11 @@ SUBROUTINE FinishCommunicateMeshReadin()
 CALL BARRIER_AND_SYNC(SideInfo_Shared_Win,MPI_COMM_SHARED)

 IF (myComputeNodeRank.EQ.0) THEN
-CALL MPI_ALLGATHERV(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,SideInfo_Shared,(SIDEINFOSIZE+1)*recvcountSide &
-,(SIDEINFOSIZE+1)*displsSide,MPI_INTEGER ,MPI_COMM_LEADERS_SHARED,IERROR)
+CALL MPI_ALLGATHERV(MPI_IN_PLACE,0,MPI_DATATYPE_NULL,SideInfo_Shared,recvcountSide &
+,displsSide,MPI_STRUCT_SIDE ,MPI_COMM_LEADERS_SHARED,IERROR)
+CALL MPI_TYPE_FREE(MPI_STRUCT_ELEM,iError)
+CALL MPI_TYPE_FREE(MPI_STRUCT_SIDE,iError)
+CALL MPI_TYPE_FREE(MPI_STRUCT_NODE,iError)
 END IF

 ! Write compute-node local SIDE_NBELEMTYPE
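
Two reading notes on the diff above: MPI expresses the receive displacements of MPI_(I)ALLGATHERV in multiples of the receive datatype's extent, which is why displsElem, displsSide and displsNode lose their ELEMINFOSIZE, (SIDEINFOSIZE+1) and 3 factors once the struct datatypes are used. The datatypes are committed in StartCommunicateMeshReadin; the side type is reused for the blocking SideInfo_Shared gather in FinishCommunicateMeshReadin, where all three types are then freed.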
