MPI-AMRVAC  3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_load_balance.t
module mod_load_balance
  implicit none

contains

  !> reallocate blocks into processors for load balance
  subroutine load_balance
    use mod_forest
    use mod_global_parameters

    integer :: Morton_no, recv_igrid, recv_ipe, send_igrid, send_ipe, igrid, ipe
    !> MPI recv send variables for AMR
    integer :: itag, irecv, isend
    integer, dimension(:), allocatable :: recvrequest, sendrequest
    integer, dimension(:,:), allocatable :: recvstatus, sendstatus
    !> MPI recv send variables for staggered-variable AMR
    integer :: itag_stg
    integer, dimension(:), allocatable :: recvrequest_stg, sendrequest_stg
    integer, dimension(:,:), allocatable :: recvstatus_stg, sendstatus_stg

    ! Jannis: for now, not using version for passive/active blocks
    call get_morton_range()

    if (npe==1) then
       ! with a single MPI task nothing moves; just refresh the local
       ! Morton-number to igrid map
       sfc_to_igrid(morton_start(0):morton_stop(0))=sfc(1,morton_start(0):morton_stop(0))
       return
    end if

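    ! the redistribution proceeds in two passes over the space-filling curve:
    ! the first pass posts nonblocking transfers for every block that changes
    ! owner, the second pass (after the mpi_waitall calls) returns the old
    ! igrid numbers to the free pool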
    irecv=0
    isend=0
    allocate(recvstatus(mpi_status_size,max_blocks),recvrequest(max_blocks), &
             sendstatus(mpi_status_size,max_blocks),sendrequest(max_blocks))
    recvrequest=mpi_request_null
    sendrequest=mpi_request_null

    if(stagger_grid) then
      allocate(recvstatus_stg(mpi_status_size,max_blocks*^ND),recvrequest_stg(max_blocks*^ND), &
               sendstatus_stg(mpi_status_size,max_blocks*^ND),sendrequest_stg(max_blocks*^ND))
      recvrequest_stg=mpi_request_null
      sendrequest_stg=mpi_request_null
    end if

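    ! first pass: ipe is the rank that should own each block after balancing,
    ! sfc(2,morton_no) is the rank that owns it now; every block whose owner
    ! changes gets a fresh node on the receiving rank and its data is moved
    ! with nonblocking point-to-point communication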
    do ipe=0,npe-1; do morton_no=morton_start(ipe),morton_stop(ipe)
       recv_ipe=ipe

       send_igrid=sfc(1,morton_no)
       send_ipe=sfc(2,morton_no)

       if (recv_ipe/=send_ipe) then
          ! get an igrid number for the new node in recv_ipe processor
          recv_igrid=getnode(recv_ipe)
          ! update node igrid and ipe on the tree
          call change_ipe_tree_leaf(recv_igrid,recv_ipe,send_igrid,send_ipe)
          ! receive physical data of the new node in recv_ipe processor
          if (recv_ipe==mype) call lb_recv
          ! send physical data of the old node in send_ipe processor
          if (send_ipe==mype) call lb_send
       end if
       ! refresh the local Morton-number to igrid map on the receiving rank
       if (recv_ipe==mype) then
          if (recv_ipe==send_ipe) then
             sfc_to_igrid(morton_no)=send_igrid
          else
             sfc_to_igrid(morton_no)=recv_igrid
          end if
       end if
    end do; end do

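    ! complete all outstanding nonblocking transfers before the old nodes
    ! are released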
    if (irecv>0) then
       call mpi_waitall(irecv,recvrequest,recvstatus,ierrmpi)
       if(stagger_grid) call mpi_waitall(irecv,recvrequest_stg,recvstatus_stg,ierrmpi)
    end if
    if (isend>0) then
       call mpi_waitall(isend,sendrequest,sendstatus,ierrmpi)
       if(stagger_grid) call mpi_waitall(isend,sendrequest_stg,sendstatus_stg,ierrmpi)
    end if

    deallocate(recvstatus,recvrequest,sendstatus,sendrequest)
    if(stagger_grid) deallocate(recvstatus_stg,recvrequest_stg,sendstatus_stg,sendrequest_stg)

    ! post processing: mark the igrid slot of each moved block as free again
    ! on its former owner
    do ipe=0,npe-1; do morton_no=morton_start(ipe),morton_stop(ipe)
       recv_ipe=ipe

       send_igrid=sfc(1,morton_no)
       send_ipe=sfc(2,morton_no)

       if (recv_ipe/=send_ipe) then
          !if (send_ipe==mype) call dealloc_node(send_igrid)
          call putnode(send_igrid,send_ipe)
       end if
    end do; end do
    {#IFDEF EVOLVINGBOUNDARY
    ! mark physical-boundary blocks on space-filling curve
    do morton_no=morton_start(mype),morton_stop(mype)
       igrid=sfc_to_igrid(morton_no)
       if (phyboundblock(igrid)) sfc_phybound(morton_no)=1
    end do
    call mpi_allreduce(mpi_in_place,sfc_phybound,nleafs,mpi_integer,&
                       mpi_sum,icomm,ierrmpi)
    }

    ! Update sfc array: igrid and ipe info in space filling curve
    call amr_morton_order()

  contains

    subroutine lb_recv

      ! make room for the incoming block on this (receiving) rank
      call alloc_node(recv_igrid)

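      ! the igrid number on the receiving rank serves as the message tag; the
      ! staggered-field message uses recv_igrid+max_blocks so the two
      ! transfers belonging to one block cannot be confused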
      itag=recv_igrid
      irecv=irecv+1
      {#IFDEF EVOLVINGBOUNDARY
      if (phyboundblock(recv_igrid)) then
         call mpi_irecv(ps(recv_igrid)%w,1,type_block,send_ipe,itag, &
                        icomm,recvrequest(irecv),ierrmpi)
      else
         call mpi_irecv(ps(recv_igrid)%w,1,type_block_io,send_ipe,itag, &
                        icomm,recvrequest(irecv),ierrmpi)
      end if
      }{#IFNDEF EVOLVINGBOUNDARY
      call mpi_irecv(ps(recv_igrid)%w,1,type_block_io,send_ipe,itag, &
                     icomm,recvrequest(irecv),ierrmpi)
      }
      if(stagger_grid) then
        itag=recv_igrid+max_blocks
        call mpi_irecv(ps(recv_igrid)%ws,1,type_block_io_stg,send_ipe,itag, &
                       icomm,recvrequest_stg(irecv),ierrmpi)
      end if

    end subroutine lb_recv

    subroutine lb_send

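      ! tags must match those posted by lb_recv on rank recv_ipe, so they are
      ! also derived from recv_igrid (available here through host association)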
      itag=recv_igrid
      isend=isend+1
      {#IFDEF EVOLVINGBOUNDARY
      if (phyboundblock(send_igrid)) then
         call mpi_isend(ps(send_igrid)%w,1,type_block,recv_ipe,itag, &
                        icomm,sendrequest(isend),ierrmpi)
      else
         call mpi_isend(ps(send_igrid)%w,1,type_block_io,recv_ipe,itag, &
                        icomm,sendrequest(isend),ierrmpi)
      end if
      }{#IFNDEF EVOLVINGBOUNDARY
      call mpi_isend(ps(send_igrid)%w,1,type_block_io,recv_ipe,itag, &
                     icomm,sendrequest(isend),ierrmpi)
      }
      if(stagger_grid) then
        itag=recv_igrid+max_blocks
        call mpi_isend(ps(send_igrid)%ws,1,type_block_io_stg,recv_ipe,itag, &
                       icomm,sendrequest_stg(isend),ierrmpi)
      end if

    end subroutine lb_send

  end subroutine load_balance

end module mod_load_balance
Referenced entities:

  lb_recv, lb_send: internal subroutines of load_balance.
  load_balance: subroutine; reallocate blocks into processors for load balance.
  get_morton_range: subroutine; Set the Morton range for each processor.
  getnode(ipe): integer function, public; Get first available igrid on processor ipe.
  putnode(igrid, ipe): subroutine, public.
  alloc_node(igrid): subroutine, public; allocate arrays on igrid node.
  change_ipe_tree_leaf(recv_igrid, recv_ipe, send_igrid, send_ipe): subroutine, public.
  mod_forest (mod_forest.t:2): Module with basic grid data structures.
  sfc (mod_forest.t:43): integer, dimension(:,:), allocatable, save; Array to go from a Morton number to an igrid and processor index. Sfc(1:3, MN) contains [igrid,...
  sfc_to_igrid (mod_forest.t:53): integer, dimension(:), allocatable, save; Go from a Morton number to an igrid index (for a single processor).
  morton_start (mod_forest.t:62): integer, dimension(:), allocatable, save; First Morton number per processor.
  morton_stop (mod_forest.t:65): integer, dimension(:), allocatable, save; Last Morton number per processor.
  mod_global_parameters: This module contains definitions of global parameters and variables and some generic functions/subrou...
  stagger_grid: logical; True for using stagger grid.
  mype: integer; The rank of the current MPI task.
  npe: integer; The number of MPI tasks.
  max_blocks: integer; The maximum number of grid blocks in a processor.
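
For orientation, the balancing target that load_balance realizes is fixed beforehand by get_morton_range: the leaf blocks are numbered consecutively along the space-filling curve, and that range of Morton numbers is divided over the MPI ranks as evenly as possible, giving morton_start(ipe) and morton_stop(ipe) for every rank. The standalone sketch below is not taken from MPI-AMRVAC; it only illustrates, under the assumption of an even split by block count, how such per-rank ranges can be computed (nleafs and npe are reused purely as example names).

program morton_range_sketch
  implicit none
  integer, parameter :: nleafs = 10        ! total number of leaf blocks (example value)
  integer, parameter :: npe    = 4         ! number of MPI ranks (example value)
  integer :: morton_start(0:npe-1), morton_stop(0:npe-1)
  integer :: ipe, nblocks

  ! hand out nleafs blocks in Morton order; the first mod(nleafs,npe) ranks
  ! receive one extra block, so the per-rank counts differ by at most one
  morton_start(0) = 1
  do ipe = 0, npe-1
     nblocks = nleafs/npe
     if (ipe < mod(nleafs,npe)) nblocks = nblocks + 1
     if (ipe > 0) morton_start(ipe) = morton_stop(ipe-1) + 1
     morton_stop(ipe) = morton_start(ipe) + nblocks - 1
     print '(a,i0,a,i0,a,i0)', 'rank ', ipe, ': Morton numbers ', &
          morton_start(ipe), ' to ', morton_stop(ipe)
  end do
end program morton_range_sketch

Whether the actual get_morton_range distributes the remainder blocks in exactly this way is not shown on this page; the sketch only illustrates the even-split idea that the per-rank Morton ranges encode, and which load_balance then turns into block migrations.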