MPI-AMRVAC 3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_coarsen_refine.t
!> Module to coarsen and refine grids for AMR
module mod_coarsen_refine
  implicit none
  private
  !> MPI recv send variables for AMR
  integer :: itag, irecv, isend
  integer, dimension(:), allocatable :: recvrequest, sendrequest
  integer, dimension(:,:), allocatable :: recvstatus, sendstatus
  !> MPI recv send variables for staggered-variable AMR
  integer :: itag_stg
  integer, dimension(:), allocatable :: recvrequest_stg, sendrequest_stg
  integer, dimension(:,:), allocatable :: recvstatus_stg, sendstatus_stg

  ! Public subroutines
  public :: amr_coarsen_refine

contains

  !> coarsen and refine blocks to update AMR grid
  subroutine amr_coarsen_refine
    use mod_forest
    use mod_global_parameters
    use mod_ghostcells_update
    use mod_usr_methods, only: usr_after_refine
    use mod_amr_fct
    use mod_load_balance
    use mod_multigrid_coupling
    use mod_selectgrids, only: selectgrids
    use mod_refine, only: refine_grids

    integer :: iigrid, igrid, ipe, igridCo, ipeCo, level, ic^D
    integer, dimension(2^D&) :: igridFi, ipeFi
    integer :: n_coarsen, n_refine
    type(tree_node_ptr) :: tree, sibling
    logical :: active

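    ! overall sequence: enforce proper nesting of the refine/coarsen
    ! flags, merge flagged sibling blocks into their parents, create
    ! children for blocks flagged for refinement, then rebuild the Morton
    ! order, load balance, connectivity and ghost cells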
    call proper_nesting

    if(stagger_grid) then
      call store_faces
      call comm_faces
    end if

    n_coarsen = count(coarsen(:, :))
    n_refine = count(refine(:, :))

    ! to save memory: coarsen first, then refine
    irecv=0
    isend=0
    allocate(recvstatus(mpi_status_size,max_blocks),recvrequest(max_blocks), &
       sendstatus(mpi_status_size,max_blocks),sendrequest(max_blocks))
    recvrequest=mpi_request_null
    sendrequest=mpi_request_null

    if(stagger_grid) then
      allocate(recvstatus_stg(mpi_status_size,max_blocks*^ND),recvrequest_stg(max_blocks*^ND), &
         sendstatus_stg(mpi_status_size,max_blocks*^ND),sendrequest_stg(max_blocks*^ND))
      recvrequest_stg=mpi_request_null
      sendrequest_stg=mpi_request_null
    end if
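    ! staggered variables exchange one face message per dimension and
    ! block, hence the factor ^ND in the request-array sizes above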

    do ipe=0,npe-1
      do igrid=1,max_blocks
        if (coarsen(igrid,ipe)) then
          if (.not.associated(igrid_to_node(igrid,ipe)%node)) cycle

          tree%node => igrid_to_node(igrid,ipe)%node%parent%node
          {do ic^DB=1,2\}
            sibling%node => tree%node%child(ic^D)%node
            ipeFi(ic^D)=sibling%node%ipe
            igridFi(ic^D)=sibling%node%igrid
          {end do\}

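          ! the coarsened block is placed on the processor that owns the
          ! first child; getnode reserves a free igrid number there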
          ipeCo=ipeFi(1^D&)
          igridCo=getnode(ipeCo)

          call coarsen_tree_leaf(igridCo,ipeCo,igridFi,ipeFi,active)

          call coarsen_grid_siblings(igridCo,ipeCo,igridFi,ipeFi,active)

          ! local coarsening done
          {do ic^DB=1,2\}
            if (ipeFi(ic^D)==ipeCo) then
              call putnode(igridFi(ic^D),ipeFi(ic^D))
              coarsen(igridFi(ic^D),ipeFi(ic^D))=.false.
            end if
          {end do\}
        end if
      end do
    end do
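
    ! wait for completion of all nonblocking transfers posted during
    ! non-local coarsening before releasing the request arrays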
    if (irecv>0) then
      call mpi_waitall(irecv,recvrequest,recvstatus,ierrmpi)
      if(stagger_grid) call mpi_waitall(irecv,recvrequest_stg,recvstatus_stg,ierrmpi)
    end if
    if (isend>0) then
      call mpi_waitall(isend,sendrequest,sendstatus,ierrmpi)
      if(stagger_grid) call mpi_waitall(isend,sendrequest_stg,sendstatus_stg,ierrmpi)
    end if

    deallocate(recvstatus,recvrequest,sendstatus,sendrequest)
    if(stagger_grid) deallocate(recvstatus_stg,recvrequest_stg,sendstatus_stg,sendrequest_stg)

    ! non-local coarsening done
    do ipe=0,npe-1
      do igrid=1,max_blocks
        if (coarsen(igrid,ipe)) then
          !if (ipe==mype) call dealloc_node(igrid)
          ! do not deallocate the node memory: keeping it prevents
          ! fragmentation of system memory caused by frequent allocation
          ! and deallocation

          ! put the node (igrid number) back into the unused pool
          call putnode(igrid,ipe)
          coarsen(igrid,ipe)=.false.
        end if
      end do
    end do

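    ! refinement: reserve 2^D child igrid numbers on the parent's
    ! processor, graft the children into the tree, and fill them with
    ! data on the owning processor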
    do ipe=0,npe-1
      do igrid=1,max_blocks
        if (refine(igrid,ipe)) then

          {do ic^DB=1,2\}
            igridFi(ic^D)=getnode(ipe)
            ipeFi(ic^D)=ipe
          {end do\}

          call refine_tree_leaf(igridFi,ipeFi,igrid,ipe,active)

          if (ipe==mype) call refine_grids(igridFi,ipeFi,igrid,ipe,active)

          ! refinement done
          call putnode(igrid,ipe)
          refine(igrid,ipe)=.false.
        end if
      end do
    end do

    ! A crash occurs in a later MPI_WAITALL when the initial condition
    ! takes too long to fill the new blocks, with both the gfortran and
    ! Intel Fortran compilers. This barrier cures the problem.
    ! TODO: find the underlying reason
    if(.not.time_advance) call mpi_barrier(icomm,ierrmpi)

    if(stagger_grid) call end_comm_faces

    call get_level_range

    ! update the sfc array: igrid and ipe info in the space-filling curve
    call amr_morton_order()

    call load_balance

    ! rebuild tree connectivity
    call getigrids
    call build_connectivity

    ! update the list of active grids
    call selectgrids
    ! the grid structure is now complete again

    ! since we only filled mesh values, and advance assumes filled
    ! ghost cells, do boundary filling for the new levels
    if (time_advance) then
      call getbc(global_time+dt,0.d0,ps,iwstart,nwgc)
    else
      call getbc(global_time,0.d0,ps,iwstart,nwgc)
    end if

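    ! keep the octree-mg multigrid hierarchy in sync with the changed mesh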
    if (use_multigrid) call mg_update_refinement(n_coarsen, n_refine)

    if (associated(usr_after_refine)) then
      call usr_after_refine(n_coarsen, n_refine)
    end if

  end subroutine amr_coarsen_refine

  !> For all grids on all processors, do a check on the refinement flags.
  !> Make sure that neighbors will not differ by more than one level of
  !> refinement.
  subroutine proper_nesting
    use mod_forest
    use mod_global_parameters

    logical, dimension(:,:), allocatable :: refine2
    integer :: iigrid, igrid, level, ic^D, inp^D, i^D, my_neighbor_type, ipe
    logical :: coarsening, pole(ndim), sendbuf(max_blocks)
    type(tree_node_ptr) :: tree, p_neighbor, my_parent, sibling, my_neighbor, &
       neighborchild

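    ! synchronize the refine flags across processors: with buffer cells a
    ! rank may flag blocks it does not own, so merge all flags with a
    ! logical OR reduction; otherwise every rank only flags its own blocks
    ! and an allgather suffices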
    if (nbufferx^D/=0|.or.) then
      allocate(refine2(max_blocks,npe))
      call mpi_allreduce(refine,refine2,max_blocks*npe,mpi_logical,mpi_lor, &
         icomm,ierrmpi)
      refine=refine2
    else
      sendbuf(:)=refine(:,mype)
      call mpi_allgather(sendbuf,max_blocks,mpi_logical,refine,max_blocks, &
         mpi_logical,icomm,ierrmpi)
    end if

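    ! sweep the levels fine-to-coarse: when a block is flagged for
    ! refinement, flag any coarser leaf neighbor of its parent as well,
    ! so that adjacent blocks never differ by more than one level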
    do level=min(levmax,refine_max_level-1),levmin+1,-1
      tree%node => level_head(level)%node
      do
        if (.not.associated(tree%node)) exit

        if (refine(tree%node%igrid,tree%node%ipe)) then
          ic^D=1+modulo(tree%node%ig^D-1,2);
          {do inp^DB=ic^DB-2,ic^DB-1\}
            if (inp^D==0|.and.) cycle
            p_neighbor%node => tree%node%parent%node
            {if (inp^D/=0) then
              p_neighbor%node => p_neighbor%node%neighbor(ic^D,^D)%node
              if (.not.associated(p_neighbor%node)) cycle
            end if\}
            if (p_neighbor%node%leaf) then
              refine(p_neighbor%node%igrid,p_neighbor%node%ipe)=.true.
            end if
          {end do\}
        end if

        tree%node => tree%node%next%node
      end do
    end do

    ! On each processor locally, check whether grids flagged for coarsening
    ! are also flagged for refinement; if so, refinement wins.
    do iigrid=1,igridstail; igrid=igrids(iigrid);
      if (refine(igrid,mype).and.coarsen(igrid,mype)) coarsen(igrid,mype)=.false.
    end do

    ! For all grids on all processors, do a check on the coarsen flags
    sendbuf(:)=coarsen(:,mype)
    call mpi_allgather(sendbuf,max_blocks,mpi_logical,coarsen,max_blocks, &
       mpi_logical,icomm,ierrmpi)

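    ! a family may only be coarsened when all 2^D siblings are leaves
    ! flagged for coarsening and no neighbor would end up violating the
    ! one-level rule; otherwise unflag_coarsen_siblings clears the flags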
    do level=levmax,max(2,levmin),-1
      tree%node => level_head(level)%node
      do
        if (.not.associated(tree%node)) exit

        if (coarsen(tree%node%igrid,tree%node%ipe)) then
          coarsening=.true.
          my_parent%node => tree%node%parent%node

          ! are all siblings flagged for coarsening?
          check1: {do ic^DB=1,2\}
            sibling%node => my_parent%node%child(ic^D)%node
            if (sibling%node%leaf) then
              if (coarsen(sibling%node%igrid,sibling%node%ipe)) cycle
            end if
            call unflag_coarsen_siblings
            exit check1
          {end do\} check1

          ! Make sure that neighbors will not differ by more than one level
          ! of refinement, otherwise unflag all siblings
          if (coarsening) then
            check2: {do ic^DB=1,2\}
              sibling%node => my_parent%node%child(ic^D)%node
              {do i^DB=ic^DB-2,ic^DB-1\}
                if (i^D==0|.and.) cycle
                call find_neighbor(my_neighbor,my_neighbor_type, &
                   sibling,i^D,pole)
                select case (my_neighbor_type)
                case (neighbor_sibling)
                  if (refine(my_neighbor%node%igrid, &
                     my_neighbor%node%ipe)) then
                    call unflag_coarsen_siblings
                    exit check2
                  else
                    cycle
                  end if
                case (neighbor_fine)
                  neighborchild%node=>my_neighbor%node%child(1^D&)%node
                  if (neighborchild%node%leaf) then
                    if (coarsen(neighborchild%node%igrid, &
                       neighborchild%node%ipe)) then
                      cycle
                    end if
                  end if
                  call unflag_coarsen_siblings
                  exit check2
                end select
              {end do\}
            {end do\} check2
          end if

        end if

        tree%node => tree%node%next%node
      end do
    end do

  contains

    subroutine unflag_coarsen_siblings

      integer :: ic^D
      type(tree_node_ptr) :: sibling

      {do ic^DB=1,2\}
        sibling%node => my_parent%node%child(ic^D)%node
        if (sibling%node%leaf) then
          coarsen(sibling%node%igrid,sibling%node%ipe)=.false.
        end if
      {end do\}
      coarsening=.false.

    end subroutine unflag_coarsen_siblings

  end subroutine proper_nesting

  !> coarsen sibling blocks into one block
  subroutine coarsen_grid_siblings(igrid,ipe,child_igrid,child_ipe,active)
    use mod_global_parameters
    use mod_coarsen, only: coarsen_grid

    integer, intent(in) :: igrid, ipe
    integer, dimension(2^D&), intent(in) :: child_igrid, child_ipe
    logical, intent(in) :: active

    integer :: igridFi, ipeFi, ixCo^L, ixCoG^L, ixCoM^L, ic^D, idir

    if (ipe==mype) call alloc_node(igrid)

    ! new passive block: fill the coarse block from the initial condition
    if (.not. active) then
      if (ipe == mype) then
        call initial_condition(igrid)
        {do ic^DB=1,2\}
          igridFi=child_igrid(ic^D)
          ipeFi=child_ipe(ic^D)
          !if (ipeFi==mype) then
          !  ! remove solution space of child
          !  call dealloc_node(igridFi)
          !end if
        {end do\}
      end if
      return
    end if

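    ! gather the 2^D children into the coarse parent: a child local to the
    ! parent's processor is coarsened directly into ps(igrid), a remote
    ! child is coarsened into its psc buffer and sent to the parent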
    {do ic^DB=1,2\}
      igridFi=child_igrid(ic^D)
      ipeFi=child_ipe(ic^D)

      if (ipeFi==mype) then
        ^D&dxlevel(^D)=rnode(rpdx^D_,igridFi);
        if (ipe==mype) then
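          ! index range of the parent-mesh quadrant/octant covered by child ic^D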
          ixComin^D=ixMlo^D+(ic^D-1)*(ixMhi^D-ixMlo^D+1)/2;
          ixComax^D=ixMhi^D+(ic^D-2)*(ixMhi^D-ixMlo^D+1)/2;

          call coarsen_grid(ps(igridFi),ixG^LL,ixM^LL,ps(igrid),ixG^LL,ixCo^L)
          ! remove solution space of child
          !call dealloc_node(igridFi)
        else
          ixCoGmin^D=1;
          ixCoGmax^D=ixGhi^D/2+nghostcells;
          ixCoM^L=ixCoG^L^LSUBnghostcells;
          call coarsen_grid(ps(igridFi),ixG^LL,ixM^LL,psc(igridFi), &
             ixCoG^L,ixCoM^L)

          !itag=ipeFi*max_blocks+igridFi
          itag=ipeFi+igridFi
          isend=isend+1
          call mpi_isend(psc(igridFi)%w,1,type_coarse_block,ipe,itag, &
             icomm,sendrequest(isend),ierrmpi)
          if(stagger_grid) then
            do idir=1,ndim
              !itag_stg=(npe+ipeFi+1)*max_blocks+igridFi*(ndir-1+idir)
              itag_stg=(npe+ipeFi+1)+igridFi*(ndir-1+idir)
              call mpi_isend(psc(igridFi)%ws,1,type_coarse_block_stg(idir,ic^D),ipe,itag_stg, &
                 icomm,sendrequest_stg(isend),ierrmpi)
            end do
          end if
        end if
      else
        if (ipe==mype) then
          !itag=ipeFi*max_blocks+igridFi
          itag=ipeFi+igridFi
          irecv=irecv+1
          call mpi_irecv(ps(igrid)%w,1,type_sub_block(ic^D),ipeFi,itag, &
             icomm,recvrequest(irecv),ierrmpi)
          if(stagger_grid) then
            do idir=1,ndim
              !itag_stg=(npe+ipeFi+1)*max_blocks+igridFi*(ndir-1+idir)
              itag_stg=(npe+ipeFi+1)+igridFi*(ndir-1+idir)
              call mpi_irecv(ps(igrid)%ws,1,type_sub_block_stg(idir,ic^D),ipeFi,itag_stg, &
                 icomm,recvrequest_stg(irecv),ierrmpi)
            end do
          end if
        end if
      end if
    {end do\}

  end subroutine coarsen_grid_siblings

end module mod_coarsen_refine