MPI-AMRVAC  3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_ghostcells_update.t
1 !> update ghost cells of all blocks including physical boundaries
2 module mod_ghostcells_update
3 
4  implicit none
5 
6  ! The number of interleaving sending buffers for ghost cells
7  integer, parameter :: npwbuf=2
8 
9  integer :: ixm^l, ixcog^l, ixcom^l, ixcogs^l
10 
11  ! index ranges to send (S) to sibling blocks, receive (R) from sibling blocks
12  integer, dimension(-1:1) :: ixs_srl_^l, ixr_srl_^l
13 
14  ! index ranges of staggered variables to send (S) to sibling blocks, receive (R) from sibling blocks
15  integer, dimension(^ND,-1:1) :: ixs_srl_stg_^l, ixr_srl_stg_^l
16 
17  ! index ranges to send (S) restricted (r) ghost cells to coarser blocks
18  integer, dimension(-1:1) :: ixs_r_^l
19 
20  ! index ranges of staggered variables to send (S) restricted (r) ghost cells to coarser blocks
21  integer, dimension(^ND,-1:1) :: ixs_r_stg_^l
22 
23  ! index ranges to receive restricted ghost cells from finer blocks
24  integer, dimension(0:3) :: ixr_r_^l
25 
26  ! index ranges of staggered variables to receive restricted ghost cells from finer blocks
27  integer, dimension(^ND,0:3) :: ixr_r_stg_^l
28 
29  ! send prolongated (p) ghost cells to finer blocks, receive prolongated from coarser blocks
30  integer, dimension(0:3) :: ixs_p_^l, ixr_p_^l
31 
32  ! send prolongated (p) staggered ghost cells to finer blocks, receive prolongated from coarser blocks
33  integer, dimension(^ND,0:3) :: ixs_p_stg_^l, ixr_p_stg_^l
34 
35  ! number of MPI receive-send pairs, srl: same refinement level; r: restrict; p: prolong
36  integer :: nrecv_bc_srl, nsend_bc_srl, nrecv_bc_r, nsend_bc_r, nrecv_bc_p, nsend_bc_p
37 
38  ! record index position of buffer arrays
39  integer :: ibuf_send_srl, ibuf_recv_srl, ibuf_send_r, ibuf_recv_r, ibuf_send_p, ibuf_recv_p
40 
41  ! counts of send and receive operations
42  integer :: isend_srl, irecv_srl, isend_r, irecv_r, isend_p, irecv_p
43 
44  ! counts of send and receive operations for cell-centered ghost cells
45  integer :: isend_c, irecv_c
46 
47  ! tag of MPI send and recv
48  integer, private :: itag
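 ! note: within one getbc call, itag is unique per message: it combines the receiving
 ! block index (in steps of 3**ndim+4**ndim) with a base-3 encoding of the neighbor
 ! direction for same-level messages, or an offset base-4 encoding of the child
 ! position for fine/coarse messages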
49 
50  ! total sizes = cell-center normal flux + stagger-grid flux of send and receive
51  integer, dimension(-1:1^D&) :: sizes_srl_send_total, sizes_srl_recv_total
52 
53  ! MPI request and status arrays of cell-centered variables for siblings and restriction
54  integer, dimension(:), allocatable :: recvrequest_c_sr, sendrequest_c_sr
55  integer, dimension(:,:), allocatable :: recvstatus_c_sr, sendstatus_c_sr
56 
57  ! MPI request and status arrays of cell-centered variables for prolongation
58  integer, dimension(:), allocatable :: recvrequest_c_p, sendrequest_c_p
59  integer, dimension(:,:), allocatable :: recvstatus_c_p, sendstatus_c_p
60 
61  ! sizes of buffer arrays for stagger-grid variable
62  integer, dimension(^ND,-1:1^D&) :: sizes_srl_send_stg, sizes_srl_recv_stg
63 
64  integer, dimension(:), allocatable :: recvrequest_srl, sendrequest_srl
65  integer, dimension(:,:), allocatable :: recvstatus_srl, sendstatus_srl
66 
67  ! buffer arrays for send and receive of siblings, allocated in build_connectivity
68  double precision, dimension(:), allocatable :: recvbuffer_srl, sendbuffer_srl
69 
70  integer, dimension(:), allocatable :: recvrequest_r, sendrequest_r
71  integer, dimension(:,:), allocatable :: recvstatus_r, sendstatus_r
72 
73  ! buffer arrays for send and receive in restriction
74  double precision, dimension(:), allocatable :: recvbuffer_r, sendbuffer_r
75 
76  integer, dimension(:), allocatable :: recvrequest_p, sendrequest_p
77  integer, dimension(:,:), allocatable :: recvstatus_p, sendstatus_p
78 
79  ! buffer arrays for send and receive in prolongation
80  double precision, dimension(:), allocatable :: recvbuffer_p, sendbuffer_p
81 
82  ! sizes to allocate buffer arrays for send and receive for restriction
83  integer, dimension(-1:1^D&) :: sizes_r_send_total
84  integer, dimension(0:3^D&) :: sizes_r_recv_total
85  integer, dimension(^ND,-1:1^D&) :: sizes_r_send_stg
86  integer, dimension(^ND,0:3^D&) :: sizes_r_recv_stg
87 
88  ! sizes to allocate buffer arrays for send and receive for prolongation
89  integer, dimension(0:3^D&) :: sizes_p_send_total, sizes_p_recv_total
90  integer, dimension(^ND,0:3^D&) :: sizes_p_send_stg, sizes_p_recv_stg
91 
92  ! There are several variants: _f indicates that all flux variables are filled,
93  ! whereas _p1 and _p2 indicate that only part of the variables is filled.
94  ! Furthermore _r_ stands for restrict, _p_ for prolongation.
95  integer, dimension(-1:1^D&), target :: type_send_srl_f, type_recv_srl_f
96  integer, dimension(-1:1^D&), target :: type_send_r_f
97  integer, dimension( 0:3^D&), target :: type_recv_r_f, type_send_p_f, type_recv_p_f
98  integer, dimension(-1:1^D&), target :: type_send_srl_p1, type_recv_srl_p1
99  integer, dimension(-1:1^D&), target :: type_send_r_p1
100  integer, dimension( 0:3^D&), target :: type_recv_r_p1, type_send_p_p1, type_recv_p_p1
101  integer, dimension(-1:1^D&), target :: type_send_srl_p2, type_recv_srl_p2
102  integer, dimension(-1:1^D&), target :: type_send_r_p2
103  integer, dimension( 0:3^D&), target :: type_recv_r_p2, type_send_p_p2, type_recv_p_p2
104  integer, dimension( :^D&), pointer :: type_send_srl, type_recv_srl, type_send_r
105  integer, dimension( :^D&), pointer :: type_recv_r, type_send_p, type_recv_p
106 
107  ! switch to update physical boundaries or not
108  logical, public :: bcphys=.true.
109  ! Special buffer for pole copy
110  type wbuffer
111  double precision, dimension(:^D&,:), allocatable :: w
112  end type wbuffer
113 
114 contains
115 
116  subroutine init_bc()
119  use mod_comm_lib, only: mpistop
120 
121  integer :: nghostcellsCo, interpolation_order
122  integer :: nx^D, nxCo^D, ixG^L, i^D, idir
123 
124  ixg^l=ixg^ll;
125  ixm^l=ixg^l^lsubnghostcells;
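 ! ixCoG^L is the extent of the coarse representative (psc) of a block: half the mesh
 ! cells per dimension plus the full ghost-cell layers; ixCoGs^L is its staggered counterpart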
126  ixcogmin^d=1;
127  ixcogmax^d=(ixghi^d-2*nghostcells)/2+2*nghostcells;
128  ixcogsmin^d=0;
129  ixcogsmax^d=ixcogmax^d;
130 
131  ixcom^l=ixcog^l^lsubnghostcells;
132 
133  nx^d=ixmmax^d-ixmmin^d+1;
134  nxco^d=nx^d/2;
135 
136  if(ghost_copy) then
137  interpolation_order=1
138  else
139  interpolation_order=2
140  end if
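 ! the coarse representative needs only about half as many ghost cells, since one of
 ! its cells covers two fine cells per dimension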
141  nghostcellsco=int((nghostcells+1)/2)
142 
143  if (nghostcellsco+interpolation_order-1>nghostcells) then
144  call mpistop("interpolation order for prolongation in getbc too high")
145  end if
146 
147  ! i=-1 means subregion prepared for the neighbor at its minimum side
148  ! i= 1 means subregion prepared for the neighbor at its maximum side
149  {
150  ixs_srl_min^d(-1)=ixmmin^d
151  ixs_srl_min^d( 0)=ixmmin^d
152  ixs_srl_min^d( 1)=ixmmax^d+1-nghostcells
153  ixs_srl_max^d(-1)=ixmmin^d-1+nghostcells
154  ixs_srl_max^d( 0)=ixmmax^d
155  ixs_srl_max^d( 1)=ixmmax^d
156 
157  ixr_srl_min^d(-1)=1
158  ixr_srl_min^d( 0)=ixmmin^d
159  ixr_srl_min^d( 1)=ixmmax^d+1
160  ixr_srl_max^d(-1)=nghostcells
161  ixr_srl_max^d( 0)=ixmmax^d
162  ixr_srl_max^d( 1)=ixgmax^d
163 
164  ixs_r_min^d(-1)=ixcommin^d
165  ixs_r_min^d( 0)=ixcommin^d
166  ixs_r_min^d( 1)=ixcommax^d+1-nghostcells
167  ixs_r_max^d(-1)=ixcommin^d-1+nghostcells
168  ixs_r_max^d( 0)=ixcommax^d
169  ixs_r_max^d( 1)=ixcommax^d
170 
171  ixr_r_min^d(0)=1
172  ixr_r_min^d(1)=ixmmin^d
173  ixr_r_min^d(2)=ixmmin^d+nxco^d
174  ixr_r_min^d(3)=ixmmax^d+1
175  ixr_r_max^d(0)=nghostcells
176  ixr_r_max^d(1)=ixmmin^d-1+nxco^d
177  ixr_r_max^d(2)=ixmmax^d
178  ixr_r_max^d(3)=ixgmax^d
179 
180  ixs_p_min^d(0)=ixmmin^d-(interpolation_order-1)
181  ixs_p_min^d(1)=ixmmin^d-(interpolation_order-1)
182  ixs_p_min^d(2)=ixmmin^d+nxco^d-nghostcellsco-(interpolation_order-1)
183  ixs_p_min^d(3)=ixmmax^d+1-nghostcellsco-(interpolation_order-1)
184  ixs_p_max^d(0)=ixmmin^d-1+nghostcellsco+(interpolation_order-1)
185  ixs_p_max^d(1)=ixmmin^d-1+nxco^d+nghostcellsco+(interpolation_order-1)
186  ixs_p_max^d(2)=ixmmax^d+(interpolation_order-1)
187  ixs_p_max^d(3)=ixmmax^d+(interpolation_order-1)
188 
189  if(.not.phys_req_diagonal) then
190  ! exclude ghost-cell region when diagonal cells are unknown
191  ixs_p_min^d(0)=ixmmin^d
192  ixs_p_max^d(3)=ixmmax^d
193  ixs_p_max^d(1)=ixmmin^d-1+nxco^d+(interpolation_order-1)
194  ixs_p_min^d(2)=ixmmin^d+nxco^d-(interpolation_order-1)
195  end if
196 
197  ixr_p_min^d(0)=ixcommin^d-nghostcellsco-(interpolation_order-1)
198  ixr_p_min^d(1)=ixcommin^d-(interpolation_order-1)
199  ixr_p_min^d(2)=ixcommin^d-nghostcellsco-(interpolation_order-1)
200  ixr_p_min^d(3)=ixcommax^d+1-(interpolation_order-1)
201  ixr_p_max^d(0)=nghostcells+(interpolation_order-1)
202  ixr_p_max^d(1)=ixcommax^d+nghostcellsco+(interpolation_order-1)
203  ixr_p_max^d(2)=ixcommax^d+(interpolation_order-1)
204  ixr_p_max^d(3)=ixcommax^d+nghostcellsco+(interpolation_order-1)
205 
206  if(.not.phys_req_diagonal) then
207  ixr_p_max^d(0)=nghostcells
208  ixr_p_min^d(3)=ixcommax^d+1
209  ixr_p_max^d(1)=ixcommax^d+(interpolation_order-1)
210  ixr_p_min^d(2)=ixcommin^d-(interpolation_order-1)
211  end if
212 
213  \}
214 
215  if (stagger_grid) then
216  allocate(pole_buf%ws(ixgs^t,nws))
217  ! Staggered (face-allocated) variables
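 ! face-centered variables have one extra face in their own direction, hence the
 ! kr(idir,^D) shifts in the index ranges below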
218  do idir=1,ndim
219  { ixs_srl_stg_min^d(idir,-1)=ixmmin^d-kr(idir,^d)
220  ixs_srl_stg_max^d(idir,-1)=ixmmin^d-1+nghostcells
221  ixs_srl_stg_min^d(idir,0) =ixmmin^d-kr(idir,^d)
222  ixs_srl_stg_max^d(idir,0) =ixmmax^d
223  ixs_srl_stg_min^d(idir,1) =ixmmax^d-nghostcells+1-kr(idir,^d)
224  ixs_srl_stg_max^d(idir,1) =ixmmax^d
225 
226  ixr_srl_stg_min^d(idir,-1)=1-kr(idir,^d)
227  ixr_srl_stg_max^d(idir,-1)=nghostcells
228  ixr_srl_stg_min^d(idir,0) =ixmmin^d-kr(idir,^d)
229  ixr_srl_stg_max^d(idir,0) =ixmmax^d
230  ixr_srl_stg_min^d(idir,1) =ixmmax^d+1-kr(idir,^d)
231  ixr_srl_stg_max^d(idir,1) =ixgmax^d
232 
233  ixs_r_stg_min^d(idir,-1)=ixcommin^d-kr(idir,^d)
234  ixs_r_stg_max^d(idir,-1)=ixcommin^d-1+nghostcells
235  ixs_r_stg_min^d(idir,0) =ixcommin^d-kr(idir,^d)
236  ixs_r_stg_max^d(idir,0) =ixcommax^d
237  ixs_r_stg_min^d(idir,1) =ixcommax^d+1-nghostcells-kr(idir,^d)
238  ixs_r_stg_max^d(idir,1) =ixcommax^d
239 
240  ixr_r_stg_min^d(idir,0)=1-kr(idir,^d)
241  ixr_r_stg_max^d(idir,0)=nghostcells
242  ixr_r_stg_min^d(idir,1)=ixmmin^d-kr(idir,^d)
243  ixr_r_stg_max^d(idir,1)=ixmmin^d-1+nxco^d
244  ixr_r_stg_min^d(idir,2)=ixmmin^d+nxco^d-kr(idir,^d)
245  ixr_r_stg_max^d(idir,2)=ixmmax^d
246  ixr_r_stg_min^d(idir,3)=ixmmax^d+1-kr(idir,^d)
247  ixr_r_stg_max^d(idir,3)=ixgmax^d
248  \}
249  {if (idir==^d) then
250  ! Parallel components
251  {
252  ixs_p_stg_min^d(idir,0)=ixmmin^d-1 ! -1 to make redundant
253  ixs_p_stg_max^d(idir,0)=ixmmin^d-1+nghostcellsco
254  ixs_p_stg_min^d(idir,1)=ixmmin^d-1 ! -1 to make redundant
255  ixs_p_stg_max^d(idir,1)=ixmmin^d-1+nxco^d+nghostcellsco
256  ixs_p_stg_min^d(idir,2)=ixmmax^d-nxco^d-nghostcellsco
257  ixs_p_stg_max^d(idir,2)=ixmmax^d
258  ixs_p_stg_min^d(idir,3)=ixmmax^d-nghostcellsco
259  ixs_p_stg_max^d(idir,3)=ixmmax^d
260 
261  ixr_p_stg_min^d(idir,0)=ixcommin^d-1-nghostcellsco
262  ixr_p_stg_max^d(idir,0)=ixcommin^d-1
263  ixr_p_stg_min^d(idir,1)=ixcommin^d-1 ! -1 to make redundant
264  ixr_p_stg_max^d(idir,1)=ixcommax^d+nghostcellsco
265  ixr_p_stg_min^d(idir,2)=ixcommin^d-1-nghostcellsco
266  ixr_p_stg_max^d(idir,2)=ixcommax^d
267  ixr_p_stg_min^d(idir,3)=ixcommax^d+1-1 ! -1 to make redundant
268  ixr_p_stg_max^d(idir,3)=ixcommax^d+nghostcellsco
269  \}
270  else
271  {
272  ! Perpendicular component
273  ixs_p_stg_min^d(idir,0)=ixmmin^d
274  ixs_p_stg_max^d(idir,0)=ixmmin^d-1+nghostcellsco+(interpolation_order-1)
275  ixs_p_stg_min^d(idir,1)=ixmmin^d
276  ixs_p_stg_max^d(idir,1)=ixmmin^d-1+nxco^d+nghostcellsco+(interpolation_order-1)
277  ixs_p_stg_min^d(idir,2)=ixmmax^d+1-nxco^d-nghostcellsco-(interpolation_order-1)
278  ixs_p_stg_max^d(idir,2)=ixmmax^d
279  ixs_p_stg_min^d(idir,3)=ixmmax^d+1-nghostcellsco-(interpolation_order-1)
280  ixs_p_stg_max^d(idir,3)=ixmmax^d
281 
282  ixr_p_stg_min^d(idir,0)=ixcommin^d-nghostcellsco-(interpolation_order-1)
283  ixr_p_stg_max^d(idir,0)=ixcommin^d-1
284  ixr_p_stg_min^d(idir,1)=ixcommin^d
285  ixr_p_stg_max^d(idir,1)=ixcommax^d+nghostcellsco+(interpolation_order-1)
286  ixr_p_stg_min^d(idir,2)=ixcommin^d-nghostcellsco-(interpolation_order-1)
287  ixr_p_stg_max^d(idir,2)=ixcommax^d
288  ixr_p_stg_min^d(idir,3)=ixcommax^d+1
289  ixr_p_stg_max^d(idir,3)=ixcommax^d+nghostcellsco+(interpolation_order-1)
290  \}
291  end if
292  }
293  end do
294  ! calculate sizes for buffer arrays for siblings
295  {do i^db=-1,1\}
296  ! Staggered (face-allocated) variables
297  do idir=1,ndim
298  sizes_srl_send_stg(idir,i^d)={(ixs_srl_stg_max^d(idir,i^d)-ixs_srl_stg_min^d(idir,i^d)+1)|*}
299  sizes_srl_recv_stg(idir,i^d)={(ixr_srl_stg_max^d(idir,i^d)-ixr_srl_stg_min^d(idir,i^d)+1)|*}
300  sizes_r_send_stg(idir,i^d)={(ixs_r_stg_max^d(idir,i^d)-ixs_r_stg_min^d(idir,i^d)+1)|*}
301  end do
302  sizes_srl_send_total(i^d)=sum(sizes_srl_send_stg(:,i^d))
303  sizes_srl_recv_total(i^d)=sum(sizes_srl_recv_stg(:,i^d))
304  sizes_r_send_total(i^d)=sum(sizes_r_send_stg(:,i^d))
305  {end do\}
306 
307  {do i^db=0,3\}
308  ! Staggered (face-allocated) variables
309  do idir=1,ndim
310  sizes_r_recv_stg(idir,i^d)={(ixr_r_stg_max^d(idir,i^d)-ixr_r_stg_min^d(idir,i^d)+1)|*}
311  sizes_p_send_stg(idir,i^d)={(ixs_p_stg_max^d(idir,i^d)-ixs_p_stg_min^d(idir,i^d)+1)|*}
312  sizes_p_recv_stg(idir,i^d)={(ixr_p_stg_max^d(idir,i^d)-ixr_p_stg_min^d(idir,i^d)+1)|*}
313  end do
314  sizes_r_recv_total(i^d)=sum(sizes_r_recv_stg(:,i^d))
315  sizes_p_send_total(i^d)=sum(sizes_p_send_stg(:,i^d))
316  sizes_p_recv_total(i^d)=sum(sizes_p_recv_stg(:,i^d))
317  {end do\}
318  end if
319 
320  end subroutine init_bc
321 
322  subroutine create_bc_mpi_datatype(nwstart,nwbc)
324 
325  integer, intent(in) :: nwstart, nwbc
326  integer :: i^D, ic^D, inc^D
327 
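 ! for every neighbor direction (skipping i=0, the block itself) create the MPI
 ! subarray types used to exchange cell-centered data with siblings (srl), coarser
 ! neighbors (r, restrict) and finer neighbors (p, prolong)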
328  {do i^db=-1,1\}
329  if(i^d==0|.and.) cycle
330  call get_bc_comm_type(type_send_srl(i^d),ixs_srl_^l(i^d),ixg^ll,nwstart,nwbc)
331  call get_bc_comm_type(type_recv_srl(i^d),ixr_srl_^l(i^d),ixg^ll,nwstart,nwbc)
332  call get_bc_comm_type(type_send_r(i^d), ixs_r_^l(i^d),ixcog^l,nwstart,nwbc)
333  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
334  inc^db=2*i^db+ic^db\}
335  call get_bc_comm_type(type_recv_r(inc^d),ixr_r_^l(inc^d), ixg^ll,nwstart,nwbc)
336  call get_bc_comm_type(type_send_p(inc^d),ixs_p_^l(inc^d), ixg^ll,nwstart,nwbc)
337  call get_bc_comm_type(type_recv_p(inc^d),ixr_p_^l(inc^d),ixcog^l,nwstart,nwbc)
338  {end do\}
339  {end do\}
340 
341  end subroutine create_bc_mpi_datatype
342 
343  subroutine get_bc_comm_type(comm_type,ix^L,ixG^L,nwstart,nwbc)
345 
346  integer, intent(inout) :: comm_type
347  integer, intent(in) :: ix^L, ixG^L, nwstart, nwbc
348 
349  integer, dimension(ndim+1) :: fullsize, subsize, start
350 
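 ! describe the ghost-cell patch as an MPI subarray of the full (grid,variable) array:
 ! fullsize is the block extent plus nw, subsize the patch extent plus nwbc, and start
 ! the zero-based offsets of the patch and of the first variable to fill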
351  ^d&fullsize(^d)=ixgmax^d;
352  fullsize(ndim+1)=nw
353  ^d&subsize(^d)=ixmax^d-ixmin^d+1;
354  subsize(ndim+1)=nwbc
355  ^d&start(^d)=ixmin^d-1;
356  start(ndim+1)=nwstart-1
357 
358  call mpi_type_create_subarray(ndim+1,fullsize,subsize,start,mpi_order_fortran, &
359  mpi_double_precision,comm_type,ierrmpi)
360  call mpi_type_commit(comm_type,ierrmpi)
361 
362  end subroutine get_bc_comm_type
363 
364  !> update ghost cells of all blocks, including physical boundaries
365  subroutine getbc(time,qdt,psb,nwstart,nwbc,req_diag)
367  use mod_physics
368  use mod_coarsen, only: coarsen_grid
370  use mod_comm_lib, only: mpistop
371 
372  double precision, intent(in) :: time, qdt
373  type(state), target :: psb(max_blocks)
374  integer, intent(in) :: nwstart ! Fill from nwstart
375  integer, intent(in) :: nwbc ! Number of variables to fill
376  logical, intent(in), optional :: req_diag ! If false, skip diagonal ghost cells
377 
378  double precision :: time_bcin
379  integer :: ipole, nwhead, nwtail
380  integer :: iigrid, igrid, ineighbor, ipe_neighbor, isizes
381  integer :: ixR^L, ixS^L
382  integer :: i^D, n_i^D, ic^D, inc^D, n_inc^D, idir
383  integer :: isend_buf(npwbuf), ipwbuf, nghostcellsco
384  ! index pointers marking the start of a segment in the buffer arrays
385  integer :: ibuf_start, ibuf_next
386  ! shapes of reshape
387  integer, dimension(1) :: shapes
388  logical :: req_diagonal
389  type(wbuffer) :: pwbuf(npwbuf)
390 
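 ! overall flow: fill internal boundaries if requested; coarsen blocks that have
 ! coarser neighbors; post non-blocking receives/sends for same-level and fine-to-coarse
 ! traffic and copy directly between blocks on the same processor; wait and unpack the
 ! staggered buffers; repeat for coarse-to-fine traffic; prolong ghost cells from the
 ! coarse representatives; finally fill physical boundaries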
391  time_bcin=mpi_wtime()
392 
393  nwhead=nwstart
394  nwtail=nwstart+nwbc-1
395 
396  req_diagonal = .true.
397  if (present(req_diag)) req_diagonal = req_diag
398 
399  ! fill internal physical boundary
400  if (internalboundary) then
401  call getintbc(time,ixg^ll)
402  end if
403 
404  ! prepare coarse values to send to coarser neighbors
405  !$OMP PARALLEL DO SCHEDULE(dynamic) PRIVATE(igrid)
406  do iigrid=1,igridstail; igrid=igrids(iigrid);
407  if(any(neighbor_type(:^d&,igrid)==neighbor_coarse)) then
408  call coarsen_grid(psb(igrid),ixg^ll,ixm^l,psc(igrid),ixcog^l,ixcom^l)
409  end if
410  end do
411  !$OMP END PARALLEL DO
412 
413  ! default : no singular axis
414  ipole=0
415  irecv_c=0
416  isend_c=0
417  isend_buf=0
418  ipwbuf=1
419 
420  if(stagger_grid) then
421  ibuf_recv_srl=1
422  ibuf_recv_r=1
423  ibuf_recv_p=1
424  ibuf_send_srl=1
425  ibuf_send_r=1
426  ibuf_send_p=1
427  irecv_srl=0
428  irecv_r=0
429  irecv_p=0
430  isend_srl=0
431  isend_r=0
432  isend_p=0
433  end if
434 
435  ! MPI receive ghost-cell values from sibling blocks and finer neighbors in different processors
436  do iigrid=1,igridstail; igrid=igrids(iigrid);
437  {do i^db=-1,1\}
438  if (skip_direction([ i^d ])) cycle
439  select case (neighbor_type(i^d,igrid))
440  case (neighbor_sibling)
441  call bc_recv_srl
442  case (neighbor_fine)
443  call bc_recv_restrict
444  end select
445  {end do\}
446  end do
447 
448  ! MPI send ghost-cell values to sibling blocks and coarser neighbors in different processors
449  do iigrid=1,igridstail; igrid=igrids(iigrid);
450  {do i^db=-1,1\}
451  if(skip_direction([ i^d ])) cycle
452  select case (neighbor_type(i^d,igrid))
453  case (neighbor_sibling)
454  call bc_send_srl
455  case (neighbor_coarse)
456  call bc_send_restrict
457  end select
458  {end do\}
459  end do
460 
461  ! fill ghost-cell values of sibling blocks and coarser neighbors in the same processor
462  !$OMP PARALLEL DO SCHEDULE(dynamic) PRIVATE(igrid)
463  do iigrid=1,igridstail; igrid=igrids(iigrid);
464  {do i^db=-1,1\}
465  if(skip_direction([ i^d ])) cycle
466  select case (neighbor_type(i^d,igrid))
467  case(neighbor_sibling)
468  call bc_fill_srl(igrid,i^d)
469  case(neighbor_coarse)
470  call bc_fill_restrict(igrid,i^d)
471  end select
472  {end do\}
473  end do
474  !$OMP END PARALLEL DO
475 
476  call mpi_waitall(irecv_c,recvrequest_c_sr,recvstatus_c_sr,ierrmpi)
477  call mpi_waitall(isend_c,sendrequest_c_sr,sendstatus_c_sr,ierrmpi)
478 
479  if(stagger_grid) then
480  call mpi_waitall(nrecv_bc_srl,recvrequest_srl,recvstatus_srl,ierrmpi)
481  call mpi_waitall(nsend_bc_srl,sendrequest_srl,sendstatus_srl,ierrmpi)
482  call mpi_waitall(nrecv_bc_r,recvrequest_r,recvstatus_r,ierrmpi)
483  call mpi_waitall(nsend_bc_r,sendrequest_r,sendstatus_r,ierrmpi)
484  ! unpack the received data from sibling blocks and finer neighbors to fill ghost-cell staggered values
485  ibuf_recv_srl=1
486  ibuf_recv_r=1
487  do iigrid=1,igridstail; igrid=igrids(iigrid);
488  {do i^db=-1,1\}
489  if (skip_direction([ i^d ])) cycle
490  select case (neighbor_type(i^d,igrid))
491  case (neighbor_sibling)
492  call bc_fill_srl_stg
493  case (neighbor_fine)
494  call bc_fill_restrict_stg
495  end select
496  {end do\}
497  end do
498  end if
499 
500  do ipwbuf=1,npwbuf
501  if (isend_buf(ipwbuf)/=0) deallocate(pwbuf(ipwbuf)%w)
502  end do
503 
504  irecv_c=0
505  isend_c=0
506  isend_buf=0
507  ipwbuf=1
508 
509  ! MPI receive ghost-cell values from coarser neighbors in different processors
510  do iigrid=1,igridstail; igrid=igrids(iigrid);
511  {do i^db=-1,1\}
512  if (skip_direction([ i^d ])) cycle
513  if (neighbor_type(i^d,igrid)==neighbor_coarse) call bc_recv_prolong
514  {end do\}
515  end do
516  ! MPI send ghost-cell values to finer neighbors in different processors
517  do iigrid=1,igridstail; igrid=igrids(iigrid);
518  {do i^db=-1,1\}
519  if (skip_direction([ i^d ])) cycle
520  if (neighbor_type(i^d,igrid)==neighbor_fine) call bc_send_prolong
521  {end do\}
522  end do
523 
524  ! fill coarse ghost-cell values of finer neighbors in the same processor
525  !$OMP PARALLEL DO SCHEDULE(dynamic) PRIVATE(igrid)
526  do iigrid=1,igridstail; igrid=igrids(iigrid);
527  {do i^db=-1,1\}
528  if (skip_direction([ i^d ])) cycle
529  if (neighbor_type(i^d,igrid)==neighbor_fine) call bc_fill_prolong(igrid,i^d)
530  {end do\}
531  end do
532  !$OMP END PARALLEL DO
533 
534  call mpi_waitall(irecv_c,recvrequest_c_p,recvstatus_c_p,ierrmpi)
535  call mpi_waitall(isend_c,sendrequest_c_p,sendstatus_c_p,ierrmpi)
536 
537  if(stagger_grid) then
538  call mpi_waitall(nrecv_bc_p,recvrequest_p,recvstatus_p,ierrmpi)
539  call mpi_waitall(nsend_bc_p,sendrequest_p,sendstatus_p,ierrmpi)
540 
541  ! fill coarser representative ghost cells after receipt
542  ibuf_recv_p=1
543  do iigrid=1,igridstail; igrid=igrids(iigrid);
544  {do i^db=-1,1\}
545  if (skip_direction([ i^d ])) cycle
546  if(neighbor_type(i^d,igrid)==neighbor_coarse) call bc_fill_prolong_stg
547  {end do\}
548  end do
549  end if
550  ! do prolongation on the ghost-cell values based on the received coarse values from coarser neighbors
551  !$OMP PARALLEL DO SCHEDULE(dynamic) PRIVATE(igrid)
552  do iigrid=1,igridstail; igrid=igrids(iigrid);
553  call gc_prolong(igrid)
554  end do
555  !$OMP END PARALLEL DO
556 
557  do ipwbuf=1,npwbuf
558  if (isend_buf(ipwbuf)/=0) deallocate(pwbuf(ipwbuf)%w)
559  end do
560 
561  ! fill physical boundary ghost cells after internal ghost-cell values exchange
562  if(bcphys) then
563  !$OMP PARALLEL DO SCHEDULE(dynamic) PRIVATE(igrid)
564  do iigrid=1,igridstail; igrid=igrids(iigrid);
565  if(.not.phyboundblock(igrid)) cycle
566  call fill_boundary_after_gc(igrid)
567  end do
568  !$OMP END PARALLEL DO
569  end if
570 
571  ! modify normal component of magnetic field to fix divB=0
572  if(bcphys.and.associated(phys_boundary_adjust)) then
573  !$OMP PARALLEL DO SCHEDULE(dynamic) PRIVATE(igrid)
574  do iigrid=1,igridstail; igrid=igrids(iigrid);
575  if(.not.phyboundblock(igrid)) cycle
576  call phys_boundary_adjust(igrid,psb)
577  end do
578  !$OMP END PARALLEL DO
579  end if
580 
581  time_bc=time_bc+(mpi_wtime()-time_bcin)
582 
583  contains
584 
585  logical function skip_direction(dir)
586  integer, intent(in) :: dir(^nd)
587 
588  if (all(dir == 0)) then
589  skip_direction = .true.
590  else if (.not. req_diagonal .and. count(dir /= 0) > 1) then
591  skip_direction = .true.
592  else
593  skip_direction = .false.
594  end if
595  end function skip_direction
596 
597  !> Physical boundary conditions
598  subroutine fill_boundary_after_gc(igrid)
599 
600  integer, intent(in) :: igrid
601 
602  integer :: idims,iside,i^D,k^L,ixB^L
603 
604  block=>psb(igrid)
605  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
606  do idims=1,ndim
607  ! to avoid using as yet unknown corner info in more than 1D, we
608  ! fill only interior mesh ranges of the ghost cell ranges at first,
609  ! and progressively enlarge the ranges to include corners later
610  kmin1=0; kmax1=0;
611  {^iftwod
612  kmin2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0,-1,igrid)==1)
613  kmax2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0, 1,igrid)==1)}
614  {^ifthreed
615  kmin2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0,-1,0,igrid)==1)
616  kmax2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0, 1,0,igrid)==1)
617  kmin3=merge(1, 0, idims .lt. 3 .and. neighbor_type(0,0,-1,igrid)==1)
618  kmax3=merge(1, 0, idims .lt. 3 .and. neighbor_type(0,0, 1,igrid)==1)}
619  ixbmin^d=ixglo^d+kmin^d*nghostcells;
620  ixbmax^d=ixghi^d-kmax^d*nghostcells;
621  do iside=1,2
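 ! i^D is the unit vector pointing out of the block through side iside of dimension idims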
622  i^d=kr(^d,idims)*(2*iside-3);
623  if (aperiodb(idims)) then
624  if (neighbor_type(i^d,igrid) /= neighbor_boundary .and. &
625  .not. psb(igrid)%is_physical_boundary(2*idims-2+iside)) cycle
626  else
627  if (neighbor_type(i^d,igrid) /= neighbor_boundary) cycle
628  end if
629  call bc_phys(iside,idims,time,qdt,psb(igrid),ixg^ll,ixb^l)
630  end do
631  end do
632 
633  end subroutine fill_boundary_after_gc
634 
635  !> Receive from sibling at same refinement level
636  subroutine bc_recv_srl
637 
638  ipe_neighbor=neighbor(2,i^d,igrid)
639  if (ipe_neighbor/=mype) then
640  irecv_c=irecv_c+1
641  itag=(3**^nd+4**^nd)*(igrid-1)+{(i^d+1)*3**(^d-1)+}
642  call mpi_irecv(psb(igrid)%w,1,type_recv_srl(i^d), &
643  ipe_neighbor,itag,icomm,recvrequest_c_sr(irecv_c),ierrmpi)
644  if(stagger_grid) then
645  irecv_srl=irecv_srl+1
646  call mpi_irecv(recvbuffer_srl(ibuf_recv_srl),sizes_srl_recv_total(i^d),mpi_double_precision, &
647  ipe_neighbor,itag,icomm,recvrequest_srl(irecv_srl),ierrmpi)
648  ibuf_recv_srl=ibuf_recv_srl+sizes_srl_recv_total(i^d)
649  end if
650  end if
651 
652  end subroutine bc_recv_srl
653 
654  !> Receive from fine neighbor
655  subroutine bc_recv_restrict
656 
657  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
658  inc^db=2*i^db+ic^db\}
659  ipe_neighbor=neighbor_child(2,inc^d,igrid)
660  if (ipe_neighbor/=mype) then
661  irecv_c=irecv_c+1
662  itag=(3**^nd+4**^nd)*(igrid-1)+3**^nd+{inc^d*4**(^d-1)+}
663  call mpi_irecv(psb(igrid)%w,1,type_recv_r(inc^d), &
664  ipe_neighbor,itag,icomm,recvrequest_c_sr(irecv_c),ierrmpi)
665  if(stagger_grid) then
666  irecv_r=irecv_r+1
667  call mpi_irecv(recvbuffer_r(ibuf_recv_r),sizes_r_recv_total(inc^d), &
668  mpi_double_precision,ipe_neighbor,itag, &
669  icomm,recvrequest_r(irecv_r),ierrmpi)
670  ibuf_recv_r=ibuf_recv_r+sizes_r_recv_total(inc^d)
671  end if
672  end if
673  {end do\}
674 
675  end subroutine bc_recv_restrict
676 
677  !> Send to sibling at same refinement level
678  subroutine bc_send_srl
679 
680  ipe_neighbor=neighbor(2,i^d,igrid)
681 
682  if(ipe_neighbor/=mype) then
683  ineighbor=neighbor(1,i^d,igrid)
684  ipole=neighbor_pole(i^d,igrid)
685  if(ipole==0) then
686  n_i^d=-i^d;
687  isend_c=isend_c+1
688  itag=(3**^nd+4**^nd)*(ineighbor-1)+{(n_i^d+1)*3**(^d-1)+}
689  call mpi_isend(psb(igrid)%w,1,type_send_srl(i^d), &
690  ipe_neighbor,itag,icomm,sendrequest_c_sr(isend_c),ierrmpi)
691  if(stagger_grid) then
692  ibuf_start=ibuf_send_srl
693  do idir=1,ndim
694  ixs^l=ixs_srl_stg_^l(idir,i^d);
695  ibuf_next=ibuf_start+sizes_srl_send_stg(idir,i^d)
696  shapes=(/sizes_srl_send_stg(idir,i^d)/)
697  sendbuffer_srl(ibuf_start:ibuf_next-1)=&
698  reshape(psb(igrid)%ws(ixs^s,idir),shapes)
699  ibuf_start=ibuf_next
700  end do
701  isend_srl=isend_srl+1
702  call mpi_isend(sendbuffer_srl(ibuf_send_srl),sizes_srl_send_total(i^d),&
703  mpi_double_precision, ipe_neighbor,itag,icomm,sendrequest_srl(isend_srl),ierrmpi)
704  ibuf_send_srl=ibuf_next
705  end if
706  else
707  ixs^l=ixs_srl_^l(i^d);
708  select case (ipole)
709  {case (^d)
710  n_i^d=i^d^d%n_i^dd=-i^dd;\}
711  end select
712  if (isend_buf(ipwbuf)/=0) then
713  call mpi_wait(sendrequest_c_sr(isend_buf(ipwbuf)), &
714  sendstatus_c_sr(:,isend_buf(ipwbuf)),ierrmpi)
715  deallocate(pwbuf(ipwbuf)%w)
716  end if
717  allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
718  call pole_buffer(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psb(igrid)%w,ixg^ll,ixs^l)
719  isend_c=isend_c+1
720  isend_buf(ipwbuf)=isend_c
721  itag=(3**^nd+4**^nd)*(ineighbor-1)+{(n_i^d+1)*3**(^d-1)+}
722  isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
723  call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
724  ipe_neighbor,itag,icomm,sendrequest_c_sr(isend_c),ierrmpi)
725  ipwbuf=1+modulo(ipwbuf,npwbuf)
726  if(stagger_grid) then
727  ibuf_start=ibuf_send_srl
728  do idir=1,ndim
729  ixs^l=ixs_srl_stg_^l(idir,i^d);
730  ibuf_next=ibuf_start+sizes_srl_send_stg(idir,i^d)
731  shapes=(/sizes_srl_send_stg(idir,i^d)/)
732  sendbuffer_srl(ibuf_start:ibuf_next-1)=&
733  reshape(psb(igrid)%ws(ixs^s,idir),shapes)
734  ibuf_start=ibuf_next
735  end do
736  isend_srl=isend_srl+1
737  call mpi_isend(sendbuffer_srl(ibuf_send_srl),sizes_srl_send_total(i^d),&
738  mpi_double_precision, ipe_neighbor,itag,icomm,sendrequest_srl(isend_srl),ierrmpi)
739  ibuf_send_srl=ibuf_next
740  end if
741  end if
742  end if
743 
744  end subroutine bc_send_srl
745 
746  subroutine bc_fill_srl(igrid,i^D)
747  integer, intent(in) :: igrid,i^D
748  integer :: ineighbor,ipe_neighbor,ipole,ixS^L,ixR^L,n_i^D,idir
749 
750  ipe_neighbor=neighbor(2,i^d,igrid)
751  if(ipe_neighbor==mype) then
752  ineighbor=neighbor(1,i^d,igrid)
753  ipole=neighbor_pole(i^d,igrid)
754  if(ipole==0) then
755  n_i^d=-i^d;
756  ixs^l=ixs_srl_^l(i^d);
757  ixr^l=ixr_srl_^l(n_i^d);
758  psb(ineighbor)%w(ixr^s,nwhead:nwtail)=&
759  psb(igrid)%w(ixs^s,nwhead:nwtail)
760  if(stagger_grid) then
761  do idir=1,ndim
762  ixs^l=ixs_srl_stg_^l(idir,i^d);
763  ixr^l=ixr_srl_stg_^l(idir,n_i^d);
764  psb(ineighbor)%ws(ixr^s,idir)=psb(igrid)%ws(ixs^s,idir)
765  end do
766  end if
767  else
768  ixs^l=ixs_srl_^l(i^d);
769  select case (ipole)
770  {case (^d)
771  n_i^d=i^d^d%n_i^dd=-i^dd;\}
772  end select
773  ixr^l=ixr_srl_^l(n_i^d);
774  call pole_copy(psb(ineighbor)%w,ixg^ll,ixr^l,psb(igrid)%w,ixg^ll,ixs^l,ipole)
775  if(stagger_grid) then
776  do idir=1,ndim
777  ixs^l=ixs_srl_stg_^l(idir,i^d);
778  ixr^l=ixr_srl_stg_^l(idir,n_i^d);
779  call pole_copy_stg(psb(ineighbor)%ws,ixgs^ll,ixr^l,psb(igrid)%ws,ixgs^ll,ixs^l,idir,ipole)
780  end do
781  end if
782  end if
783  end if
784 
785  end subroutine bc_fill_srl
786 
787  !> Send to coarser neighbor
788  subroutine bc_send_restrict
789 
790  ipe_neighbor=neighbor(2,i^d,igrid)
791  if(ipe_neighbor/=mype) then
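 ! ic^D is this block's child position (1 or 2 per dimension) within its parent;
 ! only children that actually touch the coarse neighbor in direction i^D send data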
792  ic^d=1+modulo(node(pig^d_,igrid)-1,2);
793  if({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
794  ineighbor=neighbor(1,i^d,igrid)
795  ipole=neighbor_pole(i^d,igrid)
796  if(ipole==0) then
797  n_inc^d=-2*i^d+ic^d;
798  isend_c=isend_c+1
799  itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
800  call mpi_isend(psc(igrid)%w,1,type_send_r(i^d), &
801  ipe_neighbor,itag,icomm,sendrequest_c_sr(isend_c),ierrmpi)
802  if(stagger_grid) then
803  ibuf_start=ibuf_send_r
804  do idir=1,ndim
805  ixs^l=ixs_r_stg_^l(idir,i^d);
806  ibuf_next=ibuf_start+sizes_r_send_stg(idir,i^d)
807  shapes=(/sizes_r_send_stg(idir,i^d)/)
808  sendbuffer_r(ibuf_start:ibuf_next-1)=&
809  reshape(psc(igrid)%ws(ixs^s,idir),shapes)
810  ibuf_start=ibuf_next
811  end do
812  isend_r=isend_r+1
813  call mpi_isend(sendbuffer_r(ibuf_send_r),sizes_r_send_total(i^d),&
814  mpi_double_precision,ipe_neighbor,itag, &
815  icomm,sendrequest_r(isend_r),ierrmpi)
816  ibuf_send_r=ibuf_next
817  end if
818  else
819  ixs^l=ixs_r_^l(i^d);
820  select case (ipole)
821  {case (^d)
822  n_inc^d=2*i^d+(3-ic^d)^d%n_inc^dd=-2*i^dd+ic^dd;\}
823  end select
824  if(isend_buf(ipwbuf)/=0) then
825  call mpi_wait(sendrequest_c_sr(isend_buf(ipwbuf)), &
826  sendstatus_c_sr(:,isend_buf(ipwbuf)),ierrmpi)
827  deallocate(pwbuf(ipwbuf)%w)
828  end if
829  allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
830  call pole_buffer(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psc(igrid)%w,ixcog^l,ixs^l)
831  isend_c=isend_c+1
832  isend_buf(ipwbuf)=isend_c
833  itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
834  isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
835  call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
836  ipe_neighbor,itag,icomm,sendrequest_c_sr(isend_c),ierrmpi)
837  ipwbuf=1+modulo(ipwbuf,npwbuf)
838  if(stagger_grid) then
839  ibuf_start=ibuf_send_r
840  do idir=1,ndim
841  ixs^l=ixs_r_stg_^l(idir,i^d);
842  ibuf_next=ibuf_start+sizes_r_send_stg(idir,i^d)
843  shapes=(/sizes_r_send_stg(idir,i^d)/)
844  sendbuffer_r(ibuf_start:ibuf_next-1)=&
845  reshape(psc(igrid)%ws(ixs^s,idir),shapes)
846  ibuf_start=ibuf_next
847  end do
848  isend_r=isend_r+1
849  call mpi_isend(sendbuffer_r(ibuf_send_r),sizes_r_send_total(i^d),&
850  mpi_double_precision,ipe_neighbor,itag, &
851  icomm,sendrequest_r(isend_r),ierrmpi)
852  ibuf_send_r=ibuf_next
853  end if
854  end if
855  end if
856 
857  end subroutine bc_send_restrict
858 
859  !> fill coarser neighbor's ghost cells
860  subroutine bc_fill_restrict(igrid,i^D)
861  integer, intent(in) :: igrid,i^D
862 
863  integer :: ic^D,n_inc^D,ixS^L,ixR^L,ipe_neighbor,ineighbor,ipole,idir
864 
865  ipe_neighbor=neighbor(2,i^d,igrid)
866  if(ipe_neighbor==mype) then
867  ic^d=1+modulo(node(pig^d_,igrid)-1,2);
868  if({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
869  ineighbor=neighbor(1,i^d,igrid)
870  ipole=neighbor_pole(i^d,igrid)
871  if(ipole==0) then
872  n_inc^d=-2*i^d+ic^d;
873  ixs^l=ixs_r_^l(i^d);
874  ixr^l=ixr_r_^l(n_inc^d);
875  psb(ineighbor)%w(ixr^s,nwhead:nwtail)=&
876  psc(igrid)%w(ixs^s,nwhead:nwtail)
877  if(stagger_grid) then
878  do idir=1,ndim
879  ixs^l=ixs_r_stg_^l(idir,i^d);
880  ixr^l=ixr_r_stg_^l(idir,n_inc^d);
881  psb(ineighbor)%ws(ixr^s,idir)=psc(igrid)%ws(ixs^s,idir)
882  end do
883  end if
884  else
885  ixs^l=ixs_r_^l(i^d);
886  select case (ipole)
887  {case (^d)
888  n_inc^d=2*i^d+(3-ic^d)^d%n_inc^dd=-2*i^dd+ic^dd;\}
889  end select
890  ixr^l=ixr_r_^l(n_inc^d);
891  call pole_copy(psb(ineighbor)%w,ixg^ll,ixr^l,psc(igrid)%w,ixcog^l,ixs^l,ipole)
892  if(stagger_grid) then
893  do idir=1,ndim
894  ixs^l=ixs_r_stg_^l(idir,i^d);
895  ixr^l=ixr_r_stg_^l(idir,n_inc^d);
896  !! Fill ghost cells
897  call pole_copy_stg(psb(ineighbor)%ws,ixgs^ll,ixr^l,psc(igrid)%ws,ixcogs^l,ixs^l,idir,ipole)
898  end do
899  end if
900  end if
901  end if
902 
903  end subroutine bc_fill_restrict
904 
905  !> fill siblings' ghost cells with received data
906  subroutine bc_fill_srl_stg
907  integer :: ixS^L,ixR^L,n_i^D,ixSsync^L,ixRsync^L,idir
908 
909  ipe_neighbor=neighbor(2,i^d,igrid)
910  if(ipe_neighbor/=mype) then
911  ineighbor=neighbor(1,i^d,igrid)
912  ipole=neighbor_pole(i^d,igrid)
913 
914  !! Now the special treatment of the pole is done here, at the receive step
915  if (ipole==0) then
916  ixr^l=ixr_srl_^l(i^d);
917  !! Unpack the buffer and fill the ghost cells
918  n_i^d=-i^d;
919  do idir=1,ndim
920  ixs^l=ixs_srl_stg_^l(idir,n_i^d);
921  ixr^l=ixr_srl_stg_^l(idir,i^d);
922  ibuf_next=ibuf_recv_srl+sizes_srl_recv_stg(idir,i^d)
923  psb(igrid)%ws(ixr^s,idir)=reshape(source=recvbuffer_srl(ibuf_recv_srl:ibuf_next-1),&
924  shape=shape(psb(igrid)%ws(ixs^s,idir)))
925  ibuf_recv_srl=ibuf_next
926  end do
927  else ! There is a pole
928  select case (ipole)
929  {case (^d)
930  n_i^d=i^d^d%n_i^dd=-i^dd;\}
931  end select
932  pole_buf%ws=zero
933  do idir=1,ndim
934  ixr^l=ixr_srl_stg_^l(idir,i^d);
935  ixs^l=ixs_srl_stg_^l(idir,n_i^d);
936  ibuf_next=ibuf_recv_srl+sizes_srl_recv_stg(idir,i^d)
937  pole_buf%ws(ixs^s,idir)=reshape(source=recvbuffer_srl(ibuf_recv_srl:ibuf_next-1),&
938  shape=shape(psb(igrid)%ws(ixs^s,idir)))
939  ibuf_recv_srl=ibuf_next
940  call pole_copy_stg(psb(igrid)%ws,ixgs^ll,ixr^l,pole_buf%ws,ixgs^ll,ixs^l,idir,ipole)
941  end do
942  end if
943  end if
944 
945  end subroutine bc_fill_srl_stg
946 
947  !> fill restricted ghost cells after receipt
948  subroutine bc_fill_restrict_stg
949 
950  ipole=neighbor_pole(i^d,igrid)
951  if (ipole==0) then
952  ! Loop over the children ic^D and their neighbor indices inc^D
953  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
954  inc^db=2*i^db+ic^db\}
955  ipe_neighbor=neighbor_child(2,inc^d,igrid)
956  if(ipe_neighbor/=mype) then
957  ineighbor=neighbor_child(1,inc^d,igrid)
958  n_i^d=-i^d;
959  !! Unpack the buffer and fill the ghost cells
960  do idir=1,ndim
961  ixr^l=ixr_r_stg_^l(idir,inc^d);
962  ibuf_next=ibuf_recv_r+sizes_r_recv_stg(idir,inc^d)
963  psb(igrid)%ws(ixr^s,idir)=reshape(source=recvbuffer_r(ibuf_recv_r:ibuf_next-1),&
964  shape=shape(psb(igrid)%ws(ixr^s,idir)))
965  ibuf_recv_r=ibuf_next
966  end do
967  end if
968  {end do\}
969  else !! There is a pole
970  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
971  inc^db=2*i^db+ic^db\}
972  ipe_neighbor=neighbor_child(2,inc^d,igrid)
973  if(ipe_neighbor/=mype) then
974  ineighbor=neighbor_child(1,inc^d,igrid)
975  select case(ipole)
976  {case (^d)
977  n_i^d=i^d^d%n_i^dd=-i^dd;\}
978  end select
979  ixr^l=ixr_r_^l(inc^d);
980  !! Unpack the buffer and fill an auxiliary array
981  pole_buf%ws=zero
982  do idir=1,ndim
983  ixs^l=ixs_r_stg_^l(idir,n_i^d);
984  ixr^l=ixr_r_stg_^l(idir,inc^d);
985  ibuf_next=ibuf_recv_r+sizes_r_recv_stg(idir,inc^d)
986  pole_buf%ws(ixr^s,idir)=reshape(source=recvbuffer_r(ibuf_recv_r:ibuf_next-1),&
987  shape=shape(psb(igrid)%ws(ixr^s,idir)))
988  call pole_copy_stg(psb(igrid)%ws,ixgs^ll,ixr^l,pole_buf%ws,ixgs^ll,ixr^l,idir,ipole)
989  ibuf_recv_r=ibuf_next
990  end do
991  end if
992  {end do\}
993  end if
994 
995  end subroutine bc_fill_restrict_stg
996 
997  !> Receive from coarse neighbor
998  subroutine bc_recv_prolong
999 
1000  ic^d=1+modulo(node(pig^d_,igrid)-1,2);
1001  if ({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
1002 
1003  ipe_neighbor=neighbor(2,i^d,igrid)
1004  if (ipe_neighbor/=mype) then
1005  irecv_c=irecv_c+1
1006  inc^d=ic^d+i^d;
1007  itag=(3**^nd+4**^nd)*(igrid-1)+3**^nd+{inc^d*4**(^d-1)+}
1008  call mpi_irecv(psc(igrid)%w,1,type_recv_p(inc^d), &
1009  ipe_neighbor,itag,icomm,recvrequest_c_p(irecv_c),ierrmpi)
1010  if(stagger_grid) then
1011  irecv_p=irecv_p+1
1012  call mpi_irecv(recvbuffer_p(ibuf_recv_p),sizes_p_recv_total(inc^d),&
1013  mpi_double_precision,ipe_neighbor,itag,&
1014  icomm,recvrequest_p(irecv_p),ierrmpi)
1015  ibuf_recv_p=ibuf_recv_p+sizes_p_recv_total(inc^d)
1016  end if
1017  end if
1018 
1019  end subroutine bc_recv_prolong
1020 
1021  !> Send to finer neighbor
1022  subroutine bc_send_prolong
1023  integer :: ii^D
1024 
1025  ipole=neighbor_pole(i^d,igrid)
1026 
1027  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1028  inc^db=2*i^db+ic^db\}
1029  ipe_neighbor=neighbor_child(2,inc^d,igrid)
1030  if(ipe_neighbor/=mype) then
1031  ineighbor=neighbor_child(1,inc^d,igrid)
1032  if(ipole==0) then
1033  n_i^d=-i^d;
1034  n_inc^d=ic^d+n_i^d;
1035  isend_c=isend_c+1
1036  itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
1037  call mpi_isend(psb(igrid)%w,1,type_send_p(inc^d), &
1038  ipe_neighbor,itag,icomm,sendrequest_c_p(isend_c),ierrmpi)
1039  if(stagger_grid) then
1040  ibuf_start=ibuf_send_p
1041  do idir=1,ndim
1042  ixs^l=ixs_p_stg_^l(idir,inc^d);
1043  ibuf_next=ibuf_start+sizes_p_send_stg(idir,inc^d)
1044  shapes=(/sizes_p_send_stg(idir,inc^d)/)
1045  sendbuffer_p(ibuf_start:ibuf_next-1)=&
1046  reshape(psb(igrid)%ws(ixs^s,idir),shapes)
1047  ibuf_start=ibuf_next
1048  end do
1049  isend_p=isend_p+1
1050  call mpi_isend(sendbuffer_p(ibuf_send_p),sizes_p_send_total(inc^d),&
1051  mpi_double_precision,ipe_neighbor,itag, &
1052  icomm,sendrequest_p(isend_p),ierrmpi)
1053  ibuf_send_p=ibuf_next
1054  end if
1055  else
1056  ixs^l=ixs_p_^l(inc^d);
1057  select case (ipole)
1058  {case (^d)
1059  n_inc^d=inc^d^d%n_inc^dd=ic^dd-i^dd;\}
1060  end select
1061  if(isend_buf(ipwbuf)/=0) then
1062  call mpi_wait(sendrequest_c_p(isend_buf(ipwbuf)), &
1063  sendstatus_c_p(:,isend_buf(ipwbuf)),ierrmpi)
1064  deallocate(pwbuf(ipwbuf)%w)
1065  end if
1066  allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
1067  call pole_buffer(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psb(igrid)%w,ixg^ll,ixs^l)
1068  isend_c=isend_c+1
1069  isend_buf(ipwbuf)=isend_c
1070  itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
1071  isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
1072  call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
1073  ipe_neighbor,itag,icomm,sendrequest_c_p(isend_c),ierrmpi)
1074  ipwbuf=1+modulo(ipwbuf,npwbuf)
1075  if(stagger_grid) then
1076  ibuf_start=ibuf_send_p
1077  do idir=1,ndim
1078  ixs^l=ixs_p_stg_^l(idir,inc^d);
1079  ibuf_next=ibuf_start+sizes_p_send_stg(idir,inc^d)
1080  shapes=(/sizes_p_send_stg(idir,inc^d)/)
1081  sendbuffer_p(ibuf_start:ibuf_next-1)=&
1082  reshape(psb(igrid)%ws(ixs^s,idir),shapes)
1083  ibuf_start=ibuf_next
1084  end do
1085  isend_p=isend_p+1
1086  call mpi_isend(sendbuffer_p(ibuf_send_p),sizes_p_send_total(inc^d),&
1087  mpi_double_precision,ipe_neighbor,itag, &
1088  icomm,sendrequest_p(isend_p),ierrmpi)
1089  ibuf_send_p=ibuf_next
1090  end if
1091  end if
1092  end if
1093  {end do\}
1094 
1095  end subroutine bc_send_prolong
1096 
1097  !> fill coarse representative of finer neighbors in the same processor
1098  subroutine bc_fill_prolong(igrid,i^D)
1099  integer, intent(in) :: igrid,i^D
1100 
1101  integer :: ipe_neighbor,ineighbor,ixS^L,ixR^L,ic^D,inc^D,ipole,idir
1102 
1103  ipole=neighbor_pole(i^d,igrid)
1104 
1105  if(ipole==0) then
1106  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1107  inc^db=2*i^db+ic^db\}
1108  ipe_neighbor=neighbor_child(2,inc^d,igrid)
1109  if(ipe_neighbor==mype) then
1110  ixs^l=ixs_p_^l(inc^d);
1111  ineighbor=neighbor_child(1,inc^d,igrid)
1112  n_i^d=-i^d;
1113  n_inc^d=ic^d+n_i^d;
1114  ixr^l=ixr_p_^l(n_inc^d);
1115  psc(ineighbor)%w(ixr^s,nwhead:nwtail) &
1116  =psb(igrid)%w(ixs^s,nwhead:nwtail)
1117  if(stagger_grid) then
1118  do idir=1,ndim
1119  ixs^l=ixs_p_stg_^l(idir,inc^d);
1120  ixr^l=ixr_p_stg_^l(idir,n_inc^d);
1121  psc(ineighbor)%ws(ixr^s,idir)=psb(igrid)%ws(ixs^s,idir)
1122  end do
1123  end if
1124  end if
1125  {end do\}
1126  else
1127  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1128  inc^db=2*i^db+ic^db\}
1129  ipe_neighbor=neighbor_child(2,inc^d,igrid)
1130  if(ipe_neighbor==mype) then
1131  ixs^l=ixs_p_^l(inc^d);
1132  ineighbor=neighbor_child(1,inc^d,igrid)
1133  select case (ipole)
1134  {case (^d)
1135  n_inc^d=inc^d^d%n_inc^dd=ic^dd-i^dd;\}
1136  end select
1137  ixr^l=ixr_p_^l(n_inc^d);
1138  call pole_copy(psc(ineighbor)%w,ixcog^l,ixr^l,psb(igrid)%w,ixg^ll,ixs^l,ipole)
1139  if(stagger_grid) then
1140  do idir=1,ndim
1141  ixs^l=ixs_p_stg_^l(idir,inc^d);
1142  ixr^l=ixr_p_stg_^l(idir,n_inc^d);
1143  call pole_copy_stg(psc(ineighbor)%ws,ixcogs^l,ixr^l,psb(igrid)%ws,ixgs^ll,ixs^l,idir,ipole)
1144  end do
1145  end if
1146  end if
1147  {end do\}
1148  end if
1149  end subroutine bc_fill_prolong
1150 
1151  subroutine gc_prolong(igrid)
1152  integer, intent(in) :: igrid
1153 
1154  integer :: i^D,idims,iside
1155  logical,dimension(-1:1^D&) :: NeedProlong
1156 
1157  needprolong=.false.
1158  {do i^db=-1,1\}
1159  if (skip_direction([ i^d ])) cycle
1160  if (neighbor_type(i^d,igrid)==neighbor_coarse) then
1161  call bc_prolong(igrid,i^d)
1162  needprolong(i^d)=.true.
1163  end if
1164  {end do\}
1165  if(stagger_grid) then
1166  ! Ghost cell prolongation for staggered variables
1167  ! must be done in a specific order.
1168  ! First the first neighbours, which have 2 indices=0 in 3D
1169  ! or one index=0 in 2D
1170  block=>psb(igrid)
1171  do idims=1,ndim
1172  i^d=0;
1173  select case(idims)
1174  {case(^d)
1175  do i^d=-1,1,2
1176  if (needprolong(i^dd)) call bc_prolong_stg(igrid,i^dd,needprolong)
1177  end do
1178  \}
1179  end select
1180  end do
1181  ! Then the second neighbours which have 1 index=0 in 3D
1182  ! (Only in 3D)
1183  {^ifthreed
1184  i1=0;
1185  do i2=-1,1,2
1186  do i3=-1,1,2
1187  if (needprolong(i^d)) call bc_prolong_stg(igrid,i^d,needprolong)
1188  end do
1189  end do
1190  i2=0;
1191  do i3=-1,1,2
1192  do i1=-1,1,2
1193  if (needprolong(i^d)) call bc_prolong_stg(igrid,i^d,needprolong)
1194  end do
1195  end do
1196  i3=0;
1197  do i1=-1,1,2
1198  do i2=-1,1,2
1199  if (needprolong(i^d)) call bc_prolong_stg(igrid,i^d,needprolong)
1200  end do
1201  end do
1202  }
1203  ! Finally, the corners, that have no index=0
1204  {do i^d=-1,1,2\}
1205  if (needprolong(i^d)) call bc_prolong_stg(igrid,i^d,needprolong)
1206  {end do\}
1207  end if
1208  end subroutine gc_prolong
1209 
1210  !> fill the coarse representative with data received from the coarser neighbor
1211  subroutine bc_fill_prolong_stg
1212  ic^d=1+modulo(node(pig^d_,igrid)-1,2);
1213  if ({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
1214 
1215  ipe_neighbor=neighbor(2,i^d,igrid)
1216  if(ipe_neighbor/=mype) then
1217  ineighbor=neighbor(1,i^d,igrid)
1218  ipole=neighbor_pole(i^d,igrid)
1219 
1220  if (ipole==0) then !! There is no pole
1221  inc^d=ic^d+i^d;
1222  ixr^l=ixr_p_^l(inc^d);
1223  do idir=1,ndim
1224  ixr^l=ixr_p_stg_^l(idir,inc^d);
1225  ibuf_next=ibuf_recv_p+sizes_p_recv_stg(idir,inc^d)
1226  psc(igrid)%ws(ixr^s,idir)=reshape(source=recvbuffer_p(ibuf_recv_p:ibuf_next-1),&
1227  shape=shape(psc(igrid)%ws(ixr^s,idir)))
1228  ibuf_recv_p=ibuf_next
1229  end do
1230  else !! There is a pole
1231  inc^d=ic^d+i^d;
1232  select case (ipole)
1233  {case (^d)
1234  n_inc^d=2*i^d+(3-ic^d)^d%n_inc^dd=-2*i^dd+ic^dd;\}
1235  end select
1236  !! Unpack the buffer and fill an auxiliary array
1237  pole_buf%ws=zero
1238  do idir=1,ndim
1239  ixr^l=ixr_p_stg_^l(idir,inc^d);
1240  ibuf_next=ibuf_recv_p+sizes_p_recv_stg(idir,inc^d)
1241  pole_buf%ws(ixr^s,idir)=reshape(source=recvbuffer_p(ibuf_recv_p:ibuf_next-1),&
1242  shape=shape(psc(igrid)%ws(ixr^s,idir)))
1243  call pole_copy_stg(psc(igrid)%ws,ixcogs^l,ixr^l,pole_buf%ws,ixgs^ll,ixr^l,idir,ipole)
1244  ibuf_recv_p=ibuf_next
1245  end do
1246  end if
1247  end if
1248 
1249  end subroutine bc_fill_prolong_stg
1250 
1251  !> do prolongation for fine blocks after receiving data from coarse neighbors
1252  subroutine bc_prolong(igrid,i^D)
1254 
1255  double precision :: dxFi^D, dxCo^D, xFimin^D, xComin^D, invdxCo^D
1256  integer :: i^D,igrid
1257  integer :: ixFi^L,ixCo^L,ii^D, idims,iside,ixB^L
1258 
1259  ixfi^l=ixr_srl_^l(i^d);
1260  dxfi^d=rnode(rpdx^d_,igrid);
1261  dxco^d=two*dxfi^d;
1262  invdxco^d=1.d0/dxco^d;
1263 
1264  ! compute the enlarged grid lower left corner coordinates
1265  ! these are true coordinates for an equidistant grid,
1266  ! but we can temporarily also use them for getting indices
1267  ! in stretched grids
1268  xfimin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxfi^d;
1269  xcomin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxco^d;
1270 
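 ! if the block touches a physical boundary, fill the boundary ghost cells of the
 ! coarse representative first, so the interpolation stencil only uses valid values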
1271  if(phyboundblock(igrid).and.bcphys) then
1272  block=>psc(igrid)
1273  do idims=1,ndim
1274  ixcomin^d=int((xfimin^d+(dble(ixfimin^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1-1;
1275  ixcomax^d=int((xfimin^d+(dble(ixfimax^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1+1;
1276  {^ifthreed
1277  ! avoid using undetermined ghost cells at physical boundary edges
1278  if(idims == 1) then
1279  if(neighbor_type(-1,0,0,igrid)==neighbor_boundary .or. &
1280  neighbor_type(1,0,0,igrid)==neighbor_boundary) then
1281  if(neighbor_type(0,-1,0,igrid)==neighbor_boundary) ixcomin2=ixcommin2
1282  if(neighbor_type(0,0,-1,igrid)==neighbor_boundary) ixcomin3=ixcommin3
1283  if(neighbor_type(0,1,0,igrid)==neighbor_boundary) ixcomax2=ixcommax2
1284  if(neighbor_type(0,0,1,igrid)==neighbor_boundary) ixcomax3=ixcommax3
1285  end if
1286  else if(idims == 2) then
1287  if(neighbor_type(0,-1,0,igrid)==neighbor_boundary .or. &
1288  neighbor_type(0,1,0,igrid)==neighbor_boundary) then
1289  if(neighbor_type(0,0,-1,igrid)==neighbor_boundary) ixcomin3=ixcommin3
1290  if(neighbor_type(0,0,1,igrid)==neighbor_boundary) ixcomax3=ixcommax3
1291  end if
1292  end if
1293  }
1294  do iside=1,2
1295  ii^d=kr(^d,idims)*(2*iside-3);
1296  if(neighbor_type(ii^d,igrid)/=neighbor_boundary) cycle
1297  if(( {(iside==1.and.idims==^d.and.ixcomin^d<ixcogmin^d+nghostcells)|.or.} ) &
1298  .or.( {(iside==2.and.idims==^d.and.ixcomax^d>ixcogmax^d-nghostcells)|.or. })) then
1299  {ixbmin^d=merge(ixcogmin^d,ixcomin^d,idims==^d);}
1300  {ixbmax^d=merge(ixcogmax^d,ixcomax^d,idims==^d);}
1301  call bc_phys(iside,idims,time,0.d0,psc(igrid),ixcog^l,ixb^l)
1302  end if
1303  end do
1304  end do
1305  end if
1306 
1307  if(prolongprimitive) then
1308  ! following line again assumes equidistant grid, but
1309  ! just computes indices, so also ok for stretched case
1310  ! reason for +1-1 and +1+1: the coarse representation has
1311  ! also nghostcells at each side. During
1312  ! prolongation, we need cells to left and right, hence -1/+1
1313  block=>psc(igrid)
1314  ixcomin^d=int((xfimin^d+(dble(ixfimin^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1-1;
1315  ixcomax^d=int((xfimin^d+(dble(ixfimax^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1+1;
1316  call phys_to_primitive(ixcog^l,ixco^l,psc(igrid)%w,psc(igrid)%x)
1317  end if
1318 
1319  if(ghost_copy) then
1320  call interpolation_copy(igrid,ixfi^l,dxfi^d,xfimin^d,dxco^d,invdxco^d,xcomin^d)
1321  else
1322  call interpolation_linear(igrid,ixfi^l,dxfi^d,xfimin^d,dxco^d,invdxco^d,xcomin^d)
1323  end if
1324 
1325  if(prolongprimitive) then
1326  block=>psc(igrid)
1327  call phys_to_conserved(ixcog^l,ixco^l,psc(igrid)%w,psc(igrid)%x)
1328  end if
1329 
1330  end subroutine bc_prolong
1331 
1332  subroutine bc_prolong_stg(igrid,i^D,NeedProlong)
1333  use mod_amr_fct
1334  double precision :: dxFi^D,dxCo^D,xFimin^D,xComin^D,invdxCo^D
1335  integer :: igrid,i^D
1336  integer :: ixFi^L,ixCo^L
1337  logical,dimension(-1:1^D&) :: NeedProlong
1338  logical :: fine_^Lin
1339  ! Check what is already at the desired level
1340  fine_^lin=.false.;
1341  {
1342  if(i^d>-1) fine_min^din=(.not.needprolong(i^dd-kr(^d,^dd)).and.neighbor_type(i^dd-kr(^d,^dd),igrid)/=1)
1343  if(i^d<1) fine_max^din=(.not.needprolong(i^dd+kr(^d,^dd)).and.neighbor_type(i^dd+kr(^d,^dd),igrid)/=1)
1344  \}
1345 
1346  ixfi^l=ixr_srl_^l(i^d);
1347 
1348  dxfi^d=rnode(rpdx^d_,igrid);
1349  dxco^d=two*dxfi^d;
1350  invdxco^d=1.d0/dxco^d;
1351 
1352  xfimin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxfi^d;
1353  xcomin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxco^d;
1354 
1355  ! moved the physical boundary filling here, to only fill the
1356  ! part needed
1357 
1358  ixcomin^d=int((xfimin^d+(dble(ixfimin^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1-1;
1359  ixcomax^d=int((xfimin^d+(dble(ixfimax^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1+1;
1360 
1361  if(prolongprimitive) call phys_to_primitive(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)
1362 
1363  call prolong_2nd_stg(psc(igrid),psb(igrid),ixco^l,ixfi^l,dxco^d,xcomin^d,dxfi^d,xfimin^d,.true.,fine_^lin)
1364 
1365  if(prolongprimitive) call phys_to_conserved(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)
1366 
1367  ! The current region has already been refined, so it does not need to be prolonged again
1368  needprolong(i^d)=.false.
1369 
1370  end subroutine bc_prolong_stg
1371 
1372  subroutine interpolation_linear(igrid,ixFi^L,dxFi^D,xFimin^D, &
1373  dxCo^D,invdxCo^D,xComin^D)
1374  use mod_physics, only: phys_to_conserved
1375  integer, intent(in) :: igrid, ixFi^L
1376  double precision, intent(in) :: dxFi^D, xFimin^D,dxCo^D, invdxCo^D, xComin^D
1377 
1378  double precision :: xCo^D, xFi^D, eta^D
1379  double precision :: slopeL, slopeR, slopeC, signC, signR
1380  double precision :: slope(1:nw,ndim)
1381  !!double precision :: local_invdxCo^D
1382  double precision :: signedfactorhalf^D
1383  integer :: ixCo^D, jxCo^D, hxCo^D, ixFi^D, ix^D, iw, idims, nwmin,nwmax
1384  !integer :: ixshift^D, icase
1385 
1386  !icase=mod(nghostcells,2)
1387 
1388  if(prolongprimitive) then
1389  nwmin=1
1390  nwmax=nw
1391  else
1392  nwmin=nwhead
1393  nwmax=nwtail
1394  end if
1395 
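 ! for each fine ghost cell: locate the enclosing coarse cell, compute the normalized
 ! offset eta within it, and evaluate a slope-limited linear reconstruction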
1396  {do ixfi^db = ixfi^lim^db
1397  ! cell-centered coordinates of fine grid point
1398  ! here we temporarily use an equidistant grid
1399  xfi^db=xfimin^db+(dble(ixfi^db)-half)*dxfi^db
1400 
1401  ! indices of coarse cell which contains the fine cell
1402  ! since we computed lower left corner earlier
1403  ! in equidistant fashion: also ok for stretched case
1404  ixco^db=int((xfi^db-xcomin^db)*invdxco^db)+1
1405 
1406  ! cell-centered coordinates of coarse grid point
1407  ! here we temporarily use an equidistant grid
1408  xco^db=xcomin^db+(dble(ixco^db)-half)*dxco^db \}
1409 
1410  !if(.not.slab) then
1411  ! ^D&local_invdxCo^D=1.d0/psc(igrid)%dx({ixCo^DD},^D)\
1412  !endif
1413 
1414  if(slab_uniform) then
1415  ! actual cell-centered coordinates of fine grid point
1416  !!^D&xFi^D=block%x({ixFi^DD},^D)\
1417  ! actual cell-centered coordinates of coarse grid point
1418  !!^D&xCo^D=psc(igrid)%x({ixCo^DD},^D)\
1419  ! normalized distance between fine/coarse cell center
1420  ! in coarse cell: ranges from -0.5 to 0.5 in each direction
1421  ! (origin is coarse cell center)
1422  ! this is essentially +1/4 or -1/4 on cartesian mesh
1423  eta^d=(xfi^d-xco^d)*invdxco^d;
1424  else
1425  !select case(icase)
1426  ! case(0)
1427  !{! here we assume an even number of ghostcells!!!
1428  !ixshift^D=2*(mod(ixFi^D,2)-1)+1
1429  !if(ixshift^D>0.0d0)then
1430  ! ! oneven fine grid points
1431  ! eta^D=-0.5d0*(one-block%dvolume(ixFi^DD) &
1432  ! /sum(block%dvolume(ixFi^D:ixFi^D+1^D%ixFi^DD)))
1433  !else
1434  ! ! even fine grid points
1435  ! eta^D=+0.5d0*(one-block%dvolume(ixFi^DD) &
1436  ! /sum(block%dvolume(ixFi^D-1:ixFi^D^D%ixFi^DD)))
1437  !endif\}
1438  ! case(1)
1439  !{! here we assume an odd number of ghostcells!!!
1440  !ixshift^D=2*(mod(ixFi^D,2)-1)+1
1441  !if(ixshift^D>0.0d0)then
1442  ! ! oneven fine grid points
1443  ! eta^D=+0.5d0*(one-block%dvolume(ixFi^DD) &
1444  ! /sum(block%dvolume(ixFi^D-1:ixFi^D^D%ixFi^DD)))
1445  !else
1446  ! ! even fine grid points
1447  ! eta^D=-0.5d0*(one-block%dvolume(ixFi^DD) &
1448  ! /sum(block%dvolume(ixFi^D:ixFi^D+1^D%ixFi^DD)))
1449  !endif\}
1450  ! case default
1451  ! call mpistop("no such case")
1452  !end select
1453  ! the different cases for even/uneven number of ghost cells
1454  ! are automatically handled using the relative index to ixMlo
1455  ! as well as the pseudo-coordinates xFi and xCo
1456  ! these latter differ from actual cell centers when stretching is used
1457  ix^d=2*int((ixfi^d+ixmlo^d)/2)-ixmlo^d;
1458  {if(xfi^d>xco^d) then
1459  signedfactorhalf^d=0.5d0
1460  else
1461  signedfactorhalf^d=-0.5d0
1462  end if
1463  eta^d=signedfactorhalf^d*(one-psb(igrid)%dvolume(ixfi^dd) &
1464  /sum(psb(igrid)%dvolume(ix^d:ix^d+1^d%ixFi^dd))) \}
1465  !{eta^D=(xFi^D-xCo^D)*invdxCo^D &
1466  ! *two*(one-block%dvolume(ixFi^DD) &
1467  ! /sum(block%dvolume(ix^D:ix^D+1^D%ixFi^DD))) \}
1468  end if
1469 
1470  do idims=1,ndim
1471  hxco^d=ixco^d-kr(^d,idims)\
1472  jxco^d=ixco^d+kr(^d,idims)\
1473 
1474  do iw=nwmin,nwmax
1475  slopel=psc(igrid)%w(ixco^d,iw)-psc(igrid)%w(hxco^d,iw)
1476  sloper=psc(igrid)%w(jxco^d,iw)-psc(igrid)%w(ixco^d,iw)
1477  slopec=half*(sloper+slopel)
1478 
1479  ! get limited slope
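 ! (default below: zero at local extrema, otherwise the smallest in magnitude of the
 ! central, left and right slopes; the commented blocks show alternative limiters)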
1480  signr=sign(one,sloper)
1481  signc=sign(one,slopec)
1482  !select case(prolong_limiter)
1483  !case(1)
1484  ! ! unlimit
1485  ! slope(iw,idims)=slopeC
1486  !case(2)
1487  ! ! minmod
1488  ! slope(iw,idims)=signR*max(zero,min(dabs(slopeR), &
1489  ! signR*slopeL))
1490  !case(3)
1491  ! ! woodward
1492  ! slope(iw,idims)=two*signR*max(zero,min(dabs(slopeR), &
1493  ! signR*slopeL,signR*half*slopeC))
1494  !case(4)
1495  ! ! koren
1496  ! slope(iw,idims)=signR*max(zero,min(two*signR*slopeL, &
1497  ! (dabs(slopeR)+two*slopeL*signR)*third,two*dabs(slopeR)))
1498  !case default
1499  slope(iw,idims)=signc*max(zero,min(dabs(slopec), &
1500  signc*slopel,signc*sloper))
1501  !end select
1502  end do
1503  end do
1504 
1505  ! Interpolate from coarse cell using limited slopes
1506  psb(igrid)%w(ixfi^d,nwmin:nwmax)=psc(igrid)%w(ixco^d,nwmin:nwmax)+&
1507  {(slope(nwmin:nwmax,^d)*eta^d)+}
1508 
1509  {end do\}
1510 
1511  if(prolongprimitive) then
1512  block=>psb(igrid)
1513  call phys_to_conserved(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)
1514  end if
1515 
1516  end subroutine interpolation_linear
1517 
1518  subroutine interpolation_copy(igrid, ixFi^L,dxFi^D,xFimin^D, &
1519  dxCo^D,invdxCo^D,xComin^D)
1520  use mod_physics, only: phys_to_conserved
1521  integer, intent(in) :: igrid, ixFi^L
1522  double precision, intent(in) :: dxFi^D, xFimin^D,dxCo^D, invdxCo^D, xComin^D
1523 
1524  double precision :: xFi^D
1525  integer :: ixCo^D, ixFi^D, nwmin,nwmax
1526 
1527  if(prolongprimitive) then
1528  nwmin=1
1529  nwmax=nw
1530  else
1531  nwmin=nwhead
1532  nwmax=nwtail
1533  end if
1534 
1535  {do ixfi^db = ixfi^lim^db
1536  ! cell-centered coordinates of fine grid point
1537  xfi^db=xfimin^db+(dble(ixfi^db)-half)*dxfi^db
1538 
1539  ! indices of coarse cell which contains the fine cell
1540  ! note: this also works for stretched grids
1541  ixco^db=int((xfi^db-xcomin^db)*invdxco^db)+1\}
1542 
1543  ! Copy from coarse cell
1544  psb(igrid)%w(ixfi^d,nwmin:nwmax)=psc(igrid)%w(ixco^d,nwmin:nwmax)
1545 
1546  {end do\}
1547 
1548  if(prolongprimitive) call phys_to_conserved(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)
1549 
1550  end subroutine interpolation_copy
1551 
1552  subroutine pole_copy(wrecv,ixIR^L,ixR^L,wsend,ixIS^L,ixS^L,ipole)
1553 
1554  integer, intent(in) :: ixIR^L,ixR^L,ixIS^L,ixS^L,ipole
1555  double precision :: wrecv(ixIR^S,1:nw), wsend(ixIS^S,1:nw)
1556 
1557  integer :: iw, iside, iB
1558 
1559  select case (ipole)
1560  {case (^d)
1561  iside=int((i^d+3)/2)
1562  ib=2*(^d-1)+iside
1563  do iw=nwhead,nwtail
1564  select case (typeboundary(iw,ib))
1565  case (bc_symm)
1566  wrecv(ixr^s,iw) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1567  case (bc_asymm)
1568  wrecv(ixr^s,iw) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1569  case default
1570  call mpistop("Pole boundary condition should be symm or asymm")
1571  end select
1572  end do \}
1573  end select
1574 
1575  end subroutine pole_copy
1576 
1577  subroutine pole_copy_stg(wrecv,ixIR^L,ixR^L,wsend,ixIS^L,ixS^L,idirs,ipole)
1578 
1579  integer, intent(in) :: ixIR^L,ixR^L,ixIS^L,ixS^L,idirs,ipole
1580 
1581  double precision :: wrecv(ixIR^S,1:nws), wsend(ixIS^S,1:nws)
1582  integer :: iB, iside
1583 
1584  select case (ipole)
1585  {case (^d)
1586  iside=int((i^d+3)/2)
1587  ib=2*(^d-1)+iside
1588  select case (typeboundary(iw_mag(idirs),ib))
1589  case (bc_symm)
1590  wrecv(ixr^s,idirs) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,idirs)
1591  case (bc_asymm)
1592  wrecv(ixr^s,idirs) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,idirs)
1593  case default
1594  call mpistop("Pole boundary condition should be symm or asymm")
1595  end select
1596  \}
1597  end select
1598 
1599  end subroutine pole_copy_stg
1600 
1601  subroutine pole_buffer(wrecv,ixIR^L,ixR^L,wsend,ixIS^L,ixS^L)
1602 
1603  integer, intent(in) :: ixIR^L,ixR^L,ixIS^L,ixS^L
1604  double precision :: wrecv(ixIR^S,nwhead:nwtail), wsend(ixIS^S,1:nw)
1605 
1606  integer :: iw, iside, iB
1607 
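      ! Variant of pole_copy in which the receiving array spans only the
      ! currently updated range nwhead:nwtail and ipole is taken from the
      ! enclosing scope instead of being passed as an argument.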
1608  select case (ipole)
1609  {case (^d)
1610  iside=int((i^d+3)/2)
1611  ib=2*(^d-1)+iside
1612  do iw=nwhead,nwtail
1613  select case (typeboundary(iw,ib))
1614  case (bc_symm)
1615  wrecv(ixr^s,iw) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1616  case (bc_asymm)
1617  wrecv(ixr^s,iw) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1618  case default
1619  call mpistop("Pole boundary condition should be symm or asymm")
1620  end select
1621  end do \}
1622  end select
1623 
1624  end subroutine pole_buffer
1625 
1626  end subroutine getbc
1627 
1628 end module mod_ghostcells_update