MPI-AMRVAC 3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_ghostcells_update.t
1!> update ghost cells of all blocks including physical boundaries
2 module mod_ghostcells_update
3  use mod_global_parameters
4 implicit none
5
6 ! The number of interleaving sending buffers for ghost cells
7 integer, parameter :: npwbuf=2
8
9 integer :: ixm^l, ixcog^l, ixcom^l, ixcogs^l
10
11 ! index ranges to send (S) to sibling blocks, receive (R) from sibling blocks
12 integer, dimension(-1:1) :: ixs_srl_^l, ixr_srl_^l
13
14 ! index ranges of staggered variables to send (S) to sibling blocks, receive (R) from sibling blocks
15 integer, dimension(^ND,-1:1) :: ixs_srl_stg_^l, ixr_srl_stg_^l
16
17 ! index ranges to send (S) restricted (r) ghost cells to coarser blocks
18 integer, dimension(-1:1) :: ixs_r_^l
19
20 ! index ranges of staggered variables to send (S) restricted (r) ghost cells to coarser blocks
21 integer, dimension(^ND,-1:1) :: ixs_r_stg_^l
22
23 ! index ranges to receive restricted ghost cells from finer blocks
24 integer, dimension(0:3) :: ixr_r_^l
25
26 ! index ranges of staggered variables to receive restricted ghost cells from finer blocks
27 integer, dimension(^ND,0:3) :: ixr_r_stg_^l
28
29 ! send prolongated (p) ghost cells to finer blocks, receive prolongated from coarser blocks
30 integer, dimension(0:3) :: ixs_p_^l, ixr_p_^l
31
32 ! send prolongated (p) staggered ghost cells to finer blocks, receive prolongated from coarser blocks
33 integer, dimension(^ND,0:3) :: ixs_p_stg_^l, ixr_p_stg_^l
34
35 ! number of MPI receive-send pairs, srl: same refinement level; r: restrict; p: prolong
36 integer :: nrecv_bc_srl, nsend_bc_srl, nrecv_bc_r, nsend_bc_r, nrecv_bc_p, nsend_bc_p
37
38 ! record index position of buffer arrays
39 integer :: ibuf_send_srl, ibuf_recv_srl, ibuf_send_r, ibuf_recv_r, ibuf_send_p, ibuf_recv_p
40
41 ! counts of send and receive for staggered-grid ghost cells
42 integer :: isend_srl, irecv_srl, isend_r, irecv_r, isend_p, irecv_p
43
44 ! counts of send and receive for cell-center ghost cells
45 integer :: isend_c, irecv_c
46
47 ! tag of MPI send and recv
48 integer, private :: itag
49
50 ! total sizes = cell-center normal flux + stagger-grid flux of send and receive
51 integer, dimension(-1:1^D&) :: sizes_srl_send_total, sizes_srl_recv_total
52
53 ! MPI request and status arrays for cell-center variables of siblings and restriction
54 integer, dimension(:), allocatable :: recvrequest_c_sr, sendrequest_c_sr
55 integer, dimension(:,:), allocatable :: recvstatus_c_sr, sendstatus_c_sr
56
57 ! MPI request and status arrays for cell-center variables of prolongation
58 integer, dimension(:), allocatable :: recvrequest_c_p, sendrequest_c_p
59 integer, dimension(:,:), allocatable :: recvstatus_c_p, sendstatus_c_p
60
61 ! sizes of buffer arrays for stagger-grid variable
62 integer, dimension(^ND,-1:1^D&) :: sizes_srl_send_stg, sizes_srl_recv_stg
63
64 integer, dimension(:), allocatable :: recvrequest_srl, sendrequest_srl
65 integer, dimension(:,:), allocatable :: recvstatus_srl, sendstatus_srl
66
67 ! buffer arrays for send and receive of siblings, allocate in build_connectivity
68 double precision, dimension(:), allocatable :: recvbuffer_srl, sendbuffer_srl
69
70 integer, dimension(:), allocatable :: recvrequest_r, sendrequest_r
71 integer, dimension(:,:), allocatable :: recvstatus_r, sendstatus_r
72
73 ! buffer arrays for send and receive in restriction
74 double precision, dimension(:), allocatable :: recvbuffer_r, sendbuffer_r
75
76 integer, dimension(:), allocatable :: recvrequest_p, sendrequest_p
77 integer, dimension(:,:), allocatable :: recvstatus_p, sendstatus_p
78
79 ! buffer arrays for send and receive in prolongation
80 double precision, dimension(:), allocatable :: recvbuffer_p, sendbuffer_p
81
82 ! sizes to allocate buffer arrays for send and receive for restriction
83 integer, dimension(-1:1^D&) :: sizes_r_send_total
84 integer, dimension(0:3^D&) :: sizes_r_recv_total
85 integer, dimension(^ND,-1:1^D&) :: sizes_r_send_stg
86 integer, dimension(^ND,0:3^D&) :: sizes_r_recv_stg
87
88 ! sizes to allocate buffer arrays for send and receive for prolongation
89 integer, dimension(0:3^D&) :: sizes_p_send_total, sizes_p_recv_total
90 integer, dimension(^ND,0:3^D&) :: sizes_p_send_stg, sizes_p_recv_stg
91
92 ! There are two variants, _f indicates that all flux variables are filled,
93 ! whereas _p means that part of the variables is filled
94 ! Furthermore _r_ stands for restrict, _p_ for prolongation.
95 integer, dimension(-1:1^D&), target :: type_send_srl_f, type_recv_srl_f
96 integer, dimension(-1:1^D&), target :: type_send_r_f
97 integer, dimension( 0:3^D&), target :: type_recv_r_f, type_send_p_f, type_recv_p_f
98 integer, dimension(-1:1^D&), target :: type_send_srl_p1, type_recv_srl_p1
99 integer, dimension(-1:1^D&), target :: type_send_r_p1
100 integer, dimension( 0:3^D&), target :: type_recv_r_p1, type_send_p_p1, type_recv_p_p1
101 integer, dimension(-1:1^D&), target :: type_send_srl_p2, type_recv_srl_p2
102 integer, dimension(-1:1^D&), target :: type_send_r_p2
103 integer, dimension( 0:3^D&), target :: type_recv_r_p2, type_send_p_p2, type_recv_p_p2
104 integer, dimension( :^D&), pointer :: type_send_srl, type_recv_srl, type_send_r
105 integer, dimension( :^D&), pointer :: type_recv_r, type_send_p, type_recv_p
106
107 ! switch to update physical boundaries or not
108 logical, public :: bcphys=.true.
109 ! Special buffer for pole copy
110 type wbuffer
111 double precision, dimension(:^D&,:), allocatable :: w
112 end type wbuffer
113
114contains
115
116 subroutine init_bc()
118 use mod_physics, only: physics_type
119 use mod_comm_lib, only: mpistop
120
121 integer :: nghostcellsCo, interpolation_order
122 integer :: nx^D, nxCo^D, ixG^L, i^D, idir
123
124 ixg^l=ixg^ll;
125 ixm^l=ixg^l^lsubnghostcells;
126 ixcogmin^d=1;
127 ixcogmax^d=(ixghi^d-2*nghostcells)/2+2*nghostcells;
128 ixcogsmin^d=0;
129 ixcogsmax^d=ixcogmax^d;
130
131 ixcom^l=ixcog^l^lsubnghostcells;
132
133 nx^d=ixmmax^d-ixmmin^d+1;
134 nxco^d=nx^d/2;
135
136 if(ghost_copy) then
137 interpolation_order=1
138 else
139 interpolation_order=2
140 end if
141 nghostcellsco=int((nghostcells+1)/2)
142
143 if (nghostcellsco+interpolation_order-1>nghostcells) then
144 call mpistop("interpolation order for prolongation in getbc too high")
145 end if
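 ! Illustration of the check above (example values only): with nghostcells=2 and linear
 ! prolongation (interpolation_order=2), nghostcellsCo=int(3/2)=1 and 1+2-1=2<=2, so the
 ! check passes; with nghostcells=1 and interpolation_order=2 it gives 1+2-1=2>1 and stops,
 ! so single-ghost-cell runs need ghost_copy=.true. (interpolation_order=1).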
146
147 ! i=-1 means subregion prepared for the neighbor at its minimum side
148 ! i= 1 means subregion prepared for the neighbor at its maximum side
149 {
150 ixs_srl_min^d(-1)=ixmmin^d
151 ixs_srl_min^d( 0)=ixmmin^d
152 ixs_srl_min^d( 1)=ixmmax^d+1-nghostcells
153 ixs_srl_max^d(-1)=ixmmin^d-1+nghostcells
154 ixs_srl_max^d( 0)=ixmmax^d
155 ixs_srl_max^d( 1)=ixmmax^d
156
157 ixr_srl_min^d(-1)=1
158 ixr_srl_min^d( 0)=ixmmin^d
159 ixr_srl_min^d( 1)=ixmmax^d+1
160 ixr_srl_max^d(-1)=nghostcells
161 ixr_srl_max^d( 0)=ixmmax^d
162 ixr_srl_max^d( 1)=ixgmax^d
163
164 ixs_r_min^d(-1)=ixcommin^d
165 ixs_r_min^d( 0)=ixcommin^d
166 ixs_r_min^d( 1)=ixcommax^d+1-nghostcells
167 ixs_r_max^d(-1)=ixcommin^d-1+nghostcells
168 ixs_r_max^d( 0)=ixcommax^d
169 ixs_r_max^d( 1)=ixcommax^d
170
171 ixr_r_min^d(0)=1
172 ixr_r_min^d(1)=ixmmin^d
173 ixr_r_min^d(2)=ixmmin^d+nxco^d
174 ixr_r_min^d(3)=ixmmax^d+1
175 ixr_r_max^d(0)=nghostcells
176 ixr_r_max^d(1)=ixmmin^d-1+nxco^d
177 ixr_r_max^d(2)=ixmmax^d
178 ixr_r_max^d(3)=ixgmax^d
179
180 ixs_p_min^d(0)=ixmmin^d-(interpolation_order-1)
181 ixs_p_min^d(1)=ixmmin^d-(interpolation_order-1)
182 ixs_p_min^d(2)=ixmmin^d+nxco^d-nghostcellsco-(interpolation_order-1)
183 ixs_p_min^d(3)=ixmmax^d+1-nghostcellsco-(interpolation_order-1)
184 ixs_p_max^d(0)=ixmmin^d-1+nghostcellsco+(interpolation_order-1)
185 ixs_p_max^d(1)=ixmmin^d-1+nxco^d+nghostcellsco+(interpolation_order-1)
186 ixs_p_max^d(2)=ixmmax^d+(interpolation_order-1)
187 ixs_p_max^d(3)=ixmmax^d+(interpolation_order-1)
188
189 ixr_p_min^d(0)=ixcommin^d-nghostcellsco-(interpolation_order-1)
190 ixr_p_min^d(1)=ixcommin^d-(interpolation_order-1)
191 ixr_p_min^d(2)=ixcommin^d-nghostcellsco-(interpolation_order-1)
192 ixr_p_min^d(3)=ixcommax^d+1-(interpolation_order-1)
193 ixr_p_max^d(0)=nghostcells+(interpolation_order-1)
194 ixr_p_max^d(1)=ixcommax^d+nghostcellsco+(interpolation_order-1)
195 ixr_p_max^d(2)=ixcommax^d+(interpolation_order-1)
196 ixr_p_max^d(3)=ixcommax^d+nghostcellsco+(interpolation_order-1)
197
198 \}
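 ! Worked example of the ranges above (illustrative numbers: a block with 16 interior cells
 ! per direction and nghostcells=2, so ixMmin=3, ixMmax=18, ixGmax=20):
 ! ixS_srl(-1)=3:4 and ixS_srl(1)=17:18 are the first/last layers of physical cells sent to
 ! the min/max-side sibling, while ixR_srl(-1)=1:2 and ixR_srl(1)=19:20 are the ghost layers
 ! they are received into on the other side.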
199
200 if (stagger_grid) then
201 allocate(pole_buf%ws(ixgs^t,nws))
202 ! Staggered (face-allocated) variables
203 do idir=1,ndim
204 { ixs_srl_stg_min^d(idir,-1)=ixmmin^d-kr(idir,^d)
205 ixs_srl_stg_max^d(idir,-1)=ixmmin^d-1+nghostcells
206 ixs_srl_stg_min^d(idir,0) =ixmmin^d-kr(idir,^d)
207 ixs_srl_stg_max^d(idir,0) =ixmmax^d
208 ixs_srl_stg_min^d(idir,1) =ixmmax^d-nghostcells+1-kr(idir,^d)
209 ixs_srl_stg_max^d(idir,1) =ixmmax^d
210
211 ixr_srl_stg_min^d(idir,-1)=1-kr(idir,^d)
212 ixr_srl_stg_max^d(idir,-1)=nghostcells
213 ixr_srl_stg_min^d(idir,0) =ixmmin^d-kr(idir,^d)
214 ixr_srl_stg_max^d(idir,0) =ixmmax^d
215 ixr_srl_stg_min^d(idir,1) =ixmmax^d+1-kr(idir,^d)
216 ixr_srl_stg_max^d(idir,1) =ixgmax^d
217
218 ixs_r_stg_min^d(idir,-1)=ixcommin^d-kr(idir,^d)
219 ixs_r_stg_max^d(idir,-1)=ixcommin^d-1+nghostcells
220 ixs_r_stg_min^d(idir,0) =ixcommin^d-kr(idir,^d)
221 ixs_r_stg_max^d(idir,0) =ixcommax^d
222 ixs_r_stg_min^d(idir,1) =ixcommax^d+1-nghostcells-kr(idir,^d)
223 ixs_r_stg_max^d(idir,1) =ixcommax^d
224
225 ixr_r_stg_min^d(idir,0)=1-kr(idir,^d)
226 ixr_r_stg_max^d(idir,0)=nghostcells
227 ixr_r_stg_min^d(idir,1)=ixmmin^d-kr(idir,^d)
228 ixr_r_stg_max^d(idir,1)=ixmmin^d-1+nxco^d
229 ixr_r_stg_min^d(idir,2)=ixmmin^d+nxco^d-kr(idir,^d)
230 ixr_r_stg_max^d(idir,2)=ixmmax^d
231 ixr_r_stg_min^d(idir,3)=ixmmax^d+1-kr(idir,^d)
232 ixr_r_stg_max^d(idir,3)=ixgmax^d
233 \}
234 {if (idir==^d) then
235 ! Parallel components
236 {
237 ixs_p_stg_min^d(idir,0)=ixmmin^d-1 ! -1 to make redundant
238 ixs_p_stg_max^d(idir,0)=ixmmin^d-1+nghostcellsco
239 ixs_p_stg_min^d(idir,1)=ixmmin^d-1 ! -1 to make redundant
240 ixs_p_stg_max^d(idir,1)=ixmmin^d-1+nxco^d+nghostcellsco
241 ixs_p_stg_min^d(idir,2)=ixmmax^d-nxco^d-nghostcellsco
242 ixs_p_stg_max^d(idir,2)=ixmmax^d
243 ixs_p_stg_min^d(idir,3)=ixmmax^d-nghostcellsco
244 ixs_p_stg_max^d(idir,3)=ixmmax^d
245
246 ixr_p_stg_min^d(idir,0)=ixcommin^d-1-nghostcellsco
247 ixr_p_stg_max^d(idir,0)=ixcommin^d-1
248 ixr_p_stg_min^d(idir,1)=ixcommin^d-1 ! -1 to make redundant
249 ixr_p_stg_max^d(idir,1)=ixcommax^d+nghostcellsco
250 ixr_p_stg_min^d(idir,2)=ixcommin^d-1-nghostcellsco
251 ixr_p_stg_max^d(idir,2)=ixcommax^d
252 ixr_p_stg_min^d(idir,3)=ixcommax^d+1-1 ! -1 to make redundant
253 ixr_p_stg_max^d(idir,3)=ixcommax^d+nghostcellsco
254 \}
255 else
256 {
257 ! Perpendicular component
258 ixs_p_stg_min^d(idir,0)=ixmmin^d
259 ixs_p_stg_max^d(idir,0)=ixmmin^d-1+nghostcellsco+(interpolation_order-1)
260 ixs_p_stg_min^d(idir,1)=ixmmin^d
261 ixs_p_stg_max^d(idir,1)=ixmmin^d-1+nxco^d+nghostcellsco+(interpolation_order-1)
262 ixs_p_stg_min^d(idir,2)=ixmmax^d+1-nxco^d-nghostcellsco-(interpolation_order-1)
263 ixs_p_stg_max^d(idir,2)=ixmmax^d
264 ixs_p_stg_min^d(idir,3)=ixmmax^d+1-nghostcellsco-(interpolation_order-1)
265 ixs_p_stg_max^d(idir,3)=ixmmax^d
266
267 ixr_p_stg_min^d(idir,0)=ixcommin^d-nghostcellsco-(interpolation_order-1)
268 ixr_p_stg_max^d(idir,0)=ixcommin^d-1
269 ixr_p_stg_min^d(idir,1)=ixcommin^d
270 ixr_p_stg_max^d(idir,1)=ixcommax^d+nghostcellsco+(interpolation_order-1)
271 ixr_p_stg_min^d(idir,2)=ixcommin^d-nghostcellsco-(interpolation_order-1)
272 ixr_p_stg_max^d(idir,2)=ixcommax^d
273 ixr_p_stg_min^d(idir,3)=ixcommax^d+1
274 ixr_p_stg_max^d(idir,3)=ixcommax^d+nghostcellsco+(interpolation_order-1)
275 \}
276 end if
277 }
278 end do
279 ! calculate sizes for buffer arrays for siblings
280 {do i^db=-1,1\}
281 ! Staggered (face-allocated) variables
282 do idir=1,ndim
283 sizes_srl_send_stg(idir,i^d)={(ixs_srl_stg_max^d(idir,i^d)-ixs_srl_stg_min^d(idir,i^d)+1)|*}
284 sizes_srl_recv_stg(idir,i^d)={(ixr_srl_stg_max^d(idir,i^d)-ixr_srl_stg_min^d(idir,i^d)+1)|*}
285 sizes_r_send_stg(idir,i^d)={(ixs_r_stg_max^d(idir,i^d)-ixs_r_stg_min^d(idir,i^d)+1)|*}
286 end do
287 sizes_srl_send_total(i^d)=sum(sizes_srl_send_stg(:,i^d))
288 sizes_srl_recv_total(i^d)=sum(sizes_srl_recv_stg(:,i^d))
289 sizes_r_send_total(i^d)=sum(sizes_r_send_stg(:,i^d))
290 {end do\}
291
292 {do i^db=0,3\}
293 ! Staggered (face-allocated) variables
294 do idir=1,ndim
295 sizes_r_recv_stg(idir,i^d)={(ixr_r_stg_max^d(idir,i^d)-ixr_r_stg_min^d(idir,i^d)+1)|*}
296 sizes_p_send_stg(idir,i^d)={(ixs_p_stg_max^d(idir,i^d)-ixs_p_stg_min^d(idir,i^d)+1)|*}
297 sizes_p_recv_stg(idir,i^d)={(ixr_p_stg_max^d(idir,i^d)-ixr_p_stg_min^d(idir,i^d)+1)|*}
298 end do
299 sizes_r_recv_total(i^d)=sum(sizes_r_recv_stg(:,i^d))
300 sizes_p_send_total(i^d)=sum(sizes_p_send_stg(:,i^d))
301 sizes_p_recv_total(i^d)=sum(sizes_p_recv_stg(:,i^d))
302 {end do\}
303 end if
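 ! For reference, the {..|*} pattern used in the size calculations above is the
 ! dimension-independent preprocessor syntax for a product over all directions; in 2D a line
 ! like sizes_srl_send_stg(idir,i1,i2)={(ixS_srl_stg_max^D(idir,i^D)-ixS_srl_stg_min^D(idir,i^D)+1)|*}
 ! expands to (max1-min1+1)*(max2-min2+1), the number of staggered values in that patch,
 ! and the *_total arrays are the per-direction sums that are later passed to MPI as message sizes.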
304
305 end subroutine init_bc
306
307 subroutine create_bc_mpi_datatype(nwstart,nwbc)
309
310 integer, intent(in) :: nwstart, nwbc
311 integer :: i^D, ic^D, inc^D
312
313 {do i^db=-1,1\}
314 if(i^d==0|.and.) cycle
315 call get_bc_comm_type(type_send_srl(i^d),ixs_srl_^l(i^d),ixg^ll,nwstart,nwbc)
316 call get_bc_comm_type(type_recv_srl(i^d),ixr_srl_^l(i^d),ixg^ll,nwstart,nwbc)
317 call get_bc_comm_type(type_send_r(i^d), ixs_r_^l(i^d),ixcog^l,nwstart,nwbc)
318 {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
319 inc^db=2*i^db+ic^db\}
320 call get_bc_comm_type(type_recv_r(inc^d),ixr_r_^l(inc^d), ixg^ll,nwstart,nwbc)
321 call get_bc_comm_type(type_send_p(inc^d),ixs_p_^l(inc^d), ixg^ll,nwstart,nwbc)
322 call get_bc_comm_type(type_recv_p(inc^d),ixr_p_^l(inc^d),ixcog^l,nwstart,nwbc)
323 {end do\}
324 {end do\}
325
326 end subroutine create_bc_mpi_datatype
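 ! Note on the loop bounds above: ic^D runs over the fine-block children adjacent to
 ! direction i^D, and inc^D=2*i^D+ic^D maps (i,ic) onto 0..3 per dimension:
 ! i=-1 gives only ic=2 (inc=0), i=0 gives ic=1,2 (inc=1,2), and i=1 gives only ic=1 (inc=3),
 ! which is why the restrict/prolong index arrays are dimensioned 0:3.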
327
328 subroutine get_bc_comm_type(comm_type,ix^L,ixG^L,nwstart,nwbc)
330
331 integer, intent(inout) :: comm_type
332 integer, intent(in) :: ix^L, ixG^L, nwstart, nwbc
333
334 integer, dimension(ndim+1) :: fullsize, subsize, start
335
336 ^d&fullsize(^d)=ixgmax^d;
337 fullsize(ndim+1)=nw
338 ^d&subsize(^d)=ixmax^d-ixmin^d+1;
339 subsize(ndim+1)=nwbc
340 ^d&start(^d)=ixmin^d-1;
341 start(ndim+1)=nwstart-1
342
343 call mpi_type_create_subarray(ndim+1,fullsize,subsize,start,mpi_order_fortran, &
344 mpi_double_precision,comm_type,ierrmpi)
345 call mpi_type_commit(comm_type,ierrmpi)
346
347 end subroutine get_bc_comm_type
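 ! Rough illustration of the subarray type built above (example numbers only): in 2D with
 ! ixGmax1=ixGmax2=20, nw=10, a ghost strip ix^L=(1:2,3:18), nwstart=1 and nwbc=10, the call
 ! becomes mpi_type_create_subarray(3,(/20,20,10/),(/2,16,10/),(/0,2,0/),...), i.e. one MPI
 ! datatype that picks exactly that strip out of the full (20,20,10) block array for all
 ! selected variables, so no explicit packing is needed for cell-center data.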
348
349 !> update ghost cells of all blocks including physical boundaries
350 subroutine getbc(time,qdt,psb,nwstart,nwbc)
352 use mod_physics
353 use mod_coarsen, only: coarsen_grid
355 use mod_comm_lib, only: mpistop
356
357 double precision, intent(in) :: time, qdt
358 type(state), target :: psb(max_blocks)
359 integer, intent(in) :: nwstart ! Fill from nwstart
360 integer, intent(in) :: nwbc ! Number of variables to fill
361
362 double precision :: time_bcin
363 integer :: nwhead, nwtail
364 integer :: iigrid, igrid, isizes, i^D
365 integer :: isend_buf(npwbuf), ipwbuf, nghostcellsco
366 ! index pointer for buffer arrays as a start for a segment
367 integer :: ibuf_start, ibuf_next
368 ! shapes of reshape
369 integer, dimension(1) :: shapes
370 type(wbuffer) :: pwbuf(npwbuf)
371
372 time_bcin=mpi_wtime()
373
374 nwhead=nwstart
375 nwtail=nwstart+nwbc-1
376
377 ! fill internal physical boundary
378 if (internalboundary) then
379 call getintbc(time,ixg^ll)
380 end if
381
382 ! prepare coarse values to send to coarser neighbors
383 !$OMP PARALLEL DO SCHEDULE(dynamic) PRIVATE(igrid)
384 do iigrid=1,igridstail; igrid=igrids(iigrid);
385 if(any(neighbor_type(:^d&,igrid)==neighbor_coarse)) then
386 call coarsen_grid(psb(igrid),ixg^ll,ixm^l,psc(igrid),ixcog^l,ixcom^l)
387 end if
388 end do
389 !$OMP END PARALLEL DO
390
391 ! default : no singular axis
392 irecv_c=0
393 isend_c=0
394 isend_buf=0
395 ipwbuf=1
396
397 if(stagger_grid) then
398 ibuf_send_srl=1
399 ibuf_recv_srl=1
400 ibuf_send_r=1
401 ibuf_recv_r=1
402 ibuf_send_p=1
403 ibuf_recv_p=1
404 irecv_srl=0
405 irecv_r=0
406 irecv_p=0
407 isend_srl=0
408 isend_r=0
409 isend_p=0
410 end if
411
412 !$OMP PARALLEL SECTIONS PRIVATE(iigrid,igrid,i^D)
413 !$OMP SECTION
414 ! MPI receive ghost-cell values from sibling blocks and finer neighbors in different processors
415 do iigrid=1,igridstail; igrid=igrids(iigrid);
416 {do i^db=-1,1\}
417 if (skip_direction([ i^d ])) cycle
418 select case (neighbor_type(i^d,igrid))
419 case (neighbor_sibling)
420 call bc_recv_srl(igrid,i^d)
421 case (neighbor_fine)
422 call bc_recv_restrict(igrid,i^d)
423 end select
424 {end do\}
425 end do
426
427 ! MPI send ghost-cell values to sibling blocks and coarser neighbors in different processors
428 do iigrid=1,igridstail; igrid=igrids(iigrid);
429 {do i^db=-1,1\}
430 if(skip_direction([ i^d ])) cycle
431 select case (neighbor_type(i^d,igrid))
432 case (neighbor_sibling)
433 call bc_send_srl(igrid,i^d)
434 case (neighbor_coarse)
435 call bc_send_restrict(igrid,i^d)
436 end select
437 {end do\}
438 end do
439
440 call mpi_waitall(irecv_c,recvrequest_c_sr,recvstatus_c_sr,ierrmpi)
441 call mpi_waitall(isend_c,sendrequest_c_sr,sendstatus_c_sr,ierrmpi)
442 if(stagger_grid) then
443 call mpi_waitall(nrecv_bc_srl,recvrequest_srl,recvstatus_srl,ierrmpi)
444 call mpi_waitall(nsend_bc_srl,sendrequest_srl,sendstatus_srl,ierrmpi)
445 call mpi_waitall(nrecv_bc_r,recvrequest_r,recvstatus_r,ierrmpi)
446 call mpi_waitall(nsend_bc_r,sendrequest_r,sendstatus_r,ierrmpi)
447 end if
448
449 do ipwbuf=1,npwbuf
450 if (isend_buf(ipwbuf)/=0) deallocate(pwbuf(ipwbuf)%w)
451 end do
452 !$OMP SECTION
453 ! fill ghost-cell values of sibling blocks and coarser neighbors in the same processor
454 do iigrid=1,igridstail; igrid=igrids(iigrid);
455 {do i^db=-1,1\}
456 if(skip_direction([ i^d ])) cycle
457 select case (neighbor_type(i^d,igrid))
458 case(neighbor_sibling)
459 call bc_fill_srl(igrid,i^d)
460 case(neighbor_coarse)
461 call bc_fill_restrict(igrid,i^d)
462 end select
463 {end do\}
464 end do
465 !$OMP END PARALLEL SECTIONS
466
467 if(stagger_grid) then
468 ! unpack the received data from sibling blocks and finer neighbors to fill ghost-cell staggered values
469 ibuf_recv_srl=1
470 ibuf_recv_r=1
471 do iigrid=1,igridstail; igrid=igrids(iigrid);
472 {do i^db=-1,1\}
473 if (skip_direction([ i^d ])) cycle
474 select case (neighbor_type(i^d,igrid))
475 case (neighbor_sibling)
476 call bc_fill_srl_stg(igrid,i^d)
477 case (neighbor_fine)
478 call bc_fill_restrict_stg(igrid,i^d)
479 end select
480 {end do\}
481 end do
482 end if
483
484 irecv_c=0
485 isend_c=0
486 isend_buf=0
487 ipwbuf=1
488
489 !$OMP PARALLEL SECTIONS PRIVATE(iigrid,igrid,i^D)
490 !$OMP SECTION
491 ! MPI receive ghost-cell values from coarser neighbors in different processors
492 do iigrid=1,igridstail; igrid=igrids(iigrid);
493 {do i^db=-1,1\}
494 if (skip_direction([ i^d ])) cycle
495 if (neighbor_type(i^d,igrid)==neighbor_coarse) call bc_recv_prolong(igrid,i^d)
496 {end do\}
497 end do
498 ! MPI send ghost-cell values to finer neighbors in different processors
499 do iigrid=1,igridstail; igrid=igrids(iigrid);
500 {do i^db=-1,1\}
501 if (skip_direction([ i^d ])) cycle
502 if (neighbor_type(i^d,igrid)==neighbor_fine) call bc_send_prolong(igrid,i^d)
503 {end do\}
504 end do
505
506 call mpi_waitall(irecv_c,recvrequest_c_p,recvstatus_c_p,ierrmpi)
507 call mpi_waitall(isend_c,sendrequest_c_p,sendstatus_c_p,ierrmpi)
508
509 if(stagger_grid) then
510 call mpi_waitall(nrecv_bc_p,recvrequest_p,recvstatus_p,ierrmpi)
511 call mpi_waitall(nsend_bc_p,sendrequest_p,sendstatus_p,ierrmpi)
512 end if
513
514 do ipwbuf=1,npwbuf
515 if (isend_buf(ipwbuf)/=0) deallocate(pwbuf(ipwbuf)%w)
516 end do
517
518 !$OMP SECTION
519 ! fill coarse ghost-cell values of finer neighbors in the same processor
520 do iigrid=1,igridstail; igrid=igrids(iigrid);
521 {do i^db=-1,1\}
522 if (skip_direction([ i^d ])) cycle
523 if (neighbor_type(i^d,igrid)==neighbor_fine) call bc_fill_prolong(igrid,i^d)
524 {end do\}
525 end do
526 !$OMP END PARALLEL SECTIONS
527
528 if(stagger_grid) then
529 ! fill coarser representative ghost cells after receipt
530 ibuf_recv_p=1
531 do iigrid=1,igridstail; igrid=igrids(iigrid);
532 {do i^db=-1,1\}
533 if (skip_direction([ i^d ])) cycle
534 if(neighbor_type(i^d,igrid)==neighbor_coarse) call bc_fill_prolong_stg(igrid,i^d)
535 {end do\}
536 end do
537 end if
538 ! do prolongation on the ghost-cell values based on the received coarse values from coarser neighbors
539 !$OMP PARALLEL DO SCHEDULE(dynamic) PRIVATE(igrid)
540 do iigrid=1,igridstail; igrid=igrids(iigrid);
541 call gc_prolong(igrid)
542 end do
543 !$OMP END PARALLEL DO
544
545 ! fill physical boundary ghost cells after internal ghost-cell values exchange
546 if(bcphys) then
547 !$OMP PARALLEL DO SCHEDULE(dynamic) PRIVATE(igrid)
548 do iigrid=1,igridstail; igrid=igrids(iigrid);
549 if(.not.phyboundblock(igrid)) cycle
550 call fill_boundary_after_gc(igrid)
551 end do
552 !$OMP END PARALLEL DO
553 end if
554
555 ! modify normal component of magnetic field to fix divB=0
556 if(bcphys.and.associated(phys_boundary_adjust)) then
557 !$OMP PARALLEL DO SCHEDULE(dynamic) PRIVATE(igrid)
558 do iigrid=1,igridstail; igrid=igrids(iigrid);
559 if(.not.phyboundblock(igrid)) cycle
560 call phys_boundary_adjust(igrid,psb)
561 end do
562 !$OMP END PARALLEL DO
563 end if
564
565 time_bc=time_bc+(mpi_wtime()-time_bcin)
566
567 contains
568
569 logical function skip_direction(dir)
570 integer, intent(in) :: dir(^nd)
571
572 if (all(dir == 0)) then
573 skip_direction = .true.
574 else
575 skip_direction = .false.
576 end if
577 end function skip_direction
578
579 !> Physical boundary conditions
580 subroutine fill_boundary_after_gc(igrid)
581
582 integer, intent(in) :: igrid
583
584 integer :: idims,iside,i^d,k^l,ixb^l
585
586 block=>psb(igrid)
587 ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
588 do idims=1,ndim
589 ! to avoid using as yet unknown corner info in more than 1D, we
590 ! fill only interior mesh ranges of the ghost cell ranges at first,
591 ! and progressively enlarge the ranges to include corners later
592 kmin1=0; kmax1=0;
593 {^iftwod
594 kmin2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0,-1,igrid)==1)
595 kmax2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0, 1,igrid)==1)}
596 {^ifthreed
597 kmin2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0,-1,0,igrid)==1)
598 kmax2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0, 1,0,igrid)==1)
599 kmin3=merge(1, 0, idims .lt. 3 .and. neighbor_type(0,0,-1,igrid)==1)
600 kmax3=merge(1, 0, idims .lt. 3 .and. neighbor_type(0,0, 1,igrid)==1)}
601 ixbmin^d=ixglo^d+kmin^d*nghostcells;
602 ixbmax^d=ixghi^d-kmax^d*nghostcells;
603 do iside=1,2
604 i^d=kr(^d,idims)*(2*iside-3);
605 if (aperiodb(idims)) then
606 if (neighbor_type(i^d,igrid) /= neighbor_boundary .and. &
607 .not. psb(igrid)%is_physical_boundary(2*idims-2+iside)) cycle
608 else
609 if (neighbor_type(i^d,igrid) /= neighbor_boundary) cycle
610 end if
611 call bc_phys(iside,idims,time,qdt,psb(igrid),ixg^ll,ixb^l)
612 end do
613 end do
614
615 end subroutine fill_boundary_after_gc
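 ! What the k^L logic in fill_boundary_after_gc does, in a 2D sketch (assumed layout): when
 ! filling the idims=1 (x) boundary and the y=min side is also a physical boundary, kmin2=1
 ! shrinks the filled range to ixBmin2=ixGlo2+nghostcells so the still-undefined bottom
 ! corner ghosts are not used; the later idims=2 pass has k^L=0 and covers the full x-range,
 ! filling those corners once the x-boundary ghost values are valid.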
616
617 !> MPI receive from sibling at same refinement level
618 subroutine bc_recv_srl(igrid,i^D)
619 integer, intent(in) :: igrid,i^d
620
621 integer :: ipe_neighbor
622
623 ipe_neighbor=neighbor(2,i^d,igrid)
624 if (ipe_neighbor/=mype) then
625 irecv_c=irecv_c+1
626 itag=(3**^nd+4**^nd)*(igrid-1)+{(i^d+1)*3**(^d-1)+}
627 call mpi_irecv(psb(igrid)%w,1,type_recv_srl(i^d), &
628 ipe_neighbor,itag,icomm,recvrequest_c_sr(irecv_c),ierrmpi)
629 if(stagger_grid) then
630 irecv_srl=irecv_srl+1
631 call mpi_irecv(recvbuffer_srl(ibuf_recv_srl),sizes_srl_recv_total(i^d),mpi_double_precision, &
632 ipe_neighbor,itag,icomm,recvrequest_srl(irecv_srl),ierrmpi)
633 ibuf_recv_srl=ibuf_recv_srl+sizes_srl_recv_total(i^d)
634 end if
635 end if
636
637 end subroutine bc_recv_srl
638
639 !> MPI receive from fine neighbor
640 subroutine bc_recv_restrict(igrid,i^D)
641 integer, intent(in) :: igrid,i^d
642
643 integer :: ic^d,inc^d,ipe_neighbor
644
645 {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
646 inc^db=2*i^db+ic^db\}
647 ipe_neighbor=neighbor_child(2,inc^d,igrid)
648 if (ipe_neighbor/=mype) then
649 irecv_c=irecv_c+1
650 itag=(3**^nd+4**^nd)*(igrid-1)+3**^nd+{inc^d*4**(^d-1)+}
651 call mpi_irecv(psb(igrid)%w,1,type_recv_r(inc^d), &
652 ipe_neighbor,itag,icomm,recvrequest_c_sr(irecv_c),ierrmpi)
653 if(stagger_grid) then
654 irecv_r=irecv_r+1
655 call mpi_irecv(recvbuffer_r(ibuf_recv_r),sizes_r_recv_total(inc^d), &
656 mpi_double_precision,ipe_neighbor,itag, &
657 icomm,recvrequest_r(irecv_r),ierrmpi)
658 ibuf_recv_r=ibuf_recv_r+sizes_r_recv_total(inc^d)
659 end if
660 end if
661 {end do\}
662
663 end subroutine bc_recv_restrict
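 ! Sketch of the MPI tag scheme used by the send/receive routines in this file: each grid
 ! owns a block of 3**ndim+4**ndim consecutive tags (91 in 3D, 25 in 2D), always counted in
 ! the numbering of the receiving grid. Offsets 0..3**ndim-1 encode the sibling direction as
 ! the base-3 number {(i^D+1)*3**(^D-1)+}, and offsets from 3**ndim upward encode the child
 ! position inc^D (0..3 per dimension) as a base-4 number for restrict/prolong messages.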
664
665 !> MPI send to sibling at same refinement level
666 subroutine bc_send_srl(igrid,i^D)
667 integer, intent(in) :: igrid,i^d
668
669 integer :: n_i^d,ipole,idir,ineighbor,ipe_neighbor,ixs^l
670
671 ipe_neighbor=neighbor(2,i^d,igrid)
672 if(ipe_neighbor/=mype) then
673 ineighbor=neighbor(1,i^d,igrid)
674 ipole=neighbor_pole(i^d,igrid)
675 if(ipole==0) then
676 n_i^d=-i^d;
677 isend_c=isend_c+1
678 itag=(3**^nd+4**^nd)*(ineighbor-1)+{(n_i^d+1)*3**(^d-1)+}
679 call mpi_isend(psb(igrid)%w,1,type_send_srl(i^d), &
680 ipe_neighbor,itag,icomm,sendrequest_c_sr(isend_c),ierrmpi)
681 if(stagger_grid) then
682 ibuf_start=ibuf_send_srl
683 do idir=1,ndim
684 ixs^l=ixs_srl_stg_^l(idir,i^d);
685 ibuf_next=ibuf_start+sizes_srl_send_stg(idir,i^d)
686 shapes=(/sizes_srl_send_stg(idir,i^d)/)
687 sendbuffer_srl(ibuf_start:ibuf_next-1)=&
688 reshape(psb(igrid)%ws(ixs^s,idir),shapes)
689 ibuf_start=ibuf_next
690 end do
691 isend_srl=isend_srl+1
692 call mpi_isend(sendbuffer_srl(ibuf_send_srl),sizes_srl_send_total(i^d),&
693 mpi_double_precision, ipe_neighbor,itag,icomm,sendrequest_srl(isend_srl),ierrmpi)
694 ibuf_send_srl=ibuf_next
695 end if
696 else
697 ixs^l=ixs_srl_^l(i^d);
698 select case (ipole)
699 {case (^d)
700 n_i^d=i^d^d%n_i^dd=-i^dd;\}
701 end select
702 if (isend_buf(ipwbuf)/=0) then
703 call mpi_wait(sendrequest_c_sr(isend_buf(ipwbuf)), &
704 sendstatus_c_sr(:,isend_buf(ipwbuf)),ierrmpi)
705 deallocate(pwbuf(ipwbuf)%w)
706 end if
707 allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
708 call pole_buffer(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psb(igrid)%w,ixg^ll,ixs^l,ipole)
709 isend_c=isend_c+1
710 isend_buf(ipwbuf)=isend_c
711 itag=(3**^nd+4**^nd)*(ineighbor-1)+{(n_i^d+1)*3**(^d-1)+}
712 isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
713 call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
714 ipe_neighbor,itag,icomm,sendrequest_c_sr(isend_c),ierrmpi)
715 ipwbuf=1+modulo(ipwbuf,npwbuf)
716 if(stagger_grid) then
717 ibuf_start=ibuf_send_srl
718 do idir=1,ndim
719 ixs^l=ixs_srl_stg_^l(idir,i^d);
720 ibuf_next=ibuf_start+sizes_srl_send_stg(idir,i^d)
721 shapes=(/sizes_srl_send_stg(idir,i^d)/)
722 sendbuffer_srl(ibuf_start:ibuf_next-1)=&
723 reshape(psb(igrid)%ws(ixs^s,idir),shapes)
724 ibuf_start=ibuf_next
725 end do
726 isend_srl=isend_srl+1
727 call mpi_isend(sendbuffer_srl(ibuf_send_srl),sizes_srl_send_total(i^d),&
728 mpi_double_precision, ipe_neighbor,itag,icomm,sendrequest_srl(isend_srl),ierrmpi)
729 ibuf_send_srl=ibuf_next
730 end if
731 end if
732 end if
733
734 end subroutine bc_send_srl
735
736 !> MPI send to coarser neighbor's ghost cells
737 subroutine bc_send_restrict(igrid,i^D)
738 integer, intent(in) :: igrid,i^d
739
740 integer :: ic^d,n_inc^d,ipole,idir,ineighbor,ipe_neighbor,ixs^l
741
742 ipe_neighbor=neighbor(2,i^d,igrid)
743 if(ipe_neighbor/=mype) then
744 ic^d=1+modulo(node(pig^d_,igrid)-1,2);
745 if({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
746 ineighbor=neighbor(1,i^d,igrid)
747 ipole=neighbor_pole(i^d,igrid)
748 if(ipole==0) then
749 n_inc^d=-2*i^d+ic^d;
750 isend_c=isend_c+1
751 itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
752 call mpi_isend(psc(igrid)%w,1,type_send_r(i^d), &
753 ipe_neighbor,itag,icomm,sendrequest_c_sr(isend_c),ierrmpi)
754 if(stagger_grid) then
755 ibuf_start=ibuf_send_r
756 do idir=1,ndim
757 ixs^l=ixs_r_stg_^l(idir,i^d);
758 ibuf_next=ibuf_start+sizes_r_send_stg(idir,i^d)
759 shapes=(/sizes_r_send_stg(idir,i^d)/)
760 sendbuffer_r(ibuf_start:ibuf_next-1)=&
761 reshape(psc(igrid)%ws(ixs^s,idir),shapes)
762 ibuf_start=ibuf_next
763 end do
764 isend_r=isend_r+1
765 call mpi_isend(sendbuffer_r(ibuf_send_r),sizes_r_send_total(i^d),&
766 mpi_double_precision,ipe_neighbor,itag, &
767 icomm,sendrequest_r(isend_r),ierrmpi)
768 ibuf_send_r=ibuf_next
769 end if
770 else
771 ixs^l=ixs_r_^l(i^d);
772 select case (ipole)
773 {case (^d)
774 n_inc^d=2*i^d+(3-ic^d)^d%n_inc^dd=-2*i^dd+ic^dd;\}
775 end select
776 if(isend_buf(ipwbuf)/=0) then
777 call mpi_wait(sendrequest_c_sr(isend_buf(ipwbuf)), &
778 sendstatus_c_sr(:,isend_buf(ipwbuf)),ierrmpi)
779 deallocate(pwbuf(ipwbuf)%w)
780 end if
781 allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
782 call pole_buffer(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psc(igrid)%w,ixcog^l,ixs^l,ipole)
783 isend_c=isend_c+1
784 isend_buf(ipwbuf)=isend_c
785 itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
786 isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
787 call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
788 ipe_neighbor,itag,icomm,sendrequest_c_sr(isend_c),ierrmpi)
789 ipwbuf=1+modulo(ipwbuf,npwbuf)
790 if(stagger_grid) then
791 ibuf_start=ibuf_send_r
792 do idir=1,ndim
793 ixs^l=ixs_r_stg_^l(idir,i^d);
794 ibuf_next=ibuf_start+sizes_r_send_stg(idir,i^d)
795 shapes=(/sizes_r_send_stg(idir,i^d)/)
796 sendbuffer_r(ibuf_start:ibuf_next-1)=&
797 reshape(psc(igrid)%ws(ixs^s,idir),shapes)
798 ibuf_start=ibuf_next
799 end do
800 isend_r=isend_r+1
801 call mpi_isend(sendbuffer_r(ibuf_send_r),sizes_r_send_total(i^d),&
802 mpi_double_precision,ipe_neighbor,itag, &
803 icomm,sendrequest_r(isend_r),ierrmpi)
804 ibuf_send_r=ibuf_next
805 end if
806 end if
807 end if
808
809 end subroutine bc_send_restrict
810
811 !> fill same-level neighbor's ghost cells in the same processor
812 subroutine bc_fill_srl(igrid,i^D)
813 integer, intent(in) :: igrid,i^d
814
815 integer :: ineighbor,ipe_neighbor,ipole,ixs^l,ixr^l,n_i^d,idir
816
817 ipe_neighbor=neighbor(2,i^d,igrid)
818 if(ipe_neighbor==mype) then
819 ineighbor=neighbor(1,i^d,igrid)
820 ipole=neighbor_pole(i^d,igrid)
821 if(ipole==0) then
822 n_i^d=-i^d;
823 ixs^l=ixs_srl_^l(i^d);
824 ixr^l=ixr_srl_^l(n_i^d);
825 psb(ineighbor)%w(ixr^s,nwhead:nwtail)=&
826 psb(igrid)%w(ixs^s,nwhead:nwtail)
827 if(stagger_grid) then
828 do idir=1,ndim
829 ixs^l=ixs_srl_stg_^l(idir,i^d);
830 ixr^l=ixr_srl_stg_^l(idir,n_i^d);
831 psb(ineighbor)%ws(ixr^s,idir)=psb(igrid)%ws(ixs^s,idir)
832 end do
833 end if
834 else
835 ixs^l=ixs_srl_^l(i^d);
836 select case (ipole)
837 {case (^d)
838 n_i^d=i^d^d%n_i^dd=-i^dd;\}
839 end select
840 ixr^l=ixr_srl_^l(n_i^d);
841 call pole_copy(psb(ineighbor)%w,ixg^ll,ixr^l,psb(igrid)%w,ixg^ll,ixs^l,ipole)
842 if(stagger_grid) then
843 do idir=1,ndim
844 ixs^l=ixs_srl_stg_^l(idir,i^d);
845 ixr^l=ixr_srl_stg_^l(idir,n_i^d);
846 call pole_copy_stg(psb(ineighbor)%ws,ixgs^ll,ixr^l,psb(igrid)%ws,ixgs^ll,ixs^l,idir,ipole)
847 end do
848 end if
849 end if
850 end if
851
852 end subroutine bc_fill_srl
853
854 !> fill coarser neighbor's ghost cells in the same processor
855 subroutine bc_fill_restrict(igrid,i^D)
856 integer, intent(in) :: igrid,i^d
857
858 integer :: ic^d,n_inc^d,ixs^l,ixr^l,ipe_neighbor,ineighbor,ipole,idir
859
860 ipe_neighbor=neighbor(2,i^d,igrid)
861 if(ipe_neighbor==mype) then
862 ic^d=1+modulo(node(pig^d_,igrid)-1,2);
863 if({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
864 ineighbor=neighbor(1,i^d,igrid)
865 ipole=neighbor_pole(i^d,igrid)
866 if(ipole==0) then
867 n_inc^d=-2*i^d+ic^d;
868 ixs^l=ixs_r_^l(i^d);
869 ixr^l=ixr_r_^l(n_inc^d);
870 psb(ineighbor)%w(ixr^s,nwhead:nwtail)=&
871 psc(igrid)%w(ixs^s,nwhead:nwtail)
872 if(stagger_grid) then
873 do idir=1,ndim
874 ixs^l=ixs_r_stg_^l(idir,i^d);
875 ixr^l=ixr_r_stg_^l(idir,n_inc^d);
876 psb(ineighbor)%ws(ixr^s,idir)=psc(igrid)%ws(ixs^s,idir)
877 end do
878 end if
879 else
880 ixs^l=ixs_r_^l(i^d);
881 select case (ipole)
882 {case (^d)
883 n_inc^d=2*i^d+(3-ic^d)^d%n_inc^dd=-2*i^dd+ic^dd;\}
884 end select
885 ixr^l=ixr_r_^l(n_inc^d);
886 call pole_copy(psb(ineighbor)%w,ixg^ll,ixr^l,psc(igrid)%w,ixcog^l,ixs^l,ipole)
887 if(stagger_grid) then
888 do idir=1,ndim
889 ixs^l=ixs_r_stg_^l(idir,i^d);
890 ixr^l=ixr_r_stg_^l(idir,n_inc^d);
891 !! Fill ghost cells
892 call pole_copy_stg(psb(ineighbor)%ws,ixgs^ll,ixr^l,psc(igrid)%ws,ixcogs^l,ixs^l,idir,ipole)
893 end do
894 end if
895 end if
896 end if
897
898 end subroutine bc_fill_restrict
899
900 !> fill siblings ghost cells with received data
901 subroutine bc_fill_srl_stg(igrid,i^D)
902 integer, intent(in) :: igrid,i^d
903
904 integer :: ixs^l,ixr^l,n_i^d,idir,ineighbor,ipe_neighbor,ipole
905
906 ipe_neighbor=neighbor(2,i^d,igrid)
907 if(ipe_neighbor/=mype) then
908 ineighbor=neighbor(1,i^d,igrid)
909 ipole=neighbor_pole(i^d,igrid)
910
911 !! Now the special treatment of the pole is done here, at the receive step
912 if (ipole==0) then
913 ixr^l=ixr_srl_^l(i^d);
914 !! Unpack the buffer and fill the ghost cells
915 n_i^d=-i^d;
916 do idir=1,ndim
917 ixs^l=ixs_srl_stg_^l(idir,n_i^d);
918 ixr^l=ixr_srl_stg_^l(idir,i^d);
919 ibuf_next=ibuf_recv_srl+sizes_srl_recv_stg(idir,i^d)
920 psb(igrid)%ws(ixr^s,idir)=reshape(source=recvbuffer_srl(ibuf_recv_srl:ibuf_next-1),&
921 shape=shape(psb(igrid)%ws(ixs^s,idir)))
922 ibuf_recv_srl=ibuf_next
923 end do
924 else ! There is a pole
925 select case (ipole)
926 {case (^d)
927 n_i^d=i^d^d%n_i^dd=-i^dd;\}
928 end select
929 pole_buf%ws=zero
930 do idir=1,ndim
931 ixr^l=ixr_srl_stg_^l(idir,i^d);
932 ixs^l=ixs_srl_stg_^l(idir,n_i^d);
933 ibuf_next=ibuf_recv_srl+sizes_srl_recv_stg(idir,i^d)
934 pole_buf%ws(ixs^s,idir)=reshape(source=recvbuffer_srl(ibuf_recv_srl:ibuf_next-1),&
935 shape=shape(psb(igrid)%ws(ixs^s,idir)))
936 ibuf_recv_srl=ibuf_next
937 call pole_copy_stg(psb(igrid)%ws,ixgs^ll,ixr^l,pole_buf%ws,ixgs^ll,ixs^l,idir,ipole)
938 end do
939 end if
940 end if
941
942 end subroutine bc_fill_srl_stg
943
944 !> fill restricted ghost cells after receipt
945 subroutine bc_fill_restrict_stg(igrid,i^D)
946 integer, intent(in) :: igrid,i^d
947
948 integer :: ipole,ic^d,inc^d,ineighbor,ipe_neighbor,ixs^l,ixr^l,n_i^d,idir
949
950 ipole=neighbor_pole(i^d,igrid)
951 if (ipole==0) then
952 ! Loop over the children ic^D and their neighbor indices inc^D
953 {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
954 inc^db=2*i^db+ic^db\}
955 ipe_neighbor=neighbor_child(2,inc^d,igrid)
956 if(ipe_neighbor/=mype) then
957 ineighbor=neighbor_child(1,inc^d,igrid)
958 n_i^d=-i^d;
959 !! Unpack the buffer and fill the ghost cells
960 do idir=1,ndim
961 ixr^l=ixr_r_stg_^l(idir,inc^d);
962 ibuf_next=ibuf_recv_r+sizes_r_recv_stg(idir,inc^d)
963 psb(igrid)%ws(ixr^s,idir)=reshape(source=recvbuffer_r(ibuf_recv_r:ibuf_next-1),&
964 shape=shape(psb(igrid)%ws(ixr^s,idir)))
965 ibuf_recv_r=ibuf_next
966 end do
967 end if
968 {end do\}
969 else !! There is a pole
970 {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
971 inc^db=2*i^db+ic^db\}
972 ipe_neighbor=neighbor_child(2,inc^d,igrid)
973 if(ipe_neighbor/=mype) then
974 ineighbor=neighbor_child(1,inc^d,igrid)
975 select case(ipole)
976 {case (^d)
977 n_i^d=i^d^d%n_i^dd=-i^dd;\}
978 end select
979 ixr^l=ixr_r_^l(inc^d);
980 !! Unpack the buffer and fill an auxiliary array
981 pole_buf%ws=zero
982 do idir=1,ndim
983 ixs^l=ixs_r_stg_^l(idir,n_i^d);
984 ixr^l=ixr_r_stg_^l(idir,inc^d);
985 ibuf_next=ibuf_recv_r+sizes_r_recv_stg(idir,inc^d)
986 pole_buf%ws(ixr^s,idir)=reshape(source=recvbuffer_r(ibuf_recv_r:ibuf_next-1),&
987 shape=shape(psb(igrid)%ws(ixr^s,idir)))
988 call pole_copy_stg(psb(igrid)%ws,ixgs^ll,ixr^l,pole_buf%ws,ixgs^ll,ixr^l,idir,ipole)
989 ibuf_recv_r=ibuf_next
990 end do
991 end if
992 {end do\}
993 end if
994
995 end subroutine bc_fill_restrict_stg
996
997 !> Receive from coarse neighbor
998 subroutine bc_recv_prolong(igrid,i^D)
999 integer, intent(in) :: igrid,i^d
1000
1001 integer :: ic^d,ipe_neighbor,inc^d
1002
1003 ic^d=1+modulo(node(pig^d_,igrid)-1,2);
1004 if ({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
1005
1006 ipe_neighbor=neighbor(2,i^d,igrid)
1007 if (ipe_neighbor/=mype) then
1008 irecv_c=irecv_c+1
1009 inc^d=ic^d+i^d;
1010 itag=(3**^nd+4**^nd)*(igrid-1)+3**^nd+{inc^d*4**(^d-1)+}
1011 call mpi_irecv(psc(igrid)%w,1,type_recv_p(inc^d), &
1012 ipe_neighbor,itag,icomm,recvrequest_c_p(irecv_c),ierrmpi)
1013 if(stagger_grid) then
1014 irecv_p=irecv_p+1
1015 call mpi_irecv(recvbuffer_p(ibuf_recv_p),sizes_p_recv_total(inc^d),&
1016 mpi_double_precision,ipe_neighbor,itag,&
1017 icomm,recvrequest_p(irecv_p),ierrmpi)
1018 ibuf_recv_p=ibuf_recv_p+sizes_p_recv_total(inc^d)
1019 end if
1020 end if
1021
1022 end subroutine bc_recv_prolong
1023
1024 !> Send to finer neighbor
1025 subroutine bc_send_prolong(igrid,i^D)
1026 integer, intent(in) :: igrid,i^d
1027
1028 integer :: ic^d,inc^d,n_i^d,n_inc^d,ineighbor,ipe_neighbor,ixs^l,ipole,idir
1029
1030 ipole=neighbor_pole(i^d,igrid)
1031
1032 {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1033 inc^db=2*i^db+ic^db\}
1034 ipe_neighbor=neighbor_child(2,inc^d,igrid)
1035 if(ipe_neighbor/=mype) then
1036 ineighbor=neighbor_child(1,inc^d,igrid)
1037 if(ipole==0) then
1038 n_i^d=-i^d;
1039 n_inc^d=ic^d+n_i^d;
1040 isend_c=isend_c+1
1041 itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
1042 call mpi_isend(psb(igrid)%w,1,type_send_p(inc^d), &
1043 ipe_neighbor,itag,icomm,sendrequest_c_p(isend_c),ierrmpi)
1044 if(stagger_grid) then
1045 ibuf_start=ibuf_send_p
1046 do idir=1,ndim
1047 ixs^l=ixs_p_stg_^l(idir,inc^d);
1048 ibuf_next=ibuf_start+sizes_p_send_stg(idir,inc^d)
1049 shapes=(/sizes_p_send_stg(idir,inc^d)/)
1050 sendbuffer_p(ibuf_start:ibuf_next-1)=&
1051 reshape(psb(igrid)%ws(ixs^s,idir),shapes)
1052 ibuf_start=ibuf_next
1053 end do
1054 isend_p=isend_p+1
1055 call mpi_isend(sendbuffer_p(ibuf_send_p),sizes_p_send_total(inc^d),&
1056 mpi_double_precision,ipe_neighbor,itag, &
1057 icomm,sendrequest_p(isend_p),ierrmpi)
1058 ibuf_send_p=ibuf_next
1059 end if
1060 else
1061 ixs^l=ixs_p_^l(inc^d);
1062 select case (ipole)
1063 {case (^d)
1064 n_inc^d=inc^d^d%n_inc^dd=ic^dd-i^dd;\}
1065 end select
1066 if(isend_buf(ipwbuf)/=0) then
1067 call mpi_wait(sendrequest_c_p(isend_buf(ipwbuf)), &
1068 sendstatus_c_p(:,isend_buf(ipwbuf)),ierrmpi)
1069 deallocate(pwbuf(ipwbuf)%w)
1070 end if
1071 allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
1072 call pole_buffer(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psb(igrid)%w,ixg^ll,ixs^l,ipole)
1073 isend_c=isend_c+1
1074 isend_buf(ipwbuf)=isend_c
1075 itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
1076 isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
1077 call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
1078 ipe_neighbor,itag,icomm,sendrequest_c_p(isend_c),ierrmpi)
1079 ipwbuf=1+modulo(ipwbuf,npwbuf)
1080 if(stagger_grid) then
1081 ibuf_start=ibuf_send_p
1082 do idir=1,ndim
1083 ixs^l=ixs_p_stg_^l(idir,inc^d);
1084 ibuf_next=ibuf_start+sizes_p_send_stg(idir,inc^d)
1085 shapes=(/sizes_p_send_stg(idir,inc^d)/)
1086 sendbuffer_p(ibuf_start:ibuf_next-1)=&
1087 reshape(psb(igrid)%ws(ixs^s,idir),shapes)
1088 ibuf_start=ibuf_next
1089 end do
1090 isend_p=isend_p+1
1091 call mpi_isend(sendbuffer_p(ibuf_send_p),sizes_p_send_total(inc^d),&
1092 mpi_double_precision,ipe_neighbor,itag, &
1093 icomm,sendrequest_p(isend_p),ierrmpi)
1094 ibuf_send_p=ibuf_next
1095 end if
1096 end if
1097 end if
1098 {end do\}
1099
1100 end subroutine bc_send_prolong
1101
1102 !> fill coarse ghost-cell values of finer neighbors in the same processor
1103 subroutine bc_fill_prolong(igrid,i^D)
1104 integer, intent(in) :: igrid,i^d
1105
1106 integer :: ipe_neighbor,ineighbor,ixs^l,ixr^l,ic^d,inc^d,n_i^d,n_inc^d,ipole,idir
1107
1108 ipole=neighbor_pole(i^d,igrid)
1109
1110 if(ipole==0) then
1111 {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1112 inc^db=2*i^db+ic^db\}
1113 ipe_neighbor=neighbor_child(2,inc^d,igrid)
1114 if(ipe_neighbor==mype) then
1115 ixs^l=ixs_p_^l(inc^d);
1116 ineighbor=neighbor_child(1,inc^d,igrid)
1117 n_i^d=-i^d;
1118 n_inc^d=ic^d+n_i^d;
1119 ixr^l=ixr_p_^l(n_inc^d);
1120 psc(ineighbor)%w(ixr^s,nwhead:nwtail) &
1121 =psb(igrid)%w(ixs^s,nwhead:nwtail)
1122 if(stagger_grid) then
1123 do idir=1,ndim
1124 ixs^l=ixs_p_stg_^l(idir,inc^d);
1125 ixr^l=ixr_p_stg_^l(idir,n_inc^d);
1126 psc(ineighbor)%ws(ixr^s,idir)=psb(igrid)%ws(ixs^s,idir)
1127 end do
1128 end if
1129 end if
1130 {end do\}
1131 else
1132 {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1133 inc^db=2*i^db+ic^db\}
1134 ipe_neighbor=neighbor_child(2,inc^d,igrid)
1135 if(ipe_neighbor==mype) then
1136 ixs^l=ixs_p_^l(inc^d);
1137 ineighbor=neighbor_child(1,inc^d,igrid)
1138 select case (ipole)
1139 {case (^d)
1140 n_inc^d=inc^d^d%n_inc^dd=ic^dd-i^dd;\}
1141 end select
1142 ixr^l=ixr_p_^l(n_inc^d);
1143 call pole_copy(psc(ineighbor)%w,ixcog^l,ixr^l,psb(igrid)%w,ixg^ll,ixs^l,ipole)
1144 if(stagger_grid) then
1145 do idir=1,ndim
1146 ixs^l=ixs_p_stg_^l(idir,inc^d);
1147 ixr^l=ixr_p_stg_^l(idir,n_inc^d);
1148 call pole_copy_stg(psc(ineighbor)%ws,ixcogs^l,ixr^l,psb(igrid)%ws,ixgs^ll,ixs^l,idir,ipole)
1149 end do
1150 end if
1151 end if
1152 {end do\}
1153 end if
1154 end subroutine bc_fill_prolong
1155
1156 subroutine gc_prolong(igrid)
1157 integer, intent(in) :: igrid
1158
1159 integer :: i^d,idims,iside
1160 logical,dimension(-1:1^D&) :: needprolong
1161
1162 needprolong=.false.
1163 {do i^db=-1,1\}
1164 if (skip_direction([ i^d ])) cycle
1165 if (neighbor_type(i^d,igrid)==neighbor_coarse) then
1166 call bc_prolong(igrid,i^d)
1167 needprolong(i^d)=.true.
1168 end if
1169 {end do\}
1170 if(stagger_grid) then
1171 ! Ghost cell prolongation for staggered variables
1172 ! must be done in a specific order.
1173 ! First the first neighbours, which have 2 indices=0 in 3D
1174 ! or one index=0 in 2D
1175 block=>psb(igrid)
1176 do idims=1,ndim
1177 i^d=0;
1178 select case(idims)
1179 {case(^d)
1180 do i^d=-1,1,2
1181 if (needprolong(i^dd)) call bc_prolong_stg(igrid,i^dd,needprolong)
1182 end do
1183 \}
1184 end select
1185 end do
1186 ! Then the second neighbours which have 1 index=0 in 3D
1187 ! (Only in 3D)
1188 {^ifthreed
1189 i1=0;
1190 do i2=-1,1,2
1191 do i3=-1,1,2
1192 if (needprolong(i^d)) call bc_prolong_stg(igrid,i^d,needprolong)
1193 end do
1194 end do
1195 i2=0;
1196 do i3=-1,1,2
1197 do i1=-1,1,2
1198 if (needprolong(i^d)) call bc_prolong_stg(igrid,i^d,needprolong)
1199 end do
1200 end do
1201 i3=0;
1202 do i1=-1,1,2
1203 do i2=-1,1,2
1204 if (needprolong(i^d)) call bc_prolong_stg(igrid,i^d,needprolong)
1205 end do
1206 end do
1207 }
1208 ! Finally, the corners, that have no index=0
1209 {do i^d=-1,1,2\}
1210 if (needprolong(i^d)) call bc_prolong_stg(igrid,i^d,needprolong)
1211 {end do\}
1212 end if
1213 end subroutine gc_prolong
1214
1215 !> fill coarser representative with data from coarser neighbors
1216 subroutine bc_fill_prolong_stg(igrid,i^D)
1217 integer, intent(in) :: igrid,i^d
1218
1219 integer :: ipe_neighbor,ineighbor,ipole,ixr^l,ic^d,inc^d,n_inc^d,idir
1220
1221 ic^d=1+modulo(node(pig^d_,igrid)-1,2);
1222 if ({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
1223
1224 ipe_neighbor=neighbor(2,i^d,igrid)
1225 if(ipe_neighbor/=mype) then
1226 ineighbor=neighbor(1,i^d,igrid)
1227 ipole=neighbor_pole(i^d,igrid)
1228
1229 if (ipole==0) then !! There is no pole
1230 inc^d=ic^d+i^d;
1231 ixr^l=ixr_p_^l(inc^d);
1232 do idir=1,ndim
1233 ixr^l=ixr_p_stg_^l(idir,inc^d);
1234 ibuf_next=ibuf_recv_p+sizes_p_recv_stg(idir,inc^d)
1235 psc(igrid)%ws(ixr^s,idir)=reshape(source=recvbuffer_p(ibuf_recv_p:ibuf_next-1),&
1236 shape=shape(psc(igrid)%ws(ixr^s,idir)))
1237 ibuf_recv_p=ibuf_next
1238 end do
1239 else !! There is a pole
1240 inc^d=ic^d+i^d;
1241 select case (ipole)
1242 {case (^d)
1243 n_inc^d=2*i^d+(3-ic^d)^d%n_inc^dd=-2*i^dd+ic^dd;\}
1244 end select
1245 !! Unpack the buffer and fill an auxiliary array
1246 pole_buf%ws=zero
1247 do idir=1,ndim
1248 ixr^l=ixr_p_stg_^l(idir,inc^d);
1249 ibuf_next=ibuf_recv_p+sizes_p_recv_stg(idir,inc^d)
1250 pole_buf%ws(ixr^s,idir)=reshape(source=recvbuffer_p(ibuf_recv_p:ibuf_next-1),&
1251 shape=shape(psc(igrid)%ws(ixr^s,idir)))
1252 call pole_copy_stg(psc(igrid)%ws,ixcogs^l,ixr^l,pole_buf%ws,ixgs^ll,ixr^l,idir,ipole)
1253 ibuf_recv_p=ibuf_next
1254 end do
1255 end if
1256 end if
1257
1258 end subroutine bc_fill_prolong_stg
1259
1260 !> do prolongation for fine blocks after receiving data from coarse neighbors
1261 subroutine bc_prolong(igrid,i^D)
1263
1264 double precision :: dxfi^d, dxco^d, xfimin^d, xcomin^d, invdxco^d
1265 integer :: i^d,igrid
1266 integer :: ixfi^l,ixco^l,ii^d, idims,iside,ixb^l
1267
1268 ixfi^l=ixr_srl_^l(i^d);
1269 dxfi^d=rnode(rpdx^d_,igrid);
1270 dxco^d=two*dxfi^d;
1271 invdxco^d=1.d0/dxco^d;
1272
1273 ! compute the enlarged grid lower left corner coordinates
1274 ! these are true coordinates for an equidistant grid,
1275 ! but we can temporarily also use them for getting indices
1276 ! in stretched grids
1277 xfimin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxfi^d;
1278 xcomin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxco^d;
1279
1280 if(phyboundblock(igrid).and.bcphys) then
1281 block=>psc(igrid)
1282 do idims=1,ndim
1283 ixcomin^d=int((xfimin^d+(dble(ixfimin^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1-1;
1284 ixcomax^d=int((xfimin^d+(dble(ixfimax^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1+1;
1285 {^ifthreed
1286 ! avoid using undetermined ghost cells at physical boundary edges
1287 if(idims == 1) then
1288 if(neighbor_type(-1,0,0,igrid)==neighbor_boundary .or. &
1289 neighbor_type(1,0,0,igrid)==neighbor_boundary) then
1290 if(neighbor_type(0,-1,0,igrid)==neighbor_boundary) ixcomin2=ixcommin2
1291 if(neighbor_type(0,0,-1,igrid)==neighbor_boundary) ixcomin3=ixcommin3
1292 if(neighbor_type(0,1,0,igrid)==neighbor_boundary) ixcomax2=ixcommax2
1293 if(neighbor_type(0,0,1,igrid)==neighbor_boundary) ixcomax3=ixcommax3
1294 end if
1295 else if(idims == 2) then
1296 if(neighbor_type(0,-1,0,igrid)==neighbor_boundary .or. &
1297 neighbor_type(0,1,0,igrid)==neighbor_boundary) then
1298 if(neighbor_type(0,0,-1,igrid)==neighbor_boundary) ixcomin3=ixcommin3
1299 if(neighbor_type(0,0,1,igrid)==neighbor_boundary) ixcomax3=ixcommax3
1300 end if
1301 end if
1302 }
1303 do iside=1,2
1304 ii^d=kr(^d,idims)*(2*iside-3);
1305 if(neighbor_type(ii^d,igrid)/=neighbor_boundary) cycle
1306 if(( {(iside==1.and.idims==^d.and.ixcomin^d<ixcogmin^d+nghostcells)|.or.} ) &
1307 .or.( {(iside==2.and.idims==^d.and.ixcomax^d>ixcogmax^d-nghostcells)|.or. })) then
1308 {ixbmin^d=merge(ixcogmin^d,ixcomin^d,idims==^d);}
1309 {ixbmax^d=merge(ixcogmax^d,ixcomax^d,idims==^d);}
1310 call bc_phys(iside,idims,time,0.d0,psc(igrid),ixcog^l,ixb^l)
1311 end if
1312 end do
1313 end do
1314 end if
1315
1316 if(prolongprimitive) then
1317 ! following line again assumes equidistant grid, but
1318 ! just computes indices, so also ok for stretched case
1319 ! reason for +1-1 and +1+1: the coarse representation has
1320 ! also nghostcells at each side. During
1321 ! prolongation, we need cells to left and right, hence -1/+1
1322 block=>psc(igrid)
1323 ixcomin^d=int((xfimin^d+(dble(ixfimin^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1-1;
1324 ixcomax^d=int((xfimin^d+(dble(ixfimax^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1+1;
1325 call phys_to_primitive(ixcog^l,ixco^l,psc(igrid)%w,psc(igrid)%x)
1326 end if
1327
1328 if(ghost_copy) then
1329 call interpolation_copy(igrid,ixfi^l,dxfi^d,xfimin^d,dxco^d,invdxco^d,xcomin^d)
1330 else
1331 call interpolation_linear(igrid,ixfi^l,dxfi^d,xfimin^d,dxco^d,invdxco^d,xcomin^d)
1332 end if
1333
1334 if(prolongprimitive) then
1335 block=>psc(igrid)
1336 call phys_to_conserved(ixcog^l,ixco^l,psc(igrid)%w,psc(igrid)%x)
1337 end if
1338
1339 end subroutine bc_prolong
1340
1341 subroutine bc_prolong_stg(igrid,i^D,NeedProlong)
1342 use mod_amr_fct
1343 double precision :: dxfi^d,dxco^d,xfimin^d,xcomin^d,invdxco^d
1344 integer :: igrid,i^d
1345 integer :: ixfi^l,ixco^l
1346 logical,dimension(-1:1^D&) :: needprolong
1347 logical :: fine_^lin
1348 ! Check what is already at the desired level
1349 fine_^lin=.false.;
1350 {
1351 if(i^d>-1) fine_min^din=(.not.needprolong(i^dd-kr(^d,^dd)).and.neighbor_type(i^dd-kr(^d,^dd),igrid)/=1)
1352 if(i^d<1) fine_max^din=(.not.needprolong(i^dd+kr(^d,^dd)).and.neighbor_type(i^dd+kr(^d,^dd),igrid)/=1)
1353 \}
1354
1355 ixfi^l=ixr_srl_^l(i^d);
1356
1357 dxfi^d=rnode(rpdx^d_,igrid);
1358 dxco^d=two*dxfi^d;
1359 invdxco^d=1.d0/dxco^d;
1360
1361 xfimin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxfi^d;
1362 xcomin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxco^d;
1363
1364 ! moved the physical boundary filling here, to only fill the
1365 ! part needed
1366
1367 ixcomin^d=int((xfimin^d+(dble(ixfimin^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1-1;
1368 ixcomax^d=int((xfimin^d+(dble(ixfimax^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1+1;
1369
1370 if(prolongprimitive) call phys_to_primitive(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)
1371
1372 call prolong_2nd_stg(psc(igrid),psb(igrid),ixco^l,ixfi^l,dxco^d,xcomin^d,dxfi^d,xfimin^d,.true.,fine_^lin)
1373
1374 if(prolongprimitive) call phys_to_conserved(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)
1375
1376 ! The current region has already been refined, so it does not need to be prolonged again
1377 needprolong(i^d)=.false.
1378
1379 end subroutine bc_prolong_stg
1380
1381 subroutine interpolation_linear(igrid,ixFi^L,dxFi^D,xFimin^D, &
1382 dxCo^D,invdxCo^D,xComin^D)
1384 integer, intent(in) :: igrid, ixfi^l
1385 double precision, intent(in) :: dxfi^d, xfimin^d,dxco^d, invdxco^d, xcomin^d
1386
1387 double precision :: xco^d, xfi^d, eta^d
1388 double precision :: slopel, sloper, slopec, signc, signr
1389 double precision :: slope(1:nw,ndim)
1390 !!double precision :: local_invdxCo^D
1391 double precision :: signedfactorhalf^d
1392 integer :: ixco^d, jxco^d, hxco^d, ixfi^d, ix^d, iw, idims, nwmin,nwmax
1393 !integer :: ixshift^D, icase
1394
1395 !icase=mod(nghostcells,2)
1396
1397 if(prolongprimitive) then
1398 nwmin=1
1399 nwmax=nw
1400 else
1401 nwmin=nwhead
1402 nwmax=nwtail
1403 end if
1404
1405 {do ixfi^db = ixfi^lim^db
1406 ! cell-centered coordinates of fine grid point
1407 ! here we temporarily use an equidistant grid
1408 xfi^db=xfimin^db+(dble(ixfi^db)-half)*dxfi^db
1409
1410 ! indices of coarse cell which contains the fine cell
1411 ! since we computed lower left corner earlier
1412 ! in equidistant fashion: also ok for stretched case
1413 ixco^db=int((xfi^db-xcomin^db)*invdxco^db)+1
1414
1415 ! cell-centered coordinates of coarse grid point
1416 ! here we temporarily use an equidistant grid
1417 xco^db=xcomin^db+(dble(ixco^db)-half)*dxco^db \}
1418
1419 !if(.not.slab) then
1420 ! ^D&local_invdxCo^D=1.d0/psc(igrid)%dx({ixCo^DD},^D)\
1421 !endif
1422
1423 if(slab_uniform) then
1424 ! actual cell-centered coordinates of fine grid point
1425 !!^D&xFi^D=block%x({ixFi^DD},^D)\
1426 ! actual cell-centered coordinates of coarse grid point
1427 !!^D&xCo^D=psc(igrid)%x({ixCo^DD},^D)\
1428 ! normalized distance between fine/coarse cell center
1429 ! in coarse cell: ranges from -0.5 to 0.5 in each direction
1430 ! (origin is coarse cell center)
1431 ! this is essentially +1/4 or -1/4 on cartesian mesh
1432 eta^d=(xfi^d-xco^d)*invdxco^d;
1433 else
1434 !select case(icase)
1435 ! case(0)
1436 !{! here we assume an even number of ghostcells!!!
1437 !ixshift^D=2*(mod(ixFi^D,2)-1)+1
1438 !if(ixshift^D>0.0d0)then
1439 ! ! oneven fine grid points
1440 ! eta^D=-0.5d0*(one-block%dvolume(ixFi^DD) &
1441 ! /sum(block%dvolume(ixFi^D:ixFi^D+1^D%ixFi^DD)))
1442 !else
1443 ! ! even fine grid points
1444 ! eta^D=+0.5d0*(one-block%dvolume(ixFi^DD) &
1445 ! /sum(block%dvolume(ixFi^D-1:ixFi^D^D%ixFi^DD)))
1446 !endif\}
1447 ! case(1)
1448 !{! here we assume an odd number of ghostcells!!!
1449 !ixshift^D=2*(mod(ixFi^D,2)-1)+1
1450 !if(ixshift^D>0.0d0)then
1451 ! ! oneven fine grid points
1452 ! eta^D=+0.5d0*(one-block%dvolume(ixFi^DD) &
1453 ! /sum(block%dvolume(ixFi^D-1:ixFi^D^D%ixFi^DD)))
1454 !else
1455 ! ! even fine grid points
1456 ! eta^D=-0.5d0*(one-block%dvolume(ixFi^DD) &
1457 ! /sum(block%dvolume(ixFi^D:ixFi^D+1^D%ixFi^DD)))
1458 !endif\}
1459 ! case default
1460 ! call mpistop("no such case")
1461 !end select
1462 ! the different cases for even/uneven number of ghost cells
1463 ! are automatically handled using the relative index to ixMlo
1464 ! as well as the pseudo-coordinates xFi and xCo
1465 ! these latter differ from actual cell centers when stretching is used
1466 ix^d=2*int((ixfi^d+ixmlo^d)/2)-ixmlo^d;
1467 {if(xfi^d>xco^d) then
1468 signedfactorhalf^d=0.5d0
1469 else
1470 signedfactorhalf^d=-0.5d0
1471 end if
1472 eta^d=signedfactorhalf^d*(one-psb(igrid)%dvolume(ixfi^dd) &
1473 /sum(psb(igrid)%dvolume(ix^d:ix^d+1^d%ixFi^dd))) \}
1474 !{eta^D=(xFi^D-xCo^D)*invdxCo^D &
1475 ! *two*(one-block%dvolume(ixFi^DD) &
1476 ! /sum(block%dvolume(ix^D:ix^D+1^D%ixFi^DD))) \}
1477 end if
1478
1479 do idims=1,ndim
1480 hxco^d=ixco^d-kr(^d,idims)\
1481 jxco^d=ixco^d+kr(^d,idims)\
1482
1483 do iw=nwmin,nwmax
1484 slopel=psc(igrid)%w(ixco^d,iw)-psc(igrid)%w(hxco^d,iw)
1485 sloper=psc(igrid)%w(jxco^d,iw)-psc(igrid)%w(ixco^d,iw)
1486 slopec=half*(sloper+slopel)
1487
1488 ! get limited slope
1489 signr=sign(one,sloper)
1490 signc=sign(one,slopec)
1491 !select case(prolong_limiter)
1492 !case(1)
1493 ! ! unlimit
1494 ! slope(iw,idims)=slopeC
1495 !case(2)
1496 ! ! minmod
1497 ! slope(iw,idims)=signR*max(zero,min(dabs(slopeR), &
1498 ! signR*slopeL))
1499 !case(3)
1500 ! ! woodward
1501 ! slope(iw,idims)=two*signR*max(zero,min(dabs(slopeR), &
1502 ! signR*slopeL,signR*half*slopeC))
1503 !case(4)
1504 ! ! koren
1505 ! slope(iw,idims)=signR*max(zero,min(two*signR*slopeL, &
1506 ! (dabs(slopeR)+two*slopeL*signR)*third,two*dabs(slopeR)))
1507 !case default
1508 slope(iw,idims)=signc*max(zero,min(dabs(slopec), &
1509 signc*slopel,signc*sloper))
1510 !end select
1511 end do
1512 end do
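 ! The default limited slope above is a minmod-type combination of the left, right and
 ! central differences: when all three share a sign the smallest magnitude is kept, otherwise
 ! (e.g. at a local extremum) the slope is set to zero. The fine-cell values are then
 ! reconstructed below as w_fine = w_coarse + sum over idims of slope(:,idims)*eta_idims,
 ! with eta = +-1/4 on a uniform mesh.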
1513
1514 ! Interpolate from coarse cell using limited slopes
1515 psb(igrid)%w(ixfi^d,nwmin:nwmax)=psc(igrid)%w(ixco^d,nwmin:nwmax)+&
1516 {(slope(nwmin:nwmax,^d)*eta^d)+}
1517
1518 {end do\}
1519
1520 if(prolongprimitive) then
1521 block=>psb(igrid)
1522 call phys_to_conserved(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)
1523 end if
1524
1525 end subroutine interpolation_linear
1526
1527 subroutine interpolation_copy(igrid, ixFi^L,dxFi^D,xFimin^D, &
1528 dxCo^D,invdxCo^D,xComin^D)
1530 integer, intent(in) :: igrid, ixfi^l
1531 double precision, intent(in) :: dxfi^d, xfimin^d,dxco^d, invdxco^d, xcomin^d
1532
1533 double precision :: xfi^d
1534 integer :: ixco^d, ixfi^d, nwmin,nwmax
1535
1536 if(prolongprimitive) then
1537 nwmin=1
1538 nwmax=nw
1539 else
1540 nwmin=nwhead
1541 nwmax=nwtail
1542 end if
1543
1544 {do ixfi^db = ixfi^lim^db
1545 ! cell-centered coordinates of fine grid point
1546 xfi^db=xfimin^db+(dble(ixfi^db)-half)*dxfi^db
1547
1548 ! indices of coarse cell which contains the fine cell
1549 ! note: this also works for stretched grids
1550 ixco^db=int((xfi^db-xcomin^db)*invdxco^db)+1\}
1551
1552 ! Copy from coarse cell
1553 psb(igrid)%w(ixfi^d,nwmin:nwmax)=psc(igrid)%w(ixco^d,nwmin:nwmax)
1554
1555 {end do\}
1556
1557 if(prolongprimitive) call phys_to_conserved(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)
1558
1559 end subroutine interpolation_copy
1560
1561 subroutine pole_copy(wrecv,ixIR^L,ixR^L,wsend,ixIS^L,ixS^L,ipole)
1562
1563 integer, intent(in) :: ixir^l,ixr^l,ixis^l,ixs^l,ipole
1564 double precision :: wrecv(ixir^s,1:nw), wsend(ixis^s,1:nw)
1565
1566 integer :: iw, iside, ib
1567
1568 select case (ipole)
1569 {case (^d)
1570 iside=int((i^d+3)/2)
1571 ib=2*(^d-1)+iside
1572 do iw=nwhead,nwtail
1573 select case (typeboundary(iw,ib))
1574 case (bc_symm)
1575 wrecv(ixr^s,iw) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1576 case (bc_asymm)
1577 wrecv(ixr^s,iw) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1578 case default
1579 call mpistop("Pole boundary condition should be symm or asymm")
1580 end select
1581 end do \}
1582 end select
1583
1584 end subroutine pole_copy
1585
1586 subroutine pole_copy_stg(wrecv,ixIR^L,ixR^L,wsend,ixIS^L,ixS^L,idirs,ipole)
1587
1588 integer, intent(in) :: ixir^l,ixr^l,ixis^l,ixs^l,idirs,ipole
1589
1590 double precision :: wrecv(ixir^s,1:nws), wsend(ixis^s,1:nws)
1591 integer :: ib, iside
1592
1593 select case (ipole)
1594 {case (^d)
1595 iside=int((i^d+3)/2)
1596 ib=2*(^d-1)+iside
1597 select case (typeboundary(iw_mag(idirs),ib))
1598 case (bc_symm)
1599 wrecv(ixr^s,idirs) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,idirs)
1600 case (bc_asymm)
1601 wrecv(ixr^s,idirs) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,idirs)
1602 case default
1603 call mpistop("Pole boundary condition should be symm or asymm")
1604 end select
1605 \}
1606 end select
1607
1608 end subroutine pole_copy_stg
1609
1610 subroutine pole_buffer(wrecv,ixIR^L,ixR^L,wsend,ixIS^L,ixS^L,ipole)
1611
1612 integer, intent(in) :: ixir^l,ixr^l,ixis^l,ixs^l,ipole
1613 double precision :: wrecv(ixir^s,nwhead:nwtail), wsend(ixis^s,1:nw)
1614
1615 integer :: iw, iside, ib
1616
1617 select case (ipole)
1618 {case (^d)
1619 iside=int((i^d+3)/2)
1620 ib=2*(^d-1)+iside
1621 do iw=nwhead,nwtail
1622 select case (typeboundary(iw,ib))
1623 case (bc_symm)
1624 wrecv(ixr^s,iw) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1625 case (bc_asymm)
1626 wrecv(ixr^s,iw) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1627 case default
1628 call mpistop("Pole boundary condition should be symm or asymm")
1629 end select
1630 end do \}
1631 end select
1632
1633 end subroutine pole_buffer
1634
1635 end subroutine getbc
1636
1637end module mod_ghostcells_update
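! Rough usage sketch of this module (the state array ps, global_time and the 1..nw range
! below are illustrative assumptions, not taken from this file):
!   call init_bc()                              ! set up index ranges once the grid exists
!   type_send_srl=>type_send_srl_f; type_recv_srl=>type_recv_srl_f  ! pick the "all variables" variants
!   type_send_r=>type_send_r_f;     type_recv_r=>type_recv_r_f
!   type_send_p=>type_send_p_f;     type_recv_p=>type_recv_p_f
!   call create_bc_mpi_datatype(1,nw)           ! build MPI subarray types for variables 1..nw
!   ...
!   call getbc(global_time,0.d0,ps,1,nw)        ! fill ghost cells of all blocks in ps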