MPI-AMRVAC  3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_advance.t
1 !> Module containing all the time stepping schemes
2 module mod_advance
3 
4  implicit none
5  private
6 
7  !> Whether to conserve fluxes at the current sub-step
8  logical :: fix_conserve_at_step = .true.
9 
10  public :: advance
11  public :: process
12  public :: process_advanced
13 
14 contains
15 
16  !> Advance all the grids over one time step, including all sources
17  subroutine advance(iit)
19  use mod_particles, only: handle_particles
20  use mod_source, only: add_split_source
21 
22  integer, intent(in) :: iit
23 
24  integer :: iigrid, igrid, idimsplit
25 
26  ! split source addition
27  call add_split_source(prior=.true.)
28 
29  if (dimsplit) then
30  if ((iit/2)*2==iit .or. typedimsplit=='xy') then
31  ! do the sweeps in order of increasing idim,
32  do idimsplit=1,ndim
33  call advect(idimsplit,idimsplit)
34  end do
35  else
36  ! if "iit" is odd and typedimsplit='xyyx',
37  ! do the sweeps in order of decreasing idim
38  do idimsplit=ndim,1,-1
39  call advect(idimsplit,idimsplit)
40  end do
41  end if
42  else
43  ! Add fluxes from all directions at once
44  call advect(1,ndim)
45  end if
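 ! Note on the splitting strategy: with typedimsplit='xyyx' the sweep order is
 ! reversed on every other step, which symmetrizes the dimensional-splitting
 ! error over pairs of steps; typedimsplit='xy' keeps a fixed order, and the
 ! unsplit branch adds the fluxes from all directions in a single call.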
46 
47  ! split source addition
48  call add_split_source(prior=.false.)
49 
50  if(use_particles) call handle_particles
51 
52  end subroutine advance
53 
54  !> Advance all grids over one time step, but without taking dimensional
55  !> splitting or split source terms into account
56  subroutine advect(idim^LIM)
61  use mod_comm_lib, only: mpistop
62 
63  integer, intent(in) :: idim^LIM
64  integer :: iigrid, igrid
65 
66  call init_comm_fix_conserve(idim^lim,nwflux)
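 ! flux fixing at refinement boundaries is only needed when more than one AMR
 ! level is present (levmax>levmin) and only while actually advancing in time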
67  fix_conserve_at_step = time_advance .and. levmax>levmin
68 
69  ! copy w instead of wold because of potential use of dimsplit or sourcesplit
70  !$OMP PARALLEL DO PRIVATE(igrid)
71  do iigrid=1,igridstail; igrid=igrids(iigrid);
72  ps1(igrid)%w=ps(igrid)%w
73  if(stagger_grid) ps1(igrid)%ws=ps(igrid)%ws
74  end do
75  !$OMP END PARALLEL DO
76 
77  istep = 0
78 
79  select case (t_stepper)
80  case (onestep)
81  select case (t_integrator)
82  case (forward_euler)
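 ! one-stage explicit update: ps1 holds the copy of w at t^n made above, so
 ! this single sweep performs
 !   w^(n+1) = w^n + dt*F(w^n)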
83  call advect1(flux_method,one,idim^lim,global_time,ps1,global_time,ps)
84 
85  case (imex_euler)
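 ! IMEX Euler: an explicit Euler predictor into ps1,
 !   ps1 = y^n + dt*F_ex(y^n),
 ! followed by the implicit solve below, i.e.
 !   y^(n+1) = ps1 + dt*F_im(y^(n+1)) = y^n + dt*F_ex(y^n) + dt*F_im(y^(n+1))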
86  call advect1(flux_method,one,idim^lim,global_time,ps,global_time,ps1)
87  call global_implicit_update(one,dt,global_time+dt,ps,ps1)
88 
89  case (imex_sp)
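 ! first-order operator-split IMEX: solve the implicit part first,
 !   y* = y^n + dt*F_im(y*),
 ! copy y* into ps1, then take an explicit Euler step on top of it,
 !   y^(n+1) = y* + dt*F_ex(y*)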
90  call global_implicit_update(one,dt,global_time,ps,ps1)
91  !$OMP PARALLEL DO PRIVATE(igrid)
92  do iigrid=1,igridstail; igrid=igrids(iigrid);
93  ps1(igrid)%w=ps(igrid)%w
94  if(stagger_grid) ps1(igrid)%ws=ps(igrid)%ws
95  end do
96  !$OMP END PARALLEL DO
97  call advect1(flux_method,one,idim^lim,global_time,ps1,global_time,ps)
98 
99  case default
100  call mpistop("unknown onestep time_integrator in advect")
101  end select
102 
103  case (twostep)
104  select case (t_integrator)
105  case (predictor_corrector)
106  ! PC or explicit midpoint
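 ! half-step predictor with the typepred1 scheme, full-step corrector with
 ! flux_method evaluated at the midpoint state:
 !   ps1     = y^n + (dt/2)*F(y^n)
 !   y^(n+1) = y^n + dt*F(ps1)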
107  ! predictor step
108  fix_conserve_at_step = .false.
109  call advect1(typepred1,half,idim^lim,global_time,ps,global_time,ps1)
110  ! corrector step
111  fix_conserve_at_step = time_advance .and. levmax>levmin
112  call advect1(flux_method,one,idim^lim,global_time+half*dt,ps1,global_time,ps)
113 
114  case (rk2_alf)
115  ! RK2 with alfa parameter, where rk_a21=alfa
116  call advect1(flux_method,rk_a21, idim^lim,global_time,ps,global_time,ps1)
117  !$OMP PARALLEL DO PRIVATE(igrid)
118  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
119  ps(igrid)%w = ps(igrid)%w+rk_b1*(ps1(igrid)%w-ps(igrid)%w)/rk_a21
120  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+(one-rk_b2)*(ps1(igrid)%ws-ps(igrid)%ws)/rk_a21
121  end do
122  !$OMP END PARALLEL DO
124 
125  case (ssprk2)
126  ! ssprk2 or Heun's method
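 ! Heun/SSPRK(2,2): a full explicit Euler stage into ps1, a convex average,
 ! then a half-weighted second stage,
 !   u^(1)   = u^n + dt*F(u^n)
 !   u^(n+1) = (1/2)*u^n + (1/2)*u^(1) + (dt/2)*F(u^(1))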
127  call advect1(flux_method,one, idim^lim,global_time,ps,global_time,ps1)
128  !$OMP PARALLEL DO PRIVATE(igrid)
129  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
130  ps(igrid)%w = half*ps(igrid)%w+half*ps1(igrid)%w
131  if(stagger_grid) ps(igrid)%ws = half*ps(igrid)%ws+half*ps1(igrid)%ws
132  end do
133  !$OMP END PARALLEL DO
134  call advect1(flux_method,half,idim^lim,global_time+dt,ps1,global_time+half*dt,ps)
135 
136  case (imex_midpoint)
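 ! IMEX midpoint: build the midpoint state Y in ps2 and use it for both operators,
 !   Y       = y^n + (dt/2)*F_ex(y^n) + (dt/2)*F_im(Y)
 !   y^(n+1) = y^n + dt*F_ex(Y) + dt*F_im(Y)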
137  !$OMP PARALLEL DO PRIVATE(igrid)
138  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
139  ps2(igrid)%w = ps(igrid)%w
140  if(stagger_grid) ps2(igrid)%ws = ps(igrid)%ws
141  end do
142  !$OMP END PARALLEL DO
143  call advect1(flux_method,half, idim^lim,global_time,ps,global_time,ps1)
144  call global_implicit_update(half,dt,global_time+half*dt,ps2,ps1)
145  !$OMP PARALLEL DO PRIVATE(igrid)
146  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
147  ps(igrid)%w = ps(igrid)%w+2.0d0*(ps2(igrid)%w-ps1(igrid)%w)
148  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+2.0d0*(ps2(igrid)%ws-ps1(igrid)%ws)
149  end do
150  !$OMP END PARALLEL DO
151  call advect1(flux_method,one, idim^lim,global_time+half*dt,ps2,global_time,ps)
152 
153  case (imex_trapezoidal)
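 ! IMEX trapezoidal rule: both operators are averaged between the old state y^n
 ! and the intermediate state Y held in ps2, effectively realizing
 !   y^(n+1) = y^n + (dt/2)*(F_ex(y^n)+F_ex(Y)) + (dt/2)*(F_im(y^n)+F_im(Y))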
154  call advect1(flux_method,one, idim^lim,global_time,ps,global_time,ps1)
155  !$OMP PARALLEL DO PRIVATE(igrid)
156  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
157  ps2(igrid)%w = half*(ps(igrid)%w+ps1(igrid)%w)
158  if(stagger_grid) ps2(igrid)%ws = half*(ps(igrid)%ws+ps1(igrid)%ws)
159  end do
160  !$OMP END PARALLEL DO
162  !$OMP PARALLEL DO PRIVATE(igrid)
163  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
164  ps1(igrid)%w = ps1(igrid)%w+half*dt*ps(igrid)%w
165  if(stagger_grid) ps1(igrid)%ws = ps1(igrid)%ws+half*dt*ps(igrid)%ws
166  end do
167  !$OMP END PARALLEL DO
168  !$OMP PARALLEL DO PRIVATE(igrid)
169  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
170  ps(igrid)%w = ps2(igrid)%w+half*dt*ps(igrid)%w
171  if(stagger_grid) ps(igrid)%ws = ps2(igrid)%ws+half*dt*ps(igrid)%ws
172  end do
173  !$OMP END PARALLEL DO
174  call getbc(global_time+dt,dt,ps1,iwstart,nwgc,phys_req_diagonal)
175  call global_implicit_update(half,dt,global_time+dt,ps2,ps1)
176  !$OMP PARALLEL DO PRIVATE(igrid)
177  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
178  ps(igrid)%w = ps(igrid)%w+ps2(igrid)%w-ps1(igrid)%w
179  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+ps2(igrid)%ws-ps1(igrid)%ws
180  end do
181  !$OMP END PARALLEL DO
182  call advect1(flux_method,half, idim^lim,global_time+dt,ps2,global_time+half*dt,ps)
183 
184  case (imex_222)
185  ! One-parameter family of schemes (parameter is imex222_lambda) from
186  ! Pareschi&Russo 2005, which is L-stable (for default lambda) and
187  ! asymptotically SSP.
188  ! See doi.org/10.1007/s10915-004-4636-4 (table II)
189  ! See doi.org/10.1016/j.apnum.2016.10.018 for interesting values of lambda
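 ! In Butcher form the two stages and the update are
 !   xi1     = y^n + lambda*dt*F_im(xi1)
 !   xi2     = y^n + dt*F_ex(xi1) + (1-2*lambda)*dt*F_im(xi1) + lambda*dt*F_im(xi2)
 !   y^(n+1) = y^n + (dt/2)*(F_ex(xi1)+F_ex(xi2)) + (dt/2)*(F_im(xi1)+F_im(xi2))
 ! with lambda = imex222_lambda; the register juggling below reconstructs these
 ! combinations from ps, ps1 and ps2.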
190 
191  ! Preallocate ps2 as y^n for the implicit update
192  !$OMP PARALLEL DO PRIVATE(igrid)
193  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
194  ps2(igrid)%w = ps(igrid)%w
195  if(stagger_grid) ps2(igrid)%ws = ps(igrid)%ws
196  end do
197  !$OMP END PARALLEL DO
198  ! Solve xi1 = y^n + lambda.dt.F_im(xi1)
200 
201  ! Set ps1 = y^n + dt.F_ex(xi1)
202  call advect1(flux_method, one, idim^lim, global_time, ps2, global_time, ps1)
203  ! Set ps2 = dt.F_im(xi1) (is at t^n)
204  ! Set ps = y^n + dt/2 . F(xi1) (is at t^n+dt/2)
205  ! Set ps1 = y^n + dt.F_ex(xi1) + (1-2.lambda).dt.F_im(xi1) and enforce BC (at t^n+dt)
206  !$OMP PARALLEL DO PRIVATE(igrid)
207  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
208  ps2(igrid)%w = (ps2(igrid)%w - ps(igrid)%w) / imex222_lambda
209  if(stagger_grid) ps2(igrid)%ws = (ps2(igrid)%ws - ps(igrid)%ws) / imex222_lambda
210  end do
211  !$OMP END PARALLEL DO
212  !$OMP PARALLEL DO PRIVATE(igrid)
213  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
214  ps(igrid)%w = half*(ps(igrid)%w + ps1(igrid)%w + ps2(igrid)%w)
215  if(stagger_grid) ps(igrid)%ws = half*(ps(igrid)%ws + ps1(igrid)%ws + ps2(igrid)%ws)
216  end do
217  !$OMP END PARALLEL DO
218  !$OMP PARALLEL DO PRIVATE(igrid)
219  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
220  ps1(igrid)%w = ps1(igrid)%w + (1.0d0 - 2.0d0*imex222_lambda)*ps2(igrid)%w
221  if(stagger_grid) ps1(igrid)%ws = ps1(igrid)%ws + (1.0d0 - 2.0d0*imex222_lambda)*ps2(igrid)%ws
222  end do
223  !$OMP END PARALLEL DO
224  call getbc(global_time+dt,dt,ps1,iwstart,nwgc,phys_req_diagonal)
225 
226  ! Preallocate ps2 as xi1 for the implicit update (is at t^n)
227  !$OMP PARALLEL DO PRIVATE(igrid)
228  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
229  ps2(igrid)%w = 2.0d0*ps2(igrid)%w - ps1(igrid)%w - imex222_lambda*ps2(igrid)%w
230  if(stagger_grid) ps2(igrid)%ws = 2.0d0*ps2(igrid)%ws - ps1(igrid)%ws - imex222_lambda*ps2(igrid)%ws
231  end do
232  !$OMP END PARALLEL DO
233  ! Solve xi2 = (ps1) + lambda.dt.F_im(xi2)
235 
236  ! Add dt/2.F_im(xi2) to ps
237  !$OMP PARALLEL DO PRIVATE(igrid)
238  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
239  ps(igrid)%w = ps(igrid)%w + (ps2(igrid)%w - ps1(igrid)%w) / (2.0d0 * imex222_lambda)
240  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + (ps2(igrid)%ws - ps1(igrid)%ws) / (2.0d0 * imex222_lambda)
241  end do
242  !$OMP END PARALLEL DO
243  ! Set ps = y^n + dt/2.(F(xi1)+F(xi2)) = y^(n+1)
244  call advect1(flux_method, half, idim^lim, global_time+dt, ps2, global_time+half*dt, ps)
245 
246  case default
247  call mpistop("unknown twostep time_integrator in advect")
248  end select
249 
250  case (threestep)
251  select case (t_integrator)
252  case (ssprk3)
253  ! this is SSPRK(3,3) Gottlieb-Shu 1998 or SSP(3,2) depending on ssprk_order (3 vs 2)
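 ! for ssprk_order=3 this corresponds to the classic Shu-Osher form of SSPRK(3,3):
 !   u^(1)   = u^n + dt*F(u^n)
 !   u^(2)   = (3/4)*u^n + (1/4)*(u^(1) + dt*F(u^(1)))
 !   u^(n+1) = (1/3)*u^n + (2/3)*(u^(2) + dt*F(u^(2)))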
254  call advect1(flux_method,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
255  !$OMP PARALLEL DO PRIVATE(igrid)
256  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
257  ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
258  if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
259  end do
260  !$OMP END PARALLEL DO
262  !$OMP PARALLEL DO PRIVATE(igrid)
263  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
264  ps(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
265  if(stagger_grid) ps(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
266  end do
267  !$OMP END PARALLEL DO
268  call advect1(flux_method,rk_beta33, idim^lim,global_time+rk_c3*dt,ps2,global_time+(1.0d0-rk_beta33)*dt,ps)
269 
270  case (rk3_bt)
271  ! this is a general threestep RK according to its Butcher Table
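 ! with tableau entries rk3_a21, rk3_a31, rk3_a32, weights rk3_b1..rk3_b3 and
 ! abscissa rk3_c3, i.e.
 !   k1 = F(y^n),  k2 = F(y^n + a21*dt*k1),  k3 = F(y^n + a31*dt*k1 + a32*dt*k2)
 !   y^(n+1) = y^n + dt*(b1*k1 + b2*k2 + b3*k3)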
272  call advect1(flux_method,rk3_a21, idim^lim,global_time,ps,global_time,ps1)
273  !$OMP PARALLEL DO PRIVATE(igrid)
274  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
275  ps3(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/rk3_a21
276  if(stagger_grid) ps3(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/rk3_a21
277  end do
278  !$OMP END PARALLEL DO
279  !$OMP PARALLEL DO PRIVATE(igrid)
280  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
281  ps2(igrid)%w=ps(igrid)%w+rk3_a31*ps3(igrid)%w
282  if(stagger_grid) ps2(igrid)%ws=ps(igrid)%ws+rk3_a31*ps3(igrid)%ws
283  end do
284  !$OMP END PARALLEL DO
286  !$OMP PARALLEL DO PRIVATE(igrid)
287  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
288  ps(igrid)%w=ps(igrid)%w+rk3_b1*ps3(igrid)%w &
289  +rk3_b2*(ps2(igrid)%w-(ps(igrid)%w+rk3_a31*ps3(igrid)%w))/rk3_a32
290  if(stagger_grid)then
291  ps(igrid)%ws=ps(igrid)%ws+rk3_b1*ps3(igrid)%ws &
292  +rk3_b2*(ps2(igrid)%ws-(ps(igrid)%ws+rk3_a31*ps3(igrid)%ws))/rk3_a32
293  endif
294  end do
295  !$OMP END PARALLEL DO
296  call advect1(flux_method,rk3_b3, idim^lim,global_time+rk3_c3*dt,ps2,global_time+(1.0d0-rk3_b3)*dt,ps)
297 
298  case (imex_ars3)
299  ! this is IMEX scheme ARS3
300  call advect1(flux_method,ars_gamma, idim^lim,global_time,ps,global_time,ps1)
301  !$OMP PARALLEL DO PRIVATE(igrid)
302  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
303  ps4(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/ars_gamma
304  if(stagger_grid) ps4(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/ars_gamma
305  end do
306  !$OMP END PARALLEL DO
308  !$OMP PARALLEL DO PRIVATE(igrid)
309  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
310  ps1(igrid)%w=(ps2(igrid)%w-ps1(igrid)%w)/ars_gamma
311  if(stagger_grid) ps1(igrid)%ws=(ps2(igrid)%ws-ps1(igrid)%ws)/ars_gamma
312  end do
313  !$OMP END PARALLEL DO
314  !$OMP PARALLEL DO PRIVATE(igrid)
315  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
316  ps3(igrid)%w=ps(igrid)%w+(ars_gamma-1.0d0)*ps4(igrid)%w+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%w
317  if(stagger_grid) then
318  ps3(igrid)%ws=ps(igrid)%ws+(ars_gamma-1.0d0)*ps4(igrid)%ws+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%ws
319  endif
320  end do
321  !$OMP END PARALLEL DO
322  call advect1(flux_method,2.0d0*(1.0d0-ars_gamma), idim^lim,global_time+ars_gamma*dt,ps2,global_time+(ars_gamma-1.0d0)*dt,ps3)
323  !$OMP PARALLEL DO PRIVATE(igrid)
324  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
325  ps2(igrid)%w=ps1(igrid)%w+(ps3(igrid)%w-(ps(igrid)%w+ &
326  (ars_gamma-1.0d0)*ps4(igrid)%w+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%w))/(2.0d0*(1.0d0-ars_gamma))
327  if(stagger_grid) then
328  ps2(igrid)%ws=ps1(igrid)%ws+(ps3(igrid)%ws-(ps(igrid)%ws+ &
329  (ars_gamma-1.0d0)*ps4(igrid)%ws+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%ws))/(2.0d0*(1.0d0-ars_gamma))
330  endif
331  end do
332  !$OMP END PARALLEL DO
334  !$OMP PARALLEL DO PRIVATE(igrid)
335  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
336  ps(igrid)%w=ps(igrid)%w+half*ps2(igrid)%w &
337  +half*(ps4(igrid)%w-ps3(igrid)%w)/ars_gamma
338  if(stagger_grid) then
339  ps(igrid)%ws=ps(igrid)%ws+half*ps2(igrid)%ws &
340  +half*(ps4(igrid)%ws-ps3(igrid)%ws)/ars_gamma
341  endif
342  end do
343  !$OMP END PARALLEL DO
344  call advect1(flux_method,half, idim^lim,global_time+(1.0d0-ars_gamma)*dt,ps4,global_time+half*dt,ps)
345 
346  case (imex_232)
347  ! this is IMEX_ARK(2,3,2) or IMEX_SSP(2,3,2)
348  call advect1(flux_method,imex_a21, idim^lim,global_time,ps,global_time,ps1)
349  !$OMP PARALLEL DO PRIVATE(igrid)
350  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
351  ps4(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/imex_a21
352  ps3(igrid)%w=ps(igrid)%w
353  if(stagger_grid) then
354  ps4(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/imex_a21
355  ps3(igrid)%ws=ps(igrid)%ws
356  endif
357  end do
358  !$OMP END PARALLEL DO
360  !$OMP PARALLEL DO PRIVATE(igrid)
361  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
362  ps1(igrid)%w=ps1(igrid)%w+imex_ha21*dt*ps3(igrid)%w
363  if(stagger_grid) ps1(igrid)%ws=ps1(igrid)%ws+imex_ha21*dt*ps3(igrid)%ws
364  end do
365  !$OMP END PARALLEL DO
366  call getbc(global_time+imex_a21*dt,dt,ps1,iwstart,nwgc,phys_req_diagonal)
368  !$OMP PARALLEL DO PRIVATE(igrid)
369  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
370  ps(igrid)%w=ps(igrid)%w+imex_a31*ps4(igrid)%w &
371  +imex_b1*dt*ps3(igrid)%w+imex_b2*(ps2(igrid)%w-ps1(igrid)%w)/imex_ha22
372  if(stagger_grid) then
373  ps(igrid)%ws=ps(igrid)%ws+imex_a31*ps4(igrid)%ws &
374  +imex_b1*dt*ps3(igrid)%ws+imex_b2*(ps2(igrid)%ws-ps1(igrid)%ws)/imex_ha22
375  endif
376  end do
377  !$OMP END PARALLEL DO
378  !$OMP PARALLEL DO PRIVATE(igrid)
379  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
380  ps3(igrid)%w=ps1(igrid)%w-imex_a21*ps4(igrid)%w &
381  -imex_ha21*dt*ps3(igrid)%w+imex_b1*dt*ps3(igrid)%w
382  if(stagger_grid) then
383  ps3(igrid)%ws=ps1(igrid)%ws-imex_a21*ps4(igrid)%ws &
384  -imex_ha21*dt*ps3(igrid)%ws+imex_b1*dt*ps3(igrid)%ws
385  endif
386  end do
387  !$OMP END PARALLEL DO
389  !$OMP PARALLEL DO PRIVATE(igrid)
390  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
391  ps2(igrid)%w=(ps(igrid)%w-ps3(igrid)%w-imex_a31*ps4(igrid)%w)/imex_a32 &
392  +(1.0d0-imex_b2/imex_a32)*(ps2(igrid)%w-ps1(igrid)%w)/imex_ha22
393  if(stagger_grid) then
394  ps2(igrid)%ws=(ps(igrid)%ws-ps3(igrid)%ws-imex_a31*ps4(igrid)%ws)/imex_a32 &
395  +(1.0d0-imex_b2/imex_a32)*(ps2(igrid)%ws-ps1(igrid)%ws)/imex_ha22
396  endif
397  end do
398  !$OMP END PARALLEL DO
399  !$OMP PARALLEL DO PRIVATE(igrid)
400  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
401  ps1(igrid)%w=ps3(igrid)%w+imex_b1*ps4(igrid)%w+imex_b2*ps2(igrid)%w
402  if(stagger_grid) then
403  ps1(igrid)%ws=ps3(igrid)%ws+imex_b1*ps4(igrid)%ws+imex_b2*ps2(igrid)%ws
404  endif
405  end do
406  !$OMP END PARALLEL DO
408  !$OMP PARALLEL DO PRIVATE(igrid)
409  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
410  ps(igrid)%w=ps1(igrid)%w+ps2(igrid)%w-ps(igrid)%w
411  if(stagger_grid) then
412  ps(igrid)%ws=ps1(igrid)%ws+ps2(igrid)%ws-ps(igrid)%ws
413  endif
414  end do
415  !$OMP END PARALLEL DO
416  call advect1(flux_method,imex_b3, idim^lim,global_time+imex_c3*dt,ps2,global_time+(1.0d0-imex_b3)*dt,ps)
417 
418  case (imex_cb3a)
419  ! Third order IMEX scheme with low-storage implementation (4 registers).
420  ! From Cavaglieri&Bewley 2015, see doi.org/10.1016/j.jcp.2015.01.031
421  ! (scheme called "IMEXRKCB3a" there). Uses 3 explicit and 2 implicit stages.
422  ! Parameters are in imex_bj, imex_cj (same for implicit/explicit),
423  ! imex_aij (implicit tableau) and imex_haij (explicit tableau).
424  call advect1(flux_method, imex_ha21, idim^lim, global_time, ps, global_time, ps1)
426  !$OMP PARALLEL DO PRIVATE(igrid)
427  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
428  ps3(igrid)%w = ps(igrid)%w + imex_a32/imex_a22 * (ps2(igrid)%w - ps1(igrid)%w)
429  ps(igrid)%w = ps(igrid)%w + imex_b2 /imex_a22 * (ps2(igrid)%w - ps1(igrid)%w)
430  ps1(igrid)%w = ps3(igrid)%w
431  if(stagger_grid) ps3(igrid)%ws = ps(igrid)%ws + imex_a32/imex_a22 * (ps2(igrid)%ws - ps1(igrid)%ws)
432  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + imex_b2 /imex_a22 * (ps2(igrid)%ws - ps1(igrid)%ws)
433  if(stagger_grid) ps1(igrid)%ws = ps3(igrid)%ws
434  end do
435  !$OMP END PARALLEL DO
436  call advect1(flux_method, imex_ha32, idim^lim, global_time+imex_c2*dt, ps2, global_time, ps3)
437  !$OMP PARALLEL DO PRIVATE(igrid)
438  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
439  ps(igrid)%w = ps(igrid)%w + imex_b2 /imex_ha32 * (ps3(igrid)%w - ps1(igrid)%w)
440  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + imex_b2 /imex_ha32 * (ps3(igrid)%ws - ps1(igrid)%ws)
441  end do
443  !$OMP PARALLEL DO PRIVATE(igrid)
444  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
445  ps(igrid)%w = ps(igrid)%w + imex_b3 /imex_a33 * (ps1(igrid)%w - ps3(igrid)%w)
446  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + imex_b3 /imex_a33 * (ps1(igrid)%ws - ps3(igrid)%ws)
447  end do
448  !$OMP END PARALLEL DO
450 
451  case default
452  call mpistop("unknown threestep time_integrator in advect")
453  end select
454 
455  case (fourstep)
456  select case (t_integrator)
457  case (ssprk4)
458  ! SSPRK(4,3) or SSP(4,2) depending on ssprk_order (3 vs 2)
459  ! ssprk43: Strong stability preserving 4 stage RK 3rd order by Ruuth and Spiteri
460  ! Ruuth & Spiteri, J. Sci. Comput., 17 (2002), pp. 211-220
461  ! supposed to be stable up to CFL=2.
462  ! ssp42: stable up to CFL=3
463  call advect1(flux_method,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
464  !$OMP PARALLEL DO PRIVATE(igrid)
465  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
466  ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
467  if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
468  end do
469  !$OMP END PARALLEL DO
471  !$OMP PARALLEL DO PRIVATE(igrid)
472  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
473  ps1(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
474  if(stagger_grid) ps1(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
475  end do
476  !$OMP END PARALLEL DO
478  !$OMP PARALLEL DO PRIVATE(igrid)
479  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
480  ps(igrid)%w=rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w
481  if(stagger_grid) ps(igrid)%ws=rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws
482  end do
483  !$OMP END PARALLEL DO
484  call advect1(flux_method,rk_beta44, idim^lim,global_time+rk_c4*dt,ps1,global_time+(1.0d0-rk_beta44)*dt,ps)
485 
486  case (rk4)
487  ! the standard RK(4,4) method
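 ! classical RK4 in low-storage form: the three half/full advect1 sweeps below
 ! produce ps1 = y^n + (dt/2)*k1, ps2 = y^n + (dt/2)*k2, ps3 = y^n + dt*k3, the
 ! linear combination assembles y^n + (dt/6)*(k1 + 2*k2 + 2*k3), and the final
 ! sweep adds the remaining (dt/6)*k4, giving
 !   y^(n+1) = y^n + (dt/6)*(k1 + 2*k2 + 2*k3 + k4)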
488  !$OMP PARALLEL DO PRIVATE(igrid)
489  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
490  ps2(igrid)%w=ps(igrid)%w
491  ps3(igrid)%w=ps(igrid)%w
492  if(stagger_grid) then
493  ps2(igrid)%ws=ps(igrid)%ws
494  ps3(igrid)%ws=ps(igrid)%ws
495  endif
496  end do
497  !$OMP END PARALLEL DO
498  call advect1(flux_method,half, idim^lim,global_time,ps,global_time,ps1)
499  call advect1(flux_method,half, idim^lim,global_time+half*dt,ps1,global_time,ps2)
500  call advect1(flux_method,1.0d0, idim^lim,global_time+half*dt,ps2,global_time,ps3)
501  !$OMP PARALLEL DO PRIVATE(igrid)
502  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
503  ps(igrid)%w=(1.0d0/3.0d0)*(-ps(igrid)%w+ps1(igrid)%w+2.0d0*ps2(igrid)%w+ps3(igrid)%w)
504  if(stagger_grid) ps(igrid)%ws=(1.0d0/3.0d0) &
505  *(-ps(igrid)%ws+ps1(igrid)%ws+2.0d0*ps2(igrid)%ws+ps3(igrid)%ws)
506  end do
507  !$OMP END PARALLEL DO
508  call advect1(flux_method,1.0d0/6.0d0, idim^lim,global_time+dt,ps3,global_time+dt*5.0d0/6.0d0,ps)
509 
510  case default
511  call mpistop("unknown fourstep time_integrator in advect")
512  end select
513 
514  case (fivestep)
515  select case (t_integrator)
516  case (ssprk5)
517  ! SSPRK(5,4) by Ruuth and Spiteri
518  !bcexch = .false.
519  call advect1(flux_method,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
520  !$OMP PARALLEL DO PRIVATE(igrid)
521  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
522  ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
523  if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
524  end do
525  !$OMP END PARALLEL DO
527  !$OMP PARALLEL DO PRIVATE(igrid)
528  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
529  ps1(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
530  if(stagger_grid) ps1(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
531  end do
532  !$OMP END PARALLEL DO
534  !$OMP PARALLEL DO PRIVATE(igrid)
535  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
536  ps3(igrid)%w=rk_alfa53*ps2(igrid)%w+rk_alfa54*ps1(igrid)%w
537  if(stagger_grid) ps3(igrid)%ws=rk_alfa53*ps2(igrid)%ws+rk_alfa54*ps1(igrid)%ws
538  end do
539  !$OMP END PARALLEL DO
540  !$OMP PARALLEL DO PRIVATE(igrid)
541  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
542  ps2(igrid)%w=rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w
543  if(stagger_grid) ps2(igrid)%ws=rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws
544  end do
545  !$OMP END PARALLEL DO
547  !$OMP PARALLEL DO PRIVATE(igrid)
548  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
549  ps(igrid)%w=ps3(igrid)%w+rk_alfa55*ps2(igrid)%w &
550  +(rk_beta54/rk_beta44)*(ps2(igrid)%w-(rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w))
551  if(stagger_grid) then
552  ps(igrid)%ws=ps3(igrid)%ws+rk_alfa55*ps2(igrid)%ws &
553  +(rk_beta54/rk_beta44)*(ps2(igrid)%ws-(rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws))
554  endif
555  end do
556  !$OMP END PARALLEL DO
557  !bcexch = .true.
558  call advect1(flux_method,rk_beta55, idim^lim,global_time+rk_c5*dt,ps2,global_time+(1.0d0-rk_beta55)*dt,ps)
559 
560  case default
561  call mpistop("unknown fivestep time_integrator in advect")
562  end select
563 
564  case default
565  call mpistop("unknown time_stepper in advect")
566  end select
567 
568  end subroutine advect
569 
570  !> Implicit global update step within IMEX schemes, advance psa=psb+dtfactor*qdt*F_im(psa)
571  subroutine global_implicit_update(dtfactor,qdt,qtC,psa,psb)
575 
576  type(state), target :: psa(max_blocks) !< Compute implicit part from this state and update it
577  type(state), target :: psb(max_blocks) !< Will be unchanged, as on entry
578  double precision, intent(in) :: qdt !< overall time step dt
579  double precision, intent(in) :: qtC !< Both states psa and psb at this time level
580  double precision, intent(in) :: dtfactor !< Advance psa=psb+dtfactor*qdt*F_im(psa)
581 
582  integer :: iigrid, igrid
583 
584  !> First copy all variables from psb to psa; this is necessary to account for
585  ! quantities in w that have no implicit source term
586  do iigrid=1,igridstail; igrid=igrids(iigrid);
587  psa(igrid)%w = psb(igrid)%w
588  end do
589 
590  if (associated(phys_implicit_update)) then
591  call phys_implicit_update(dtfactor,qdt,qtc,psa,psb)
592  end if
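 ! if the physics module provides no phys_implicit_update, psa simply remains a
 ! copy of psb, i.e. the implicit operator is treated as identically zero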
593 
594  ! enforce boundary conditions for psa
595  call getbc(qtc,0.d0,psa,iwstart,nwgc,phys_req_diagonal)
596 
597  end subroutine global_implicit_update
598 
599  !> Evaluate Implicit part in place, i.e. psa==>F_im(psa)
600  subroutine evaluate_implicit(qtC,psa)
603 
604  type(state), target :: psa(max_blocks) !< Compute implicit part from this state and update it
605  double precision, intent(in) :: qtC !< psa at this time level
606 
607  if (associated(phys_evaluate_implicit)) then
608  call phys_evaluate_implicit(qtc,psa)
609  end if
610 
611  end subroutine evaluate_implicit
612 
613  !> Integrate all grids by one partial step
614  subroutine advect1(method,dtfactor,idim^LIM,qtC,psa,qt,psb)
617  use mod_fix_conserve
618  use mod_physics
619 
620  integer, intent(in) :: idim^LIM
621  type(state), target :: psa(max_blocks) !< Compute fluxes based on this state
622  type(state), target :: psb(max_blocks) !< Update solution on this state
623  double precision, intent(in) :: dtfactor !< Advance over dtfactor * dt
624  double precision, intent(in) :: qtC
625  double precision, intent(in) :: qt
626  integer, intent(in) :: method(nlevelshi)
627 
628  ! cell face flux
629  double precision :: fC(ixG^T,1:nwflux,1:ndim)
630  ! cell edge flux
631  double precision :: fE(ixG^T,sdim:3)
632  double precision :: qdt
633  integer :: iigrid, igrid
634 
635  istep = istep+1
636 
637  if(associated(phys_special_advance)) then
638  call phys_special_advance(qtc,psa)
639  end if
640 
641  qdt=dtfactor*dt
642  ! opedit: Just advance the active grids:
643  !$OMP PARALLEL DO PRIVATE(igrid)
644  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
645  block=>ps(igrid)
646  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
647 
648  call advect1_grid(method(block%level),qdt,dtfactor,ixg^ll,idim^lim,&
649  qtc,psa(igrid),qt,psb(igrid),fc,fe,rnode(rpdx1_:rnodehi,igrid),ps(igrid)%x)
650 
651  ! opedit: Obviously, flux is stored only for active grids.
652  ! But in fix_conserve we know whether there is a passive neighbor
654  ! via neighbor_active(i^D,igrid), so we skip the correction for those.
655  ! This violates strict conservation when the active/passive interface
656  ! coincides with a coarse/fine interface.
657  if (fix_conserve_global .and. fix_conserve_at_step) then
658  call store_flux(igrid,fc,idim^lim,nwflux)
659  if(stagger_grid) call store_edge(igrid,ixg^ll,fe,idim^lim)
660  end if
661 
662  end do
663  !$OMP END PARALLEL DO
664 
665  ! opedit: Send flux for all grids, expects sends for all
666  ! nsend_fc(^D), set in connectivity.t.
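 ! The recvflux/sendflux/fix_conserve sequence below restores discrete
 ! conservation at coarse/fine interfaces by replacing the coarse-side fluxes
 ! with the restricted fine-side fluxes; for staggered variables fix_edges
 ! applies the analogous correction to the edge fluxes fE before the
 ! cell-centered values are refilled by phys_face_to_center.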
667 
668  if (fix_conserve_global .and. fix_conserve_at_step) then
669  call recvflux(idim^lim)
670  call sendflux(idim^lim)
671  call fix_conserve(psb,idim^lim,1,nwflux)
672  if(stagger_grid) then
673  call fix_edges(psb,idim^lim)
674  ! fill the cell-center values from the updated staggered variables
675  !$OMP PARALLEL DO PRIVATE(igrid)
676  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
677  call phys_face_to_center(ixm^ll,psb(igrid))
678  end do
679  !$OMP END PARALLEL DO
680  end if
681  end if
682 
683  ! For all grids: fill ghost cells
684  call getbc(qt+qdt,qdt,psb,iwstart,nwgc,phys_req_diagonal)
685 
686  end subroutine advect1
687 
688  !> Advance a single grid over one partial time step
689  subroutine advect1_grid(method,qdt,dtfactor,ixI^L,idim^LIM,qtC,sCT,qt,s,fC,fE,dxs,x)
690 
691  ! integrate one grid by one partial step
694  use mod_tvd
695  use mod_source, only: addsource2
696  use mod_physics, only: phys_to_primitive
698  use mod_comm_lib, only: mpistop
699 
700  integer, intent(in) :: method
701  integer, intent(in) :: ixI^L, idim^LIM
702  double precision, intent(in) :: qdt, dtfactor, qtC, qt, dxs(ndim), x(ixI^S,1:ndim)
703  type(state), target :: sCT, s
704  double precision :: fC(ixI^S,1:nwflux,1:ndim), wprim(ixI^S,1:nw)
705  double precision :: fE(ixI^S,sdim:3)
706 
707  integer :: ixO^L
708 
709  ixo^l=ixi^l^lsubnghostcells;
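 ! interior range: the output block ixO^L is the full block ixI^L with
 ! nghostcells stripped from every side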
710  select case (method)
711  case (fs_hll,fs_hllc,fs_hllcd,fs_hlld,fs_tvdlf,fs_tvdmu)
712  call finite_volume(method,qdt,dtfactor,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
713  case (fs_cd,fs_cd4)
714  call centdiff(method,qdt,dtfactor,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
715  case (fs_hancock)
716  call hancock(qdt,dtfactor,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,dxs,x)
717  case (fs_fd)
718  call fd(qdt,dtfactor,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
719  case (fs_tvd)
720  call centdiff(fs_cd,qdt,dtfactor,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
721  call tvdlimit(method,qdt,ixi^l,ixo^l,idim^lim,sct,qt+qdt,s,fc,dxs,x)
722  case (fs_source)
723  wprim=sct%w
724  call phys_to_primitive(ixi^l,ixi^l,wprim,x)
725  call addsource2(qdt*dble(idimmax-idimmin+1)/dble(ndim),&
726  dtfactor*dble(idimmax-idimmin+1)/dble(ndim),&
727  ixi^l,ixo^l,1,nw,qtc,sct%w,wprim,qt,s%w,x,.false.)
728  case (fs_nul)
729  ! There is nothing to do
730  case default
731  call mpistop("unknown flux scheme in advect1_grid")
732  end select
733 
734  end subroutine advect1_grid
735 
736  !> process is a user entry point in the time loop, called before output and advance;
737  !> it allows modifying the solution, adding extra variables, etc.
738  !> Warning: the CFL dt has already been determined (and is not recomputed)!
739  subroutine process(iit,qt)
743  use mod_physics, only: phys_req_diagonal
744  ! .. scalars ..
745  integer,intent(in) :: iit
746  double precision, intent(in):: qt
747 
748  integer:: iigrid, igrid
749 
750  if (associated(usr_process_global)) then
751  call usr_process_global(iit,qt)
752  end if
753 
754  if (associated(usr_process_grid)) then
755  !$OMP PARALLEL DO PRIVATE(igrid)
756  do iigrid=1,igridstail; igrid=igrids(iigrid);
757  ! next few lines ensure correct usage of routines like divvector etc
758  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
759  block=>ps(igrid)
760  call usr_process_grid(igrid,node(plevel_,igrid),ixg^ll,ixm^ll, &
761  qt,ps(igrid)%w,ps(igrid)%x)
762  end do
763  !$OMP END PARALLEL DO
764  call getbc(qt,dt,ps,iwstart,nwgc,phys_req_diagonal)
765  end if
766  end subroutine process
767 
768  !> process_advanced is a user entry point in the time loop, called just after advance;
769  !> it allows modifying the solution, adding extra variables, etc.
770  !> added for handling two-way coupled PIC-MHD
771  !> Warning: w is now at global_time^(n+1), while global_time and the iteration counter are still at global_time^n, it^n
772  subroutine process_advanced(iit,qt)
777  use mod_physics, only: phys_req_diagonal
778  ! .. scalars ..
779  integer,intent(in) :: iit
780  double precision, intent(in):: qt
781 
782  integer:: iigrid, igrid
783 
784  if (associated(usr_process_adv_global)) then
785  call usr_process_adv_global(iit,qt)
786  end if
787 
788  if (associated(usr_process_adv_grid)) then
789  !$OMP PARALLEL DO PRIVATE(igrid)
790  do iigrid=1,igridstail; igrid=igrids(iigrid);
791  ! next few lines ensure correct usage of routines like divvector etc
792  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
793  block=>ps(igrid)
794 
795  call usr_process_adv_grid(igrid,node(plevel_,igrid),ixg^ll,ixm^ll, &
796  qt,ps(igrid)%w,ps(igrid)%x)
797  end do
798  !$OMP END PARALLEL DO
799  call getbc(qt,dt,ps,iwstart,nwgc,phys_req_diagonal)
800  end if
801  end subroutine process_advanced
802 
803 end module mod_advance