MPI-AMRVAC 3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_advance.t
!> Module containing all the time stepping schemes
module mod_advance

  implicit none
  private

  !> Whether to conserve fluxes at the current sub-step
  logical :: fix_conserve_at_step = .true.

  public :: advance
  public :: process
  public :: process_advanced

contains

  !> Advance all the grids over one time step, including all sources
  subroutine advance(iit)
    use mod_global_parameters
    use mod_particles, only: handle_particles
    use mod_source, only: add_split_source

    integer, intent(in) :: iit

    integer :: iigrid, igrid, idimsplit

    ! split source addition
    call add_split_source(prior=.true.)

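    ! With typedimsplit 'xyyx' the sweep order alternates between even and
    ! odd steps, which symmetrizes the splitting error over pairs of steps
    ! (Strang-type splitting); a fixed 'xy' order is only first-order
    ! accurate in the splitting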
    if (dimsplit) then
      if ((iit/2)*2==iit .or. typedimsplit=='xy') then
        ! do the sweeps in order of increasing idim
        do idimsplit=1,ndim
          call advect(idimsplit,idimsplit)
        end do
      else
        ! if "iit" is odd and typedimsplit=='xyyx', do the sweeps backwards
        do idimsplit=ndim,1,-1
          call advect(idimsplit,idimsplit)
        end do
      end if
    else
      ! Add fluxes from all directions at once
      call advect(1,ndim)
    end if

    ! split source addition
    call add_split_source(prior=.false.)

    if(use_particles) call handle_particles

  end subroutine advance

  !> Advance all grids over one time step, but without taking dimensional
  !> splitting or split source terms into account
  subroutine advect(idim^LIM)
    use mod_global_parameters
    use mod_fix_conserve
    use mod_comm_lib, only: mpistop

    integer, intent(in) :: idim^lim
    integer :: iigrid, igrid

    call init_comm_fix_conserve(idim^lim,nwflux)
    fix_conserve_at_step = time_advance .and. levmax>levmin

    ! copy w instead of wold because of potential use of dimsplit or sourcesplit
    !$OMP PARALLEL DO PRIVATE(igrid)
    do iigrid=1,igridstail; igrid=igrids(iigrid);
      ps1(igrid)%w=ps(igrid)%w
      if(stagger_grid) ps1(igrid)%ws=ps(igrid)%ws
    end do
    !$OMP END PARALLEL DO

    istep = 0

    select case (t_stepper)
    case (onestep)
      select case (t_integrator)
      case (forward_euler)
        call advect1(flux_method,one,idim^lim,global_time,ps1,global_time,ps)
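        ! i.e. w^{n+1} = w^n + dt*L(w^n), with fluxes evaluated on ps1,
        ! the copy of w^n made above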

      case (imex_euler)
        call advect1(flux_method,one,idim^lim,global_time,ps,global_time,ps1)
        call global_implicit_update(one,dt,global_time+dt,ps,ps1)
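        ! IMEX-Euler: ps1 = w^n + dt*F_ex(w^n), then the implicit update
        ! solves w^{n+1} = ps1 + dt*F_im(w^{n+1})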

      case (imex_sp)
        call global_implicit_update(one,dt,global_time,ps,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail; igrid=igrids(iigrid);
          ps1(igrid)%w=ps(igrid)%w
          if(stagger_grid) ps1(igrid)%ws=ps(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,one,idim^lim,global_time,ps1,global_time,ps)
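        ! split IMEX-Euler: first solve w* = w^n + dt*F_im(w*),
        ! then w^{n+1} = w* + dt*F_ex(w*)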

      case default
        call mpistop("unknown onestep time_integrator in advect")
      end select

    case (twostep)
      select case (t_integrator)
      case (predictor_corrector)
        ! PC or explicit midpoint
        ! predictor step
        fix_conserve_at_step = .false.
        call advect1(typepred1,half,idim^lim,global_time,ps,global_time,ps1)
        ! corrector step
        fix_conserve_at_step = time_advance .and. levmax>levmin
        call advect1(flux_method,one,idim^lim,global_time+half*dt,ps1,global_time,ps)
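        ! explicit midpoint: the predictor ps1 = w^n + dt/2*L(w^n) skips the
        ! flux fix, the corrector gives w^{n+1} = w^n + dt*L(ps1)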

      case (rk2_alf)
        ! RK2 with alfa parameter, where rk_a21=alfa
        call advect1(flux_method,rk_a21, idim^lim,global_time,ps,global_time,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w = ps(igrid)%w+rk_b1*(ps1(igrid)%w-ps(igrid)%w)/rk_a21
          if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+(one-rk_b2)*(ps1(igrid)%ws-ps(igrid)%ws)/rk_a21
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk_b2,idim^lim,global_time+rk_a21*dt,ps1,global_time+rk_b1*dt,ps)
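        ! generic two-stage RK: w1 = w^n + a21*dt*L(w^n), then
        ! w^{n+1} = w^n + b1*dt*L(w^n) + b2*dt*L(w1); the loop above recovers
        ! b1*dt*L(w^n) as rk_b1*(ps1-ps)/rk_a21. Second order requires
        ! rk_b1+rk_b2=1 and rk_b2*rk_a21=1/2.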

      case (ssprk2)
        ! ssprk2 or Heun's method
        call advect1(flux_method,one, idim^lim,global_time,ps,global_time,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w = half*ps(igrid)%w+half*ps1(igrid)%w
          if(stagger_grid) ps(igrid)%ws = half*ps(igrid)%ws+half*ps1(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,half,idim^lim,global_time+dt,ps1,global_time+half*dt,ps)
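        ! Heun/SSPRK(2,2): w1 = w^n + dt*L(w^n), then
        ! w^{n+1} = half*w^n + half*(w1 + dt*L(w1))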

      case (imex_midpoint)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w = ps(igrid)%w
          if(stagger_grid) ps2(igrid)%ws = ps(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,half, idim^lim,global_time,ps,global_time,ps1)
        call global_implicit_update(half,dt,global_time+half*dt,ps2,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w = ps(igrid)%w+2.0d0*(ps2(igrid)%w-ps1(igrid)%w)
          if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+2.0d0*(ps2(igrid)%ws-ps1(igrid)%ws)
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,one, idim^lim,global_time+half*dt,ps2,global_time,ps)
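        ! IMEX midpoint: solve wh = w^n + dt/2*(F_ex(w^n)+F_im(wh)) for the
        ! stage wh in ps2, then w^{n+1} = w^n + dt*(F_ex(wh)+F_im(wh));
        ! the term 2.0d0*(ps2-ps1) extracts dt*F_im(wh) from the implicit stage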

      case (imex_trapezoidal)
        call advect1(flux_method,one, idim^lim,global_time,ps,global_time,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w = half*(ps(igrid)%w+ps1(igrid)%w)
          if(stagger_grid) ps2(igrid)%ws = half*(ps(igrid)%ws+ps1(igrid)%ws)
        end do
        !$OMP END PARALLEL DO
        call evaluate_implicit(global_time,ps)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps1(igrid)%w = ps1(igrid)%w+half*dt*ps(igrid)%w
          if(stagger_grid) ps1(igrid)%ws = ps1(igrid)%ws+half*dt*ps(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w = ps2(igrid)%w+half*dt*ps(igrid)%w
          if(stagger_grid) ps(igrid)%ws = ps2(igrid)%ws+half*dt*ps(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call getbc(global_time+dt,dt,ps1,iwstart,nwgc)
        call global_implicit_update(half,dt,global_time+dt,ps2,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w = ps(igrid)%w+ps2(igrid)%w-ps1(igrid)%w
          if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+ps2(igrid)%ws-ps1(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,half, idim^lim,global_time+dt,ps2,global_time+half*dt,ps)
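        ! IMEX trapezoidal rule in both parts: with the corrector stage
        ! w* = w^n + dt*F_ex(w^n) + dt/2*F_im(w^n) + dt/2*F_im(w*) held in ps2,
        ! the net update is
        ! w^{n+1} = w^n + dt/2*(F_ex(w^n)+F_ex(w*)) + dt/2*(F_im(w^n)+F_im(w*));
        ! note that evaluate_implicit overwrites ps by F_im(w^n) in place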

      case (imex_222)
        ! One-parameter family of schemes (parameter is imex222_lambda) from
        ! Pareschi & Russo 2005, which is L-stable (for the default lambda) and
        ! asymptotically SSP.
        ! See doi.org/10.1007/s10915-004-4636-4 (table II)
        ! See doi.org/10.1016/j.apnum.2016.10.018 for interesting values of lambda
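        ! Double Butcher tableau (cf. table II of Pareschi & Russo), both
        ! stages sharing the weights b=(1/2,1/2); the default
        ! lambda = 1-1/sqrt(2) yields L-stability:
        !   explicit: c=(0,1),             a21 = 1
        !   implicit: c=(lambda,1-lambda), a11 = a22 = lambda, a21 = 1-2*lambda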

        ! Preallocate ps2 as y^n for the implicit update
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w = ps(igrid)%w
          if(stagger_grid) ps2(igrid)%ws = ps(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        ! Solve xi1 = y^n + lambda.dt.F_im(xi1)
        call global_implicit_update(imex222_lambda, dt, global_time, ps2, ps)

        ! Set ps1 = y^n + dt.F_ex(xi1)
        call advect1(flux_method, one, idim^lim, global_time, ps2, global_time, ps1)
        ! Set ps2 = dt.F_im(xi1) (is at t^n)
        ! Set ps  = y^n + dt/2 . F(xi1) (is at t^n+dt/2)
        ! Set ps1 = y^n + dt.F_ex(xi1) + (1-2.lambda).dt.F_im(xi1) and enforce BC (at t^n+dt)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w = (ps2(igrid)%w - ps(igrid)%w) / imex222_lambda
          if(stagger_grid) ps2(igrid)%ws = (ps2(igrid)%ws - ps(igrid)%ws) / imex222_lambda
        end do
        !$OMP END PARALLEL DO
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w = half*(ps(igrid)%w + ps1(igrid)%w + ps2(igrid)%w)
          if(stagger_grid) ps(igrid)%ws = half*(ps(igrid)%ws + ps1(igrid)%ws + ps2(igrid)%ws)
        end do
        !$OMP END PARALLEL DO
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps1(igrid)%w = ps1(igrid)%w + (1.0d0 - 2.0d0*imex222_lambda)*ps2(igrid)%w
          if(stagger_grid) ps1(igrid)%ws = ps1(igrid)%ws + (1.0d0 - 2.0d0*imex222_lambda)*ps2(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call getbc(global_time+dt,dt,ps1,iwstart,nwgc)

        ! Preallocate ps2 as xi1 for the implicit update (is at t^n)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w = 2.0d0*ps2(igrid)%w - ps1(igrid)%w - imex222_lambda*ps2(igrid)%w
          if(stagger_grid) ps2(igrid)%ws = 2.0d0*ps2(igrid)%ws - ps1(igrid)%ws - imex222_lambda*ps2(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        ! Solve xi2 = (ps1) + lambda.dt.F_im(xi2)
        call global_implicit_update(imex222_lambda, dt, global_time, ps2, ps1)

        ! Add dt/2.F_im(xi2) to ps
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w = ps(igrid)%w + (ps2(igrid)%w - ps1(igrid)%w) / (2.0d0 * imex222_lambda)
          if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + (ps2(igrid)%ws - ps1(igrid)%ws) / (2.0d0 * imex222_lambda)
        end do
        !$OMP END PARALLEL DO
        ! Set ps = y^n + dt/2.(F(xi1)+F(xi2)) = y^(n+1)
        call advect1(flux_method, half, idim^lim, global_time+dt, ps2, global_time+half*dt, ps)

      case default
        call mpistop("unknown twostep time_integrator in advect")
      end select

    case (threestep)
      select case (t_integrator)
      case (ssprk3)
        ! this is SSPRK(3,3) Gottlieb-Shu 1998 or SSP(3,2) depending on ssprk_order (3 vs 2)
        call advect1(flux_method,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
          if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk_beta22, idim^lim,global_time+rk_c2*dt,ps1,global_time+rk_alfa22*rk_c2*dt,ps2)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
          if(stagger_grid) ps(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk_beta33, idim^lim,global_time+rk_c3*dt,ps2,global_time+(1.0d0-rk_beta33)*dt,ps)
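        ! with ssprk_order=3 (Gottlieb-Shu) the coefficients reduce to
        !   w1 = w^n + dt*L(w^n)
        !   w2 = 3/4*w^n + 1/4*(w1 + dt*L(w1))
        !   w^{n+1} = 1/3*w^n + 2/3*(w2 + dt*L(w2))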

      case (rk3_bt)
        ! this is a general three-stage RK defined by its Butcher tableau
        call advect1(flux_method,rk3_a21, idim^lim,global_time,ps,global_time,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps3(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/rk3_a21
          if(stagger_grid) ps3(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/rk3_a21
        end do
        !$OMP END PARALLEL DO
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w=ps(igrid)%w+rk3_a31*ps3(igrid)%w
          if(stagger_grid) ps2(igrid)%ws=ps(igrid)%ws+rk3_a31*ps3(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk3_a32, idim^lim,global_time+rk3_c2*dt,ps1,global_time+rk3_a31*dt,ps2)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w=ps(igrid)%w+rk3_b1*ps3(igrid)%w &
               +rk3_b2*(ps2(igrid)%w-(ps(igrid)%w+rk3_a31*ps3(igrid)%w))/rk3_a32
          if(stagger_grid)then
            ps(igrid)%ws=ps(igrid)%ws+rk3_b1*ps3(igrid)%ws &
                 +rk3_b2*(ps2(igrid)%ws-(ps(igrid)%ws+rk3_a31*ps3(igrid)%ws))/rk3_a32
          endif
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk3_b3, idim^lim,global_time+rk3_c3*dt,ps2,global_time+(1.0d0-rk3_b3)*dt,ps)
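        ! Butcher form: w1 = w^n + a21*dt*L(w^n),
        !   w2 = w^n + a31*dt*L(w^n) + a32*dt*L(w1),
        !   w^{n+1} = w^n + dt*(b1*L(w^n) + b2*L(w1) + b3*L(w2));
        ! ps3 holds dt*L(w^n), recovered as (ps1-ps)/rk3_a21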

      case (imex_ars3)
        ! this is IMEX scheme ARS3
        call advect1(flux_method,ars_gamma, idim^lim,global_time,ps,global_time,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps4(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/ars_gamma
          if(stagger_grid) ps4(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/ars_gamma
        end do
        !$OMP END PARALLEL DO
        call global_implicit_update(ars_gamma,dt,global_time+ars_gamma*dt,ps2,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps1(igrid)%w=(ps2(igrid)%w-ps1(igrid)%w)/ars_gamma
          if(stagger_grid) ps1(igrid)%ws=(ps2(igrid)%ws-ps1(igrid)%ws)/ars_gamma
        end do
        !$OMP END PARALLEL DO
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps3(igrid)%w=ps(igrid)%w+(ars_gamma-1.0d0)*ps4(igrid)%w+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%w
          if(stagger_grid) then
            ps3(igrid)%ws=ps(igrid)%ws+(ars_gamma-1.0d0)*ps4(igrid)%ws+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%ws
          endif
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,2.0d0*(1.0d0-ars_gamma), idim^lim,global_time+ars_gamma*dt,ps2,global_time+(ars_gamma-1.0d0)*dt,ps3)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w=ps1(igrid)%w+(ps3(igrid)%w-(ps(igrid)%w+ &
               (ars_gamma-1.0d0)*ps4(igrid)%w+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%w))/(2.0d0*(1.0d0-ars_gamma))
          if(stagger_grid) then
            ps2(igrid)%ws=ps1(igrid)%ws+(ps3(igrid)%ws-(ps(igrid)%ws+ &
                 (ars_gamma-1.0d0)*ps4(igrid)%ws+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%ws))/(2.0d0*(1.0d0-ars_gamma))
          endif
        end do
        !$OMP END PARALLEL DO
        call global_implicit_update(ars_gamma,dt,global_time+(1.0d0-ars_gamma)*dt,ps4,ps3)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w=ps(igrid)%w+half*ps2(igrid)%w &
               +half*(ps4(igrid)%w-ps3(igrid)%w)/ars_gamma
          if(stagger_grid) then
            ps(igrid)%ws=ps(igrid)%ws+half*ps2(igrid)%ws &
                 +half*(ps4(igrid)%ws-ps3(igrid)%ws)/ars_gamma
          endif
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,half, idim^lim,global_time+(1.0d0-ars_gamma)*dt,ps4,global_time+half*dt,ps)
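        ! ARS3 uses two diagonally-implicit stages (diagonal entry ars_gamma)
        ! and three explicit flux evaluations; this matches the ARS(2,3,3)
        ! scheme of Ascher, Ruuth & Spiteri 1997, whose implicit diagonal is
        ! gamma = (3+sqrt(3))/6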

      case (imex_232)
        ! this is IMEX_ARK(2,3,2) or IMEX_SSP(2,3,2)
        call advect1(flux_method,imex_a21, idim^lim,global_time,ps,global_time,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps4(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/imex_a21
          ps3(igrid)%w=ps(igrid)%w
          if(stagger_grid) then
            ps4(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/imex_a21
            ps3(igrid)%ws=ps(igrid)%ws
          endif
        end do
        !$OMP END PARALLEL DO
        call evaluate_implicit(global_time,ps3)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps1(igrid)%w=ps1(igrid)%w+imex_ha21*dt*ps3(igrid)%w
          if(stagger_grid) ps1(igrid)%ws=ps1(igrid)%ws+imex_ha21*dt*ps3(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call getbc(global_time+imex_a21*dt,dt,ps1,iwstart,nwgc)
        call global_implicit_update(imex_ha22,dt,global_time+imex_c2*dt,ps2,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w=ps(igrid)%w+imex_a31*ps4(igrid)%w &
               +imex_b1*dt*ps3(igrid)%w+imex_b2*(ps2(igrid)%w-ps1(igrid)%w)/imex_ha22
          if(stagger_grid) then
            ps(igrid)%ws=ps(igrid)%ws+imex_a31*ps4(igrid)%ws &
                 +imex_b1*dt*ps3(igrid)%ws+imex_b2*(ps2(igrid)%ws-ps1(igrid)%ws)/imex_ha22
          endif
        end do
        !$OMP END PARALLEL DO
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps3(igrid)%w=ps1(igrid)%w-imex_a21*ps4(igrid)%w &
               -imex_ha21*dt*ps3(igrid)%w+imex_b1*dt*ps3(igrid)%w
          if(stagger_grid) then
            ps3(igrid)%ws=ps1(igrid)%ws-imex_a21*ps4(igrid)%ws &
                 -imex_ha21*dt*ps3(igrid)%ws+imex_b1*dt*ps3(igrid)%ws
          endif
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,imex_a32, idim^lim,global_time+imex_c2*dt,ps2,global_time+imex_a31*dt,ps)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w=(ps(igrid)%w-ps3(igrid)%w-imex_a31*ps4(igrid)%w)/imex_a32 &
               +(1.0d0-imex_b2/imex_a32)*(ps2(igrid)%w-ps1(igrid)%w)/imex_ha22
          if(stagger_grid) then
            ps2(igrid)%ws=(ps(igrid)%ws-ps3(igrid)%ws-imex_a31*ps4(igrid)%ws)/imex_a32 &
                 +(1.0d0-imex_b2/imex_a32)*(ps2(igrid)%ws-ps1(igrid)%ws)/imex_ha22
          endif
        end do
        !$OMP END PARALLEL DO
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps1(igrid)%w=ps3(igrid)%w+imex_b1*ps4(igrid)%w+imex_b2*ps2(igrid)%w
          if(stagger_grid) then
            ps1(igrid)%ws=ps3(igrid)%ws+imex_b1*ps4(igrid)%ws+imex_b2*ps2(igrid)%ws
          endif
        end do
        !$OMP END PARALLEL DO
        call global_implicit_update(imex_b3,dt,global_time+imex_c3*dt,ps2,ps)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w=ps1(igrid)%w+ps2(igrid)%w-ps(igrid)%w
          if(stagger_grid) then
            ps(igrid)%ws=ps1(igrid)%ws+ps2(igrid)%ws-ps(igrid)%ws
          endif
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,imex_b3, idim^lim,global_time+imex_c3*dt,ps2,global_time+(1.0d0-imex_b3)*dt,ps)

      case (imex_cb3a)
        ! Third-order IMEX scheme with low-storage implementation (4 registers).
        ! From Cavaglieri & Bewley 2015, see doi.org/10.1016/j.jcp.2015.01.031
        ! (scheme called "IMEXRKCB3a" there). Uses 3 explicit and 2 implicit stages.
        ! Parameters are in imex_bj, imex_cj (same for implicit/explicit),
        ! imex_aij (implicit tableau) and imex_haij (explicit tableau).
        call advect1(flux_method, imex_ha21, idim^lim, global_time, ps, global_time, ps1)
        call global_implicit_update(imex_a22, dt, global_time+imex_c2*dt, ps2, ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps3(igrid)%w = ps(igrid)%w + imex_a32/imex_a22 * (ps2(igrid)%w - ps1(igrid)%w)
          ps(igrid)%w = ps(igrid)%w + imex_b2 /imex_a22 * (ps2(igrid)%w - ps1(igrid)%w)
          ps1(igrid)%w = ps3(igrid)%w
          if(stagger_grid) ps3(igrid)%ws = ps(igrid)%ws + imex_a32/imex_a22 * (ps2(igrid)%ws - ps1(igrid)%ws)
          if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + imex_b2 /imex_a22 * (ps2(igrid)%ws - ps1(igrid)%ws)
          if(stagger_grid) ps1(igrid)%ws = ps3(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method, imex_ha32, idim^lim, global_time+imex_c2*dt, ps2, global_time, ps3)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w = ps(igrid)%w + imex_b2 /imex_ha32 * (ps3(igrid)%w - ps1(igrid)%w)
          if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + imex_b2 /imex_ha32 * (ps3(igrid)%ws - ps1(igrid)%ws)
        end do
        !$OMP END PARALLEL DO
        call global_implicit_update(imex_a33, dt, global_time+imex_c3*dt, ps1, ps3)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w = ps(igrid)%w + imex_b3 /imex_a33 * (ps1(igrid)%w - ps3(igrid)%w)
          if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + imex_b3 /imex_a33 * (ps1(igrid)%ws - ps3(igrid)%ws)
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method, imex_b3, idim^lim, global_time+imex_c3*dt, ps1, global_time+imex_b2*dt, ps)
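        ! low-storage pattern: global_implicit_update returns the stage xi_k
        ! itself, so dt*F_im(xi_k) is recovered as (xi_k - rhs)/a_kk and
        ! accumulated into ps with weight b_k; the explicit increments are
        ! extracted analogously using the imex_ha coefficients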

      case default
        call mpistop("unknown threestep time_integrator in advect")
      end select

    case (fourstep)
      select case (t_integrator)
      case (ssprk4)
        ! SSPRK(4,3) or SSP(4,2) depending on ssprk_order (3 vs 2)
        ! ssprk43: strong-stability-preserving four-stage, third-order RK by
        ! Ruuth & Spiteri, J. Sci. Comput. 17 (2002), 211-220,
        ! stable up to CFL number 2; ssp42 is stable up to CFL number 3
        call advect1(flux_method,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
          if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk_beta22, idim^lim,global_time+rk_c2*dt,ps1,global_time+rk_alfa22*rk_c2*dt,ps2)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps1(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
          if(stagger_grid) ps1(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk_beta33, idim^lim,global_time+rk_c3*dt,ps2,global_time+rk_alfa33*rk_c3*dt,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w=rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w
          if(stagger_grid) ps(igrid)%ws=rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk_beta44, idim^lim,global_time+rk_c4*dt,ps1,global_time+(1.0d0-rk_beta44)*dt,ps)

      case (rk4)
        ! the standard RK(4,4) method
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w=ps(igrid)%w
          ps3(igrid)%w=ps(igrid)%w
          if(stagger_grid) then
            ps2(igrid)%ws=ps(igrid)%ws
            ps3(igrid)%ws=ps(igrid)%ws
          endif
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,half, idim^lim,global_time,ps,global_time,ps1)
        call advect1(flux_method,half, idim^lim,global_time+half*dt,ps1,global_time,ps2)
        call advect1(flux_method,1.0d0, idim^lim,global_time+half*dt,ps2,global_time,ps3)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w=(1.0d0/3.0d0)*(-ps(igrid)%w+ps1(igrid)%w+2.0d0*ps2(igrid)%w+ps3(igrid)%w)
          if(stagger_grid) ps(igrid)%ws=(1.0d0/3.0d0) &
               *(-ps(igrid)%ws+ps1(igrid)%ws+2.0d0*ps2(igrid)%ws+ps3(igrid)%ws)
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,1.0d0/6.0d0, idim^lim,global_time+dt,ps3,global_time+dt*5.0d0/6.0d0,ps)
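        ! classical RK4 with stage slopes k_i: ps1 = w^n + dt/2*k1,
        ! ps2 = w^n + dt/2*k2, ps3 = w^n + dt*k3, so that
        ! (-w^n+ps1+2*ps2+ps3)/3 = w^n + dt*(k1+2*k2+2*k3)/6 and the final
        ! 1/6-weighted step adds the remaining dt*k4/6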

      case default
        call mpistop("unknown fourstep time_integrator in advect")
      end select

    case (fivestep)
      select case (t_integrator)
      case (ssprk5)
        ! SSPRK(5,4) by Ruuth and Spiteri
        !bcexch = .false.
        call advect1(flux_method,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
          if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk_beta22, idim^lim,global_time+rk_c2*dt,ps1,global_time+rk_alfa22*rk_c2*dt,ps2)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps1(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
          if(stagger_grid) ps1(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk_beta33, idim^lim,global_time+rk_c3*dt,ps2,global_time+rk_alfa33*rk_c3*dt,ps1)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps3(igrid)%w=rk_alfa53*ps2(igrid)%w+rk_alfa54*ps1(igrid)%w
          if(stagger_grid) ps3(igrid)%ws=rk_alfa53*ps2(igrid)%ws+rk_alfa54*ps1(igrid)%ws
        end do
        !$OMP END PARALLEL DO

        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps2(igrid)%w=rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w
          if(stagger_grid) ps2(igrid)%ws=rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws
        end do
        !$OMP END PARALLEL DO
        call advect1(flux_method,rk_beta44, idim^lim,global_time+rk_c4*dt,ps1,global_time+rk_alfa44*rk_c4*dt,ps2)
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          ps(igrid)%w=ps3(igrid)%w+rk_alfa55*ps2(igrid)%w &
               +(rk_beta54/rk_beta44)*(ps2(igrid)%w-(rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w))
          if(stagger_grid) then
            ps(igrid)%ws=ps3(igrid)%ws+rk_alfa55*ps2(igrid)%ws &
                 +(rk_beta54/rk_beta44)*(ps2(igrid)%ws-(rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws))
          endif
        end do
        !$OMP END PARALLEL DO
        !bcexch = .true.
        call advect1(flux_method,rk_beta55, idim^lim,global_time+rk_c5*dt,ps2,global_time+(1.0d0-rk_beta55)*dt,ps)

      case default
        call mpistop("unknown fivestep time_integrator in advect")
      end select

    case default
      call mpistop("unknown time_stepper in advect")
    end select

  end subroutine advect

  !> Implicit global update step within IMEX schemes, advance psa=psb+dtfactor*qdt*F_im(psa)
  subroutine global_implicit_update(dtfactor,qdt,qtC,psa,psb)
    use mod_global_parameters
    use mod_ghostcells_update, only: getbc
    use mod_physics, only: phys_implicit_update

    type(state), target :: psa(max_blocks) !< Compute implicit part from this state and update it
    type(state), target :: psb(max_blocks) !< Will be unchanged, as on entry
    double precision, intent(in) :: qdt !< overall time step dt
    double precision, intent(in) :: qtc !< Both states psa and psb at this time level
    double precision, intent(in) :: dtfactor !< Advance psa=psb+dtfactor*qdt*F_im(psa)

    integer :: iigrid, igrid

    ! First copy all variables from b to a; this is necessary to account for
    ! quantities in w with no implicit source term
    do iigrid=1,igridstail; igrid=igrids(iigrid);
      psa(igrid)%w = psb(igrid)%w
    end do

    if (associated(phys_implicit_update)) then
      call phys_implicit_update(dtfactor,qdt,qtc,psa,psb)
    end if

    ! enforce boundary conditions for psa
    call getbc(qtc,0.d0,psa,iwstart,nwgc)

  end subroutine global_implicit_update

  !> Evaluate the implicit part in place, i.e. psa ==> F_im(psa)
  subroutine evaluate_implicit(qtC,psa)
    use mod_global_parameters
    use mod_physics, only: phys_evaluate_implicit

    type(state), target :: psa(max_blocks) !< Compute implicit part from this state and update it
    double precision, intent(in) :: qtc !< psa at this time level

    if (associated(phys_evaluate_implicit)) then
      call phys_evaluate_implicit(qtc,psa)
    end if

  end subroutine evaluate_implicit

  !> Integrate all grids by one partial step
  subroutine advect1(method,dtfactor,idim^LIM,qtC,psa,qt,psb)
    use mod_global_parameters
    use mod_fix_conserve
    use mod_ghostcells_update, only: getbc
    use mod_physics

    integer, intent(in) :: idim^lim
    type(state), target :: psa(max_blocks) !< Compute fluxes based on this state
    type(state), target :: psb(max_blocks) !< Update solution on this state
    double precision, intent(in) :: dtfactor !< Advance over dtfactor * dt
    double precision, intent(in) :: qtc
    double precision, intent(in) :: qt
    integer, intent(in) :: method(nlevelshi)

    double precision :: qdt
    integer :: iigrid, igrid

    istep = istep+1

    if(associated(phys_special_advance)) then
      call phys_special_advance(qtc,psa)
    end if

    qdt=dtfactor*dt
    ! opedit: Just advance the active grids:
    !$OMP PARALLEL DO PRIVATE(igrid)
    do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
      block=>ps(igrid)
      ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
      call advect1_grid(igrid,method(block%level),qdt,dtfactor,ixg^ll,idim^lim,&
           qtc,psa(igrid),qt,psb(igrid),rnode(rpdx1_:rnodehi,igrid),ps(igrid)%x)
    end do
    !$OMP END PARALLEL DO

    ! opedit: Send flux for all grids, expects sends for all
    ! nsend_fc(^D), set in connectivity.t.

    if (fix_conserve_global .and. fix_conserve_at_step) then
      call recvflux(idim^lim)
      call sendflux(idim^lim)
      call fix_conserve(psb,idim^lim,1,nwflux)
      if(stagger_grid) then
        call fix_edges(psb,idim^lim)
        ! fill the cell-center values from the updated staggered variables
        !$OMP PARALLEL DO PRIVATE(igrid)
        do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
          call phys_face_to_center(ixm^ll,psb(igrid))
        end do
        !$OMP END PARALLEL DO
      end if
    end if

    ! For all grids: fill ghost cells
    call getbc(qt+qdt,qdt,psb,iwstart,nwgc)

  end subroutine advect1

  !> Advance a single grid over one partial time step
  subroutine advect1_grid(igrid,method,qdt,dtfactor,ixI^L,idim^LIM,qtC,sCT,qt,s,dxs,x)

    ! integrate one grid by one partial step
    use mod_global_parameters
    use mod_finite_difference, only: fd, centdiff
    use mod_tvd
    use mod_source, only: addsource2
    use mod_finite_volume, only: finite_volume, hancock
    use mod_fix_conserve, only: store_flux, store_edge
    use mod_comm_lib, only: mpistop
    use mod_physics, only: phys_to_primitive

    integer, intent(in) :: igrid,method
    integer, intent(in) :: ixi^l, idim^lim
    double precision, intent(in) :: qdt, dtfactor, qtc, qt, dxs(ndim), x(ixi^s,1:ndim)
    type(state), target :: sct, s

    ! cell face flux
    double precision :: fc(ixi^s,1:nwflux,1:ndim)
    ! cell edge flux
    double precision :: fe(ixi^s,sdim:3)
    double precision :: wprim(ixi^s,1:nw)
    integer :: ixo^l

    ixo^l=ixi^l^lsubnghostcells;
    select case (method)
    case (fs_hll,fs_hllc,fs_hllcd,fs_hlld,fs_tvdlf,fs_tvdmu)
      call finite_volume(method,qdt,dtfactor,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
    case (fs_cd,fs_cd4)
      call centdiff(method,qdt,dtfactor,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
    case (fs_hancock)
      call hancock(qdt,dtfactor,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,dxs,x)
    case (fs_fd)
      call fd(qdt,dtfactor,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
    case (fs_tvd)
      call centdiff(fs_cd,qdt,dtfactor,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
      call tvdlimit(method,qdt,ixi^l,ixo^l,idim^lim,sct,qt+qdt,s,fc,dxs,x)
    case (fs_source)
      wprim=sct%w
      call phys_to_primitive(ixi^l,ixi^l,wprim,x)
      call addsource2(qdt*dble(idimmax-idimmin+1)/dble(ndim),&
           dtfactor*dble(idimmax-idimmin+1)/dble(ndim),&
           ixi^l,ixo^l,1,nw,qtc,sct%w,wprim,qt,s%w,x,.false.)
    case (fs_nul)
      ! There is nothing to do
    case default
      call mpistop("unknown flux scheme in advect1_grid")
    end select

    ! opedit: Obviously, flux is stored only for active grids,
    ! but fix_conserve knows whether there is a passive neighbor
    ! via neighbor_active(i^D,igrid) and skips the correction for those.
    ! This violates strict conservation when the active/passive interface
    ! coincides with a coarse/fine interface.
    if (fix_conserve_global .and. fix_conserve_at_step) then
      call store_flux(igrid,fc,idim^lim,nwflux)
      if(stagger_grid) call store_edge(igrid,ixg^ll,fe,idim^lim)
    end if

  end subroutine advect1_grid

  !> process is a user entry point in the time loop, called before output
  !> and advance; it allows to modify the solution, add extra variables, etc.
  !> Warning: the CFL dt is already determined (and is not recomputed)!
  subroutine process(iit,qt)
    use mod_usr_methods, only: usr_process_global, usr_process_grid
    use mod_global_parameters
    use mod_ghostcells_update, only: getbc

    ! .. scalars ..
    integer, intent(in) :: iit
    double precision, intent(in) :: qt

    integer :: iigrid, igrid

    if (associated(usr_process_global)) then
      call usr_process_global(iit,qt)
    end if

    if (associated(usr_process_grid)) then
      !$OMP PARALLEL DO PRIVATE(igrid)
      do iigrid=1,igridstail; igrid=igrids(iigrid);
        ! next few lines ensure correct usage of routines like divvector etc
        ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
        block=>ps(igrid)
        call usr_process_grid(igrid,node(plevel_,igrid),ixg^ll,ixm^ll, &
             qt,ps(igrid)%w,ps(igrid)%x)
      end do
      !$OMP END PARALLEL DO
      call getbc(qt,dt,ps,iwstart,nwgc)
    end if
  end subroutine process

  !> process_advanced is a user entry point in the time loop, called just
  !> after advance; it allows to modify the solution, add extra variables,
  !> etc.; added for handling two-way coupled PIC-MHD
  !> Warning: w is now at global_time^(n+1), while the global time and
  !> iteration counter are still at global_time^n, it^n
  subroutine process_advanced(iit,qt)
    use mod_usr_methods, only: usr_process_adv_global, usr_process_adv_grid
    use mod_global_parameters
    use mod_ghostcells_update, only: getbc

    ! .. scalars ..
    integer, intent(in) :: iit
    double precision, intent(in) :: qt

    integer :: iigrid, igrid

    if (associated(usr_process_adv_global)) then
      call usr_process_adv_global(iit,qt)
    end if

    if (associated(usr_process_adv_grid)) then
      !$OMP PARALLEL DO PRIVATE(igrid)
      do iigrid=1,igridstail; igrid=igrids(iigrid);
        ! next few lines ensure correct usage of routines like divvector etc
        ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
        block=>ps(igrid)

        call usr_process_adv_grid(igrid,node(plevel_,igrid),ixg^ll,ixm^ll, &
             qt,ps(igrid)%w,ps(igrid)%x)
      end do
      !$OMP END PARALLEL DO
      call getbc(qt,dt,ps,iwstart,nwgc)
    end if
  end subroutine process_advanced

end module mod_advance