MPI-AMRVAC 3.2
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
Loading...
Searching...
No Matches
amrvac.t
Go to the documentation of this file.
1!> AMRVAC solves a set of hyperbolic equations
2!> \f$\vec{u}_t + \nabla_x \cdot \vec{f}(\vec{u}) = \vec{s}\f$
3!> using adaptive mesh refinement.
! NOTE(review): this listing was extracted from a generated documentation page;
! gaps in the embedded line numbers (44->46, 59->61, 72->74, 93->95, ...) are
! source lines (mostly hyperlinked call statements) dropped by the extraction.
! Confirm any behavior against the original amrvac.t before relying on it.
4program amrvac
5
10 use mod_usr
17 use mod_advance, only: process
19 use mod_convert, only: init_convert
20 use mod_physics
25
26
! time0 is the wall-clock reference used by the startup/total timing reports
27 double precision :: time0, time_in
28 logical,save :: part_file_exists=.false.
29
30
! initialize the MPI environment (see comm_start)
31 call comm_start()
32
33 time0 = mpi_wtime()
34 time_advance = .false.
35 time_bc = zero
36
37 ! read command line arguments first
38 call read_arguments()
39
40 ! init_convert is called before usr_init as user might associate a convert method
41 call init_convert()
42 ! the user_init routine should load a physics module
43 call usr_init()
44
46
47 if (restart_from_file /= undefined) then
48 ! restart from previous file or dat file conversion
49 ! get input data from previous AMRVAC run
50
51 ! read in dat file
52 call read_snapshot()
53
54 ! rewrite it=0 snapshot when restart from it=0 state
! (decrementing snapshotnext makes the next write overwrite the it=0 snapshot)
55 if(it==0.and.itsave(1,2)==0) snapshotnext=snapshotnext-1
56
57 if (reset_time) then
58 ! reset it and global time to original value
59 it = it_init
61 ! reset snapshot number
63 end if
64
65 if (reset_it) then
66 ! reset it to original value
67 it = it_init
68 end if
69
70 ! allow user to read extra data before filling boundary condition
71 if (associated(usr_process_grid) .or. &
72 associated(usr_process_global)) then
! NOTE(review): the call made in this branch (line 73) was dropped by the
! extraction — presumably process(it, global_time); confirm in amrvac.t
74 end if
75
76 ! modify initial condition
77 if (firstprocess) then
78 ! update ghost cells for all need-boundary variables before modification
79 call getbc(global_time,0.d0,ps,1,nwflux+nwaux)
80 call modify_ic
81 end if
82
83 ! select active grids
84 call selectgrids
85
86 ! update ghost cells for all need-boundary variables
87 call getbc(global_time,0.d0,ps,1,nwflux+nwaux)
88
89 ! reset AMR grid
90 if (reset_grid) then
91 call settree
92 else
93 ! set up boundary flux conservation arrays
95 end if
96
97 ! all blocks refined to the same level for output
98 if(convert .and. level_io>0 .or. level_io_min.ne.1 .or. level_io_max.ne.nlevelshi) &
100
101 {^nooned
102 ! improve initial condition after restart and modification
104 }
105
107
108 if(use_particles) then
109 call read_particles_snapshot(part_file_exists)
110 call init_gridvars()
111 if (.not. part_file_exists) call particles_create()
112 if(convert) then
113 call handle_particles()
114 call finish_gridvars()
115 call time_spent_on_particles()
116 call comm_finalize
117 stop
118 end if
119 end if
120
121 if(convert) then
122 if (npe/=1.and.(.not.(index(convert_type,'mpi')>=1)) &
123 .and. convert_type .ne. 'user') &
124 call mpistop("non-mpi conversion only uses 1 cpu")
125 if(mype==0.and.level_io>0) write(unitterm,*)'reset tree to fixed level=',level_io
126
127 !here requires -1 snapshot
129
130 if(associated(phys_special_advance)) then
131 ! e.g. calculate MF velocity from magnetic field
133 end if
134
! a pure conversion run ends here: no time integration is performed
136 call comm_finalize
137 stop
138 end if
139
140 else
141
142 ! form and initialize all grids at level one
143 call initlevelone
144
145 ! set up and initialize finer level grids, if needed
146 call settree
147
149
150 ! improve initial condition
152
153 ! select active grids
154 call selectgrids
155
156 if (use_particles) then
157 call init_gridvars()
158 call particles_create()
159 end if
160
161 end if
162
163 ! initialize something based on tree information
165
166 if (mype==0) then
167 print*,'-------------------------------------------------------------------------------'
168 write(*,'(a,f17.3,a)')' Startup phase took : ',mpi_wtime()-time0,' sec'
169 print*,'-------------------------------------------------------------------------------'
170 end if
171
172 ! an interface to allow user to do special things before the main loop
173 if (associated(usr_before_main_loop)) &
175
176 ! do time integration of all grids on all levels
177 call timeintegration()
178
179 if (mype==0) then
180 time0=mpi_wtime()-time0
181 print*,'-------------------------------------------------------------------------------'
182 write(*,'(a,f17.3,a,f17.3,a)')' Finished AMRVAC in : ',time0,' sec', dble(npe)*time0/3.6d3,' core hour'
183 print*,'-------------------------------------------------------------------------------'
184 end if
185
186 call comm_finalize
187
188contains
189
!> Main time-integration driver: loops over time steps until it_max, time_max,
!> the wall-time budget, or a final-dt condition is reached, handling periodic
!> output, on-demand snapshots ('savenow' file), regridding and timing reports.
! NOTE(review): extracted listing — embedded line-number gaps (210, 217, 223,
! 252, 266, 325, 344, 347, 392, ...) are dropped source lines; confirm against
! the original amrvac.t.
190 subroutine timeintegration()
191 use mod_timing
193 use mod_forest, only: nleafs_active
198 use mod_dt, only: setdt
199
200
201 double precision :: time_last_print, time_write0, time_write, time_before_advance, dt_loop
202 integer(kind=8) ncells_update
203 integer :: level, ifile, fixcount, ncells_block, igrid, iigrid
204 logical :: crashall
205
206 time_in=mpi_wtime()
207 time_last_print = -bigdouble
208 fixcount=1
209
211
! re-derive the last save time/iteration per output type so a resumed run does
! not immediately re-trigger every output file
212 do ifile=nfile,1,-1
213 if(resume_previous_run) then
214 tsavelast(ifile)=aint((global_time+smalldouble)/dtsave(ifile))*dtsave(ifile)
215 itsavelast(ifile)=it/ditsave(ifile)*ditsave(ifile)
216 else
218 itsavelast(ifile)=it
219 end if
220 end do
221
222 ! the next two are used to keep track of the performance during runtime:
224 timelast=mpi_wtime()
225
226 ! ------ start of integration loop. ------------------
227 if (mype==0) then
228 write(*, '(A,ES9.2,A)') ' Start integrating, print status every ', &
229 time_between_print, ' seconds'
230 write(*, '(A4,A10,A12,A12,A12)') ' #', 'it', 'time', 'dt', 'wc-time(s)'
231 end if
232
233 timeloop0=mpi_wtime()
234 time_bc=0.d0
235 time_write=0.d0
! {..*} is a VACPP/LASY dimension macro: product over dimensions of the
! interior (non-ghost) cells per block — TODO confirm expansion
236 ncells_block={(ixghi^d-2*nghostcells)*}
237 ncells_update=0
238 dt_loop=0.d0
239
240 time_advance=.true.
241
242 time_evol : do
243
244 time_before_advance=mpi_wtime()
245 ! set time step
246 call setdt()
247
248 ! Optionally call a user method that can modify the grid variables at the
249 ! beginning of a time step
250 if (associated(usr_process_grid) .or. &
251 associated(usr_process_global)) then
! NOTE(review): the call in this branch (line 252) was dropped by the extraction
253 end if
254
255 ! Check if output needs to be written
256 do ifile=nfile,1,-1
257 save_file(ifile) = timetosave(ifile)
258 end do
259
260 timeio0=mpi_wtime()
261
! periodic status line, at most once per time_between_print wall-clock seconds
262 if (timeio0 - time_last_print > time_between_print) then
263 time_last_print = timeio0
264 if (mype == 0) then
265 write(*, '(A4,I10,ES12.4,ES12.4,ES12.4)') " #", &
267 end if
268 end if
269
270 ! output data
271 if (any(save_file)) then
272 if(associated(usr_modify_output)) then
273 ! Users can modify or set variables before output is written
274 do iigrid=1,igridstail; igrid=igrids(iigrid);
275 ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
276 block=>ps(igrid)
277 call usr_modify_output(ixg^ll,ixm^ll,global_time,ps(igrid)%w,ps(igrid)%x)
278 end do
279 end if
280 time_write=0.d0
281 do ifile=nfile,1,-1
282 if (save_file(ifile)) then
283 time_write0=mpi_wtime()
284 call saveamrfile(ifile)
285 time_write=time_write+mpi_wtime()-time_write0
286 end if
287 end do
288 end if
289
290 ! output a snapshot when user write a file named 'savenow' in the same
291 ! folder as the executable amrvac
292 if (mype==0) inquire(file='savenow',exist=save_now)
293 if (npe>1) call mpi_bcast(save_now,1,mpi_logical,0,icomm,ierrmpi)
294
295 if (save_now) then
296 if(mype==0) write(*,'(a,i7,a,i7,a,es12.4)') ' save a snapshot No.',&
297 snapshotnext,' at it=',it,' global_time=',global_time
298 call saveamrfile(1)
299 call saveamrfile(2)
300 call mpi_file_delete('savenow',mpi_info_null,ierrmpi)
301 end if
302 timeio_tot=timeio_tot+mpi_wtime()-timeio0
303
! stop before the wall-time budget expires: current elapsed time plus the cost
! of one more loop (dt_loop) plus a 4x margin for writing the final snapshots
304 pass_wall_time=mpi_wtime()-time0+dt_loop+4.d0*time_write >=wall_time_max
305
306 ! exit time loop if time is up
307 if (it>=it_max .or. global_time>=time_max .or. pass_wall_time .or. final_dt_exit) exit time_evol
308
309 ! solving equations
310 call advance(it)
311
312 ! if met unphysical values, output the last good status and stop the run
313 call mpi_allreduce(crash,crashall,1,mpi_logical,mpi_lor,icomm,ierrmpi)
314 if (crashall) then
315 call saveamrfile(1)
316 call saveamrfile(2)
317 if(mype==0) write(*,*) "Error: small value encountered, run crash."
! NOTE(review): iigrid is passed as the MPI error code here — looks accidental
! (it is just the last loop index); confirm intent in amrvac.t
318 call mpi_abort(icomm, iigrid, ierrmpi)
319 end if
320
321 ! Optionally call a user method that can modify the grid variables at the
322 ! end of a time step: this is for two-way coupling to PIC, e.g.
323 if (associated(usr_process_adv_grid) .or. &
324 associated(usr_process_adv_global)) then
! NOTE(review): the call in this branch (line 325) was dropped by the extraction
326 end if
327
! regrid only every ditregrid-th step when ditregrid>1, every step otherwise;
! fixgrid() freezes the mesh after tfixgrid/itfixgrid
328 ! update AMR mesh and tree
329 timegr0=mpi_wtime()
330 if(ditregrid>1) then
331 if(fixcount<ditregrid) then
332 fixcount=fixcount+1
333 else
334 if (refine_max_level>1 .and. .not.(fixgrid())) call resettree
335 fixcount=1
336 end if
337 else
338 if (refine_max_level>1 .and. .not.(fixgrid())) call resettree
339 end if
340 timegr_tot=timegr_tot+(mpi_wtime()-timegr0)
341
342 ! update time variables
343 it = it + 1
345
! reset iteration-based save bookkeeping for very long runs
! (extracted listing omits line 347 here — confirm against amrvac.t)
346 if(it>9000000)then
348 itsavelast(:)=0
349 end if
350
351 ! count updated cells
352 ncells_update=ncells_update+ncells_block*nleafs_active
353
354 ! time lapses in one loop
355 dt_loop=mpi_wtime()-time_before_advance
356 end do time_evol
357
358 if(use_particles) then
359 call write_particle_output()
360 call finish_gridvars()
361 end if
362
363 time_advance=.false.
364
365 timeloop=mpi_wtime()-timeloop0
366
! timing summary (rank 0 only)
367 if (mype==0) then
368 write(*,'(a,f12.3,a)')' Total timeloop took : ',timeloop,' sec'
369 write(*,'(a,f12.3,a)')' Time spent on AMR : ',timegr_tot,' sec'
370 write(*,'(a,f12.2,a)')' Percentage: ',100.0*timegr_tot/timeloop,' %'
371 write(*,'(a,f12.3,a)')' Time spent on IO in loop : ',timeio_tot,' sec'
372 write(*,'(a,f12.2,a)')' Percentage: ',100.0*timeio_tot/timeloop,' %'
373 write(*,'(a,f12.3,a)')' Time spent on ghost cells : ',time_bc,' sec'
374 write(*,'(a,f12.2,a)')' Percentage: ',100.0*time_bc/timeloop,' %'
375 write(*,'(a,f12.3,a)')' Time spent on computing : ',timeloop-timeio_tot-timegr_tot-time_bc,' sec'
376 write(*,'(a,f12.2,a)')' Percentage: ',100.0*(timeloop-timeio_tot-timegr_tot-time_bc)/timeloop,' %'
377 write(*,'(a,es12.3 )')' Cells updated / proc / sec : ',dble(ncells_update)*dble(nstep)/dble(npe)/timeloop
378 end if
379
! output end state: write any file type that has not been saved at this it
380 ! output end state
381 timeio0=mpi_wtime()
382 do ifile=nfile,1,-1
383 if(itsavelast(ifile)<it) call saveamrfile(ifile)
384 end do
385 if (mype==0) call mpi_file_close(log_fh,ierrmpi)
386 timeio_tot=timeio_tot+(mpi_wtime()-timeio0)
387
388 if (mype==0) then
389 write(*,'(a,f12.3,a)')' Total time spent on IO : ',timeio_tot,' sec'
390 write(*,'(a,f12.3,a)')' Total timeintegration took : ',mpi_wtime()-time_in,' sec'
391 write(*, '(A4,I10,ES12.3,ES12.3,ES12.3)') " #", &
393 end if
394
395 {#IFDEF RAY
396 call time_spent_on_rays
397 }
398
399 if(use_particles) call time_spent_on_particles
400
401 if (use_multigrid) call mg_timers_show(mg)
402 end subroutine timeintegration
403
404 !> Save times are defined by either tsave(isavet(ifile),ifile) or
405 !> itsave(isaveit(ifile),ifile) or dtsave(ifile) or ditsave(ifile)
406 !> tsavestart(ifile) determines first start time. This only affects
407 !> read out times determined by dtsave(ifiles).
408 !> Other conditions may be included.
! NOTE(review): extracted listing — embedded line 410 (likely blank or a use/
! declaration line) was dropped; confirm against the original amrvac.t.
409 logical function timetosave(ifile)
411
412 integer:: ifile
413 logical:: oksave
414
415 oksave=.false.
! save on an explicitly listed iteration number (itsave list)
416 if (it==itsave(isaveit(ifile),ifile)) then
417 oksave=.true.
418 isaveit(ifile)=isaveit(ifile)+1
419 end if
! save every ditsave(ifile) iterations since the last save
420 if (it==itsavelast(ifile)+ditsave(ifile)) oksave=.true.
421
! save when the current step crossed an explicitly listed time (tsave list):
! true only on the first step whose interval [global_time-dt, global_time]
! contains the listed time
422 if (global_time>=tsave(isavet(ifile),ifile).and.global_time-dt<tsave(isavet(ifile),ifile)) then
423 oksave=.true.
424 isavet(ifile)=isavet(ifile)+1
425 end if
426
! save every dtsave(ifile) in simulation time, but only after tsavestart(ifile);
! smalldouble guards against floating-point round-off at the thresholds
427 if(global_time>=tsavestart(ifile)-smalldouble)then
428 if (global_time>=tsavelast(ifile)+dtsave(ifile)-smalldouble)then
429 oksave=.true.
430 n_saves(ifile) = n_saves(ifile) + 1
431 endif
432 endif
433
! record the time/iteration of this save so the periodic criteria re-arm
434 if (oksave) then
435 tsavelast(ifile) =global_time
436 itsavelast(ifile)=it
437 end if
438 timetosave=oksave
439
440 return
441 end function timetosave
442
443 !> Return true if the AMR grid should not be adapted any more. This is
444 !> controlled by tfixgrid or itfixgrid. Other conditions may be included.
! NOTE(review): extracted listing — embedded line 446 was dropped here.
445 logical function fixgrid()
447
! the mesh is frozen once either the time or the iteration threshold is reached
448 fixgrid= (global_time>=tfixgrid .or. it>=itfixgrid)
449 end function fixgrid
450
451end program amrvac
program amrvac
AMRVAC solves a set of hyperbolic equations using adaptive mesh refinement.
Definition amrvac.t:4
subroutine timeintegration()
Definition amrvac.t:191
Module containing all the time stepping schemes.
Definition mod_advance.t:2
subroutine, public process_advanced(iit, qt)
process_advanced is a user entry point in the time loop, called just after advance; it allows modifying the solution,...
subroutine, public advance(iit)
Advance all the grids over one time step, including all sources.
Definition mod_advance.t:18
subroutine, public process(iit, qt)
process is a user entry point in the time loop, called before output and advance; it allows modifying the solution,...
subroutine, public resettree
reset AMR and (de)allocate boundary flux storage at level changes
subroutine, public resettree_convert
Force the tree to desired level(s) from level_io(_min/_max) used for conversion to vtk output.
subroutine, public settree
Build up AMR.
subroutine, public comm_start
Initialize the MPI environment.
subroutine, public comm_finalize
Finalize (or shutdown) the MPI environment.
subroutine, public mpistop(message)
Exit MPI-AMRVAC with an error message.
subroutine generate_plotfile
subroutine init_convert()
Definition mod_convert.t:32
Definition mod_dt.t:1
subroutine, public setdt()
setdt - set dt for all levels between levmin and levmax. dtpar>0 --> use fixed dtpar for all level dt...
Definition mod_dt.t:11
Module for flux conservation near refinement boundaries.
subroutine, public allocatebflux
Module with basic grid data structures.
Definition mod_forest.t:2
integer nleafs_active
Definition mod_forest.t:78
update ghost cells of all blocks including physical boundaries
subroutine getbc(time, qdt, psb, nwstart, nwbc)
update ghost cells of all blocks, including physical boundaries
This module contains definitions of global parameters and variables and some generic functions/subrou...
double precision, dimension(nfile) tsavelast
type(state), pointer block
Block pointer for using one block and its previous state.
integer nstep
How many sub-steps the time integrator takes.
integer it_max
Stop the simulation after this many time steps have been taken.
logical reset_it
If true, reset iteration count to 0.
integer ixghi
Upper index of grid block arrays.
logical, dimension(nfile) save_file
whether or not to save an output file
logical resume_previous_run
If true, restart a previous run from the latest snapshot.
double precision global_time
The global simulation time.
integer, dimension(nsavehi, nfile) itsave
Save output of type N on iterations itsave(:, N)
double precision time_max
End time for the simulation.
double precision time_init
Start time for the simulation.
logical firstprocess
If true, call initonegrid_usr upon restarting.
integer snapshotini
Resume from the snapshot with this index.
integer it
Number of time steps taken.
integer it_init
initial iteration count
integer ditregrid
Reconstruct the AMR grid once every ditregrid iteration(s)
character(len=std_len) convert_type
Which format to use when converting.
integer itfixgrid
Fix the AMR grid after this many time steps.
integer, parameter nlevelshi
The maximum number of levels in the grid refinement.
logical use_particles
Use particles module or not.
integer icomm
The MPI communicator.
logical reset_time
If true, reset iteration count and global_time to original values, and start writing snapshots at ind...
integer mype
The rank of the current MPI task.
integer, dimension(1:nfile) n_saves
Number of saved files of each type.
double precision, dimension(nfile) tsavestart
Start of read out (not counting specified read outs)
integer, dimension(nfile) ditsave
Repeatedly save output of type N when ditsave(N) time steps have passed.
double precision dt
global time step
double precision wall_time_max
Ending wall time (in hours) for the simulation.
integer ierrmpi
A global MPI error return code.
logical autoconvert
If true, already convert to output format during the run.
integer slowsteps
If > 1, then in the first slowsteps-1 time steps dt is reduced by a factor .
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
integer npe
The number of MPI tasks.
integer, dimension(nfile) itsavelast
double precision time_between_print
to monitor timeintegration loop at given wall-clock time intervals
integer, parameter unitterm
Unit for standard output.
double precision, dimension(nfile) dtsave
Repeatedly save output of type N when dtsave(N) simulation time has passed.
logical time_advance
do time evolving
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
integer, parameter filelog_
Constant indicating log output.
integer, parameter fileout_
Constant indicating regular output.
double precision time_bc
accumulated wall-clock time spent on boundary conditions
double precision tfixgrid
Fix the AMR grid after this time.
integer nghostcells
Number of ghost cells surrounding a grid.
character(len= *), parameter undefined
double precision, dimension(nsavehi, nfile) tsave
Save output of type N on times tsave(:, N)
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
logical reset_grid
If true, rebuild the AMR grid upon restarting.
logical crash
Save a snapshot before crash a run met unphysical values.
double precision, dimension(^nd) dxlevel
store unstretched cell size of current level
logical use_multigrid
Use multigrid (only available in 2D and 3D)
integer refine_max_level
Maximal number of AMR levels.
integer, parameter nfile
Number of output methods.
logical pass_wall_time
If true, wall time is up, modify snapshotnext for later overwrite.
logical final_dt_exit
Force timeloop exit when final dt < dtmin.
integer, dimension(nfile) isaveit
integer, dimension(nfile) isavet
integer log_fh
MPI file handle for logfile.
subroutine, public improve_initial_condition()
improve initial condition after initialization
subroutine, public initlevelone
Generate and initialize all grids at the coarsest level (level one)
subroutine, public modify_ic
modify initial condition
This module handles the initialization of various components of amrvac.
subroutine, public initialize_amrvac()
Initialize amrvac: read par files and initialize variables.
logical save_now
whether a manually inserted snapshot is saved
Module for reading input and writing output.
subroutine saveamrfile(ifile)
subroutine read_arguments()
Read the command line arguments passed to amrvac.
subroutine read_snapshot
Routine to read in snapshots (.dat files). When it cannot recognize the file version,...
Module to couple the octree-mg library to AMRVAC. This file uses the VACPP preprocessor,...
type(mg_t) mg
Data structure containing the multigrid tree.
subroutine mg_setup_multigrid()
Setup multigrid for usage.
Module containing all the particle routines.
subroutine particles_create()
Create initial particles.
This module defines the procedures of a physics module. It contains function pointers for the various...
Definition mod_physics.t:4
procedure(sub_special_advance), pointer phys_special_advance
Definition mod_physics.t:68
subroutine, public selectgrids
integer ittimelast
Definition mod_timing.t:13
double precision timegr_tot
Definition mod_timing.t:9
double precision timeio_tot
Definition mod_timing.t:8
double precision time_in
Definition mod_timing.t:8
double precision timelast
Definition mod_timing.t:12
double precision timeio0
Definition mod_timing.t:8
double precision timeloop
Definition mod_timing.t:9
double precision timeloop0
Definition mod_timing.t:9
double precision timegr0
Definition mod_timing.t:9
subroutine, public initialize_trac_after_settree
Definition mod_trac.t:870
Module with all the methods that users can customize in AMRVAC.
procedure(process_grid), pointer usr_process_grid
procedure(process_adv_grid), pointer usr_process_adv_grid
procedure(sub_modify_io), pointer usr_modify_output
procedure(p_no_args), pointer usr_before_main_loop
procedure(process_global), pointer usr_process_global
procedure(process_adv_global), pointer usr_process_adv_global
This is a template for a new user problem of mhd.
subroutine usr_init()
This routine should set user methods, and activate the physics module.