MPI-AMRVAC  3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_initialize.t

!> This module handles the initialization of various components of amrvac
module mod_initialize
  use mod_comm_lib, only: mpistop

  implicit none
  private

  logical :: initialized_already = .false.

  ! Public methods
  public :: initialize_amrvac

contains

  !> Initialize amrvac: read par files and initialize variables
  subroutine initialize_amrvac()
    use mod_global_parameters
    use mod_input_output, only: read_par_files
    use mod_physics, only: phys_check, phys_check_params
    use mod_usr_methods, only: usr_set_parameters
    use mod_bc_data, only: bc_data_init
    use mod_init_datafromfile, only: read_data_init
    use mod_comm_lib, only: init_comm_types

    if (initialized_already) return

    ! Check whether the user has loaded a physics module
    call phys_check()

    ! Read input files
    call read_par_files()
    call initialize_vars()
    call init_comm_types()

    ! Possibly load boundary condition data or initial data
    call bc_data_init()
    call read_data_init()

    if(associated(usr_set_parameters)) call usr_set_parameters()

    call phys_check_params()

    initialized_already = .true.
  end subroutine initialize_amrvac
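
  ! Illustrative usage, not part of the upstream file: the main program is
  ! expected to call this routine once after MPI startup, e.g.
  !
  !   use mod_initialize, only: initialize_amrvac
  !   call initialize_amrvac()
  !
  ! A second call returns immediately thanks to the initialized_already guard.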

  !> Initialize (and allocate) simulation and grid variables
  subroutine initialize_vars
    use mod_forest
    use mod_global_parameters
    use mod_ghostcells_update
    use mod_fix_conserve, only: pflux
    use mod_amr_fct
    use mod_geometry

    integer :: igrid, level, ipe, ig^d
    logical :: ok

    allocate(ps(max_blocks))
    allocate(ps1(max_blocks))
    allocate(ps2(max_blocks))
    allocate(ps3(max_blocks))
    allocate(ps4(max_blocks))
    allocate(psc(max_blocks))
    allocate(ps_sub(max_blocks))
    allocate(neighbor(2,-1:1^d&,max_blocks),neighbor_child(2,0:3^d&,max_blocks))
    allocate(neighbor_type(-1:1^d&,max_blocks),neighbor_active(-1:1^d&,max_blocks))
    allocate(neighbor_pole(-1:1^d&,max_blocks))
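    ! A reading aid, not upstream code: the -1:1^d& ranges index the relative
    ! offset to a neighbor per dimension; e.g. in 2D, neighbor_type(0,1,igrid)
    ! refers to the neighbor of block igrid at offset (0,+1).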
    allocate(igrids(max_blocks),igrids_active(max_blocks),igrids_passive(max_blocks))
    allocate(phyboundblock(max_blocks))
    phyboundblock=.false.
    allocate(pflux(2,^nd,max_blocks))
    ! allocate mesh for particles
    if(use_particles) allocate(gridvars(max_blocks))
    if(stagger_grid) then
      allocate(pface(2,^nd,max_blocks),fine_neighbors(2^d&,^nd,max_blocks))
      allocate(old_neighbor(2,-1:1^d&,max_blocks))
    end if

    it=it_init
    global_time=time_init

    dt=zero

    ! no poles initially
    neighbor_pole=0

    ! check resolution
    if ({mod(ixghi^d,2)/=0|.or.}) then
      call mpistop("mesh widths must give even number grid points")
    end if
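    ! A reading aid, not upstream code: with ndim=2 the LASY expression
    ! {mod(ixghi^d,2)/=0|.or.} expands to
    !   mod(ixghi1,2)/=0 .or. mod(ixghi2,2)/=0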
    ixm^ll=ixg^ll^lsubnghostcells;
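    ! In 2D the line above expands (schematically) to
    !   ixmmin1=ixgmin1+nghostcells; ixmmax1=ixgmax1-nghostcells;
    !   ixmmin2=ixgmin2+nghostcells; ixmmax2=ixgmax2-nghostcells;
    ! i.e. the mesh range ixm^ll is the grid range ixg^ll shrunk by the ghost layers.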

    if ({nbufferx^d>(ixmhi^d-ixmlo^d+1)|.or.}) then
      write(unitterm,*) "nbufferx^D bigger than mesh size makes no sense."
      write(unitterm,*) "Decrease nbufferx or increase mesh size"
      call mpistop("")
    end if

    ! initialize dx arrays on finer (>1) levels
    do level=2,refine_max_level
      {dx(^d,level) = dx(^d,level-1) * half\} ! refine ratio 2
    end do
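    ! Worked example with hypothetical numbers: if dx(1,1)=0.1 and
    ! refine_max_level=3, this loop sets dx(1,2)=0.05 and dx(1,3)=0.025.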

    ! domain decomposition
    ! physical extent of a grid block at level 1, per dimension
    ^d&dg^d(1)=dx(^d,1)*dble(block_nx^d)\
    ! number of grid blocks at level 1 in simulation domain, per dimension
    ^d&ng^d(1)=nint((xprobmax^d-xprobmin^d)/dg^d(1))\
    ! total number of grid blocks at level 1
    nglev1={ng^d(1)*}
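    ! (a reading aid: in 2D the product pattern {ng^d(1)*} expands to
    !  nglev1=ng1(1)*ng2(1))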

    do level=2,refine_max_level
      dg^d(level)=half*dg^d(level-1);
      ng^d(level)=ng^d(level-1)*2;
    end do

    ! check that level-1 blocks of the given size exactly tile the domain
    ok=({(abs(dble(ng^d(1))*dg^d(1)-(xprobmax^d-xprobmin^d))<=smalldouble)|.and.})
    if (.not.ok) then
      write(unitterm,*)"domain cannot be divided by meshes of given gridsize"
      call mpistop("domain cannot be divided by meshes of given gridsize")
    end if
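    ! Worked example with hypothetical numbers: for xprobmin1=0.0, xprobmax1=1.0,
    ! domain_nx1=48 and block_nx1=16, level 1 has dx=1/48, so dg1(1)=16/48=1/3
    ! and ng1(1)=3; three blocks tile the domain exactly and ok is .true.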

    poleb=.false.
    if (.not.slab) call set_pole

    ! the number of level-1 grid blocks along any dimension without a pole or
    ! periodic boundary must be larger than 1 for a rectangular AMR mesh
    if(({ng^d(1)/=1|.or.}).and.refine_max_level>1) then
      {
      if(ng^d(1)==1.and..not.poleb(1,^d).and.&
         .not.poleb(2,^d).and..not.periodb(^d).and..not.aperiodb(^d)) then
        write(unitterm,"(a,i2,a)") "number of grid blocks at level 1 in dimension",^d,&
           " must be larger than 1 for a rectangular AMR mesh!"
        write(unitterm,"(a,i1)") "increase domain_nx",^d
        call mpistop("")
      end if
      \}
    end if

    ! initialize connectivity data
    igridstail=0

    ! allocate memory for forest data structures
    allocate(level_head(nlevelshi),level_tail(nlevelshi))
    do level=1,refine_max_level
      nullify(level_head(level)%node,level_tail(level)%node)
    end do

    allocate(igrid_to_node(max_blocks,0:npe-1))
    do ipe=0,npe-1
      do igrid=1,max_blocks
        nullify(igrid_to_node(igrid,ipe)%node)
      end do
    end do

    allocate(sfc(1:3,max_blocks*npe))

    allocate(igrid_to_sfc(max_blocks))

    sfc=0
    allocate(morton_start(0:npe-1),morton_stop(0:npe-1))
    allocate(morton_sub_start(0:npe-1),morton_sub_stop(0:npe-1))

    allocate(nleafs_level(1:nlevelshi))

    allocate(coarsen(max_blocks,0:npe-1),refine(max_blocks,0:npe-1))
    coarsen=.false.
    refine=.false.
    if ({nbufferx^d/=0|.or.}) then
      allocate(buffer(max_blocks,0:npe-1))
      buffer=.false.
    end if
    allocate(igrid_inuse(max_blocks,0:npe-1))
    igrid_inuse=.false.

    allocate(tree_root(1:ng^d(1)))
    {do ig^db=1,ng^db(1)\}
      nullify(tree_root(ig^d)%node)
    {end do\}
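    ! A reading aid, not upstream code: in 2D the {do ...\} pattern above
    ! unrolls to a nested loop over the level-1 block indices,
    !   do ig2=1,ng2(1)
    !     do ig1=1,ng1(1)
    !       nullify(tree_root(ig1,ig2)%node)
    !     end do
    !   end do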

    ! define index ranges and MPI send/receive derived datatype for ghost-cell swap
    call init_bc()
    type_send_srl=>type_send_srl_f
    type_recv_srl=>type_recv_srl_f
    type_send_r=>type_send_r_f
    type_recv_r=>type_recv_r_f
    type_send_p=>type_send_p_f
    type_recv_p=>type_recv_p_f
    call create_bc_mpi_datatype(iwstart,nwgc)

  end subroutine initialize_vars

end module mod_initialize