MPI-AMRVAC 3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_comm_lib.t
module mod_comm_lib

  implicit none
  private


  public :: comm_start
  public :: comm_finalize
  public :: init_comm_types
  public :: mpistop

contains


  !> Initialize the MPI environment
  subroutine comm_start
    use mod_global_parameters

    integer(kind=MPI_ADDRESS_KIND) :: lb
    integer(kind=MPI_ADDRESS_KIND) :: sizes

    ! Initialize MPI
    call mpi_init(ierrmpi)

    ! Each process stores its rank, which ranges from 0 to N-1, where N is the
    ! number of processes.
    call mpi_comm_rank(mpi_comm_world,mype,ierrmpi)

    ! Store the number of processes
    call mpi_comm_size(mpi_comm_world,npe,ierrmpi)

    ! Use the default communicator, which contains all the processes
    icomm = mpi_comm_world

    ! Check that the MPI type extents match the real/double/integer/logical sizes assumed by AMRVAC
    call mpi_type_get_extent(mpi_real,lb,sizes,ierrmpi)
    if (sizes /= size_real) call mpistop("Incompatible real size")
    call mpi_type_get_extent(mpi_double_precision,lb,sizes,ierrmpi)
    if (sizes /= size_double) call mpistop("Incompatible double size")
    call mpi_type_get_extent(mpi_integer,lb,sizes,ierrmpi)
    if (sizes /= size_int) call mpistop("Incompatible integer size")
    call mpi_type_get_extent(mpi_logical,lb,sizes,ierrmpi)
    if (sizes /= size_logical) call mpistop("Incompatible logical size")

  end subroutine comm_start

  !> Finalize (or shutdown) the MPI environment
  subroutine comm_finalize
    use mod_global_parameters

    call mpi_barrier(mpi_comm_world,ierrmpi)
    call mpi_finalize(ierrmpi)

  end subroutine comm_finalize

  !> Create and store the MPI types that will be used for parallel communication
  subroutine init_comm_types
    use mod_global_parameters

    integer, dimension(ndim+1) :: sizes, subsizes, start
    integer :: i^d, ic^d, nx^d, nxco^d, nxg^d, idir

    nx^d=ixmhi^d-ixmlo^d+1;
    nxg^d=ixghi^d-ixglo^d+1;
    nxco^d=nx^d/2;

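    ! type_block: full block including ghost cells; size_block records its size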
    ^d&sizes(^d)=ixghi^d;
    sizes(ndim+1)=nw
    ^d&subsizes(^d)=nxg^d;
    subsizes(ndim+1)=nw
    ^d&start(^d)=ixglo^d-1;
    start(ndim+1)=0
    call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
       mpi_order_fortran,mpi_double_precision, &
       type_block,ierrmpi)
    call mpi_type_commit(type_block,ierrmpi)
    size_block={nxg^d*}*nw*size_double

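    ! type_coarse_block: block coarsened by 2, used for the children blocks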
    ^d&sizes(^d)=ixghi^d/2+nghostcells;
    sizes(ndim+1)=nw
    ^d&subsizes(^d)=nxco^d;
    subsizes(ndim+1)=nw
    ^d&start(^d)=ixmlo^d-1;
    start(ndim+1)=0
    call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
       mpi_order_fortran,mpi_double_precision, &
       type_coarse_block,ierrmpi)
    call mpi_type_commit(type_coarse_block,ierrmpi)

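    ! Types for the staggered variables (1:nws), built only when stagger_grid is true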
    if(stagger_grid) then
      ^d&sizes(^d)=ixghi^d+1;
      sizes(ndim+1)=nws
      ^d&subsizes(^d)=nx^d+1;
      subsizes(ndim+1)=nws
      ^d&start(^d)=ixmlo^d-1;
      start(ndim+1)=0
      call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
         mpi_order_fortran,mpi_double_precision, &
         type_block_io_stg,ierrmpi)
      call mpi_type_commit(type_block_io_stg,ierrmpi)
      size_block_io_stg={(nx^d+1)*}*nws*size_double

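      ! type_coarse_block_stg: staggered block coarsened by 2, one type per direction and child block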
      ^d&sizes(^d)=ixghi^d/2+nghostcells+1;
      sizes(ndim+1)=nws
      {do ic^db=1,2\}
         do idir=1,ndim
           ^d&subsizes(^d)=nxco^d+kr(ic^d,1)*kr(idir,^d);
           subsizes(ndim+1)=1
           ^d&start(^d)=ixmlo^d-kr(ic^d,1)*kr(idir,^d);
           start(ndim+1)=idir-1

           call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
              mpi_order_fortran,mpi_double_precision, &
              type_coarse_block_stg(idir,ic^d),ierrmpi)
           call mpi_type_commit(type_coarse_block_stg(idir,ic^d),ierrmpi)
         end do
      {end do\}

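      ! type_sub_block_stg: staggered sub-block selected by child index ic^d, one type per direction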
      ^d&sizes(^d)=ixghi^d+1;
      sizes(ndim+1)=nws
      {do ic^db=1,2\}
        do idir=1,^nd
          ^d&subsizes(^d)=nxco^d+kr(ic^d,1)*kr(idir,^d);
          subsizes(ndim+1)=1
          ^d&start(^d)=ixmlo^d-kr(ic^d,1)*kr(idir,^d)+(ic^d-1)*nxco^d;
          start(ndim+1)=idir-1
          call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
             mpi_order_fortran,mpi_double_precision, &
             type_sub_block_stg(idir,ic^d),ierrmpi)
          call mpi_type_commit(type_sub_block_stg(idir,ic^d),ierrmpi)
        end do
      {end do\}
    end if

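    ! type_sub_block: sub-block of the nw cell-centred variables selected by child index ic^d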
    ^d&sizes(^d)=ixghi^d;
    sizes(ndim+1)=nw
    {do ic^db=1,2\}
       ^d&subsizes(^d)=nxco^d;
       subsizes(ndim+1)=nw
       ^d&start(^d)=ixmlo^d-1+(ic^d-1)*nxco^d;
       start(ndim+1)=0
       call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
          mpi_order_fortran,mpi_double_precision, &
          type_sub_block(ic^d),ierrmpi)
       call mpi_type_commit(type_sub_block(ic^d),ierrmpi)
    {end do\}

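    ! type_block_io: block without ghost cells, used for I/O; size_block_io records its size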
    ^d&sizes(^d)=ixghi^d;
    sizes(ndim+1)=nw
    ^d&subsizes(^d)=nx^d;
    subsizes(ndim+1)=nw
    ^d&start(^d)=ixmlo^d-1;
    start(ndim+1)=0
    call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
       mpi_order_fortran,mpi_double_precision, &
       type_block_io,ierrmpi)
    call mpi_type_commit(type_block_io,ierrmpi)
    size_block_io={nx^d*}*nw*size_double

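    ! I/O types for coordinate (x) and variable (w, including nwauxio auxiliaries) arrays,
    ! on nx^d cell centres (..cc..) and nx^d+1 cell corners (..c..)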
    ^d&sizes(^d)=ixmhi^d-ixmlo^d+1;
    sizes(ndim+1)=^nd
    ^d&subsizes(^d)=sizes(^d);
    subsizes(ndim+1)=^nd
    ^d&start(^d)=0;
    start(ndim+1)=0
    call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
       mpi_order_fortran,mpi_double_precision, &
       type_block_xcc_io,ierrmpi)
    call mpi_type_commit(type_block_xcc_io,ierrmpi)

    ^d&sizes(^d)=ixmhi^d-ixmlo^d+2;
    sizes(ndim+1)=^nd
    ^d&subsizes(^d)=sizes(^d);
    subsizes(ndim+1)=^nd
    ^d&start(^d)=0;
    start(ndim+1)=0
    call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
       mpi_order_fortran,mpi_double_precision, &
       type_block_xc_io,ierrmpi)
    call mpi_type_commit(type_block_xc_io,ierrmpi)

    ^d&sizes(^d)=ixmhi^d-ixmlo^d+1;
    sizes(ndim+1)=nw+nwauxio
    ^d&subsizes(^d)=sizes(^d);
    subsizes(ndim+1)=nw+nwauxio
    ^d&start(^d)=0;
    start(ndim+1)=0
    call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
       mpi_order_fortran,mpi_double_precision, &
       type_block_wcc_io,ierrmpi)
    call mpi_type_commit(type_block_wcc_io,ierrmpi)

    ^d&sizes(^d)=ixmhi^d-ixmlo^d+2;
    sizes(ndim+1)=nw+nwauxio
    ^d&subsizes(^d)=sizes(^d);
    subsizes(ndim+1)=nw+nwauxio
    ^d&start(^d)=0;
    start(ndim+1)=0
    call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
       mpi_order_fortran,mpi_double_precision, &
       type_block_wc_io,ierrmpi)
    call mpi_type_commit(type_block_wc_io,ierrmpi)

  end subroutine init_comm_types

  !> Exit MPI-AMRVAC with an error message
  subroutine mpistop(message)
    use mod_global_parameters

    character(len=*), intent(in) :: message !< The error message
    integer :: ierrcode

    write(*, *) "ERROR for processor", mype, ":"
    write(*, *) trim(message)

    call mpi_abort(icomm, ierrcode, ierrmpi)

  end subroutine mpistop

end module mod_comm_lib
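
For orientation, below is a minimal usage sketch of the public routines of this module, as they would be called from a driver program. The program name and the elided steps are illustrative only, not part of this module: comm_start must run before any other MPI activity, init_comm_types once the block extents and variable counts from mod_global_parameters are set, mpistop on a fatal error, and comm_finalize at the very end of the run.

  program example_driver
    use mod_comm_lib, only: comm_start, comm_finalize, init_comm_types, mpistop

    call comm_start          ! start MPI and set mype, npe and icomm

    ! ... read parameters and set up the grid so that block sizes are known ...
    call init_comm_types     ! build the MPI derived types used for communication and I/O

    ! ... advance the simulation; on a fatal error:
    ! call mpistop("description of the error")

    call comm_finalize       ! barrier, then shut down MPI
  end program example_driver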
subroutine, public init_comm_types
Create and store the MPI types that will be used for parallel communication.
subroutine, public comm_start
Initialize the MPI environment.
subroutine, public comm_finalize
Finalize (or shutdown) the MPI environment.
subroutine, public mpistop(message)
Exit MPI-AMRVAC with an error message.
module mod_global_parameters
This module contains definitions of global parameters and variables and some generic functions/subroutines.
integer type_coarse_block
MPI type for block coarsened by 2, and for its children blocks.
integer ixghi
Upper index of grid block arrays.
integer, dimension(3, 3) kr
Kronecker delta tensor.
integer, parameter ndim
Number of spatial dimensions for grid variables.
logical stagger_grid
True for using stagger grid.
integer, dimension(^nd, 2^d &) type_coarse_block_stg
MPI type for staggered block coarsened by 2, and for its children blocks.
integer icomm
The MPI communicator.
integer mype
The rank of the current MPI task.
integer ierrmpi
A global MPI error return code.
integer npe
The number of MPI tasks.
integer type_block
MPI type for block including ghost cells and its size.
integer nghostcells
Number of ghost cells surrounding a grid.
integer type_block_io_stg
MPI type for IO of staggered variables.
integer, parameter ixglo
Lower index of grid block arrays (always 1)