MPI-AMRVAC 3.0
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
comm_lib.t
1 !> Initialize the MPI environment
2 subroutine comm_start
3  use mod_global_parameters
4 
5  integer(kind=MPI_ADDRESS_KIND) :: lb
6  integer(kind=MPI_ADDRESS_KIND) :: sizes
7 
8  ! Initialize MPI
9  call mpi_init(ierrmpi)
10 
11  ! Each process stores its rank, which ranges from 0 to N-1, where N is the
12  ! number of processes.
13  call mpi_comm_rank(mpi_comm_world,mype,ierrmpi)
14 
15  ! Store the number of processes
16  call mpi_comm_size(mpi_comm_world,npe,ierrmpi)
17 
18  ! Use the default communicator, which contains all the processes
19  icomm = mpi_comm_world
20 
21  ! Check that the MPI type extents match the assumed real/double/integer/logical sizes
22  call mpi_type_get_extent(mpi_real,lb,sizes,ierrmpi)
23  if (sizes /= size_real) call mpistop("Incompatible real size")
24  call mpi_type_get_extent(mpi_double_precision,lb,sizes,ierrmpi)
25  if (sizes /= size_double) call mpistop("Incompatible double size")
26  call mpi_type_get_extent(mpi_integer,lb,sizes,ierrmpi)
27  if (sizes /= size_int) call mpistop("Incompatible integer size")
28  call mpi_type_get_extent(mpi_logical,lb,sizes,ierrmpi)
29  if (sizes /= size_logical) call mpistop("Incompatible logical size")
30 
31 end subroutine comm_start
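As a rough standalone sketch of the extent check above (not part of comm_lib.t: it uses the mpi module and the Fortran storage_size intrinsic in place of the size_double constant from mod_global_parameters):

program check_extent_sketch
  use mpi
  implicit none
  integer :: ierr
  integer(kind=MPI_ADDRESS_KIND) :: lb, extent

  call MPI_INIT(ierr)
  ! mpi_type_get_extent returns the extent in bytes; storage_size returns bits
  call MPI_TYPE_GET_EXTENT(MPI_DOUBLE_PRECISION, lb, extent, ierr)
  if (extent /= storage_size(1.0d0)/8) then
    write(*,*) "Incompatible double size:", extent
    call MPI_ABORT(MPI_COMM_WORLD, 1, ierr)
  end if
  call MPI_FINALIZE(ierr)
end program check_extent_sketch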
32 
33 !> Finalize (or shut down) the MPI environment
34 subroutine comm_finalize
35  use mod_global_parameters
36 
37  call mpi_barrier(mpi_comm_world,ierrmpi)
38  call mpi_finalize(ierrmpi)
39 
40 end subroutine comm_finalize
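A hedged sketch of how these two routines bracket a run; the actual MPI-AMRVAC main program does far more in between (reading parameters, building the grid, time stepping), so the body here is purely illustrative:

program driver_sketch
  use mod_global_parameters, only: mype, npe
  implicit none

  call comm_start          ! mpi_init, rank/size, icomm, type-size checks
  if (mype == 0) write(*,*) "running on", npe, "MPI processes"
  ! ... set up the problem and advance the solution ...
  call comm_finalize       ! barrier followed by mpi_finalize
end program driver_sketch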
41 
42 !> Create and store the MPI types that will be used for parallel communication
43 subroutine init_comm_types
44  use mod_global_parameters
45 
46  integer, dimension(ndim+1) :: sizes, subsizes, start
47  integer :: i^D, ic^D, nx^D, nxCo^D, nxG^D, idir
48 
49  nx^d=ixmhi^d-ixmlo^d+1;
50  nxg^d=ixghi^d-ixglo^d+1;
51  nxco^d=nx^d/2;
52 
53  ^d&sizes(^d)=ixghi^d;
54  sizes(ndim+1)=nw
55  ^d&subsizes(^d)=nxg^d;
56  subsizes(ndim+1)=nw
57  ^d&start(^d)=ixglo^d-1;
58  start(ndim+1)=0
59  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
60  mpi_order_fortran,mpi_double_precision, &
61  type_block,ierrmpi)
62  call mpi_type_commit(type_block,ierrmpi)
63  size_block={nxg^d*}*nw*size_double
64 
65  ^d&sizes(^d)=ixghi^d/2+nghostcells;
66  sizes(ndim+1)=nw
67  ^d&subsizes(^d)=nxco^d;
68  subsizes(ndim+1)=nw
69  ^d&start(^d)=ixmlo^d-1;
70  start(ndim+1)=0
71  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
72  mpi_order_fortran,mpi_double_precision, &
73  type_coarse_block,ierrmpi)
74  call mpi_type_commit(type_coarse_block,ierrmpi)
75 
76  if(stagger_grid) then
77  ^d&sizes(^d)=ixghi^d+1;
78  sizes(ndim+1)=nws
79  ^d&subsizes(^d)=nx^d+1;
80  subsizes(ndim+1)=nws
81  ^d&start(^d)=ixmlo^d-1;
82  start(ndim+1)=0
83  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
84  mpi_order_fortran,mpi_double_precision, &
85  type_block_io_stg,ierrmpi)
86  call mpi_type_commit(type_block_io_stg,ierrmpi)
87  size_block_io_stg={(nx^d+1)*}*nws*size_double
88 
89  ^d&sizes(^d)=ixghi^d/2+nghostcells+1;
90  sizes(ndim+1)=nws
91  {do ic^db=1,2\}
92  do idir=1,ndim
93  ^d&subsizes(^d)=nxco^d+kr(ic^d,1)*kr(idir,^d);
94  subsizes(ndim+1)=1
95  ^d&start(^d)=ixmlo^d-kr(ic^d,1)*kr(idir,^d);
96  start(ndim+1)=idir-1
97 
98  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
99  mpi_order_fortran,mpi_double_precision, &
100  type_coarse_block_stg(idir,ic^d),ierrmpi)
101  call mpi_type_commit(type_coarse_block_stg(idir,ic^d),ierrmpi)
102  end do
103  {end do\}
104 
105  ^d&sizes(^d)=ixghi^d+1;
106  sizes(ndim+1)=nws
107  {do ic^db=1,2\}
108  do idir=1,^nd
109  ^d&subsizes(^d)=nxco^d+kr(ic^d,1)*kr(idir,^d);
110  subsizes(ndim+1)=1
111  ^d&start(^d)=ixmlo^d-kr(ic^d,1)*kr(idir,^d)+(ic^d-1)*nxco^d;
112  start(ndim+1)=idir-1
113  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
114  mpi_order_fortran,mpi_double_precision, &
115  type_sub_block_stg(idir,ic^d),ierrmpi)
116  call mpi_type_commit(type_sub_block_stg(idir,ic^d),ierrmpi)
117  end do
118  {end do\}
119  end if
120 
121  ^d&sizes(^d)=ixghi^d;
122  sizes(ndim+1)=nw
123  {do ic^db=1,2\}
124  ^d&subsizes(^d)=nxco^d;
125  subsizes(ndim+1)=nw
126  ^d&start(^d)=ixmlo^d-1+(ic^d-1)*nxco^d;
127  start(ndim+1)=0
128  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
129  mpi_order_fortran,mpi_double_precision, &
130  type_sub_block(ic^d),ierrmpi)
131  call mpi_type_commit(type_sub_block(ic^d),ierrmpi)
132  {end do\}
133 
134  ^d&sizes(^d)=ixghi^d;
135  sizes(ndim+1)=nw
136  ^d&subsizes(^d)=nx^d;
137  subsizes(ndim+1)=nw
138  ^d&start(^d)=ixmlo^d-1;
139  start(ndim+1)=0
140  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
141  mpi_order_fortran,mpi_double_precision, &
142  type_block_io,ierrmpi)
143  call mpi_type_commit(type_block_io,ierrmpi)
144  size_block_io={nx^d*}*nw*size_double
145 
146  ^d&sizes(^d)=ixmhi^d-ixmlo^d+1;
147  sizes(ndim+1)=^nd
148  ^d&subsizes(^d)=sizes(^d);
149  subsizes(ndim+1)=^nd
150  ^d&start(^d)=0;
151  start(ndim+1)=0
152  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
153  mpi_order_fortran,mpi_double_precision, &
154  type_block_xcc_io,ierrmpi)
155  call mpi_type_commit(type_block_xcc_io,ierrmpi)
156 
157  ^d&sizes(^d)=ixmhi^d-ixmlo^d+2;
158  sizes(ndim+1)=^nd
159  ^d&subsizes(^d)=sizes(^d);
160  subsizes(ndim+1)=^nd
161  ^d&start(^d)=0;
162  start(ndim+1)=0
163  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
164  mpi_order_fortran,mpi_double_precision, &
165  type_block_xc_io,ierrmpi)
166  call mpi_type_commit(type_block_xc_io,ierrmpi)
167 
168  ^d&sizes(^d)=ixmhi^d-ixmlo^d+1;
169  sizes(ndim+1)=nw+nwauxio
170  ^d&subsizes(^d)=sizes(^d);
171  subsizes(ndim+1)=nw+nwauxio
172  ^d&start(^d)=0;
173  start(ndim+1)=0
174  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
175  mpi_order_fortran,mpi_double_precision, &
176  type_block_wcc_io,ierrmpi)
177  call mpi_type_commit(type_block_wcc_io,ierrmpi)
178 
179  ^d&sizes(^d)=ixmhi^d-ixmlo^d+2;
180  sizes(ndim+1)=nw+nwauxio
181  ^d&subsizes(^d)=sizes(^d);
182  subsizes(ndim+1)=nw+nwauxio
183  ^d&start(^d)=0;
184  start(ndim+1)=0
185  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
186  mpi_order_fortran,mpi_double_precision, &
187  type_block_wc_io,ierrmpi)
188  call mpi_type_commit(type_block_wc_io,ierrmpi)
189 
190 end subroutine init_comm_types
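Because the ^D and {...} constructs above are expanded by the AMRVAC preprocessor, the intent can be easier to see in plain Fortran. Below is a hypothetical 2D sketch (names like type_interior and the block sizes are illustrative, not AMRVAC's) that builds a type_block_io-style subarray type and sends a block interior, ghost cells excluded, with a single MPI call:

program subarray_sketch
  use mpi
  implicit none
  integer, parameter :: nx = 8, ny = 8, nghost = 2, nw = 3
  integer :: sizes(3), subsizes(3), start(3)
  integer :: type_interior, ierr, rank, nproc
  double precision :: w(1:nx+2*nghost, 1:ny+2*nghost, nw)

  call MPI_INIT(ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)

  sizes    = [nx+2*nghost, ny+2*nghost, nw]   ! full array, ghost cells included
  subsizes = [nx, ny, nw]                     ! interior only
  start    = [nghost, nghost, 0]              ! 0-based offsets of the interior
  call MPI_TYPE_CREATE_SUBARRAY(3, sizes, subsizes, start, &
       MPI_ORDER_FORTRAN, MPI_DOUBLE_PRECISION, type_interior, ierr)
  call MPI_TYPE_COMMIT(type_interior, ierr)

  w = rank   ! fill with something recognizable
  if (nproc >= 2) then
    ! count=1 of the committed type ships the whole interior in one call
    if (rank == 0) call MPI_SEND(w, 1, type_interior, 1, 0, MPI_COMM_WORLD, ierr)
    if (rank == 1) call MPI_RECV(w, 1, type_interior, 0, 0, MPI_COMM_WORLD, &
                                 MPI_STATUS_IGNORE, ierr)
  end if

  call MPI_TYPE_FREE(type_interior, ierr)
  call MPI_FINALIZE(ierr)
end program subarray_sketch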
191 
192 !> Exit MPI-AMRVAC with an error message
193 subroutine mpistop(message)
194  use mod_global_parameters
195 
196  character(len=*), intent(in) :: message !< The error message
197  integer :: ierrcode
198 
199  write(*, *) "ERROR for processor", mype, ":"
200  write(*, *) trim(message)
201 
202  call mpi_abort(icomm, ierrcode, ierrmpi)
203 
204 end subroutine mpistop
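A hypothetical usage sketch (the routine name and the check are invented for illustration): AMRVAC-style code calls mpistop wherever it has to abort all processes with a message, for example when an allocation fails:

subroutine allocate_work(n)
  implicit none
  integer, intent(in) :: n
  double precision, allocatable :: work(:)
  integer :: stat

  allocate(work(n), stat=stat)
  ! mpistop prints the message with the rank and calls mpi_abort on icomm
  if (stat /= 0) call mpistop("allocate_work: could not allocate work array")
  ! ... use work ...
end subroutine allocate_work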
Declarations referenced by this file:

subroutine comm_start : Initialize the MPI environment. (Definition: comm_lib.t:3)
subroutine comm_finalize : Finalize (or shut down) the MPI environment. (Definition: comm_lib.t:35)
subroutine init_comm_types : Create and store the MPI types that will be used for parallel communication. (Definition: comm_lib.t:44)
subroutine mpistop(message) : Exit MPI-AMRVAC with an error message. (Definition: comm_lib.t:194)
module mod_global_parameters : This module contains definitions of global parameters and variables and some generic functions/subroutines.
integer icomm : The MPI communicator.
integer mype : The rank of the current MPI task.
integer npe : The number of MPI tasks.
integer ierrmpi : A global MPI error return code.
integer, parameter ndim : Number of spatial dimensions for grid variables.
integer nghostcells : Number of ghost cells surrounding a grid.
integer, parameter ixglo : Lower index of grid block arrays (always 1).
integer ixghi : Upper index of grid block arrays.
integer, dimension(3,3) kr : Kronecker delta tensor.
logical stagger_grid : True for using stagger grid.
integer type_block : MPI type for block including ghost cells and its size.
integer type_coarse_block : MPI type for block coarsened by 2, and for its children blocks.
integer, dimension(^nd,2^d&) type_coarse_block_stg : MPI type for staggered block coarsened by 2, and for its children blocks.
integer type_block_io_stg : MPI type for IO of staggered variables.