MPI-AMRVAC 2.2
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
comm_lib.t
1 !> \file
2 !> @todo Convert this file to a module
3 
4 !> Initialize the MPI environment
5 !> @todo Check for errors in return code
6 subroutine comm_start
7  use mod_global_parameters
8 
9  integer(kind=MPI_ADDRESS_KIND) :: lb
10  integer(kind=MPI_ADDRESS_KIND) :: sizes
11 
12  ! Initialize MPI
13  call mpi_init(ierrmpi)
14 
15  ! Each process stores its rank, which ranges from 0 to N-1, where N is the
16  ! number of processes.
17  call mpi_comm_rank(mpi_comm_world,mype,ierrmpi)
18 
19  ! Store the number of processes
20  call mpi_comm_size(mpi_comm_world,npe,ierrmpi)
21 
22  ! Use the default communicator, which contains all the processes
23  icomm = mpi_comm_world
24 
25  ! Check that MPI type extents match the assumed real/double/integer/logical sizes
26  call mpi_type_get_extent(mpi_real,lb,sizes,ierrmpi)
27  if (sizes /= size_real) call mpistop("Incompatible real size")
28  call mpi_type_get_extent(mpi_double_precision,lb,sizes,ierrmpi)
29  if (sizes /= size_double) call mpistop("Incompatible double size")
30  call mpi_type_get_extent(mpi_integer,lb,sizes,ierrmpi)
31  if (sizes /= size_int) call mpistop("Incompatible integer size")
32  call mpi_type_get_extent(mpi_logical,lb,sizes,ierrmpi)
33  if (sizes /= size_logical) call mpistop("Incompatible logical size")
34 
35 end subroutine comm_start
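The extent check above can also be tried outside AMRVAC. The following standalone sketch is not part of comm_lib.t (the program name and printed text are illustrative only); it issues the same MPI_TYPE_GET_EXTENT query through the standard mpi module:

program check_extents
  use mpi
  implicit none
  integer :: ierr, rank
  integer(kind=MPI_ADDRESS_KIND) :: lb, extent

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)

  ! MPI_Type_get_extent reports the lower bound and extent (in bytes)
  ! of an MPI datatype, here for double precision reals.
  call MPI_Type_get_extent(MPI_DOUBLE_PRECISION, lb, extent, ierr)
  if (rank == 0) write(*,*) 'extent of MPI_DOUBLE_PRECISION (bytes):', extent

  call MPI_Finalize(ierr)
end program check_extents

comm_start compares such extents against size_real, size_double, size_int and size_logical from mod_global_parameters and aborts via mpistop on a mismatch.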
36 
37 !> Finalize (or shut down) the MPI environment
38 subroutine comm_finalize
39  use mod_global_parameters
40 
41  call mpi_barrier(mpi_comm_world,ierrmpi)
42  call mpi_finalize(ierrmpi)
43 
44 end subroutine comm_finalize
45 
46 !> Create and store the MPI types that will be used for parallel communication
47 subroutine init_comm_types
48  use mod_global_parameters
49 
50  integer, dimension(ndim+1) :: sizes, subsizes, start
51  integer :: i^D, ic^D, nx^D, nxCo^D, nxG^D, idir
52 
53  nx^d=ixmhi^d-ixmlo^d+1;
54  nxg^d=ixghi^d-ixglo^d+1;
55  nxco^d=nx^d/2;
56 
57  ^d&sizes(^d)=ixghi^d;
58  sizes(ndim+1)=nw
59  ^d&subsizes(^d)=nxg^d;
60  subsizes(ndim+1)=nw
61  ^d&start(^d)=ixglo^d-1;
62  start(ndim+1)=0
63  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
64  mpi_order_fortran,mpi_double_precision, &
65  type_block,ierrmpi)
66  call mpi_type_commit(type_block,ierrmpi)
67  size_block={nxg^d*}*nw*size_double
68 
69  ^d&sizes(^d)=ixghi^d/2+nghostcells;
70  sizes(ndim+1)=nw
71  ^d&subsizes(^d)=nxco^d;
72  subsizes(ndim+1)=nw
73  ^d&start(^d)=ixmlo^d-1;
74  start(ndim+1)=0
75  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
76  mpi_order_fortran,mpi_double_precision, &
77  type_coarse_block,ierrmpi)
78  call mpi_type_commit(type_coarse_block,ierrmpi)
79 
80  if(stagger_grid) then
81  ^d&sizes(^d)=ixghi^d+1;
82  sizes(ndim+1)=nws
83  ^d&subsizes(^d)=nx^d+1;
84  subsizes(ndim+1)=nws
85  ^d&start(^d)=ixmlo^d-1;
86  start(ndim+1)=0
87  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
88  mpi_order_fortran,mpi_double_precision, &
89  type_block_io_stg,ierrmpi)
90  call mpi_type_commit(type_block_io_stg,ierrmpi)
91  size_block_io_stg={(nx^d+1)*}*nws*size_double
92 
93  ^d&sizes(^d)=ixghi^d/2+nghostcells+1;
94  sizes(ndim+1)=nws
95  {do ic^db=1,2\}
96  do idir=1,ndim
97  ^d&subsizes(^d)=nxco^d+kr(ic^d,1)*kr(idir,^d);
98  subsizes(ndim+1)=1
99  ^d&start(^d)=ixmlo^d-kr(ic^d,1)*kr(idir,^d);
100  start(ndim+1)=idir-1
101 
102  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
103  mpi_order_fortran,mpi_double_precision, &
104  type_coarse_block_stg(idir,ic^d),ierrmpi)
105  call mpi_type_commit(type_coarse_block_stg(idir,ic^d),ierrmpi)
106  end do
107  {end do\}
108 
109  ^d&sizes(^d)=ixghi^d+1;
110  sizes(ndim+1)=nws
111  {do ic^db=1,2\}
112  do idir=1,^nd
113  ^d&subsizes(^d)=nxco^d+kr(ic^d,1)*kr(idir,^d);
114  subsizes(ndim+1)=1
115  ^d&start(^d)=ixmlo^d-kr(ic^d,1)*kr(idir,^d)+(ic^d-1)*nxco^d;
116  start(ndim+1)=idir-1
117  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
118  mpi_order_fortran,mpi_double_precision, &
119  type_sub_block_stg(idir,ic^d),ierrmpi)
120  call mpi_type_commit(type_sub_block_stg(idir,ic^d),ierrmpi)
121  end do
122  {end do\}
123  end if
124 
125  ^d&sizes(^d)=ixghi^d;
126  sizes(ndim+1)=nw
127  {do ic^db=1,2\}
128  ^d&subsizes(^d)=nxco^d;
129  subsizes(ndim+1)=nw
130  ^d&start(^d)=ixmlo^d-1+(ic^d-1)*nxco^d;
131  start(ndim+1)=0
132  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
133  mpi_order_fortran,mpi_double_precision, &
134  type_sub_block(ic^d),ierrmpi)
135  call mpi_type_commit(type_sub_block(ic^d),ierrmpi)
136  {end do\}
137 
138  ^d&sizes(^d)=ixghi^d;
139  sizes(ndim+1)=nw
140  ^d&subsizes(^d)=nx^d;
141  subsizes(ndim+1)=nw
142  ^d&start(^d)=ixmlo^d-1;
143  start(ndim+1)=0
144  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
145  mpi_order_fortran,mpi_double_precision, &
146  type_block_io,ierrmpi)
147  call mpi_type_commit(type_block_io,ierrmpi)
148  size_block_io={nx^d*}*nw*size_double
149 
150  ^d&sizes(^d)=ixmhi^d-ixmlo^d+1;
151  sizes(ndim+1)=^nd
152  ^d&subsizes(^d)=sizes(^d);
153  subsizes(ndim+1)=^nd
154  ^d&start(^d)=0;
155  start(ndim+1)=0
156  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
157  mpi_order_fortran,mpi_double_precision, &
158  type_block_xcc_io,ierrmpi)
159  call mpi_type_commit(type_block_xcc_io,ierrmpi)
160 
161  ^d&sizes(^d)=ixmhi^d-ixmlo^d+2;
162  sizes(ndim+1)=^nd
163  ^d&subsizes(^d)=sizes(^d);
164  subsizes(ndim+1)=^nd
165  ^d&start(^d)=0;
166  start(ndim+1)=0
167  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
168  mpi_order_fortran,mpi_double_precision, &
169  type_block_xc_io,ierrmpi)
170  call mpi_type_commit(type_block_xc_io,ierrmpi)
171 
172  ^d&sizes(^d)=ixmhi^d-ixmlo^d+1;
173  sizes(ndim+1)=nw+nwauxio
174  ^d&subsizes(^d)=sizes(^d);
175  subsizes(ndim+1)=nw+nwauxio
176  ^d&start(^d)=0;
177  start(ndim+1)=0
178  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
179  mpi_order_fortran,mpi_double_precision, &
180  type_block_wcc_io,ierrmpi)
181  call mpi_type_commit(type_block_wcc_io,ierrmpi)
182 
183  ^d&sizes(^d)=ixmhi^d-ixmlo^d+2;
184  sizes(ndim+1)=nw+nwauxio
185  ^d&subsizes(^d)=sizes(^d);
186  subsizes(ndim+1)=nw+nwauxio
187  ^d&start(^d)=0;
188  start(ndim+1)=0
189  call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
190  mpi_order_fortran,mpi_double_precision, &
191  type_block_wc_io,ierrmpi)
192  call mpi_type_commit(type_block_wc_io,ierrmpi)
193 
194 end subroutine init_comm_types
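All of the derived types above are built with the same MPI_TYPE_CREATE_SUBARRAY pattern: sizes describes the full (ghost-cell padded) array, subsizes the region to be communicated or written, and start its zero-based offset. The standalone sketch below is not part of comm_lib.t, and the extents nx, nghost and nw are invented for illustration; it builds a type analogous to type_block_io (the physical cells of a ghost-padded block) and uses it to ship those cells in a single message without packing:

program subarray_demo
  use mpi
  implicit none
  integer, parameter :: nx = 8, nghost = 2, nw = 3
  integer, parameter :: nxg = nx + 2*nghost
  double precision :: w(nxg,nxg,nw), wrecv(nxg,nxg,nw)
  integer :: sizes(3), subsizes(3), start(3)
  integer :: type_inner, ierr, rank, npes

  call MPI_Init(ierr)
  call MPI_Comm_rank(MPI_COMM_WORLD, rank, ierr)
  call MPI_Comm_size(MPI_COMM_WORLD, npes, ierr)
  w = dble(rank)
  wrecv = -1.0d0

  sizes    = [nxg, nxg, nw]       ! full array, ghost cells included
  subsizes = [nx,  nx,  nw]       ! interior (physical) cells only
  start    = [nghost, nghost, 0]  ! 0-based offset of the interior region

  call MPI_Type_create_subarray(3, sizes, subsizes, start, &
       MPI_ORDER_FORTRAN, MPI_DOUBLE_PRECISION, type_inner, ierr)
  call MPI_Type_commit(type_inner, ierr)

  ! One item of type_inner addresses the nx*nx*nw interior values in place,
  ! so the interior of a block can be exchanged without manual packing.
  if (npes >= 2) then
     if (rank == 1) call MPI_Send(w, 1, type_inner, 0, 0, MPI_COMM_WORLD, ierr)
     if (rank == 0) call MPI_Recv(wrecv, 1, type_inner, 1, 0, MPI_COMM_WORLD, &
          MPI_STATUS_IGNORE, ierr)
  end if

  call MPI_Type_free(type_inner, ierr)
  call MPI_Finalize(ierr)
end program subarray_demo

Committed types of this kind are what init_comm_types stores in type_block, type_coarse_block, the type_*_io variables and, on staggered grids, the per-direction type_*_stg arrays.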
195 
196 !> Exit MPI-AMRVAC with an error message
197 subroutine mpistop(message)
198  use mod_global_parameters
199 
200  character(len=*), intent(in) :: message !< The error message
201  integer :: ierrcode
202 
203  write(*, *) "ERROR for processor", mype, ":"
204  write(*, *) trim(message)
205 
206  call mpi_abort(icomm, ierrcode, ierrmpi)
207 
208 end subroutine mpistop
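A typical call site for mpistop is a local consistency check that must bring down every rank, not only the one that detected the problem. The fragment below is hypothetical (check_time_step is not an AMRVAC routine) and assumes it is compiled together with comm_lib.t:

! Hypothetical helper, shown only to illustrate the calling convention.
subroutine check_time_step(dt)
  implicit none
  double precision, intent(in) :: dt
  ! mpistop prints the message together with the failing rank and then
  ! calls MPI_ABORT on the global communicator, terminating all processes.
  if (dt <= 0.0d0) call mpistop("Invalid time step: dt must be positive")
end subroutine check_time_step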
Referenced symbols:
mod_global_parameters - This module contains definitions of global parameters and variables and some generic functions/subrou...
integer type_block - MPI type for block including ghost cells and its size.
integer type_block_wc_io - MPI type for IO: cell corner (wc) or cell center (wcc) variables.
integer, dimension(^nd,2^d&) type_sub_block_stg
integer npe - The number of MPI tasks.
integer, parameter ixglo - Lower index of grid block arrays (always 1).
integer nghostcells - Number of ghost cells surrounding a grid.
integer ixghi - Upper index of grid block arrays.
integer type_block_io - MPI type for IO: block excluding ghost cells.
logical stagger_grid - True for using stagger grid.
integer ierrmpi - A global MPI error return code.
integer, dimension(3,3) kr - Kronecker delta tensor.
integer mype - The rank of the current MPI task.
subroutine comm_start - Initialize the MPI environment. (Definition: comm_lib.t:7)
subroutine init_comm_types - Create and store the MPI types that will be used for parallel communication. (Definition: comm_lib.t:48)
subroutine mpistop(message) - Exit MPI-AMRVAC with an error message. (Definition: comm_lib.t:198)
integer, parameter ndim - Number of spatial dimensions for grid variables.
integer, dimension(2^d&) type_sub_block
integer nwauxio - Number of auxiliary variables that are only included in output.
integer icomm - The MPI communicator.
integer type_block_io_stg - MPI type for IO of staggered variables.
integer, dimension(^nd,2^d&) type_coarse_block_stg - MPI type for staggered block coarsened by 2, and for its children blocks.
integer type_coarse_block - MPI type for block coarsened by 2, and for its children blocks.
subroutine comm_finalize - Finalize (or shut down) the MPI environment. (Definition: comm_lib.t:39)
integer type_block_xc_io - MPI type for IO: cell corner (xc) or cell center (xcc) coordinates.