5 integer(kind=MPI_ADDRESS_KIND) :: lb
6 integer(kind=MPI_ADDRESS_KIND) :: sizes
19 icomm = mpi_comm_world
22 call mpi_type_get_extent(mpi_real,lb,sizes,
ierrmpi)
23 if (sizes /= size_real)
call mpistop(
"Incompatible real size")
24 call mpi_type_get_extent(mpi_double_precision,lb,sizes,
ierrmpi)
25 if (sizes /= size_double)
call mpistop(
"Incompatible double size")
26 call mpi_type_get_extent(mpi_integer,lb,sizes,
ierrmpi)
27 if (sizes /= size_int)
call mpistop(
"Incompatible integer size")
28 call mpi_type_get_extent(mpi_logical,lb,sizes,
ierrmpi)
29 if (sizes /= size_logical)
call mpistop(
"Incompatible logical size")
37 call mpi_barrier(mpi_comm_world,
ierrmpi)
46 integer,
dimension(ndim+1) :: sizes, subsizes, start
47 integer :: i^D, ic^D, nx^D, nxCo^D, nxG^D, idir
49 nx^d=ixmhi^d-ixmlo^d+1;
55 ^d&subsizes(^d)=nxg^d;
57 ^d&start(^d)=
ixglo^d-1;
59 call mpi_type_create_subarray(
ndim+1,sizes,subsizes,start, &
60 mpi_order_fortran,mpi_double_precision, &
67 ^d&subsizes(^d)=nxco^d;
69 ^d&start(^d)=ixmlo^d-1;
71 call mpi_type_create_subarray(
ndim+1,sizes,subsizes,start, &
72 mpi_order_fortran,mpi_double_precision, &
77 ^d&sizes(^d)=
ixghi^d+1;
79 ^d&subsizes(^d)=nx^d+1;
81 ^d&start(^d)=ixmlo^d-1;
83 call mpi_type_create_subarray(
ndim+1,sizes,subsizes,start, &
84 mpi_order_fortran,mpi_double_precision, &
93 ^d&subsizes(^d)=nxco^d+
kr(ic^d,1)*
kr(idir,^d);
95 ^d&start(^d)=ixmlo^d-
kr(ic^d,1)*
kr(idir,^d);
98 call mpi_type_create_subarray(
ndim+1,sizes,subsizes,start, &
99 mpi_order_fortran,mpi_double_precision, &
105 ^d&sizes(^d)=ixghi^d+1;
109 ^d&subsizes(^d)=nxco^d+kr(ic^d,1)*kr(idir,^d);
111 ^d&start(^d)=ixmlo^d-kr(ic^d,1)*kr(idir,^d)+(ic^d-1)*nxco^d;
113 call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
114 mpi_order_fortran,mpi_double_precision, &
115 type_sub_block_stg(idir,ic^d),ierrmpi)
116 call mpi_type_commit(type_sub_block_stg(idir,ic^d),ierrmpi)
121 ^d&sizes(^d)=ixghi^d;
124 ^d&subsizes(^d)=nxco^d;
126 ^d&start(^d)=ixmlo^d-1+(ic^d-1)*nxco^d;
128 call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
129 mpi_order_fortran,mpi_double_precision, &
130 type_sub_block(ic^d),ierrmpi)
131 call mpi_type_commit(type_sub_block(ic^d),ierrmpi)
134 ^d&sizes(^d)=ixghi^d;
136 ^d&subsizes(^d)=nx^d;
138 ^d&start(^d)=ixmlo^d-1;
140 call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
141 mpi_order_fortran,mpi_double_precision, &
142 type_block_io,ierrmpi)
143 call mpi_type_commit(type_block_io,ierrmpi)
144 size_block_io={nx^d*}*nw*size_double
146 ^d&sizes(^d)=ixmhi^d-ixmlo^d+1;
148 ^d&subsizes(^d)=sizes(^d);
152 call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
153 mpi_order_fortran,mpi_double_precision, &
154 type_block_xcc_io,ierrmpi)
155 call mpi_type_commit(type_block_xcc_io,ierrmpi)
157 ^d&sizes(^d)=ixmhi^d-ixmlo^d+2;
159 ^d&subsizes(^d)=sizes(^d);
163 call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
164 mpi_order_fortran,mpi_double_precision, &
165 type_block_xc_io,ierrmpi)
166 call mpi_type_commit(type_block_xc_io,ierrmpi)
168 ^d&sizes(^d)=ixmhi^d-ixmlo^d+1;
169 sizes(ndim+1)=nw+nwauxio
170 ^d&subsizes(^d)=sizes(^d);
171 subsizes(ndim+1)=nw+nwauxio
174 call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
175 mpi_order_fortran,mpi_double_precision, &
176 type_block_wcc_io,ierrmpi)
177 call mpi_type_commit(type_block_wcc_io,ierrmpi)
179 ^d&sizes(^d)=ixmhi^d-ixmlo^d+2;
180 sizes(ndim+1)=nw+nwauxio
181 ^d&subsizes(^d)=sizes(^d);
182 subsizes(ndim+1)=nw+nwauxio
185 call mpi_type_create_subarray(ndim+1,sizes,subsizes,start, &
186 mpi_order_fortran,mpi_double_precision, &
187 type_block_wc_io,ierrmpi)
188 call mpi_type_commit(type_block_wc_io,ierrmpi)
196 character(len=*),
intent(in) :: message
199 write(*, *)
"ERROR for processor",
mype,
":"
200 write(*, *) trim(message)
subroutine mpistop(message)
Exit MPI-AMRVAC with an error message.
subroutine comm_start
Initialize the MPI environment.
subroutine init_comm_types
Create and store the MPI types that will be used for parallel communication.
subroutine comm_finalize
Finalize (or shutdown) the MPI environment.
This module contains definitions of global parameters and variables and some generic functions/subroutines.
integer type_coarse_block
MPI type for block coarsened by 2, and for its children blocks.
integer ixghi
Upper index of grid block arrays.
integer, dimension(3, 3) kr
Kronecker delta tensor.
integer, parameter ndim
Number of spatial dimensions for grid variables.
logical stagger_grid
True for using stagger grid.
integer, dimension(^nd, 2^d&) type_coarse_block_stg
MPI type for staggered block coarsened by 2, and for its children blocks.
integer icomm
The MPI communicator.
integer mype
The rank of the current MPI task.
integer ierrmpi
A global MPI error return code.
integer npe
The number of MPI tasks.
integer type_block
MPI type for block including ghost cells and its size.
integer nghostcells
Number of ghost cells surrounding a grid.
integer type_block_io_stg
MPI type for IO of staggered variables.
integer size_block_io_stg
integer, parameter ixglo
Lower index of grid block arrays (always 1).