MPI-AMRVAC  2.2
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_ghostcells_update.t
Go to the documentation of this file.
1 !> update ghost cells of all blocks including physical boundaries
3 
4  implicit none
5  ! Special buffer for pole copy
6  type wbuffer
7  double precision, dimension(:^D&,:), allocatable :: w
8  end type wbuffer
9 
10  ! A switch of update physical boundary or not
11  logical, public :: bcphys=.true.
12  integer :: ixm^l, ixcog^l, ixcom^l
13 
14  !> The number of interleaving sending buffers for ghost cells
15  integer, parameter :: npwbuf=2
16 
17  ! The first index goes from -1:2, where -1 is used when a block touches the
18  ! lower boundary, 1 when a block touches an upper boundary, and 0 a situation
19  ! away from boundary conditions, 2 when a block touched both lower and upper
20  ! boundary
21 
22  ! index ranges to send (S) to sibling blocks, receive (R) from sibling blocks
23  integer, dimension(-1:2,-1:1) :: ixs_srl_^l, ixr_srl_^l
24 
25  ! index ranges of staggered variables to send (S) to sibling blocks, receive (R) from sibling blocks
26  integer, dimension(^ND,-1:1) :: ixs_srl_stg_^l, ixr_srl_stg_^l
27 
28  ! index ranges to send (S) restricted (r) ghost cells to coarser blocks
29  integer, dimension(-1:1,-1:1) :: ixs_r_^l
30 
31  ! index ranges of staggered variables to send (S) restricted (r) ghost cells to coarser blocks
32  integer, dimension(^ND,-1:1) :: ixs_r_stg_^l
33 
34  ! index ranges to receive restriced ghost cells from finer blocks
35  integer, dimension(-1:1, 0:3) :: ixr_r_^l
36 
37  ! index ranges of staggered variables to receive restriced ghost cells from finer blocks
38  integer, dimension(^ND,0:3) :: ixr_r_stg_^l
39 
40  ! send prolongated (p) ghost cells to finer blocks, receive prolongated from coarser blocks
41  integer, dimension(-1:1, 0:3) :: ixs_p_^l, ixr_p_^l
42 
43  ! send prolongated (p) staggered ghost cells to finer blocks, receive prolongated from coarser blocks
44  integer, dimension(^ND,0:3) :: ixs_p_stg_^l, ixr_p_stg_^l
45 
46  ! number of MPI receive-send pairs, srl: same refinement level; r: restrict; p: prolong
48 
49  ! total size of buffer arrays
51 
52  ! record index position of buffer arrays
54 
55  ! count of times of send and receive
57 
58  ! total sizes = cell-center normal flux + stagger-grid flux of send and receive
59  integer, dimension(-1:1^D&) :: sizes_srl_send_total, sizes_srl_recv_total
60 
61  ! sizes of buffer arrays for stagger-grid variable
62  integer, dimension(^ND,-1:1^D&) :: sizes_srl_send_stg, sizes_srl_recv_stg
63 
64  integer, dimension(:), allocatable :: recvrequest_srl, sendrequest_srl
65  integer, dimension(:,:), allocatable :: recvstatus_srl, sendstatus_srl
66 
67  ! buffer arrays for send and receive of siblings, allocate in build_connectivity
68  double precision, dimension(:), allocatable :: recvbuffer_srl, sendbuffer_srl
69 
70  integer, dimension(:), allocatable :: recvrequest_r, sendrequest_r
71  integer, dimension(:,:), allocatable :: recvstatus_r, sendstatus_r
72 
73  ! buffer arrays for send and receive in restriction
74  double precision, dimension(:), allocatable :: recvbuffer_r, sendbuffer_r
75 
76  integer, dimension(:), allocatable :: recvrequest_p, sendrequest_p
77  integer, dimension(:,:), allocatable :: recvstatus_p, sendstatus_p
78 
79  ! buffer arrays for send and receive in prolongation
80  double precision, dimension(:), allocatable :: recvbuffer_p, sendbuffer_p
81 
82  ! sizes to allocate buffer arrays for send and receive for restriction
83  integer, dimension(-1:1^D&) :: sizes_r_send_total
84  integer, dimension(0:3^D&) :: sizes_r_recv_total
85  integer, dimension(^ND,-1:1^D&) :: sizes_r_send_stg
86  integer, dimension(^ND,0:3^D&) :: sizes_r_recv_stg
87 
88  ! sizes to allocate buffer arrays for send and receive for restriction
89  integer, dimension(0:3^D&) :: sizes_p_send_total, sizes_p_recv_total
90  integer, dimension(^ND,0:3^D&) :: sizes_p_send_stg, sizes_p_recv_stg
91 
92  ! There are two variants, _f indicates that all flux variables are filled,
93  ! whereas _p means that part of the variables is filled
94  ! Furthermore _r_ stands for restrict, _p_ for prolongation.
95  integer, dimension(-1:2^D&,-1:1^D&), target :: type_send_srl_f, type_recv_srl_f
96  integer, dimension(-1:1^D&,-1:1^D&), target :: type_send_r_f
97  integer, dimension(-1:1^D&, 0:3^D&), target :: type_recv_r_f, type_send_p_f, type_recv_p_f
98  integer, dimension(-1:2^D&,-1:1^D&), target :: type_send_srl_p1, type_recv_srl_p1
99  integer, dimension(-1:1^D&,-1:1^D&), target :: type_send_r_p1
100  integer, dimension(-1:1^D&, 0:3^D&), target :: type_recv_r_p1, type_send_p_p1, type_recv_p_p1
101  integer, dimension(-1:2^D&,-1:1^D&), target :: type_send_srl_p2, type_recv_srl_p2
102  integer, dimension(-1:1^D&,-1:1^D&), target :: type_send_r_p2
103  integer, dimension(-1:1^D&, 0:3^D&), target :: type_recv_r_p2, type_send_p_p2, type_recv_p_p2
104  integer, dimension(:^D&,:^D&), pointer :: type_send_srl, type_recv_srl, type_send_r
105  integer, dimension(:^D&,:^D&), pointer :: type_recv_r, type_send_p, type_recv_p
106 
107 contains
108 
109  subroutine init_bc()
111  use mod_physics, only: phys_req_diagonal
112 
113  integer :: nghostcellsCo, interpolation_order
114  integer :: nx^D, nxCo^D, ixG^L, i^D, ic^D, inc^D, idir
115 
116  ixg^l=ixg^ll;
117  ixm^l=ixg^l^lsubnghostcells;
118  ixcogmin^d=1;
119  !ixCoGmax^D=ixGmax^D/2+nghostcells;
120  ixcogmax^d=(ixghi^d-2*nghostcells)/2+2*nghostcells;
121 
122  ixcom^l=ixcog^l^lsubnghostcells;
123 
124  nx^d=ixmmax^d-ixmmin^d+1;
125  nxco^d=nx^d/2;
126 
127  select case (typeghostfill)
128  case ("copy")
129  interpolation_order=1
130  case ("linear")
131  interpolation_order=2
132  case default
133  write (unitterm,*) "Undefined typeghostfill ",typeghostfill
134  call mpistop("Undefined typeghostfill")
135  end select
136  nghostcellsco=int((nghostcells+1)/2)
137 
138  if (nghostcellsco+interpolation_order-1>nghostcells) then
139  call mpistop("interpolation order for prolongation in getbc too high")
140  end if
141 
142  ! (iib,i) index has following meanings: iib = 0 means it is not at any physical boundary
143  ! iib=-1 means it is at the minimum side of a physical boundary
144  ! iib= 1 means it is at the maximum side of a physical boundary
145  ! i=-1 means subregion prepared for the neighbor at its minimum side
146  ! i= 1 means subregion prepared for the neighbor at its maximum side
147  {
148  ixs_srl_min^d(:,-1)=ixmmin^d
149  ixs_srl_min^d(:, 0)=ixmmin^d
150  ixs_srl_min^d(:, 1)=ixmmax^d+1-nghostcells
151  ixs_srl_max^d(:,-1)=ixmmin^d-1+nghostcells
152  ixs_srl_max^d(:, 0)=ixmmax^d
153  ixs_srl_max^d(:, 1)=ixmmax^d
154 
155  ixr_srl_min^d(:,-1)=1
156  ixr_srl_min^d(:, 0)=ixmmin^d
157  ixr_srl_min^d(:, 1)=ixmmax^d+1
158  ixr_srl_max^d(:,-1)=nghostcells
159  ixr_srl_max^d(:, 0)=ixmmax^d
160  ixr_srl_max^d(:, 1)=ixgmax^d
161 
162  ixs_r_min^d(:,-1)=ixcommin^d
163  ixs_r_min^d(:, 0)=ixcommin^d
164  ixs_r_min^d(:, 1)=ixcommax^d+1-nghostcells
165  ixs_r_max^d(:,-1)=ixcommin^d-1+nghostcells
166  ixs_r_max^d(:, 0)=ixcommax^d
167  ixs_r_max^d(:, 1)=ixcommax^d
168 
169  ixr_r_min^d(:, 0)=1
170  ixr_r_min^d(:, 1)=ixmmin^d
171  ixr_r_min^d(:, 2)=ixmmin^d+nxco^d
172  ixr_r_min^d(:, 3)=ixmmax^d+1
173  ixr_r_max^d(:, 0)=nghostcells
174  ixr_r_max^d(:, 1)=ixmmin^d-1+nxco^d
175  ixr_r_max^d(:, 2)=ixmmax^d
176  ixr_r_max^d(:, 3)=ixgmax^d
177 
178  ixs_p_min^d(:, 0)=ixmmin^d-(interpolation_order-1)
179  ixs_p_min^d(:, 1)=ixmmin^d-(interpolation_order-1)
180  ixs_p_min^d(:, 2)=ixmmin^d+nxco^d-nghostcellsco-(interpolation_order-1)
181  ixs_p_min^d(:, 3)=ixmmax^d+1-nghostcellsco-(interpolation_order-1)
182  ixs_p_max^d(:, 0)=ixmmin^d-1+nghostcellsco+(interpolation_order-1)
183  ixs_p_max^d(:, 1)=ixmmin^d-1+nxco^d+nghostcellsco+(interpolation_order-1)
184  ixs_p_max^d(:, 2)=ixmmax^d+(interpolation_order-1)
185  ixs_p_max^d(:, 3)=ixmmax^d+(interpolation_order-1)
186 
187  if(.not.phys_req_diagonal) then
188  ! exclude ghost-cell region when diagonal cells are unknown
189  ixs_p_min^d(:, 0)=ixmmin^d
190  ixs_p_max^d(:, 3)=ixmmax^d
191  ixs_p_max^d(:, 1)=ixmmin^d-1+nxco^d+(interpolation_order-1)
192  ixs_p_min^d(:, 2)=ixmmin^d+nxco^d-(interpolation_order-1)
193  end if
194 
195  ixr_p_min^d(:, 0)=ixcommin^d-nghostcellsco-(interpolation_order-1)
196  ixr_p_min^d(:, 1)=ixcommin^d-(interpolation_order-1)
197  ixr_p_min^d(:, 2)=ixcommin^d-nghostcellsco-(interpolation_order-1)
198  ixr_p_min^d(:, 3)=ixcommax^d+1-(interpolation_order-1)
199  ixr_p_max^d(:, 0)=nghostcells+(interpolation_order-1)
200  ixr_p_max^d(:, 1)=ixcommax^d+nghostcellsco+(interpolation_order-1)
201  ixr_p_max^d(:, 2)=ixcommax^d+(interpolation_order-1)
202  ixr_p_max^d(:, 3)=ixcommax^d+nghostcellsco+(interpolation_order-1)
203 
204  if(.not.phys_req_diagonal) then
205  ixr_p_max^d(:, 0)=nghostcells
206  ixr_p_min^d(:, 3)=ixcommax^d+1
207  ixr_p_max^d(:, 1)=ixcommax^d+(interpolation_order-1)
208  ixr_p_min^d(:, 2)=ixcommin^d-(interpolation_order-1)
209  end if
210 
211  \}
212 
213  if (stagger_grid) then
214  allocate(pole_buf%ws(ixgs^t,nws))
215  ! Staggered (face-allocated) variables
216  do idir=1,ndim
217  { ixs_srl_stg_min^d(idir,-1)=ixmmin^d-kr(idir,^d)
218  ixs_srl_stg_max^d(idir,-1)=ixmmin^d-1+nghostcells
219  ixs_srl_stg_min^d(idir,0) =ixmmin^d-kr(idir,^d)
220  ixs_srl_stg_max^d(idir,0) =ixmmax^d
221  ixs_srl_stg_min^d(idir,1) =ixmmax^d-nghostcells+1-kr(idir,^d)
222  ixs_srl_stg_max^d(idir,1) =ixmmax^d
223 
224  ixr_srl_stg_min^d(idir,-1)=1-kr(idir,^d)
225  ixr_srl_stg_max^d(idir,-1)=nghostcells
226  ixr_srl_stg_min^d(idir,0) =ixmmin^d-kr(idir,^d)
227  ixr_srl_stg_max^d(idir,0) =ixmmax^d
228  ixr_srl_stg_min^d(idir,1) =ixmmax^d+1-kr(idir,^d)
229  ixr_srl_stg_max^d(idir,1) =ixgmax^d
230 
231  ixs_r_stg_min^d(idir,-1)=ixcommin^d-kr(idir,^d)
232  ixs_r_stg_max^d(idir,-1)=ixcommin^d-1+nghostcells
233  ixs_r_stg_min^d(idir,0) =ixcommin^d-kr(idir,^d)
234  ixs_r_stg_max^d(idir,0) =ixcommax^d
235  ixs_r_stg_min^d(idir,1) =ixcommax^d+1-nghostcells-kr(idir,^d)
236  ixs_r_stg_max^d(idir,1) =ixcommax^d
237 
238  ixr_r_stg_min^d(idir,0)=1-kr(idir,^d)
239  ixr_r_stg_max^d(idir,0)=nghostcells
240  ixr_r_stg_min^d(idir,1)=ixmmin^d-kr(idir,^d)
241  ixr_r_stg_max^d(idir,1)=ixmmin^d-1+nxco^d
242  ixr_r_stg_min^d(idir,2)=ixmmin^d+nxco^d-kr(idir,^d)
243  ixr_r_stg_max^d(idir,2)=ixmmax^d
244  ixr_r_stg_min^d(idir,3)=ixmmax^d+1-kr(idir,^d)
245  ixr_r_stg_max^d(idir,3)=ixgmax^d
246  \}
247  {if (idir==^d) then
248  ! Parallel components
249  {
250  ixs_p_stg_min^d(idir,0)=ixmmin^d-1 ! -1 to make redundant
251  ixs_p_stg_max^d(idir,0)=ixmmin^d-1+nghostcellsco
252  ixs_p_stg_min^d(idir,1)=ixmmin^d-1 ! -1 to make redundant
253  ixs_p_stg_max^d(idir,1)=ixmmin^d-1+nxco^d+nghostcellsco
254  ixs_p_stg_min^d(idir,2)=ixmmax^d-nxco^d-nghostcellsco
255  ixs_p_stg_max^d(idir,2)=ixmmax^d
256  ixs_p_stg_min^d(idir,3)=ixmmax^d-nghostcellsco
257  ixs_p_stg_max^d(idir,3)=ixmmax^d
258 
259  ixr_p_stg_min^d(idir,0)=ixcommin^d-1-nghostcellsco
260  ixr_p_stg_max^d(idir,0)=ixcommin^d-1
261  ixr_p_stg_min^d(idir,1)=ixcommin^d-1 ! -1 to make redundant
262  ixr_p_stg_max^d(idir,1)=ixcommax^d+nghostcellsco
263  ixr_p_stg_min^d(idir,2)=ixcommin^d-1-nghostcellsco
264  ixr_p_stg_max^d(idir,2)=ixcommax^d
265  ixr_p_stg_min^d(idir,3)=ixcommax^d+1-1 ! -1 to make redundant
266  ixr_p_stg_max^d(idir,3)=ixcommax^d+nghostcellsco
267  \}
268  else
269  {
270  ! Perpendicular component
271  ixs_p_stg_min^d(idir,0)=ixmmin^d
272  ixs_p_stg_max^d(idir,0)=ixmmin^d-1+nghostcellsco+(interpolation_order-1)
273  ixs_p_stg_min^d(idir,1)=ixmmin^d
274  ixs_p_stg_max^d(idir,1)=ixmmin^d-1+nxco^d+nghostcellsco+(interpolation_order-1)
275  ixs_p_stg_min^d(idir,2)=ixmmax^d+1-nxco^d-nghostcellsco-(interpolation_order-1)
276  ixs_p_stg_max^d(idir,2)=ixmmax^d
277  ixs_p_stg_min^d(idir,3)=ixmmax^d+1-nghostcellsco-(interpolation_order-1)
278  ixs_p_stg_max^d(idir,3)=ixmmax^d
279 
280  ixr_p_stg_min^d(idir,0)=ixcommin^d-nghostcellsco-(interpolation_order-1)
281  ixr_p_stg_max^d(idir,0)=ixcommin^d-1
282  ixr_p_stg_min^d(idir,1)=ixcommin^d
283  ixr_p_stg_max^d(idir,1)=ixcommax^d+nghostcellsco+(interpolation_order-1)
284  ixr_p_stg_min^d(idir,2)=ixcommin^d-nghostcellsco-(interpolation_order-1)
285  ixr_p_stg_max^d(idir,2)=ixcommax^d
286  ixr_p_stg_min^d(idir,3)=ixcommax^d+1
287  ixr_p_stg_max^d(idir,3)=ixcommax^d+nghostcellsco+(interpolation_order-1)
288  \}
289  end if
290  }
291  end do
292  ! calculate sizes for buffer arrays for siblings
293  {do i^db=-1,1\}
294  ! Staggered (face-allocated) variables
295  do idir=1,ndim
296  sizes_srl_send_stg(idir,i^d)={(ixs_srl_stg_max^d(idir,i^d)-ixs_srl_stg_min^d(idir,i^d)+1)|*}
297  sizes_srl_recv_stg(idir,i^d)={(ixr_srl_stg_max^d(idir,i^d)-ixr_srl_stg_min^d(idir,i^d)+1)|*}
298  sizes_r_send_stg(idir,i^d)={(ixs_r_stg_max^d(idir,i^d)-ixs_r_stg_min^d(idir,i^d)+1)|*}
299  end do
302  sizes_r_send_total(i^d)=sum(sizes_r_send_stg(:,i^d))
303  {end do\}
304 
305  {do i^db=0,3\}
306  ! Staggered (face-allocated) variables
307  do idir=1,ndim
308  sizes_r_recv_stg(idir,i^d)={(ixr_r_stg_max^d(idir,i^d)-ixr_r_stg_min^d(idir,i^d)+1)|*}
309  sizes_p_send_stg(idir,i^d)={(ixs_p_stg_max^d(idir,i^d)-ixs_p_stg_min^d(idir,i^d)+1)|*}
310  sizes_p_recv_stg(idir,i^d)={(ixr_p_stg_max^d(idir,i^d)-ixr_p_stg_min^d(idir,i^d)+1)|*}
311  end do
312  sizes_r_recv_total(i^d)=sum(sizes_r_recv_stg(:,i^d))
313  sizes_p_send_total(i^d)=sum(sizes_p_send_stg(:,i^d))
314  sizes_p_recv_total(i^d)=sum(sizes_p_recv_stg(:,i^d))
315  {end do\}
316  else
317  ! extend index range to physical boundary
318  {
319  ixs_srl_min^d(-1,0)=1
320  ixs_srl_min^d( 1,0)=ixmmin^d
321  ixs_srl_min^d( 2,0)=1
322  ixs_srl_max^d(-1,0)=ixmmax^d
323  ixs_srl_max^d( 1,0)=ixgmax^d
324  ixs_srl_max^d( 2,0)=ixgmax^d
325 
326  ixr_srl_min^d(-1,0)=1
327  ixr_srl_min^d( 1,0)=ixmmin^d
328  ixr_srl_min^d( 2,0)=1
329  ixr_srl_max^d(-1,0)=ixmmax^d
330  ixr_srl_max^d( 1,0)=ixgmax^d
331  ixr_srl_max^d( 2,0)=ixgmax^d
332 
333  ixs_r_min^d(-1,0)=1
334  ixs_r_min^d( 1,0)=ixcommin^d
335  ixs_r_max^d(-1,0)=ixcommax^d
336  ixs_r_max^d( 1,0)=ixcogmax^d
337 
338  ixr_r_min^d(-1,1)=1
339  ixr_r_max^d(-1,1)=ixmmin^d-1+nxco^d
340  ixr_r_min^d( 1,2)=ixmmin^d+nxco^d
341  ixr_r_max^d( 1,2)=ixgmax^d
342 
343  ixs_p_min^d(-1,1)=1
344  ixs_p_max^d( 1,2)=ixgmax^d
345 
346  ixr_p_min^d(-1,1)=1
347  ixr_p_max^d( 1,2)=ixcogmax^d
348  \}
349  end if
350 
351  end subroutine init_bc
352 
353  subroutine create_bc_mpi_datatype(nwstart,nwbc)
355 
356  integer, intent(in) :: nwstart, nwbc
357  integer :: i^D, ic^D, inc^D, iib^D
358 
359  {do i^db=-1,1\}
360  if (i^d==0|.and.) cycle
361  {do iib^db=-1,2\}
362  call get_bc_comm_type(type_send_srl(iib^d,i^d),ixs_srl_^l(iib^d,i^d),ixg^ll,nwstart,nwbc)
363  call get_bc_comm_type(type_recv_srl(iib^d,i^d),ixr_srl_^l(iib^d,i^d),ixg^ll,nwstart,nwbc)
364  if (iib^d==2|.or.) cycle
365  call get_bc_comm_type(type_send_r(iib^d,i^d), ixs_r_^l(iib^d,i^d),ixcog^l,nwstart,nwbc)
366  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
367  inc^db=2*i^db+ic^db\}
368  call get_bc_comm_type(type_recv_r(iib^d,inc^d),ixr_r_^l(iib^d,inc^d), ixg^ll,nwstart,nwbc)
369  call get_bc_comm_type(type_send_p(iib^d,inc^d),ixs_p_^l(iib^d,inc^d), ixg^ll,nwstart,nwbc)
370  call get_bc_comm_type(type_recv_p(iib^d,inc^d),ixr_p_^l(iib^d,inc^d),ixcog^l,nwstart,nwbc)
371  {end do\}
372  {end do\}
373  {end do\}
374 
375  end subroutine create_bc_mpi_datatype
376 
377  subroutine get_bc_comm_type(comm_type,ix^L,ixG^L,nwstart,nwbc)
379 
380  integer, intent(inout) :: comm_type
381  integer, intent(in) :: ix^L, ixG^L, nwstart, nwbc
382 
383  integer, dimension(ndim+1) :: fullsize, subsize, start
384 
385  ^d&fullsize(^d)=ixgmax^d;
386  fullsize(ndim+1)=nw
387  ^d&subsize(^d)=ixmax^d-ixmin^d+1;
388  subsize(ndim+1)=nwbc
389  ^d&start(^d)=ixmin^d-1;
390  start(ndim+1)=nwstart-1
391 
392  call mpi_type_create_subarray(ndim+1,fullsize,subsize,start,mpi_order_fortran, &
393  mpi_double_precision,comm_type,ierrmpi)
394  call mpi_type_commit(comm_type,ierrmpi)
395 
396  end subroutine get_bc_comm_type
397 
398  subroutine put_bc_comm_types()
400 
401  integer :: i^D, ic^D, inc^D, iib^D
402 
403  {do i^db=-1,1\}
404  if (i^d==0|.and.) cycle
405  {do iib^db=-1,2\}
406  call mpi_type_free(type_send_srl(iib^d,i^d),ierrmpi)
407  call mpi_type_free(type_recv_srl(iib^d,i^d),ierrmpi)
408  if (levmin==levmax) cycle
409  if (iib^d==2|.or.) cycle
410  call mpi_type_free(type_send_r(iib^d,i^d),ierrmpi)
411  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
412  inc^db=2*i^db+ic^db\}
413  call mpi_type_free(type_recv_r(iib^d,inc^d),ierrmpi)
414  call mpi_type_free(type_send_p(iib^d,inc^d),ierrmpi)
415  call mpi_type_free(type_recv_p(iib^d,inc^d),ierrmpi)
416  {end do\}
417  {end do\}
418  {end do\}
419 
420  end subroutine put_bc_comm_types
421 
422  !> do update ghost cells of all blocks including physical boundaries
423  subroutine getbc(time,qdt,psb,nwstart,nwbc,req_diag)
425  use mod_physics
426 
427  double precision, intent(in) :: time, qdt
428  type(state), target :: psb(max_blocks)
429  integer, intent(in) :: nwstart ! Fill from nwstart
430  integer, intent(in) :: nwbc ! Number of variables to fill
431  logical, intent(in), optional :: req_diag ! If false, skip diagonal ghost cells
432 
433  double precision :: time_bcin
434  integer :: my_neighbor_type, ipole, idims, iside, nwhead, nwtail
435  integer :: iigrid, igrid, ineighbor, ipe_neighbor
436  integer :: nrecvs, nsends, isizes
437  integer :: ixG^L, ixR^L, ixS^L, ixB^L, ixI^L, k^L
438  integer :: i^D, n_i^D, ic^D, inc^D, n_inc^D, iib^D, idir
439  ! store physical boundary indicating index
440  integer :: idphyb(ndim,max_blocks),bindex(ndim)
441  integer :: isend_buf(npwbuf), ipwbuf, nghostcellsco,iB
442  ! index pointer for buffer arrays as a start for a segment
443  integer :: ibuf_start, ibuf_next
444  ! shapes of reshape
445  integer, dimension(1) :: shapes
446  logical :: req_diagonal, NeedProlong(-1:1^d&)
447  type(wbuffer) :: pwbuf(npwbuf)
448 
449  ! Stretching grid parameters for coarsened block of the current block
450 
451  nwhead=nwstart
452  nwtail=nwstart+nwbc-1
453 
454  req_diagonal = .true.
455  if (present(req_diag)) req_diagonal = req_diag
456 
457  time_bcin=mpi_wtime()
458  ixg^l=ixg^ll;
459 
460  if (internalboundary) then
461  call getintbc(time,ixg^l)
462  end if
463  ! fill ghost cells in physical boundaries
464  if(bcphys.and. .not.stagger_grid) then
465  do iigrid=1,igridstail; igrid=igrids(iigrid);
466  if(.not.phyboundblock(igrid)) cycle
467  saveigrid=igrid
468  block=>psb(igrid)
469  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
470  do idims=1,ndim
471  ! to avoid using as yet unknown corner info in more than 1D, we
472  ! fill only interior mesh ranges of the ghost cell ranges at first,
473  ! and progressively enlarge the ranges to include corners later
474  {
475  kmin^d=merge(0, 1, idims==^d)
476  kmax^d=merge(0, 1, idims==^d)
477  ixbmin^d=ixgmin^d+kmin^d*nghostcells
478  ixbmax^d=ixgmax^d-kmax^d*nghostcells
479  \}
480  {^iftwod
481  if(idims > 1 .and. neighbor_type(-1,0,igrid)==neighbor_boundary) ixbmin1=ixgmin1
482  if(idims > 1 .and. neighbor_type( 1,0,igrid)==neighbor_boundary) ixbmax1=ixgmax1}
483  {^ifthreed
484  if(idims > 1 .and. neighbor_type(-1,0,0,igrid)==neighbor_boundary) ixbmin1=ixgmin1
485  if(idims > 1 .and. neighbor_type( 1,0,0,igrid)==neighbor_boundary) ixbmax1=ixgmax1
486  if(idims > 2 .and. neighbor_type(0,-1,0,igrid)==neighbor_boundary) ixbmin2=ixgmin2
487  if(idims > 2 .and. neighbor_type(0, 1,0,igrid)==neighbor_boundary) ixbmax2=ixgmax2}
488  do iside=1,2
489  i^d=kr(^d,idims)*(2*iside-3);
490  if (aperiodb(idims)) then
491  if (neighbor_type(i^d,igrid) /= neighbor_boundary .and. &
492  .not. psb(igrid)%is_physical_boundary(2*idims-2+iside)) cycle
493  else
494  if (neighbor_type(i^d,igrid) /= neighbor_boundary) cycle
495  end if
496  call bc_phys(iside,idims,time,qdt,psb(igrid),ixg^l,ixb^l)
497  end do
498  end do
499  end do
500  end if
501 
502  ! default : no singular axis
503  ipole=0
504 
505  irecv=0
506  isend=0
507  isend_buf=0
508  ipwbuf=1
509  ! total number of times to call MPI_IRECV in each processor between sibling blocks or from finer neighbors
510  nrecvs=nrecv_bc_srl+nrecv_bc_r
511  ! total number of times to call MPI_ISEND in each processor between sibling blocks or to coarser neighors
512  nsends=nsend_bc_srl+nsend_bc_r
513 
514  allocate(recvstatus(mpi_status_size,nrecvs),recvrequest(nrecvs))
515  recvrequest=mpi_request_null
516 
517  allocate(sendstatus(mpi_status_size,nsends),sendrequest(nsends))
518  sendrequest=mpi_request_null
519 
520  if(stagger_grid) then
521  ibuf_recv_srl=1
522  ibuf_recv_r=1
523  ibuf_recv_p=1
524  ibuf_send_srl=1
525  ibuf_send_r=1
526  ibuf_send_p=1
527  irecv_srl=0
528  irecv_r=0
529  irecv_p=0
530  isend_srl=0
531  isend_r=0
532  isend_p=0
533  end if
534  ! receiving ghost-cell values from sibling blocks and finer neighbors
535  do iigrid=1,igridstail; igrid=igrids(iigrid);
536  saveigrid=igrid
537  call identifyphysbound(ps(igrid),iib^d)
538  ^d&idphyb(^d,igrid)=iib^d;
539  {do i^db=-1,1\}
540  if (skip_direction([ i^d ])) cycle
541  my_neighbor_type=neighbor_type(i^d,igrid)
542  select case (my_neighbor_type)
543  case (neighbor_sibling)
544  call bc_recv_srl
545  case (neighbor_fine)
546  call bc_recv_restrict
547  end select
548  {end do\}
549  end do
550 
551  ! sending ghost-cell values to sibling blocks and coarser neighbors
552  nghostcellsco=ceiling(nghostcells*0.5d0)
553  do iigrid=1,igridstail; igrid=igrids(iigrid);
554  saveigrid=igrid
555  block=>psb(igrid)
556 
557  ! Used stored data to identify physical boundaries
558  ^d&iib^d=idphyb(^d,igrid);
559 
560  if (any(neighbor_type(:^d&,igrid)==neighbor_coarse)) then
561  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
562  {#IFDEF EVOLVINGBOUNDARY
563  if(phyboundblock(igrid)) then
564  ! coarsen finer ghost cells at physical boundaries
565  ixcommin^d=ixcogmin^d+nghostcellsco;
566  ixcommax^d=ixcogmax^d-nghostcellsco;
567  ixmmin^d=ixgmin^d+(nghostcellsco-1);
568  ixmmax^d=ixgmax^d-(nghostcellsco-1);
569  else
570  ixcom^l=ixcog^l^lsubnghostcells;
571  ixm^l=ixg^l^lsubnghostcells;
572  end if
573  }
574  call coarsen_grid(psb(igrid),ixg^l,ixm^l,psc(igrid),ixcog^l,ixcom^l)
575  end if
576 
577  {do i^db=-1,1\}
578  if (skip_direction([ i^d ])) cycle
579  if (phi_ > 0) ipole=neighbor_pole(i^d,igrid)
580  my_neighbor_type=neighbor_type(i^d,igrid)
581  select case (my_neighbor_type)
582  case (neighbor_sibling)
583  call bc_send_srl
584  case (neighbor_coarse)
585  call bc_send_restrict
586  end select
587  {end do\}
588  end do
589 
590  !if (irecv/=nrecvs) then
591  ! call mpistop("number of recvs in phase1 in amr_ghostcells is incorrect")
592  !end if
593  !if (isend/=nsends) then
594  ! call mpistop("number of sends in phase1 in amr_ghostcells is incorrect")
595  !end if
596 
597  call mpi_waitall(irecv,recvrequest,recvstatus,ierrmpi)
598  call mpi_waitall(isend,sendrequest,sendstatus,ierrmpi)
599 
600  if(stagger_grid) then
603  call mpi_waitall(nrecv_bc_r,recvrequest_r,recvstatus_r,ierrmpi)
604  call mpi_waitall(nsend_bc_r,sendrequest_r,sendstatus_r,ierrmpi)
605  ! unpack the received data to fill ghost cells
606  ibuf_recv_srl=1
607  ibuf_recv_r=1
608  do iigrid=1,igridstail; igrid=igrids(iigrid);
609  ^d&iib^d=idphyb(^d,igrid);
610  {do i^db=-1,1\}
611  if (skip_direction([ i^d ])) cycle
612  select case (neighbor_type(i^d,igrid))
613  case (neighbor_sibling)
614  call bc_fill_srl
615  case (neighbor_fine)
616  call bc_fill_r
617  end select
618  {end do\}
619  end do
620  end if
621 
622  do ipwbuf=1,npwbuf
623  if (isend_buf(ipwbuf)/=0) deallocate(pwbuf(ipwbuf)%w)
624  end do
625  deallocate(recvstatus,recvrequest)
626  deallocate(sendstatus,sendrequest)
627 
628  irecv=0
629  isend=0
630  isend_buf=0
631  ipwbuf=1
632  ! total number of times to call MPI_IRECV in each processor from coarser neighbors
633  nrecvs=nrecv_bc_p
634  ! total number of times to call MPI_ISEND in each processor to finer neighbors
635  nsends=nsend_bc_p
636 
637  allocate(recvstatus(mpi_status_size,nrecvs),recvrequest(nrecvs))
638  recvrequest=mpi_request_null
639  allocate(sendstatus(mpi_status_size,nsends),sendrequest(nsends))
640  sendrequest=mpi_request_null
641 
642  ! receiving ghost-cell values from coarser neighbors
643  do iigrid=1,igridstail; igrid=igrids(iigrid);
644  saveigrid=igrid
645  ^d&iib^d=idphyb(^d,igrid);
646  {do i^db=-1,1\}
647  if (skip_direction([ i^d ])) cycle
648  my_neighbor_type=neighbor_type(i^d,igrid)
649  if (my_neighbor_type==neighbor_coarse) call bc_recv_prolong
650  {end do\}
651  end do
652  ! sending ghost-cell values to finer neighbors
653  do iigrid=1,igridstail; igrid=igrids(iigrid);
654  saveigrid=igrid
655  block=>psb(igrid)
656  ^d&iib^d=idphyb(^d,igrid);
657  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
658  if (any(neighbor_type(:^d&,igrid)==neighbor_fine)) then
659  {do i^db=-1,1\}
660  if (skip_direction([ i^d ])) cycle
661  if (phi_ > 0) ipole=neighbor_pole(i^d,igrid)
662  my_neighbor_type=neighbor_type(i^d,igrid)
663  if (my_neighbor_type==neighbor_fine) call bc_send_prolong
664  {end do\}
665  end if
666  end do
667 
668  !if (irecv/=nrecvs) then
669  ! call mpistop("number of recvs in phase2 in amr_ghostcells is incorrect")
670  !end if
671  !if (isend/=nsends) then
672  ! call mpistop("number of sends in phase2 in amr_ghostcells is incorrect")
673  !end if
674 
675  call mpi_waitall(irecv,recvrequest,recvstatus,ierrmpi)
676  call mpi_waitall(isend,sendrequest,sendstatus,ierrmpi)
677  deallocate(recvstatus,recvrequest)
678  deallocate(sendstatus,sendrequest)
679 
680  if(stagger_grid) then
681  call mpi_waitall(nrecv_bc_p,recvrequest_p,recvstatus_p,ierrmpi)
682  call mpi_waitall(nsend_bc_p,sendrequest_p,sendstatus_p,ierrmpi)
683 
684  ! fill coarser representative after receipt
685  ibuf_recv_p=1
686  do iigrid=1,igridstail; igrid=igrids(iigrid);
687  ^d&iib^d=idphyb(^d,igrid);
688  {do i^db=-1,1\}
689  if (skip_direction([ i^d ])) cycle
690  if(neighbor_type(i^d,igrid)==neighbor_coarse) call bc_fill_p
691  {end do\}
692  end do
693  end if
694  ! do prolongation on the ghost-cell values received from coarser neighbors
695  do iigrid=1,igridstail; igrid=igrids(iigrid);
696  ^d&iib^d=idphyb(^d,igrid);
697  if (any(neighbor_type(:^d&,igrid)==neighbor_coarse)) then
698  needprolong=.false.
699  {do i^db=-1,1\}
700  if (skip_direction([ i^d ])) cycle
701  my_neighbor_type=neighbor_type(i^d,igrid)
702  if (my_neighbor_type==neighbor_coarse) then
703  call bc_prolong
704  needprolong(i^d)=.true.
705  end if
706  {end do\}
707  if(stagger_grid) then
708  ! Ghost cell prolongation for staggered variables
709  ! must be done in a specific order.
710  ! First the first neighbours, which have 2 indices=0 in 3D
711  ! or one index=0 in 2D
712  do idims=1,ndim
713  i^d=0;
714  select case(idims)
715  {case(^d)
716  do i^d=-1,1,2
717  if (needprolong(i^dd)) call bc_prolong_stg(needprolong)
718  end do
719  \}
720  end select
721  end do
722  ! Then the second neighbours which have 1 index=0 in 3D
723  ! (Only in 3D)
724  {^ifthreed
725  i1=0;
726  do i2=-1,1,2
727  do i3=-1,1,2
728  if (needprolong(i^d)) call bc_prolong_stg(needprolong)
729  end do
730  end do
731  i2=0;
732  do i3=-1,1,2
733  do i1=-1,1,2
734  if (needprolong(i^d)) call bc_prolong_stg(needprolong)
735  end do
736  end do
737  i3=0;
738  do i1=-1,1,2
739  do i2=-1,1,2
740  if (needprolong(i^d)) call bc_prolong_stg(needprolong)
741  end do
742  end do
743  }
744  ! Finally, the corners, that have no index=0
745  {do i^d=-1,1,2\}
746  if (needprolong(i^d)) call bc_prolong_stg(needprolong)
747  {end do\}
748  end if
749  end if
750  end do
751 
752  do ipwbuf=1,npwbuf
753  if (isend_buf(ipwbuf)/=0) deallocate(pwbuf(ipwbuf)%w)
754  end do
755 
756  if(stagger_grid) then
757  do iigrid=1,igridstail; igrid=igrids(iigrid);
758  if(.not.phyboundblock(igrid)) cycle
759  saveigrid=igrid
760  block=>psb(igrid)
761  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
762  do idims=1,ndim
763  ! to avoid using as yet unknown corner info in more than 1D, we
764  ! fill only interior mesh ranges of the ghost cell ranges at first,
765  ! and progressively enlarge the ranges to include corners later
766  kmin1=0; kmax1=0;
767  {^iftwod
768  kmin2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0,-1,igrid)==1)
769  kmax2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0, 1,igrid)==1)}
770  {^ifthreed
771  kmin2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0,-1,0,igrid)==1)
772  kmax2=merge(1, 0, idims .lt. 2 .and. neighbor_type(0, 1,0,igrid)==1)
773  kmin3=merge(1, 0, idims .lt. 3 .and. neighbor_type(0,0,-1,igrid)==1)
774  kmax3=merge(1, 0, idims .lt. 3 .and. neighbor_type(0,0, 1,igrid)==1)}
775  ixbmin^d=ixglo^d+kmin^d*nghostcells;
776  ixbmax^d=ixghi^d-kmax^d*nghostcells;
777  do iside=1,2
778  i^d=kr(^d,idims)*(2*iside-3);
779  if (aperiodb(idims)) then
780  if (neighbor_type(i^d,igrid) /= neighbor_boundary .and. &
781  .not. psb(igrid)%is_physical_boundary(2*idims-2+iside)) cycle
782  else
783  if (neighbor_type(i^d,igrid) /= neighbor_boundary) cycle
784  end if
785  call bc_phys(iside,idims,time,qdt,psb(igrid),ixg^l,ixb^l)
786  end do
787  end do
788  end do
789  end if
790 
791  ! modify normal component of magnetic field to fix divB=0
792  if(bcphys.and.associated(phys_boundary_adjust)) call phys_boundary_adjust()
793 
794  if (nwaux>0) call fix_auxiliary
795 
796  time_bc=time_bc+(mpi_wtime()-time_bcin)
797 
798  contains
799 
800  logical function skip_direction(dir)
801  integer, intent(in) :: dir(^nd)
802 
803  if (all(dir == 0)) then
804  skip_direction = .true.
805  else if (.not. req_diagonal .and. count(dir /= 0) > 1) then
806  skip_direction = .true.
807  else
808  skip_direction = .false.
809  end if
810  end function skip_direction
811 
812  !> Receive from sibling at same refinement level
813  subroutine bc_recv_srl
815  ipe_neighbor=neighbor(2,i^d,igrid)
816  if (ipe_neighbor/=mype) then
817  irecv=irecv+1
818  itag=(3**^nd+4**^nd)*(igrid-1)+{(i^d+1)*3**(^d-1)+}
819  call mpi_irecv(psb(igrid)%w,1,type_recv_srl(iib^d,i^d), &
820  ipe_neighbor,itag,icomm,recvrequest(irecv),ierrmpi)
821  if(stagger_grid) then
823  call mpi_irecv(recvbuffer_srl(ibuf_recv_srl),sizes_srl_recv_total(i^d),mpi_double_precision, &
826  end if
827  end if
828 
829  end subroutine bc_recv_srl
830 
831  !> Receive from fine neighbor
832  subroutine bc_recv_restrict
834  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
835  inc^db=2*i^db+ic^db\}
836  ipe_neighbor=neighbor_child(2,inc^d,igrid)
837  if (ipe_neighbor/=mype) then
838  irecv=irecv+1
839  itag=(3**^nd+4**^nd)*(igrid-1)+3**^nd+{inc^d*4**(^d-1)+}
840  call mpi_irecv(psb(igrid)%w,1,type_recv_r(iib^d,inc^d), &
841  ipe_neighbor,itag,icomm,recvrequest(irecv),ierrmpi)
842  if(stagger_grid) then
843  irecv_r=irecv_r+1
844  call mpi_irecv(recvbuffer_r(ibuf_recv_r),sizes_r_recv_total(inc^d), &
845  mpi_double_precision,ipe_neighbor,itag, &
848  end if
849  end if
850  {end do\}
851 
852  end subroutine bc_recv_restrict
853 
854  !> Send to sibling at same refinement level
855  subroutine bc_send_srl
! Send ghost-cell data to a sibling block at the same refinement level.
! Local siblings are filled by direct array copy; remote siblings get an
! mpi_isend (derived datatype for cell-centered w, flat buffer for staggered
! ws).  Pole neighbors go through pole_copy / pole_buffer with a rotating
! pool of npwbuf send buffers (pwbuf).  Host-associated (getbc) variables:
! i^D, iib^D, igrid, ipole, isend, ipwbuf, buffer offsets, etc.
857  ineighbor=neighbor(1,i^d,igrid)
858  ipe_neighbor=neighbor(2,i^d,igrid)
859 
! --- regular (non-pole) neighbor ---
860  if (ipole==0) then
! n_i^D is the direction seen from the receiver (mirror of i^D)
861  n_i^d=-i^d;
862  if (ipe_neighbor==mype) then
! same rank: direct copy from send range into the neighbor's receive range
863  ixs^l=ixs_srl_^l(iib^d,i^d);
864  ixr^l=ixr_srl_^l(iib^d,n_i^d);
865  psb(ineighbor)%w(ixr^s,nwhead:nwtail)=&
866  psb(igrid)%w(ixs^s,nwhead:nwtail)
867  else
868  isend=isend+1
! tag mirrors the receiver's itag in bc_recv_srl
869  itag=(3**^nd+4**^nd)*(ineighbor-1)+{(n_i^d+1)*3**(^d-1)+}
870  call mpi_isend(psb(igrid)%w,1,type_send_srl(iib^d,i^d), &
871  ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
872  if(stagger_grid) then
! pack each staggered component contiguously into sendbuffer_srl
873  ibuf_start=ibuf_send_srl
874  do idir=1,ndim
875  ixs^l=ixs_srl_stg_^l(idir,i^d);
876  ibuf_next=ibuf_start+sizes_srl_send_stg(idir,i^d)
877  shapes=(/sizes_srl_send_stg(idir,i^d)/)
878  sendbuffer_srl(ibuf_start:ibuf_next-1)=&
879  reshape(psb(igrid)%ws(ixs^s,idir),shapes)
880  ibuf_start=ibuf_next
881  end do
! NOTE(review): source line 882 is missing from this extraction (most likely
! the isend_srl counter increment) -- restore from version control.
883  call mpi_isend(sendbuffer_srl(ibuf_send_srl),sizes_srl_send_total(i^d),&
884  mpi_double_precision, ipe_neighbor,itag,icomm,sendrequest_srl(isend_srl),ierrmpi)
885  ibuf_send_srl=ibuf_next
886  end if
887  end if
! --- neighbor across a pole: indices are folded by pole_copy/pole_buffer ---
888  else
889  ixs^l=ixs_srl_^l(iib^d,i^d);
890  select case (ipole)
891  {case (^d)
892  n_i^d=i^d^d%n_i^dd=-i^dd;\}
893  end select
894  if (ipe_neighbor==mype) then
895  ixr^l=ixr_srl_^l(iib^d,n_i^d);
896  call pole_copy(psb(ineighbor)%w,ixg^l,ixr^l,psb(igrid)%w,ixg^l,ixs^l)
897  else
! recycle slot ipwbuf: wait for its previous isend before reusing the memory
898  if (isend_buf(ipwbuf)/=0) then
899  call mpi_wait(sendrequest(isend_buf(ipwbuf)), &
900  sendstatus(:,isend_buf(ipwbuf)),ierrmpi)
901  deallocate(pwbuf(ipwbuf)%w)
902  end if
903  allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
904  call pole_buffer(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psb(igrid)%w,ixg^l,ixs^l)
905  isend=isend+1
906  isend_buf(ipwbuf)=isend
907  itag=(3**^nd+4**^nd)*(ineighbor-1)+{(n_i^d+1)*3**(^d-1)+}
908  isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
909  call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
910  ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
! advance to the next of the npwbuf interleaved buffers
911  ipwbuf=1+modulo(ipwbuf,npwbuf)
912  if(stagger_grid) then
913  ibuf_start=ibuf_send_srl
914  do idir=1,ndim
915  ixs^l=ixs_srl_stg_^l(idir,i^d);
916  ibuf_next=ibuf_start+sizes_srl_send_stg(idir,i^d)
917  shapes=(/sizes_srl_send_stg(idir,i^d)/)
918  sendbuffer_srl(ibuf_start:ibuf_next-1)=&
919  reshape(psb(igrid)%ws(ixs^s,idir),shapes)
920  ibuf_start=ibuf_next
921  end do
! NOTE(review): source line 922 is missing from this extraction (most likely
! the isend_srl counter increment) -- restore from version control.
923  call mpi_isend(sendbuffer_srl(ibuf_send_srl),sizes_srl_send_total(i^d),&
924  mpi_double_precision, ipe_neighbor,itag,icomm,sendrequest_srl(isend_srl),ierrmpi)
925  ibuf_send_srl=ibuf_next
926  end if
927  end if
928  end if
929 
930  end subroutine bc_send_srl
931 
932  !> Send to coarser neighbor
933  subroutine bc_send_restrict
! Send the coarsened representative (psc) of this block to its coarser
! neighbor in direction i^D.  Only the child that actually touches that
! neighbor sends (the ic^D/i^D compatibility test below).  When the block
! touches a physical boundary and no staggered grid is used, the coarse
! representative's boundary ghost cells are filled first via bc_phys so the
! sent region is complete.  Pole neighbors use pole_copy/pole_buffer.
934  integer :: ii^D
935 
! child position of this block inside its parent (1 or 2 per dimension)
936  ic^d=1+modulo(node(pig^d_,igrid)-1,2);
! skip unless this child is adjacent to the coarse neighbor in direction i^D
937  if ({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
938  if(phyboundblock(igrid).and..not.stagger_grid) then
939  ! to use block in physical boundary setup for coarse representative
940  block=>psc(igrid)
941  ! filling physical boundary ghost cells of a coarser representative block for
942  ! sending swap region with width of nghostcells to its coarser neighbor
943  do idims=1,ndim
944  ! to avoid using as yet unknown corner info in more than 1D, we
945  ! fill only interior mesh ranges of the ghost cell ranges at first,
946  ! and progressively enlarge the ranges to include corners later
947  {kmin^d=merge(0, 1, idims==^d)
948  kmax^d=merge(0, 1, idims==^d)
949  ixbmin^d=ixcogmin^d+kmin^d*nghostcells
950  ixbmax^d=ixcogmax^d-kmax^d*nghostcells\}
951  {^iftwod
952  if(idims > 1 .and. neighbor_type(-1,0,igrid)==neighbor_boundary) ixbmin1=ixcogmin1
953  if(idims > 1 .and. neighbor_type( 1,0,igrid)==neighbor_boundary) ixbmax1=ixcogmax1}
954  {^ifthreed
955  if(idims > 1 .and. neighbor_type(-1,0,0,igrid)==neighbor_boundary) ixbmin1=ixcogmin1
956  if(idims > 1 .and. neighbor_type( 1,0,0,igrid)==neighbor_boundary) ixbmax1=ixcogmax1
957  if(idims > 2 .and. neighbor_type(0,-1,0,igrid)==neighbor_boundary) ixbmin2=ixcogmin2
958  if(idims > 2 .and. neighbor_type(0, 1,0,igrid)==neighbor_boundary) ixbmax2=ixcogmax2}
! restrict the filled strip to the swap region actually sent in direction i^D
959  {if(i^d==-1) then
960  ixbmin^d=ixcogmin^d+nghostcells
961  ixbmax^d=ixcogmin^d+2*nghostcells-1
962  else if(i^d==1) then
963  ixbmin^d=ixcogmax^d-2*nghostcells+1
964  ixbmax^d=ixcogmax^d-nghostcells
965  end if\}
966  do iside=1,2
967  ii^d=kr(^d,idims)*(2*iside-3);
! skip corners and sides that are not physical boundaries
968  if ({abs(i^d)==1.and.abs(ii^d)==1|.or.}) cycle
969  if (neighbor_type(ii^d,igrid)/=neighbor_boundary) cycle
970  call bc_phys(iside,idims,time,0.d0,psc(igrid),ixcog^l,ixb^l)
971  end do
972  end do
973  end if
974 
975  ineighbor=neighbor(1,i^d,igrid)
976  ipe_neighbor=neighbor(2,i^d,igrid)
977 
! --- regular (non-pole) coarse neighbor ---
978  if (ipole==0) then
! n_inc^D: this child's position as seen from the receiving coarse block
979  n_inc^d=-2*i^d+ic^d;
980  if (ipe_neighbor==mype) then
981  ixs^l=ixs_r_^l(iib^d,i^d);
982  ixr^l=ixr_r_^l(iib^d,n_inc^d);
983  psb(ineighbor)%w(ixr^s,nwhead:nwtail)=&
984  psc(igrid)%w(ixs^s,nwhead:nwtail)
985  else
986  isend=isend+1
! tag mirrors the receiver's itag in bc_recv_restrict
987  itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
988  call mpi_isend(psc(igrid)%w,1,type_send_r(iib^d,i^d), &
989  ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
990  if(stagger_grid) then
! pack staggered components contiguously into sendbuffer_r
991  ibuf_start=ibuf_send_r
992  do idir=1,ndim
993  ixs^l=ixs_r_stg_^l(idir,i^d);
994  ibuf_next=ibuf_start+sizes_r_send_stg(idir,i^d)
995  shapes=(/sizes_r_send_stg(idir,i^d)/)
996  sendbuffer_r(ibuf_start:ibuf_next-1)=&
997  reshape(psc(igrid)%ws(ixs^s,idir),shapes)
998  ibuf_start=ibuf_next
999  end do
1000  isend_r=isend_r+1
1001  call mpi_isend(sendbuffer_r(ibuf_send_r),sizes_r_send_total(i^d),&
1002  mpi_double_precision,ipe_neighbor,itag, &
! NOTE(review): source line 1003 (remaining mpi_isend arguments, presumably
! icomm,sendrequest_r(isend_r),ierrmpi) is missing from this extraction.
1004  ibuf_send_r=ibuf_next
1005  end if
1006  end if
! --- coarse neighbor across a pole ---
1007  else
1008  ixs^l=ixs_r_^l(iib^d,i^d);
1009  select case (ipole)
1010  {case (^d)
1011  n_inc^d=2*i^d+(3-ic^d)^d%n_inc^dd=-2*i^dd+ic^dd;\}
1012  end select
1013  if (ipe_neighbor==mype) then
1014  ixr^l=ixr_r_^l(iib^d,n_inc^d);
1015  call pole_copy(psb(ineighbor)%w,ixg^l,ixr^l,psc(igrid)%w,ixcog^l,ixs^l)
1016  else
! recycle pwbuf slot: wait for the previous isend using it to complete
1017  if (isend_buf(ipwbuf)/=0) then
1018  call mpi_wait(sendrequest(isend_buf(ipwbuf)), &
1019  sendstatus(:,isend_buf(ipwbuf)),ierrmpi)
1020  deallocate(pwbuf(ipwbuf)%w)
1021  end if
1022  allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
1023  call pole_buffer(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psc(igrid)%w,ixcog^l,ixs^l)
1024  isend=isend+1
1025  isend_buf(ipwbuf)=isend
1026  itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
1027  isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
1028  call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
1029  ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
1030  ipwbuf=1+modulo(ipwbuf,npwbuf)
1031  if(stagger_grid) then
1032  ibuf_start=ibuf_send_r
1033  do idir=1,ndim
1034  ixs^l=ixs_r_stg_^l(idir,i^d);
1035  ibuf_next=ibuf_start+sizes_r_send_stg(idir,i^d)
1036  shapes=(/sizes_r_send_stg(idir,i^d)/)
1037  sendbuffer_r(ibuf_start:ibuf_next-1)=&
1038  reshape(psc(igrid)%ws(ixs^s,idir),shapes)
1039  ibuf_start=ibuf_next
1040  end do
1041  isend_r=isend_r+1
1042  call mpi_isend(sendbuffer_r(ibuf_send_r),sizes_r_send_total(i^d),&
1043  mpi_double_precision,ipe_neighbor,itag, &
! NOTE(review): source line 1044 (remaining mpi_isend arguments) is missing
! from this extraction -- restore from version control.
1045  ibuf_send_r=ibuf_next
1046  end if
1047  end if
1048  end if
1049 
1050  end subroutine bc_send_restrict
1051 
1052 
1053  !> fill siblings ghost cells with received data
1054  subroutine bc_fill_srl
! Fill this block's staggered (face-centered) sibling ghost cells, either by
! direct copy from a local sibling or by unpacking the flat recvbuffer_srl
! that bc_recv_srl received.  Pole neighbors are folded via pole_copy_stg.
! Commented-out code below is an inactive face-synchronization experiment.
1055  double precision :: tmp(ixgs^t)
1056  integer :: ixS^L,ixR^L,n_i^D,ixSsync^L,ixRsync^L
1057  integer :: idir, idirect
1058 
1059  ineighbor=neighbor(1,i^d,igrid)
1060  ipe_neighbor=neighbor(2,i^d,igrid)
1061  ipole=neighbor_pole(i^d,igrid)
! idirect = sum of |i^D|: 1 means a face neighbor (used by the disabled sync)
1062  idirect={abs(i^d)|+}
1063 
1064  !! Now the special treatment of the pole is done here, at the receive step
1065  if (ipole==0) then
1066  ixr^l=ixr_srl_^l(iib^d,i^d);
1067  if (ipe_neighbor==mype) then
! same rank: copy each staggered component from the sibling directly
1068  n_i^d=-i^d;
1069  do idir=1,ndim
1070  ixs^l=ixs_srl_stg_^l(idir,n_i^d);
1071  ixr^l=ixr_srl_stg_^l(idir,i^d);
1072  !if (idirect == 1 .and. qdt>0.d0) then
1073  ! ! use the same value at the face shared by two neighbors
1074  ! call indices_for_syncing(idir,i^D,ixR^L,ixS^L,ixRsync^L,ixSsync^L)
1075  ! if({idir==^D .and. i^D /=0 | .or.}) write(*,*) it,'difference',maxval(abs(psb(igrid)%ws(ixRsync^S,idir)-psb(ineighbor)%ws(ixSsync^S,idir)))
1076  ! !psb(igrid)%ws(ixRsync^S,idir) = half*(psb(igrid)%ws(ixRsync^S,idir)+psb(ineighbor)%ws(ixSsync^S,idir))
1077  !end if
1078  psb(igrid)%ws(ixr^s,idir) = psb(ineighbor)%ws(ixs^s,idir)
1079  end do
1080  else
1081  !! Unpack the buffer and fill the ghost cells
1082  n_i^d=-i^d;
1083  do idir=1,ndim
1084  ixs^l=ixs_srl_stg_^l(idir,n_i^d);
1085  ixr^l=ixr_srl_stg_^l(idir,i^d);
1086  ibuf_next=ibuf_recv_srl+sizes_srl_recv_stg(idir,i^d)
! reshape the flat buffer segment back to the sender's index shape
1087  tmp(ixs^s) = reshape(source=recvbuffer_srl(ibuf_recv_srl:ibuf_next-1),shape=shape(psb(igrid)%ws(ixs^s,idir)))
1088  !if (idirect == 1) then
1089  ! ! ixR ixS maybe changed
1090  ! call indices_for_syncing(idir,i^D,ixR^L,ixS^L,ixRsync^L,ixSsync^L) ! Overwrites ixR, ixS
1091  ! if(qdt==0) psb(igrid)%ws(ixRsync^S,idir) = half*(tmp(ixSsync^S) + psb(igrid)%ws(ixRsync^S,idir))
1092  ! if({idir==^D .and. i^D /=0 | .or.}) write(*,*) it,'betweenpe',maxval(abs(psb(igrid)%ws(ixRsync^S,idir)-tmp(ixSsync^S)))
1093  !end if
1094  psb(igrid)%ws(ixr^s,idir) = tmp(ixs^s)
1095  ibuf_recv_srl=ibuf_next
1096  end do
1097  end if
1098 
1099  else ! There is a pole
1100  select case (ipole)
1101  {case (^d)
1102  n_i^d=i^d^d%n_i^dd=-i^dd;\}
1103  end select
1104  if (ipe_neighbor==mype) then
1105  !! Fill ghost cells
1106  do idir=1,ndim
1107  ixr^l=ixr_srl_stg_^l(idir,i^d);
1108  ixs^l=ixs_srl_stg_^l(idir,n_i^d);
1109  !! Fill ghost cells
1110  call pole_copy_stg(psb(igrid)%ws,ixr^l,psb(ineighbor)%ws,ixs^l,idir)
1111  end do
1112  else
! remote pole neighbor: unpack into pole_buf, then fold across the pole
1113  pole_buf%ws=zero
1114  do idir=1,ndim
1115  ixr^l=ixr_srl_stg_^l(idir,i^d);
1116  ixs^l=ixs_srl_stg_^l(idir,n_i^d);
1117  ibuf_next=ibuf_recv_srl+sizes_srl_recv_stg(idir,i^d)
1118  pole_buf%ws(ixs^s,idir)=reshape(source=recvbuffer_srl(ibuf_recv_srl:ibuf_next-1),&
1119  shape=shape(psb(igrid)%ws(ixs^s,idir)))
1120  ibuf_recv_srl=ibuf_next
1121  call pole_copy_stg(psb(igrid)%ws,ixr^l,pole_buf%ws,ixs^l,idir)
1122  end do
1123  end if
1124  end if
1125 
1126  end subroutine bc_fill_srl
1127 
1128  subroutine indices_for_syncing(idir,i^D,ixR^L,ixS^L,ixRsync^L,ixSsync^L)
! For a face neighbor in direction i^D and staggered component idir, split the
! receive/send ranges into (a) the single shared-face layer (ixRsync/ixSsync)
! and (b) the remainder (ixR/ixS shrunk by one on that side).  Currently only
! called from code that is commented out in bc_fill_srl.
1129  integer, intent(in) :: i^D,idir
1130  integer, intent(inout) :: ixR^L,ixS^L
1131  integer, intent(out) :: ixRsync^L,ixSsync^L
1132 
! default: the sync ranges coincide with the full ranges
1133  ixrsync^l=ixr^l;
1134  ixssync^l=ixs^l;
1135 
1136  {
! lower neighbor: shared face is the top layer of the receive range
1137  if (i^d == -1 .and. idir == ^d) then
1138  ixrsyncmin^d = ixrmax^d
1139  ixrsyncmax^d = ixrmax^d
1140  ixssyncmin^d = ixsmax^d
1141  ixssyncmax^d = ixsmax^d
1142  ixrmax^d = ixrmax^d - 1
1143  ixsmax^d = ixsmax^d - 1
! upper neighbor: shared face is the bottom layer of the receive range
1144  else if (i^d == 1 .and. idir == ^d) then
1145  ixrsyncmin^d = ixrmin^d
1146  ixrsyncmax^d = ixrmin^d
1147  ixssyncmin^d = ixsmin^d
1148  ixssyncmax^d = ixsmin^d
1149  ixrmin^d = ixrmin^d + 1
1150  ixsmin^d = ixsmin^d + 1
1151  end if
1152  \}
1153 
1154  end subroutine indices_for_syncing
1155 
1156  !> fill restricted ghost cells after receipt
1157  subroutine bc_fill_r
! Fill this (coarse) block's staggered ghost cells with restricted data from
! its finer neighbor children: direct copy from psc of a local child, or
! unpack of recvbuffer_r for a remote child.  Pole children are folded via
! pole_copy_stg.  NOTE(review): source line 1158 is missing from this
! extraction (likely blank or a use statement).
1159  ipole=neighbor_pole(i^d,igrid)
1160  if (ipole==0) then
1161  ! Loop over the children ic^D to and their neighbors inc^D
1162  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1163  inc^db=2*i^db+ic^db\}
1164  n_i^d=-i^d;
1165  ineighbor=neighbor_child(1,inc^d,igrid)
1166  ipe_neighbor=neighbor_child(2,inc^d,igrid)
1167  if (ipe_neighbor==mype) then ! Same processor
! copy each staggered component from the child's coarse representative
1168  do idir=1,ndim
1169  ixs^l=ixs_r_stg_^l(idir,n_i^d);
1170  ixr^l=ixr_r_stg_^l(idir,inc^d);
1171  psb(igrid)%ws(ixr^s,idir)=psc(ineighbor)%ws(ixs^s,idir)
1172  end do
1173  else ! Different processor
1174  !! Unpack the buffer and fill the ghost cells
1175  do idir=1,ndim
1176  ixr^l=ixr_r_stg_^l(idir,inc^d);
1177  ibuf_next=ibuf_recv_r+sizes_r_recv_stg(idir,inc^d)
1178  psb(igrid)%ws(ixr^s,idir)=reshape(source=recvbuffer_r(ibuf_recv_r:ibuf_next-1),&
1179  shape=shape(psb(igrid)%ws(ixr^s,idir)))
1180  ibuf_recv_r=ibuf_next
1181  end do
1182  end if
1183  {end do\}
1184 
1185  else !! There is a pole
1186  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1187  inc^db=2*i^db+ic^db\}
1188  select case(ipole)
1189  {case (^d)
1190  n_i^d=i^d^d%n_i^dd=-i^dd;\}
1191  end select
1192  ineighbor=neighbor_child(1,inc^d,igrid)
1193  ipe_neighbor=neighbor_child(2,inc^d,igrid)
1194  ixr^l=ixr_r_^l(iib^d,inc^d);
1195  if (ipe_neighbor==mype) then ! Same processor
1196  do idir=1,ndim
1197  ixs^l=ixs_r_stg_^l(idir,n_i^d);
1198  ixr^l=ixr_r_stg_^l(idir,inc^d);
1199  !! Fill ghost cells
1200  call pole_copy_stg(psb(igrid)%ws,ixr^l,psc(ineighbor)%ws,ixs^l,idir)
1201  end do
1202  else ! Different processor
1203  !! Unpack the buffer and fill an auxiliary array
1204  pole_buf%ws=zero
1205  do idir=1,ndim
1206  ixs^l=ixs_r_stg_^l(idir,n_i^d);
1207  ixr^l=ixr_r_stg_^l(idir,inc^d);
1208  ibuf_next=ibuf_recv_r+sizes_r_recv_stg(idir,inc^d)
1209  pole_buf%ws(ixr^s,idir)=reshape(source=recvbuffer_r(ibuf_recv_r:ibuf_next-1),&
1210  shape=shape(psb(igrid)%ws(ixr^s,idir)))
1211  call pole_copy_stg(psb(igrid)%ws,ixr^l,pole_buf%ws,ixr^l,idir)
1212  ibuf_recv_r=ibuf_next
1213  end do
1214  end if
1215  {end do\}
1216 
1217  end if
1218 
1219  end subroutine bc_fill_r
1220 
1221  !> Receive from coarse neighbor
1222  subroutine bc_recv_prolong
! Post non-blocking MPI receives into this block's coarse representative
! (psc) for prolonged ghost-cell data coming from a coarser neighbor on
! another rank.  Only posts when this child actually touches the coarse
! neighbor in direction i^D.
1224  ic^d=1+modulo(node(pig^d_,igrid)-1,2);
1225  if ({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
1226 
1227  ipe_neighbor=neighbor(2,i^d,igrid)
1228  if (ipe_neighbor/=mype) then
1229  irecv=irecv+1
1230  inc^d=ic^d+i^d;
! tag encodes destination grid and child position; matches bc_send_prolong
1231  itag=(3**^nd+4**^nd)*(igrid-1)+3**^nd+{inc^d*4**(^d-1)+}
1232  call mpi_irecv(psc(igrid)%w,1,type_recv_p(iib^d,inc^d), &
1233  ipe_neighbor,itag,icomm,recvrequest(irecv),ierrmpi)
1234  if(stagger_grid) then
! staggered variables arrive via the flat recvbuffer_p
1235  irecv_p=irecv_p+1
1236  call mpi_irecv(recvbuffer_p(ibuf_recv_p),sizes_p_recv_total(inc^d),&
1237  mpi_double_precision,ipe_neighbor,itag,&
1238  icomm,recvrequest_p(irecv_p),ierrmpi)
! NOTE(review): source line 1239 is missing from this extraction (most
! likely the ibuf_recv_p advance) -- restore from version control.
1240  end if
1241  end if
1242 
1243  end subroutine bc_recv_prolong
1244 
1245  !> Send to finer neighbor
1246  subroutine bc_send_prolong
! Send this block's data to the coarse representatives (psc) of its finer
! neighbor children so they can be prolonged there.  Local children get a
! direct copy; remote children an mpi_isend (datatype for w, flat buffer for
! staggered ws).  Pole children are folded via pole_copy/pole_copy_stg or
! staged through the rotating pwbuf pool.
1247  integer :: ii^D
1248 
1249  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1250  inc^db=2*i^db+ic^db\}
1251  ixs^l=ixs_p_^l(iib^d,inc^d);
1252 
1253  ineighbor=neighbor_child(1,inc^d,igrid)
1254  ipe_neighbor=neighbor_child(2,inc^d,igrid)
1255 
! --- regular (non-pole) fine neighbor ---
1256  if (ipole==0) then
! n_inc^D: this sender's position as seen from the receiving child
1257  n_i^d=-i^d;
1258  n_inc^d=ic^d+n_i^d;
1259  if (ipe_neighbor==mype) then
1260  ixr^l=ixr_p_^l(iib^d,n_inc^d);
1261  psc(ineighbor)%w(ixr^s,nwhead:nwtail) &
1262  =psb(igrid)%w(ixs^s,nwhead:nwtail)
1263  else
1264  isend=isend+1
! tag mirrors the receiver's itag in bc_recv_prolong
1265  itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
1266  call mpi_isend(psb(igrid)%w,1,type_send_p(iib^d,inc^d), &
1267  ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
1268  if(stagger_grid) then
! pack staggered components contiguously into sendbuffer_p
1269  ibuf_start=ibuf_send_p
1270  do idir=1,ndim
1271  ixs^l=ixs_p_stg_^l(idir,inc^d);
1272  ibuf_next=ibuf_start+sizes_p_send_stg(idir,inc^d)
1273  shapes=(/sizes_p_send_stg(idir,inc^d)/)
1274  sendbuffer_p(ibuf_start:ibuf_next-1)=&
1275  reshape(psb(igrid)%ws(ixs^s,idir),shapes)
1276  ibuf_start=ibuf_next
1277  end do
1278  isend_p=isend_p+1
1279  call mpi_isend(sendbuffer_p(ibuf_send_p),sizes_p_send_total(inc^d),&
1280  mpi_double_precision,ipe_neighbor,itag, &
1281  icomm,sendrequest_p(isend_p),ierrmpi)
1282  ibuf_send_p=ibuf_next
1283  end if
1284  end if
! --- fine neighbor across a pole ---
1285  else
1286  select case (ipole)
1287  {case (^d)
1288  n_inc^d=inc^d^d%n_inc^dd=ic^dd-i^dd;\}
1289  end select
1290  if (ipe_neighbor==mype) then
1291  ixr^l=ixr_p_^l(iib^d,n_inc^d);
1292  call pole_copy(psc(ineighbor)%w,ixcog^l,ixr^l,psb(igrid)%w,ixg^l,ixs^l)
1293  if(stagger_grid) then
1294  do idir=1,ndim
1295  ixs^l=ixs_p_stg_^l(idir,inc^d);
1296  ixr^l=ixr_p_stg_^l(idir,n_inc^d);
1297  call pole_copy_stg(psc(ineighbor)%ws,ixr^l,psb(igrid)%ws,ixs^l,idir)
1298  end do
1299  end if
1300  else
! recycle pwbuf slot: wait for the previous isend using it to complete
1301  if (isend_buf(ipwbuf)/=0) then
1302  call mpi_wait(sendrequest(isend_buf(ipwbuf)), &
1303  sendstatus(:,isend_buf(ipwbuf)),ierrmpi)
1304  deallocate(pwbuf(ipwbuf)%w)
1305  end if
1306  allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
1307  call pole_buffer(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psb(igrid)%w,ixg^l,ixs^l)
1308  isend=isend+1
1309  isend_buf(ipwbuf)=isend
1310  itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
1311  isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
1312  call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
1313  ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
1314  ipwbuf=1+modulo(ipwbuf,npwbuf)
1315  if(stagger_grid) then
1316  ibuf_start=ibuf_send_p
1317  do idir=1,ndim
1318  ixs^l=ixs_p_stg_^l(idir,inc^d);
1319  ibuf_next=ibuf_start+sizes_p_send_stg(idir,inc^d)
1320  shapes=(/sizes_p_send_stg(idir,inc^d)/)
1321  sendbuffer_p(ibuf_start:ibuf_next-1)=&
1322  reshape(psb(igrid)%ws(ixs^s,idir),shapes)
1323  ibuf_start=ibuf_next
1324  end do
1325  isend_p=isend_p+1
1326  call mpi_isend(sendbuffer_p(ibuf_send_p),sizes_p_send_total(inc^d),&
1327  mpi_double_precision,ipe_neighbor,itag, &
1328  icomm,sendrequest_p(isend_p),ierrmpi)
1329  ibuf_send_p=ibuf_next
1330  end if
1331  end if
1332  end if
1333  {end do\}
1334 
1335  end subroutine bc_send_prolong
1336 
1337  !> fill coarser representative with data from coarser neighbors
1338  subroutine bc_fill_p
1339  ic^d=1+modulo(node(pig^d_,igrid)-1,2);
1340  if ({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
1341 
1342  ineighbor=neighbor(1,i^d,igrid)
1343  ipe_neighbor=neighbor(2,i^d,igrid)
1344  ipole=neighbor_pole(i^d,igrid)
1345 
1346  if (ipole==0) then !! There is no pole
1347  inc^d=ic^d+i^d;
1348  ixr^l=ixr_p_^l(iib^d,inc^d);
1349  if(ipe_neighbor==mype) then !! Same processor
1350  n_inc^d=-2*i^d+ic^d;
1351  do idir=1,ndim
1352  ixs^l=ixs_p_stg_^l(idir,n_inc^d);
1353  ixr^l=ixr_p_stg_^l(idir,inc^d);
1354  psc(igrid)%ws(ixr^s,idir)=psb(ineighbor)%ws(ixs^s,idir)
1355  end do
1356  else !! Different processor
1357  do idir=1,ndim
1358  ixr^l=ixr_p_stg_^l(idir,inc^d);
1359  ibuf_next=ibuf_recv_p+sizes_p_recv_stg(idir,inc^d)
1360  psc(igrid)%ws(ixr^s,idir)=reshape(source=recvbuffer_p(ibuf_recv_p:ibuf_next-1),&
1361  shape=shape(psc(igrid)%ws(ixr^s,idir)))
1362  ibuf_recv_p=ibuf_next
1363  end do
1364  end if
1365 
1366  else !! There is a pole
1367  inc^d=ic^d+i^d;
1368  select case (ipole)
1369  {case (^d)
1370  n_inc^d=2*i^d+(3-ic^d)^d%n_inc^dd=-2*i^dd+ic^dd;\}
1371  end select
1372  if (ipe_neighbor==mype) then
1373  do idir=1,ndim
1374  ixs^l=ixs_p_stg_^l(idir,n_inc^d);
1375  ixr^l=ixr_p_stg_^l(idir,inc^d);
1376  call pole_copy_stg(psc(igrid)%ws,ixr^l,psb(ineighbor)%ws,ixs^l,idir)
1377  end do
1378  else
1379  !! Unpack the buffer and fill an auxiliary array
1380  pole_buf%ws=zero
1381  do idir=1,ndim
1382  ixr^l=ixr_p_stg_^l(idir,inc^d);
1383  ibuf_next=ibuf_recv_p+sizes_p_recv_stg(idir,inc^d)
1384  pole_buf%ws(ixr^s,idir)=reshape(source=recvbuffer_p(ibuf_recv_p:ibuf_next-1),&
1385  shape=shape(psc(igrid)%ws(ixr^s,idir)))
1386  call pole_copy_stg(psc(igrid)%ws,ixr^l,pole_buf%ws,ixr^l,idir)
1387  ibuf_recv_p=ibuf_next
1388  end do
1389  end if
1390 
1391  end if
1392 
1393  end subroutine bc_fill_p
1394 
1395  !> do prolongation for fine blocks after receipt data from coarse neighbors
1396  subroutine bc_prolong
! Prolong (interpolate) ghost cells of this fine block from its coarse
! representative psc, after the coarse data has arrived.  Optionally converts
! the coarse patch to primitive variables first (prolongprimitive), fills
! physical-boundary parts of psc when needed, then dispatches on
! typeghostfill to linear interpolation or plain copy.
! NOTE(review): source line 1397 is missing from this extraction (likely a
! use statement or blank line).
1398 
1399  integer :: ixFi^L,ixCo^L,ii^D
1400  double precision :: dxFi^D, dxCo^D, xFimin^D, xComin^D, invdxCo^D
1401 
! fine-grid ghost region to fill, and fine/coarse cell sizes
1402  ixfi^l=ixr_srl_^l(iib^d,i^d);
1403  dxfi^d=rnode(rpdx^d_,igrid);
1404  dxco^d=two*dxfi^d;
1405  invdxco^d=1.d0/dxco^d;
1406 
1407  ! compute the enlarged grid lower left corner coordinates
1408  ! these are true coordinates for an equidistant grid,
1409  ! but we can temporarily also use them for getting indices
1410  ! in stretched grids
1411  xfimin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxfi^d;
1412  xcomin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxco^d;
1413 
! staggered + physical boundary: make sure the coarse patch used as
! interpolation stencil has its boundary ghost cells filled via bc_phys
1414  if(stagger_grid.and.phyboundblock(igrid)) then
1415  block=>psc(igrid)
1416  ixcomin^d=int((xfimin^d+(dble(ixfimin^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1-1;
1417  ixcomax^d=int((xfimin^d+(dble(ixfimax^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1+1;
1418  do idims=1,ndim
1419  do iside=1,2
1420  ii^d=kr(^d,idims)*(2*iside-3);
1421  if(neighbor_type(ii^d,igrid)/=neighbor_boundary) cycle
1422  if(( {(iside==1.and.idims==^d.and.ixcomin^d<ixcogmin^d+nghostcells)|.or.} ) &
1423  .or.( {(iside==2.and.idims==^d.and.ixcomax^d>ixcogmax^d-nghostcells)|.or. })) then
1424  {ixbmin^d=merge(ixcogmin^d,ixcomin^d,idims==^d);}
1425  {ixbmax^d=merge(ixcogmax^d,ixcomax^d,idims==^d);}
1426  call bc_phys(iside,idims,time,0.d0,psc(igrid),ixcog^l,ixb^l)
1427  end if
1428  end do
1429  end do
1430  end if
1431 
1432  if(prolongprimitive) then
1433  ! following line again assumes equidistant grid, but
1434  ! just computes indices, so also ok for stretched case
1435  ! reason for +1-1 and +1+1: the coarse representation has
1436  ! also nghostcells at each side. During
1437  ! prolongation, we need cells to left and right, hence -1/+1
1438  ixcomin^d=int((xfimin^d+(dble(ixfimin^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1-1;
1439  ixcomax^d=int((xfimin^d+(dble(ixfimax^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1+1;
1440  call phys_to_primitive(ixcog^l,ixco^l,&
1441  psc(igrid)%w,psc(igrid)%x)
1442  endif
1443 
1444  select case (typeghostfill)
1445  case ("linear")
1446  call interpolation_linear(ixfi^l,dxfi^d,xfimin^d,dxco^d,invdxco^d,xcomin^d)
1447  case ("copy")
1448  call interpolation_copy(ixfi^l,dxfi^d,xfimin^d,dxco^d,invdxco^d,xcomin^d)
1449  case default
1450  write (unitterm,*) "Undefined typeghostfill ",typeghostfill
1451  call mpistop("Undefined typeghostfill")
1452  end select
1453 
! undo the primitive conversion on the coarse patch
1454  if(prolongprimitive) call phys_to_conserved(ixcog^l,ixco^l,&
1455  psc(igrid)%w,psc(igrid)%x)
1456 
1457  end subroutine bc_prolong
1458 
1459  subroutine bc_prolong_stg(NeedProlong)
! Divergence-preserving 2nd-order prolongation of the staggered field for the
! ghost region in direction i^D, via prolong_2nd_stg.  fine_^Lin flags which
! adjacent ghost regions are already at the fine level so the prolongation
! can match them at shared faces.  Clears NeedProlong(i^D) when done.
! NOTE(review): source line 1460 is missing from this extraction (likely a
! use statement for mod_amr_fct).
1461  logical,dimension(-1:1^D&) :: NeedProlong
1462  logical :: fine_^Lin
1463  integer :: ixFi^L,ixCo^L
1464  double precision :: dxFi^D,dxCo^D,xFimin^D,xComin^D,invdxCo^D
1465  ! Check what is already at the desired level
1466  fine_^lin=.false.;
1467  {
! a side is "fine" when its region needs no prolongation and is not outside
! the domain (neighbor_type /= 1)
1468  if(i^d>-1) fine_min^din=(.not.needprolong(i^dd-kr(^d,^dd)).and.neighbor_type(i^dd-kr(^d,^dd),igrid)/=1)
1469  if(i^d<1) fine_max^din=(.not.needprolong(i^dd+kr(^d,^dd)).and.neighbor_type(i^dd+kr(^d,^dd),igrid)/=1)
1470  \}
1471 
1472  ixfi^l=ixr_srl_^l(iib^d,i^d);
1473 
1474  dxfi^d=rnode(rpdx^d_,igrid);
1475  dxco^d=two*dxfi^d;
1476  invdxco^d=1.d0/dxco^d;
1477 
! pseudo-coordinates of the enlarged fine/coarse grid corners (equidistant
! convention, used only for index arithmetic)
1478  xfimin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxfi^d;
1479  xcomin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxco^d;
1480 
1481  ! moved the physical boundary filling here, to only fill the
1482  ! part needed
1483 
! coarse index range enclosing the fine ghost region, widened by one cell
1484  ixcomin^d=int((xfimin^d+(dble(ixfimin^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1-1;
1485  ixcomax^d=int((xfimin^d+(dble(ixfimax^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1+1;
1486 
1487  call prolong_2nd_stg(psc(igrid),psb(igrid),ixco^l,ixfi^l,dxco^d,xcomin^d,dxfi^d,xfimin^d,.true.,fine_^lin)
1488 
1489  ! The current region has already been refined, so it does not need to be prolonged again
1490  needprolong(i^d)=.false.
1491 
1492  end subroutine bc_prolong_stg
1493 
1494  subroutine interpolation_linear(ixFi^L,dxFi^D,xFimin^D, &
1495  dxCo^D,invdxCo^D,xComin^D)
! Fill fine ghost cells ixFi^L by slope-limited linear interpolation from the
! coarse representative psc(igrid).  Slopes per direction are limited
! according to typeprolonglimit ('unlimit', 'minmod', 'woodward', 'koren',
! default = monotonized central).  Works in primitive variables when
! prolongprimitive is set (conversion back happens at the end).
! NOTE(review): source line 1496 is missing from this extraction (likely a
! use statement or blank line).
1497  integer, intent(in) :: ixFi^L
1498  double precision, intent(in) :: dxFi^D, xFimin^D,dxCo^D, invdxCo^D, xComin^D
1499 
1500  integer :: ixCo^D, jxCo^D, hxCo^D, ixFi^D, ix^D, iw, idims, nwmin,nwmax
1501  double precision :: xCo^D, xFi^D, eta^D
1502  double precision :: slopeL, slopeR, slopeC, signC, signR
1503  double precision :: slope(1:nw,ndim)
1504  !!double precision :: local_invdxCo^D
1505  double precision :: signedfactorhalf^D
1506  !integer :: ixshift^D, icase
1507 
1508  !icase=mod(nghostcells,2)
1509 
! range of variables to fill: all of them in primitive mode, else the
! requested nwhead:nwtail window
1510  if(prolongprimitive) then
1511  nwmin=1
1512  nwmax=nw
1513  else
1514  nwmin=nwhead
1515  nwmax=nwtail
1516  end if
1517 
1518  {do ixfi^db = ixfi^lim^db
1519  ! cell-centered coordinates of fine grid point
1520  ! here we temporarily use an equidistant grid
1521  xfi^db=xfimin^db+(dble(ixfi^db)-half)*dxfi^db
1522 
1523  ! indices of coarse cell which contains the fine cell
1524  ! since we computed lower left corner earlier
1525  ! in equidistant fashion: also ok for stretched case
1526  ixco^db=int((xfi^db-xcomin^db)*invdxco^db)+1
1527 
1528  ! cell-centered coordinates of coarse grid point
1529  ! here we temporarily use an equidistant grid
1530  xco^db=xcomin^db+(dble(ixco^db)-half)*dxco^db \}
1531 
1532  !if(.not.slab) then
1533  ! ^D&local_invdxCo^D=1.d0/psc(igrid)%dx({ixCo^DD},^D)\
1534  !endif
1535 
1536  if(slab) then
1537  ! actual cell-centered coordinates of fine grid point
1538  !!^D&xFi^D=block%x({ixFi^DD},^D)\
1539  ! actual cell-centered coordinates of coarse grid point
1540  !!^D&xCo^D=psc(igrid)%x({ixCo^DD},^D)\
1541  ! normalized distance between fine/coarse cell center
1542  ! in coarse cell: ranges from -0.5 to 0.5 in each direction
1543  ! (origin is coarse cell center)
1544  ! this is essentially +1/4 or -1/4 on cartesian mesh
1545  eta^d=(xfi^d-xco^d)*invdxco^d;
1546  else
1547  !select case(icase)
1548  ! case(0)
1549  !{! here we assume an even number of ghostcells!!!
1550  !ixshift^D=2*(mod(ixFi^D,2)-1)+1
1551  !if(ixshift^D>0.0d0)then
1552  ! ! oneven fine grid points
1553  ! eta^D=-0.5d0*(one-block%dvolume(ixFi^DD) &
1554  ! /sum(block%dvolume(ixFi^D:ixFi^D+1^D%ixFi^DD)))
1555  !else
1556  ! ! even fine grid points
1557  ! eta^D=+0.5d0*(one-block%dvolume(ixFi^DD) &
1558  ! /sum(block%dvolume(ixFi^D-1:ixFi^D^D%ixFi^DD)))
1559  !endif\}
1560  ! case(1)
1561  !{! here we assume an odd number of ghostcells!!!
1562  !ixshift^D=2*(mod(ixFi^D,2)-1)+1
1563  !if(ixshift^D>0.0d0)then
1564  ! ! oneven fine grid points
1565  ! eta^D=+0.5d0*(one-block%dvolume(ixFi^DD) &
1566  ! /sum(block%dvolume(ixFi^D-1:ixFi^D^D%ixFi^DD)))
1567  !else
1568  ! ! even fine grid points
1569  ! eta^D=-0.5d0*(one-block%dvolume(ixFi^DD) &
1570  ! /sum(block%dvolume(ixFi^D:ixFi^D+1^D%ixFi^DD)))
1571  !endif\}
1572  ! case default
1573  ! call mpistop("no such case")
1574  !end select
1575  ! the different cases for even/uneven number of ghost cells
1576  ! are automatically handled using the relative index to ixMlo
1577  ! as well as the pseudo-coordinates xFi and xCo
1578  ! these latter differ from actual cell centers when stretching is used
1579  ix^d=2*int((ixfi^d+ixmlo^d)/2)-ixmlo^d;
! sanity check: the pseudo-distance must be exactly +-1/2 cell, i.e.
! signedfactorhalf^2 == 1/4; otherwise the index mapping above is wrong
1580  {signedfactorhalf^d=(xfi^d-xco^d)*invdxco^d*two
1581  if(dabs(signedfactorhalf^d**2-1.0d0/4.0d0)>smalldouble) call mpistop("error in bc_prolong")
! volume-weighted eta for stretched grids
1582  eta^d=signedfactorhalf^d*(one-psb(igrid)%dvolume(ixfi^dd) &
1583  /sum(psb(igrid)%dvolume(ix^d:ix^d+1^d%ixFi^dd))) \}
1584  !{eta^D=(xFi^D-xCo^D)*invdxCo^D &
1585  ! *two*(one-block%dvolume(ixFi^DD) &
1586  ! /sum(block%dvolume(ix^D:ix^D+1^D%ixFi^DD))) \}
1587  end if
1588 
! per-direction limited slope of each variable at the coarse cell
1589  do idims=1,ndim
1590  hxco^d=ixco^d-kr(^d,idims)\
1591  jxco^d=ixco^d+kr(^d,idims)\
1592 
1593  do iw=nwmin,nwmax
1594  slopel=psc(igrid)%w(ixco^d,iw)-psc(igrid)%w(hxco^d,iw)
1595  sloper=psc(igrid)%w(jxco^d,iw)-psc(igrid)%w(ixco^d,iw)
1596  slopec=half*(sloper+slopel)
1597 
1598  ! get limited slope
1599  signr=sign(one,sloper)
1600  signc=sign(one,slopec)
1601  select case(typeprolonglimit)
1602  case('unlimit')
1603  slope(iw,idims)=slopec
1604  case('minmod')
1605  slope(iw,idims)=signr*max(zero,min(dabs(sloper), &
1606  signr*slopel))
1607  case('woodward')
1608  slope(iw,idims)=two*signr*max(zero,min(dabs(sloper), &
1609  signr*slopel,signr*half*slopec))
1610  case('koren')
1611  slope(iw,idims)=signr*max(zero,min(two*signr*slopel, &
1612  (dabs(sloper)+two*slopel*signr)*third,two*dabs(sloper)))
1613  case default
1614  slope(iw,idims)=signc*max(zero,min(dabs(slopec), &
1615  signc*slopel,signc*sloper))
1616  end select
1617  end do
1618  end do
1619 
1620  ! Interpolate from coarse cell using limited slopes
1621  psb(igrid)%w(ixfi^d,nwmin:nwmax)=psc(igrid)%w(ixco^d,nwmin:nwmax)+&
1622  {(slope(nwmin:nwmax,^d)*eta^d)+}
1623 
1624  {end do\}
1625 
! if we interpolated primitives, convert the filled fine region back
1626  if(prolongprimitive) call phys_to_conserved(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)
1627 
1628  end subroutine interpolation_linear
1629 
1630  subroutine interpolation_copy(ixFi^L,dxFi^D,xFimin^D, &
1631  dxCo^D,invdxCo^D,xComin^D)
! Fill fine ghost cells ixFi^L by zeroth-order injection: each fine cell
! simply takes the value of the coarse cell containing it.  Used when
! typeghostfill == "copy".  Primitive-variable handling mirrors
! interpolation_linear.
! NOTE(review): source line 1632 is missing from this extraction (likely a
! use statement or blank line).
1633  integer, intent(in) :: ixFi^L
1634  double precision, intent(in) :: dxFi^D, xFimin^D,dxCo^D, invdxCo^D, xComin^D
1635 
1636  integer :: ixCo^D, ixFi^D, nwmin,nwmax
1637  double precision :: xFi^D
1638 
1639  if(prolongprimitive) then
1640  nwmin=1
1641  nwmax=nw
1642  else
1643  nwmin=nwhead
1644  nwmax=nwtail
1645  end if
1646 
1647  {do ixfi^db = ixfi^lim^db
1648  ! cell-centered coordinates of fine grid point
1649  xfi^db=xfimin^db+(dble(ixfi^db)-half)*dxfi^db
1650 
1651  ! indices of coarse cell which contains the fine cell
1652  ! note: this also works for stretched grids
1653  ixco^db=int((xfi^db-xcomin^db)*invdxco^db)+1\}
1654 
1655  ! Copy from coarse cell
1656  psb(igrid)%w(ixfi^d,nwmin:nwmax)=psc(igrid)%w(ixco^d,nwmin:nwmax)
1657 
1658  {end do\}
1659 
! if the coarse patch was converted to primitives, convert the result back
1660  if(prolongprimitive) call phys_to_conserved(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)
1661 
1662  end subroutine interpolation_copy
1663 
1664  subroutine pole_copy(wrecv,ixIR^L,ixR^L,wsend,ixIS^L,ixS^L)
! Copy cell-centered ghost data across a pole: in the pole dimension ipole
! the send range is reversed, and each variable is copied with + or - sign
! according to its "symm"/"asymm" boundary type at the pole.  Uses host
! variables ipole and i^D to pick the dimension and side.
! NOTE(review): source line 1665 is missing from this extraction (likely a
! use statement or blank line).
1666  integer, intent(in) :: ixIR^L,ixR^L,ixIS^L,ixS^L
1667  double precision :: wrecv(ixir^s,1:nw), wsend(ixis^s,1:nw)
1668 
1669  integer :: iw, iB
1670 
1671  select case (ipole)
1672  {case (^d)
! iB: boundary index (2*dim-1 lower / 2*dim upper) of the pole side
1673  iside=int((i^d+3)/2)
1674  ib=2*(^d-1)+iside
1675  do iw=nwhead,nwtail
1676  select case (typeboundary(iw,ib))
1677  case ("symm")
1678  wrecv(ixr^s,iw) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1679  case ("asymm")
1680  wrecv(ixr^s,iw) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1681  case default
1682  call mpistop("Pole boundary condition should be symm or asymm")
1683  end select
1684  end do \}
1685  end select
1686 
1687  end subroutine pole_copy
1688 
1689  subroutine pole_copy_stg(wrecv,ixR^L,wsend,ixS^L,idirs)
! Staggered-field analogue of pole_copy: copy one face-centered component
! idirs across the pole with reversed index order and a sign given by the
! "symm"/"asymm" boundary type of the corresponding magnetic-field variable.
! NOTE(review): source line 1690 is missing from this extraction (likely a
! use statement or blank line).
1691  integer, intent(in) :: ixR^L,ixS^L,idirs
1692  double precision :: wrecv(ixgs^t,1:nws), wsend(ixgs^t,1:nws)
1693 
1694  select case (ipole)
1695  {case (^d)
1696  iside=int((i^d+3)/2)
1697  ib=2*(^d-1)+iside
1698  select case (typeboundary(iw_mag(idirs),ib))
1699  case ("symm")
1700  wrecv(ixr^s,idirs) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,idirs)
1701  case ("asymm")
1702  wrecv(ixr^s,idirs) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,idirs)
1703  case default
1704  call mpistop("Pole boundary condition should be symm or asymm")
1705  end select
1706  \}
1707  end select
1708 
1709  end subroutine pole_copy_stg
1710 
1711  subroutine pole_buffer(wrecv,ixIR^L,ixR^L,wsend,ixIS^L,ixS^L)
! Pack pole-folded data into a send buffer (wrecv spans only nwhead:nwtail),
! applying the same reversed-index, symm/asymm-signed copy as pole_copy.
! Used to stage pwbuf contents before an mpi_isend to a pole neighbor.
! NOTE(review): source line 1712 is missing from this extraction (likely a
! use statement or blank line).
1713  integer, intent(in) :: ixIR^L,ixR^L,ixIS^L,ixS^L
1714  double precision :: wrecv(ixir^s,nwhead:nwtail), wsend(ixis^s,1:nw)
1715 
1716  integer :: iw, iB
1717 
1718  select case (ipole)
1719  {case (^d)
1720  iside=int((i^d+3)/2)
1721  ib=2*(^d-1)+iside
1722  do iw=nwhead,nwtail
1723  select case (typeboundary(iw,ib))
1724  case ("symm")
1725  wrecv(ixr^s,iw) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1726  case ("asymm")
1727  wrecv(ixr^s,iw) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
1728  case default
1729  call mpistop("Pole boundary condition should be symm or asymm")
1730  end select
1731  end do \}
1732  end select
1733 
1734  end subroutine pole_buffer
1735 
1736  subroutine fix_auxiliary
! Recompute auxiliary physics variables (via phys_get_aux) in the freshly
! filled sibling ghost-cell regions of every local grid, looping over all
! neighbor directions except those skipped by skip_direction.
! NOTE(review): source line 1737 is missing from this extraction (likely a
! use statement or blank line).
1738 
1739  integer :: ix^L
1740 
1741  do iigrid=1,igridstail; igrid=igrids(iigrid);
1742  saveigrid=igrid
1743  block=>psb(igrid)
! iib^D classifies the block's contact with physical boundaries per dimension
1744  call identifyphysbound(psb(igrid),iib^d)
1745 
1746  {do i^db=-1,1\}
1747  if (skip_direction([ i^d ])) cycle
1748 
1749  ix^l=ixr_srl_^l(iib^d,i^d);
1750  call phys_get_aux(.true.,psb(igrid)%w,ps(igrid)%x,ixg^l,ix^l,"bc")
1751  {end do\}
1752  end do
1753 
1754  end subroutine fix_auxiliary
1755 
1756  end subroutine getbc
1757 
1758  subroutine identifyphysbound(s,iib^D)
! Classify, per dimension, how block s touches the physical domain boundary:
!   iib^D =  2  touches both the lower and the upper boundary
!   iib^D = -1  touches only the lower boundary
!   iib^D =  1  touches only the upper boundary
!   iib^D =  0  interior (no physical boundary in that dimension)
! These codes index the ixS_*/ixR_* ghost-cell range tables (first dim -1:2).
! NOTE(review): source line 1759 is missing from this extraction (likely a
! use statement or blank line).
1760 
1761  type(state) :: s
1762  integer, intent(out) :: iib^D
1763 
1764  {
1765  if(s%is_physical_boundary(2*^d) .and. &
1766  s%is_physical_boundary(2*^d-1)) then
1767  iib^d=2
1768  else if(s%is_physical_boundary(2*^d-1)) then
1769  iib^d=-1
1770  else if(s%is_physical_boundary(2*^d)) then
1771  iib^d=1
1772  else
1773  iib^d=0
1774  end if
1775  \}
1776 
1777  end subroutine identifyphysbound
1778 
1779 end module mod_ghostcells_update
integer, dimension(-1:1^d &,-1:1^d &), target type_send_r_p2
integer, dimension(-1:1^d &, 0:3^d &), target type_recv_p_f
integer, dimension(-1:1, 0:3) ixs_p_
integer, dimension(-1:1, 0:3) ixr_r_
This module contains definitions of global parameters and variables and some generic functions/subroutines.
integer, dimension(-1:1^d &, 0:3^d &), target type_recv_r_p1
subroutine pole_copy_stg(wrecv, ixRL, wsend, ixSL, idirs)
integer, dimension(-1:2^d &,-1:1^d &), target type_recv_srl_f
update ghost cells of all blocks including physical boundaries
integer, dimension(-1:2,-1:1) ixs_srl_
integer, dimension(:^d &,:^d &), pointer type_recv_srl
subroutine bc_recv_restrict
Receive from fine neighbor.
subroutine create_bc_mpi_datatype(nwstart, nwbc)
integer, dimension(-1:1, 0:3) ixr_p_
integer, dimension(:^d &,:^d &), pointer type_send_r
integer max_blocks
The maximum number of grid blocks in a processor.
subroutine coarsen_grid(sFi, ixFiGL, ixFiL, sCo, ixCoGL, ixCoL)
coarsen one grid to its coarser representative
Definition: coarsen.t:82
integer, dimension(:,:), allocatable recvstatus_srl
integer, dimension(-1:1^d &, 0:3^d &), target type_recv_p_p2
integer, dimension(-1:2^d &,-1:1^d &), target type_recv_srl_p2
integer, dimension(:), allocatable sendrequest_r
subroutine bc_fill_p
fill coarser representative with data from coarser neighbors
subroutine bc_prolong
do prolongation for fine blocks after receipt data from coarse neighbors
integer, dimension(-1:1^d &) sizes_srl_recv_total
integer, dimension(0:3^d &) sizes_r_recv_total
integer, dimension(:,:), allocatable recvstatus_r
integer, dimension(^nd, 0:3^d &) sizes_r_recv_stg
integer, dimension(-1:2^d &,-1:1^d &), target type_send_srl_f
integer itag
MPI recv send variables for AMR.
subroutine bc_recv_srl
Receive from sibling at same refinement level.
integer, dimension(:), allocatable sendrequest_p
procedure(sub_boundary_adjust), pointer phys_boundary_adjust
Definition: mod_physics.t:60
logical function skip_direction(dir)
logical phys_req_diagonal
Whether the physics routines require diagonal ghost cells, for example for computing a curl...
Definition: mod_physics.t:22
integer, dimension(:^d &,:^d &), pointer type_recv_r
integer, dimension(-1:1,-1:1) ixs_r_
integer, dimension(:^d &,:^d &), pointer type_recv_p
integer, dimension(:,:), allocatable sendstatus_r
integer, dimension(-1:1^d &, 0:3^d &), target type_recv_r_f
integer, parameter ixglo
Lower index of grid block arrays (always 1)
subroutine interpolation_copy(ixFiL, dxFiD, xFiminD, dxCoD, invdxCoD, xCominD)
subroutine bc_recv_prolong
Receive from coarse neighbor.
integer, dimension(-1:1^d &, 0:3^d &), target type_send_p_p2
integer, dimension(^nd,-1:1^d &) sizes_srl_send_stg
integer, dimension(:,:), allocatable sendstatus
logical, dimension(ndim) aperiodb
True for dimensions with aperiodic boundaries.
integer, dimension(-1:2^d &,-1:1^d &), target type_send_srl_p1
double precision, dimension(:), allocatable sendbuffer_srl
integer, dimension(-1:1^d &, 0:3^d &), target type_recv_p_p1
integer nghostcells
Number of ghost cells surrounding a grid.
subroutine prolong_2nd_stg(sCo, sFi, ixCoLin, ixFiLin, dxCoD, xCominD, dxFiD, xFiminD, ghost, fine_Lin)
This subroutine performs a 2nd order prolongation for a staggered field F, preserving the divergence ...
Definition: mod_amr_fct.t:30
integer, dimension(^nd,-1:1) ixs_srl_stg_
integer ixghi
Upper index of grid block arrays.
character(len=std_len) typeghostfill
double precision, dimension(:), allocatable sendbuffer_r
integer, dimension(:), allocatable recvrequest_r
logical stagger_grid
True for using stagger grid.
integer, dimension(:), allocatable recvrequest
procedure(sub_convert), pointer phys_to_conserved
Definition: mod_physics.t:46
integer ierrmpi
A global MPI error return code.
integer, dimension(^nd,-1:1^d &) sizes_srl_recv_stg
integer, dimension(-1:1^d &, 0:3^d &), target type_recv_r_p2
integer, dimension(0:3^d &) sizes_p_recv_total
integer, dimension(:), allocatable recvrequest_p
integer, dimension(^nd,-1:1) ixr_srl_stg_
subroutine getbc(time, qdt, psb, nwstart, nwbc, req_diag)
update ghost cells of all blocks including physical boundaries
integer, parameter unitterm
Unit for standard output.
integer, dimension(3, 3) kr
Kronecker delta tensor.
subroutine pole_copy(wrecv, ixIRL, ixRL, wsend, ixISL, ixSL)
integer, dimension(^nd,-1:1^d &) sizes_r_send_stg
integer, dimension(:), allocatable, parameter d
integer mype
The rank of the current MPI task.
subroutine fix_auxiliary
subroutine identifyphysbound(s, iibD)
integer, dimension(^nd, 0:3) ixr_r_stg_
integer, dimension(^nd, 0:3^d &) sizes_p_recv_stg
subroutine interpolation_linear(ixFiL, dxFiD, xFiminD, dxCoD, invdxCoD, xCominD)
integer, parameter npwbuf
The number of interleaving sending buffers for ghost cells.
integer, dimension(:,:), allocatable sendstatus_srl
integer, dimension(-1:1^d &) sizes_r_send_total
integer, dimension(0:3^d &) sizes_p_send_total
subroutine bc_phys(iside, idims, time, qdt, s, ixGL, ixBL)
fill ghost cells at a physical boundary
integer, dimension(^nd, 0:3) ixs_p_stg_
subroutine mpistop(message)
Exit MPI-AMRVAC with an error message.
Definition: comm_lib.t:198
integer, dimension(^nd,-1:1) ixs_r_stg_
integer, dimension(-1:1^d &, 0:3^d &), target type_send_p_f
subroutine bc_fill_r
fill restricted ghost cells after receipt
subroutine bc_send_prolong
Send to finer neighbor.
integer, dimension(-1:1^d &) sizes_srl_send_total
double precision, dimension(ndim) dxlevel
integer, dimension(-1:2,-1:1) ixr_srl_
integer, dimension(:,:), allocatable recvstatus
subroutine bc_send_srl
Send to sibling at same refinement level.
integer, parameter ndim
Number of spatial dimensions for grid variables.
double precision, dimension(:), allocatable recvbuffer_p
subroutine pole_buffer(wrecv, ixIRL, ixRL, wsend, ixISL, ixSL)
integer, dimension(^nd, 0:3) ixr_p_stg_
integer, dimension(:), allocatable sendrequest_srl
integer, dimension(:^d &,:^d &), pointer type_send_p
integer icomm
The MPI communicator.
integer, dimension(^nd, 0:3) l
integer, dimension(:,:), allocatable sendstatus_p
double precision, dimension(:), allocatable recvbuffer_r
subroutine get_bc_comm_type(comm_type, ixL, ixGL, nwstart, nwbc)
integer, dimension(:), allocatable recvrequest_srl
This module defines the procedures of a physics module. It contains function pointers for the various...
Definition: mod_physics.t:4
procedure(sub_convert), pointer phys_to_primitive
Definition: mod_physics.t:47
integer, dimension(:,:), allocatable node
procedure(sub_get_aux), pointer phys_get_aux
Definition: mod_physics.t:57
integer, dimension(-1:1^d &,-1:1^d &), target type_send_r_f
integer, dimension(:^d &,:^d &), pointer type_send_srl
integer, dimension(-1:1^d &, 0:3^d &), target type_send_p_p1
subroutine bc_prolong_stg(NeedProlong)
integer, dimension(:), allocatable sendrequest
double precision, dimension(:), allocatable recvbuffer_srl
subroutine bc_send_restrict
Send to coarser neighbor.
double precision time_bc
accumulated wall-clock time spent on boundary conditions
integer, dimension(-1:2^d &,-1:1^d &), target type_send_srl_p2
logical, dimension(:), allocatable phyboundblock
True if a block has any physical boundary.
subroutine indices_for_syncing(idir, iD, ixRL, ixSL, ixRsyncL, ixSsyncL)
double precision, dimension(:), allocatable sendbuffer_p
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
subroutine getintbc(time, ixGL)
fill inner boundary values
integer, dimension(^nd, 0:3^d &) sizes_p_send_stg
integer, dimension(-1:1^d &,-1:1^d &), target type_send_r_p1
integer, dimension(-1:2^d &,-1:1^d &), target type_recv_srl_p1
subroutine bc_fill_srl
fill siblings' ghost cells with received data
integer, dimension(:,:), allocatable recvstatus_p