MPI-AMRVAC 2.0
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_ghostcells_update.t
!> update ghost cells of all blocks including physical boundaries
module mod_ghostcells_update

  implicit none
  ! Special buffer for pole copy
  type wbuffer
    double precision, dimension(:^D&,:), allocatable :: w
  end type wbuffer

  ! Switch to update physical boundaries or not
  logical, public :: bcphys=.true.
  integer :: ixm^l, ixcog^l, ixcom^l

  !> The number of interleaving sending buffers for ghost cells
  integer, parameter :: npwbuf=2
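  ! (Note: with two buffers, getbc can fill a fresh pole-copy send buffer
  ! while the previous non-blocking send may still be in flight; see the
  ! isend_buf/ipwbuf rotation and the mpi_wait before buffer reuse below.)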

  ! index ranges to send (S) to sibling blocks, receive (R) from
  ! sibling blocks, send restricted (r) ghost cells to coarser blocks
  integer, dimension(-1:2,-1:1) :: ixs_srl_^l, ixr_srl_^l, ixs_r_^l

  ! index ranges to receive restricted ghost cells from finer blocks,
  ! send prolongated (p) ghost cells to finer blocks, receive prolongated
  ! ghost cells from coarser blocks
  integer, dimension(-1:1, 0:3) :: ixr_r_^l, ixs_p_^l, ixr_p_^l

  ! MPI derived datatypes to send and receive subarrays of ghost cells to
  ! neighbor blocks on a different processor.
  !
  ! The first index goes from -1:2, where -1 is used when a block touches the
  ! lower boundary, 1 when a block touches the upper boundary, 0 when it is
  ! away from physical boundaries, and 2 when it touches both the lower and
  ! the upper boundary.
  !
  ! There are two variants: _f indicates that all flux variables are filled,
  ! whereas _p means that only part of the variables is filled.
  ! Furthermore _r_ stands for restrict, _p_ for prolongation.
  integer, dimension(-1:2^D&,-1:1^D&), target :: type_send_srl_f, type_recv_srl_f
  integer, dimension(-1:1^D&,-1:1^D&), target :: type_send_r_f
  integer, dimension(-1:1^D&, 0:3^D&), target :: type_recv_r_f, type_send_p_f, type_recv_p_f
  integer, dimension(-1:2^D&,-1:1^D&), target :: type_send_srl_p1, type_recv_srl_p1
  integer, dimension(-1:1^D&,-1:1^D&), target :: type_send_r_p1
  integer, dimension(-1:1^D&, 0:3^D&), target :: type_recv_r_p1, type_send_p_p1, type_recv_p_p1
  integer, dimension(-1:2^D&,-1:1^D&), target :: type_send_srl_p2, type_recv_srl_p2
  integer, dimension(-1:1^D&,-1:1^D&), target :: type_send_r_p2
  integer, dimension(-1:1^D&, 0:3^D&), target :: type_recv_r_p2, type_send_p_p2, type_recv_p_p2
  integer, dimension(:^D&,:^D&), pointer :: type_send_srl, type_recv_srl, type_send_r
  integer, dimension(:^D&,:^D&), pointer :: type_recv_r, type_send_p, type_recv_p
contains

  subroutine init_bc()
    use mod_global_parameters
    use mod_physics, only: phys_req_diagonal

    integer :: nghostcellsCo, interpolation_order
    integer :: nx^D, nxCo^D, ixG^L, i^D, ic^D, inc^D, iib^D

    ixg^l=ixg^ll;
    ixm^l=ixg^l^lsubnghostcells;
    ixcogmin^d=1;
    !ixCoGmax^D=ixGmax^D/2+nghostcells;
    ixcogmax^d=(ixghi^d-2*nghostcells)/2+2*nghostcells;
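    ! e.g. nghostcells=2 and a 16-cell block (ixghi=20): the coarsened block
    ! keeps 8 core cells plus full ghost layers on both sides, (20-4)/2+4=12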

    ixcom^l=ixcog^l^lsubnghostcells;

    nx^d=ixmmax^d-ixmmin^d+1;
    nxco^d=nx^d/2;

    select case (typeghostfill)
    case ("copy")
      interpolation_order=1
    case ("linear")
      interpolation_order=2
    case default
      write (unitterm,*) "Undefined typeghostfill ",typeghostfill
      call mpistop("Undefined typeghostfill")
    end select
    nghostcellsco=int((nghostcells+1)/2)
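    ! the coarse representation needs only half as many ghost cells, rounded
    ! up: nghostcells=2 gives nghostcellsco=1, nghostcells=3 gives 2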

    if (nghostcellsco+interpolation_order-1>nghostcells) then
      call mpistop("interpolation order for prolongation in getbc too high")
    end if

    ! The (iib,i) index pair has the following meaning:
    ! iib= 0 means the block is not at any physical boundary,
    ! iib=-1 means it is at the minimum side of a physical boundary,
    ! iib= 1 means it is at the maximum side of a physical boundary;
    ! i=-1 means the subregion is prepared for the neighbor at its minimum side,
    ! i= 1 means the subregion is prepared for the neighbor at its maximum side
    {
    ixs_srl_min^d(:,-1)=ixmmin^d
    ixs_srl_min^d(:, 1)=ixmmax^d+1-nghostcells
    ixs_srl_max^d(:,-1)=ixmmin^d-1+nghostcells
    ixs_srl_max^d(:, 1)=ixmmax^d

    ixs_srl_min^d(-1,0)=1
    ixs_srl_min^d( 0,0)=ixmmin^d
    ixs_srl_min^d( 1,0)=ixmmin^d
    ixs_srl_min^d( 2,0)=1
    ixs_srl_max^d(-1,0)=ixmmax^d
    ixs_srl_max^d( 0,0)=ixmmax^d
    ixs_srl_max^d( 1,0)=ixgmax^d
    ixs_srl_max^d( 2,0)=ixgmax^d

    ixr_srl_min^d(:,-1)=1
    ixr_srl_min^d(:, 1)=ixmmax^d+1
    ixr_srl_max^d(:,-1)=nghostcells
    ixr_srl_max^d(:, 1)=ixgmax^d

    ixr_srl_min^d(-1,0)=1
    ixr_srl_min^d( 0,0)=ixmmin^d
    ixr_srl_min^d( 1,0)=ixmmin^d
    ixr_srl_min^d( 2,0)=1
    ixr_srl_max^d(-1,0)=ixmmax^d
    ixr_srl_max^d( 0,0)=ixmmax^d
    ixr_srl_max^d( 1,0)=ixgmax^d
    ixr_srl_max^d( 2,0)=ixgmax^d
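    ! Example (1D, nghostcells=2, 8-cell block: ixG=1..12, ixM=3..10):
    ! the interior strip ixs_srl(:,1)=9:10 sent towards the i=+1 neighbor
    ! fills that neighbor's ghost strip ixr_srl(:,-1)=1:2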

    ixs_r_min^d(:,-1)=ixcommin^d
    ixs_r_min^d(:, 1)=ixcommax^d+1-nghostcells
    ixs_r_max^d(:,-1)=ixcommin^d-1+nghostcells
    ixs_r_max^d(:, 1)=ixcommax^d

    ixs_r_min^d(-1,0)=1
    ixs_r_min^d( 0,0)=ixcommin^d
    ixs_r_min^d( 1,0)=ixcommin^d
    ixs_r_max^d(-1,0)=ixcommax^d
    ixs_r_max^d( 0,0)=ixcommax^d
    ixs_r_max^d( 1,0)=ixcogmax^d

    ixr_r_min^d(:, 0)=1
    ixr_r_min^d(:, 1)=ixmmin^d
    ixr_r_min^d(:, 2)=ixmmin^d+nxco^d
    ixr_r_min^d(:, 3)=ixmmax^d+1
    ixr_r_max^d(:, 0)=nghostcells
    ixr_r_max^d(:, 1)=ixmmin^d-1+nxco^d
    ixr_r_max^d(:, 2)=ixmmax^d
    ixr_r_max^d(:, 3)=ixgmax^d

    ! extend index range to physical boundary
    ixr_r_min^d(-1,1)=1
    ixr_r_max^d(-1,1)=ixmmin^d-1+nxco^d
    ixr_r_min^d( 1,2)=ixmmin^d+nxco^d
    ixr_r_max^d( 1,2)=ixgmax^d

    ixs_p_min^d(:, 0)=ixmmin^d-(interpolation_order-1)
    ixs_p_min^d(:, 1)=ixmmin^d-(interpolation_order-1)
    ixs_p_min^d(:, 2)=ixmmin^d+nxco^d-nghostcellsco-(interpolation_order-1)
    ixs_p_min^d(:, 3)=ixmmax^d+1-nghostcellsco-(interpolation_order-1)
    ixs_p_max^d(:, 0)=ixmmin^d-1+nghostcellsco+(interpolation_order-1)
    ixs_p_max^d(:, 1)=ixmmin^d-1+nxco^d+nghostcellsco+(interpolation_order-1)
    ixs_p_max^d(:, 2)=ixmmax^d+(interpolation_order-1)
    ixs_p_max^d(:, 3)=ixmmax^d+(interpolation_order-1)

    if(.not.phys_req_diagonal) then
      ! exclude ghost-cell region when diagonal cells are unknown
      ixs_p_min^d(:, 0)=ixmmin^d
      ixs_p_max^d(:, 3)=ixmmax^d
      ixs_p_max^d(:, 1)=ixmmin^d-1+nxco^d+(interpolation_order-1)
      ixs_p_min^d(:, 2)=ixmmin^d+nxco^d-(interpolation_order-1)
    end if

    ! extend index range to physical boundary
    ixs_p_min^d(-1,1)=1
    ixs_p_max^d( 1,2)=ixgmax^d

    ixr_p_min^d(:, 0)=ixcommin^d-nghostcellsco-(interpolation_order-1)
    ixr_p_min^d(:, 1)=ixcommin^d-(interpolation_order-1)
    ixr_p_min^d(:, 2)=ixcommin^d-nghostcellsco-(interpolation_order-1)
    ixr_p_min^d(:, 3)=ixcommax^d+1-(interpolation_order-1)
    ixr_p_max^d(:, 0)=nghostcells+(interpolation_order-1)
    ixr_p_max^d(:, 1)=ixcommax^d+nghostcellsco+(interpolation_order-1)
    ixr_p_max^d(:, 2)=ixcommax^d+(interpolation_order-1)
    ixr_p_max^d(:, 3)=ixcommax^d+nghostcellsco+(interpolation_order-1)

    if(.not.phys_req_diagonal) then
      ixr_p_max^d(:, 0)=nghostcells
      ixr_p_min^d(:, 3)=ixcommax^d+1
      ixr_p_max^d(:, 1)=ixcommax^d+(interpolation_order-1)
      ixr_p_min^d(:, 2)=ixcommin^d-(interpolation_order-1)
    end if

    ! extend index range to physical boundary
    ixr_p_min^d(-1,1)=1
    ixr_p_max^d( 1,2)=ixcogmax^d
    \}

  end subroutine init_bc

  subroutine create_bc_mpi_datatype(nwstart,nwbc)
    use mod_global_parameters

    integer, intent(in) :: nwstart, nwbc
    integer :: i^D, ic^D, inc^D, iib^D

    {do i^db=-1,1\}
      if (i^d==0|.and.) cycle
      {do iib^db=-1,2\}
        call get_bc_comm_type(type_send_srl(iib^d,i^d),ixs_srl_^l(iib^d,i^d),ixg^ll,nwstart,nwbc)
        call get_bc_comm_type(type_recv_srl(iib^d,i^d),ixr_srl_^l(iib^d,i^d),ixg^ll,nwstart,nwbc)
        if (iib^d==2|.or.) cycle
        call get_bc_comm_type(type_send_r(iib^d,i^d), ixs_r_^l(iib^d,i^d),ixcog^l,nwstart,nwbc)
        {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
          inc^db=2*i^db+ic^db\}
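          ! child loop: for i=-1 only ic=2 (the child adjacent to that face),
          ! for i=+1 only ic=1, for i=0 both children; inc=2*i+ic then runs
          ! over 0:3 and indexes the fine-neighbor configuration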
          call get_bc_comm_type(type_recv_r(iib^d,inc^d),ixr_r_^l(iib^d,inc^d), ixg^ll,nwstart,nwbc)
          call get_bc_comm_type(type_send_p(iib^d,inc^d),ixs_p_^l(iib^d,inc^d), ixg^ll,nwstart,nwbc)
          call get_bc_comm_type(type_recv_p(iib^d,inc^d),ixr_p_^l(iib^d,inc^d),ixcog^l,nwstart,nwbc)
        {end do\}
      {end do\}
    {end do\}

  end subroutine create_bc_mpi_datatype

  subroutine get_bc_comm_type(comm_type,ix^L,ixG^L,nwstart,nwbc)
    use mod_global_parameters

    integer, intent(inout) :: comm_type
    integer, intent(in) :: ix^L, ixG^L, nwstart, nwbc

    integer, dimension(ndim+1) :: fullsize, subsize, start

    ^d&fullsize(^d)=ixgmax^d;
    fullsize(ndim+1)=nw
    ^d&subsize(^d)=ixmax^d-ixmin^d+1;
    subsize(ndim+1)=nwbc
    ^d&start(^d)=ixmin^d-1;
    start(ndim+1)=nwstart
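    ! note: mpi_type_create_subarray takes zero-based start indices, hence
    ! ixmin^d-1 and nwstart; the type thus covers variables nwstart+1:nwstart+nwbc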

    call mpi_type_create_subarray(ndim+1,fullsize,subsize,start,mpi_order_fortran, &
       mpi_double_precision,comm_type,ierrmpi)
    call mpi_type_commit(comm_type,ierrmpi)

  end subroutine get_bc_comm_type

  subroutine put_bc_comm_types()
    use mod_global_parameters

    integer :: i^D, ic^D, inc^D, iib^D

    {do i^db=-1,1\}
      if (i^d==0|.and.) cycle
      {do iib^db=-1,2\}
        call mpi_type_free(type_send_srl(iib^d,i^d),ierrmpi)
        call mpi_type_free(type_recv_srl(iib^d,i^d),ierrmpi)
        if (levmin==levmax) cycle
        if (iib^d==2|.or.) cycle
        call mpi_type_free(type_send_r(iib^d,i^d),ierrmpi)
        {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
          inc^db=2*i^db+ic^db\}
          call mpi_type_free(type_recv_r(iib^d,inc^d),ierrmpi)
          call mpi_type_free(type_send_p(iib^d,inc^d),ierrmpi)
          call mpi_type_free(type_recv_p(iib^d,inc^d),ierrmpi)
        {end do\}
      {end do\}
    {end do\}

  end subroutine put_bc_comm_types

  subroutine getbc(time,qdt,psb,nwstart,nwbc,req_diag)
    use mod_global_parameters
    use mod_physics

    double precision, intent(in) :: time, qdt
    type(state), target :: psb(max_blocks)
    integer, intent(in) :: nwstart ! Fill from nwstart+1
    integer, intent(in) :: nwbc    ! Number of variables to fill
    logical, intent(in), optional :: req_diag ! If false, skip diagonal ghost cells

    integer :: my_neighbor_type, ipole, idims, iside, nwhead, nwtail
    integer :: iigrid, igrid, ineighbor, ipe_neighbor
    integer :: nrecvs, nsends, isizes
    integer :: ixG^L, ixR^L, ixS^L, ixB^L, ixI^L, k^L
    integer :: i^D, n_i^D, ic^D, inc^D, n_inc^D, iib^D
    ! store physical boundary indicating index
    integer :: idphyb(ndim,max_blocks),bindex(ndim)
    integer :: isend_buf(npwbuf), ipwbuf, nghostcellsco,iB
    logical :: req_diagonal
    type(wbuffer) :: pwbuf(npwbuf)

    double precision :: time_bcin
    ! Stretching grid parameters for coarsened block of the current block

    nwhead=nwstart+1
    nwtail=nwstart+nwbc

    req_diagonal = .true.
    if (present(req_diag)) req_diagonal = req_diag
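
    ! getbc proceeds in stages: fill physical boundaries, exchange between
    ! siblings and from fine to coarse (restriction), then from coarse to
    ! fine, prolong the received coarse data, and finally adjust the normal
    ! magnetic field component and auxiliary variables where applicable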

    time_bcin=mpi_wtime()
    ixg^l=ixg^ll;

    if (internalboundary) then
      call getintbc(time,ixg^l)
    end if
    ! fill ghost cells in physical boundaries
    if(bcphys) then
      do iigrid=1,igridstail; igrid=igrids(iigrid);
        if(.not.phyboundblock(igrid)) cycle
        saveigrid=igrid
        block=>ps(igrid)
        ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
        do idims=1,ndim
          ! to avoid using as yet unknown corner info in more than 1D, we
          ! fill only interior mesh ranges of the ghost cell ranges at first,
          ! and progressively enlarge the ranges to include corners later
          {
          kmin^d=merge(0, 1, idims==^d)
          kmax^d=merge(0, 1, idims==^d)
          ixbmin^d=ixgmin^d+kmin^d*nghostcells
          ixbmax^d=ixgmax^d-kmax^d*nghostcells
          \}
          {^iftwod
          if(idims > 1 .and. neighbor_type(-1,0,igrid)==neighbor_boundary) ixbmin1=ixgmin1
          if(idims > 1 .and. neighbor_type( 1,0,igrid)==neighbor_boundary) ixbmax1=ixgmax1}
          {^ifthreed
          if(idims > 1 .and. neighbor_type(-1,0,0,igrid)==neighbor_boundary) ixbmin1=ixgmin1
          if(idims > 1 .and. neighbor_type( 1,0,0,igrid)==neighbor_boundary) ixbmax1=ixgmax1
          if(idims > 2 .and. neighbor_type(0,-1,0,igrid)==neighbor_boundary) ixbmin2=ixgmin2
          if(idims > 2 .and. neighbor_type(0, 1,0,igrid)==neighbor_boundary) ixbmax2=ixgmax2}
          do iside=1,2
            i^d=kr(^d,idims)*(2*iside-3);
            if (aperiodb(idims)) then
              if (neighbor_type(i^d,igrid) /= neighbor_boundary .and. &
                 .not. psb(igrid)%is_physical_boundary(2*idims-2+iside)) cycle
            else
              if (neighbor_type(i^d,igrid) /= neighbor_boundary) cycle
            end if
            call bc_phys(iside,idims,time,qdt,psb(igrid)%w,ps(igrid)%x,ixg^l,ixb^l)
          end do
        end do
      end do
    end if

    ! default : no singular axis
    ipole=0

    irecv=0
    isend=0
    isend_buf=0
    ipwbuf=1
    ! total number of times to call MPI_IRECV in each processor, for receives
    ! from sibling blocks or finer neighbors
    nrecvs=nrecv_bc_srl+nrecv_bc_r
    ! total number of times to call MPI_ISEND in each processor, for sends
    ! to sibling blocks or coarser neighbors
    nsends=nsend_bc_srl+nsend_bc_r

    allocate(recvstatus(mpi_status_size,nrecvs),recvrequest(nrecvs))
    recvrequest=mpi_request_null

    allocate(sendstatus(mpi_status_size,nsends),sendrequest(nsends))
    sendrequest=mpi_request_null

    ! receiving ghost-cell values from sibling blocks and finer neighbors
    do iigrid=1,igridstail; igrid=igrids(iigrid);
      saveigrid=igrid
      call identifyphysbound(ps(igrid),iib^d)
      ^d&idphyb(^d,igrid)=iib^d;
      {do i^db=-1,1\}
        if (skip_direction([ i^d ])) cycle
        my_neighbor_type=neighbor_type(i^d,igrid)
        select case (my_neighbor_type)
        case (neighbor_sibling)
          call bc_recv_srl
        case (neighbor_fine)
          call bc_recv_restrict
        end select
      {end do\}
    end do

    ! sending ghost-cell values to sibling blocks and coarser neighbors
    nghostcellsco=ceiling(nghostcells*0.5d0)
    do iigrid=1,igridstail; igrid=igrids(iigrid);
      saveigrid=igrid
      block=>ps(igrid)

      ! Use stored data to identify physical boundaries
      ^d&iib^d=idphyb(^d,igrid);

      if (any(neighbor_type(:^d&,igrid)==neighbor_coarse)) then
        ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
        {#IFDEF EVOLVINGBOUNDARY
        if(phyboundblock(igrid)) then
          ! coarsen finer ghost cells at physical boundaries
          ixcommin^d=ixcogmin^d+nghostcellsco;
          ixcommax^d=ixcogmax^d-nghostcellsco;
          ixmmin^d=ixgmin^d+(nghostcellsco-1);
          ixmmax^d=ixgmax^d-(nghostcellsco-1);
        else
          ixcom^l=ixcog^l^lsubnghostcells;
          ixm^l=ixg^l^lsubnghostcells;
        end if
        }
        call coarsen_grid(psb(igrid)%w,ps(igrid)%x,ixg^l,ixm^l,psc(igrid)%w,psc(igrid)%x,&
           ixcog^l,ixcom^l,igrid,igrid)
      end if

      {do i^db=-1,1\}
        if (skip_direction([ i^d ])) cycle
        if (phi_ > 0) ipole=neighbor_pole(i^d,igrid)
        my_neighbor_type=neighbor_type(i^d,igrid)
        select case (my_neighbor_type)
        case (neighbor_coarse)
          call bc_send_restrict
        case (neighbor_sibling)
          call bc_send_srl
        end select
      {end do\}
    end do

    !if (irecv/=nrecvs) then
    !   call mpistop("number of recvs in phase1 in amr_ghostcells is incorrect")
    !end if
    !if (isend/=nsends) then
    !   call mpistop("number of sends in phase1 in amr_ghostcells is incorrect")
    !end if

    call mpi_waitall(irecv,recvrequest,recvstatus,ierrmpi)
    deallocate(recvstatus,recvrequest)

    call mpi_waitall(isend,sendrequest,sendstatus,ierrmpi)
    do ipwbuf=1,npwbuf
      if (isend_buf(ipwbuf)/=0) deallocate(pwbuf(ipwbuf)%w)
    end do
    deallocate(sendstatus,sendrequest)

    irecv=0
    isend=0
    isend_buf=0
    ipwbuf=1
    ! total number of times to call MPI_IRECV in each processor, from coarser neighbors
    nrecvs=nrecv_bc_p
    ! total number of times to call MPI_ISEND in each processor, to finer neighbors
    nsends=nsend_bc_p

    allocate(recvstatus(mpi_status_size,nrecvs),recvrequest(nrecvs))
    recvrequest=mpi_request_null
    allocate(sendstatus(mpi_status_size,nsends),sendrequest(nsends))
    sendrequest=mpi_request_null

    ! receiving ghost-cell values from coarser neighbors
    do iigrid=1,igridstail; igrid=igrids(iigrid);
      saveigrid=igrid
      ^d&iib^d=idphyb(^d,igrid);
      {do i^db=-1,1\}
        if (skip_direction([ i^d ])) cycle
        my_neighbor_type=neighbor_type(i^d,igrid)
        if (my_neighbor_type==neighbor_coarse) call bc_recv_prolong
      {end do\}
    end do
    ! sending ghost-cell values to finer neighbors
    do iigrid=1,igridstail; igrid=igrids(iigrid);
      saveigrid=igrid
      block=>ps(igrid)
      ^d&iib^d=idphyb(^d,igrid);
      ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
      if (any(neighbor_type(:^d&,igrid)==neighbor_fine)) then
        {do i^db=-1,1\}
          if (skip_direction([ i^d ])) cycle
          if (phi_ > 0) ipole=neighbor_pole(i^d,igrid)
          my_neighbor_type=neighbor_type(i^d,igrid)
          if (my_neighbor_type==neighbor_fine) call bc_send_prolong
        {end do\}
      end if
    end do

    !if (irecv/=nrecvs) then
    !   call mpistop("number of recvs in phase2 in amr_ghostcells is incorrect")
    !end if
    !if (isend/=nsends) then
    !   call mpistop("number of sends in phase2 in amr_ghostcells is incorrect")
    !end if

    call mpi_waitall(irecv,recvrequest,recvstatus,ierrmpi)
    deallocate(recvstatus,recvrequest)

    ! do prolongation on the ghost-cell values received from coarser neighbors
    do iigrid=1,igridstail; igrid=igrids(iigrid);
      saveigrid=igrid
      block=>ps(igrid)
      ^d&iib^d=idphyb(^d,igrid);
      ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
      if (any(neighbor_type(:^d&,igrid)==neighbor_coarse)) then
        {do i^db=-1,1\}
          if (skip_direction([ i^d ])) cycle
          my_neighbor_type=neighbor_type(i^d,igrid)
          if (my_neighbor_type==neighbor_coarse) call bc_prolong
        {end do\}
      end if
    end do

    call mpi_waitall(isend,sendrequest,sendstatus,ierrmpi)
    do ipwbuf=1,npwbuf
      if (isend_buf(ipwbuf)/=0) deallocate(pwbuf(ipwbuf)%w)
    end do
    deallocate(sendstatus,sendrequest)

    ! modify normal component of magnetic field to fix divB=0
    if(bcphys .and. physics_type=='mhd' .and. ndim>1) call phys_boundary_adjust()

    if (nwaux>0) call fix_auxiliary

    time_bc=time_bc+(mpi_wtime()-time_bcin)

  contains

    logical function skip_direction(dir)
      integer, intent(in) :: dir(^nd)

      if (all(dir == 0)) then
        skip_direction = .true.
      else if (.not. req_diagonal .and. count(dir /= 0) > 1) then
        skip_direction = .true.
      else
        skip_direction = .false.
      end if
    end function skip_direction

    !> Send to sibling at same refinement level
    subroutine bc_send_srl

      ineighbor=neighbor(1,i^d,igrid)
      ipe_neighbor=neighbor(2,i^d,igrid)

      if (ipole==0) then
        n_i^d=-i^d;
        if (ipe_neighbor==mype) then
          ixs^l=ixs_srl_^l(iib^d,i^d);
          ixr^l=ixr_srl_^l(iib^d,n_i^d);
          psb(ineighbor)%w(ixr^s,nwhead:nwtail)=&
             psb(igrid)%w(ixs^s,nwhead:nwtail)
        else
          isend=isend+1
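          ! tag layout: each destination grid owns a block of 3**^nd+4**^nd
          ! tags (25 in 2D); same-level messages encode the direction n_i in
          ! base 3, while restrict/prolong messages encode the child index
          ! inc in base 4, offset by 3**^nd (cf. bc_recv_srl/bc_recv_restrict)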
          itag=(3**^nd+4**^nd)*(ineighbor-1)+{(n_i^d+1)*3**(^d-1)+}
          call mpi_isend(psb(igrid)%w,1,type_send_srl(iib^d,i^d), &
             ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
        end if
      else
        ixs^l=ixs_srl_^l(iib^d,i^d);
        select case (ipole)
        {case (^d)
          n_i^d=i^d^d%n_i^dd=-i^dd;\}
        end select
        if (ipe_neighbor==mype) then
          ixr^l=ixr_srl_^l(iib^d,n_i^d);
          call pole_copy(psb(ineighbor)%w,ixg^l,ixr^l,psb(igrid)%w,ixg^l,ixs^l)
        else
          if (isend_buf(ipwbuf)/=0) then
            call mpi_wait(sendrequest(isend_buf(ipwbuf)), &
               sendstatus(:,isend_buf(ipwbuf)),ierrmpi)
            deallocate(pwbuf(ipwbuf)%w)
          end if
          allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
          call pole_buf(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psb(igrid)%w,ixg^l,ixs^l)
          isend=isend+1
          isend_buf(ipwbuf)=isend
          itag=(3**^nd+4**^nd)*(ineighbor-1)+{(n_i^d+1)*3**(^d-1)+}
          isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
          call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
             ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
          ipwbuf=1+modulo(ipwbuf,npwbuf)
        end if
      end if

    end subroutine bc_send_srl

    !> Send to coarser neighbor
    subroutine bc_send_restrict
      integer :: ii^D

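      ! ic^d is this block's child index (1 or 2) inside its parent, from the
      ! parity of its position node(pig^d_,igrid); only children adjacent to
      ! the interface in direction i send restricted data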
      ic^d=1+modulo(node(pig^d_,igrid)-1,2);
      if ({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return
      if(phyboundblock(igrid)) then
        ! fill physical-boundary ghost cells of the coarse representative block
        ! before sending the nghostcells-wide swap region to its coarser neighbor
        do idims=1,ndim
          ! to avoid using as yet unknown corner info in more than 1D, we
          ! fill only interior mesh ranges of the ghost cell ranges at first,
          ! and progressively enlarge the ranges to include corners later
          {kmin^d=merge(0, 1, idims==^d)
          kmax^d=merge(0, 1, idims==^d)
          ixbmin^d=ixcogmin^d+kmin^d*nghostcells
          ixbmax^d=ixcogmax^d-kmax^d*nghostcells\}
          {^iftwod
          if(idims > 1 .and. neighbor_type(-1,0,igrid)==neighbor_boundary) ixbmin1=ixcogmin1
          if(idims > 1 .and. neighbor_type( 1,0,igrid)==neighbor_boundary) ixbmax1=ixcogmax1}
          {^ifthreed
          if(idims > 1 .and. neighbor_type(-1,0,0,igrid)==neighbor_boundary) ixbmin1=ixcogmin1
          if(idims > 1 .and. neighbor_type( 1,0,0,igrid)==neighbor_boundary) ixbmax1=ixcogmax1
          if(idims > 2 .and. neighbor_type(0,-1,0,igrid)==neighbor_boundary) ixbmin2=ixcogmin2
          if(idims > 2 .and. neighbor_type(0, 1,0,igrid)==neighbor_boundary) ixbmax2=ixcogmax2}
          {if(i^d==-1) then
            ixbmin^d=ixcogmin^d+nghostcells
            ixbmax^d=ixcogmin^d+2*nghostcells-1
          else if(i^d==1) then
            ixbmin^d=ixcogmax^d-2*nghostcells+1
            ixbmax^d=ixcogmax^d-nghostcells
          end if\}
          do iside=1,2
            ii^d=kr(^d,idims)*(2*iside-3);
            if ({abs(i^d)==1.and.abs(ii^d)==1|.or.}) cycle
            if (neighbor_type(ii^d,igrid)/=neighbor_boundary) cycle
            call bc_phys(iside,idims,time,0.d0,psc(igrid)%w,&
               psc(igrid)%x,ixcog^l,ixb^l)
          end do
        end do
      end if

      ineighbor=neighbor(1,i^d,igrid)
      ipe_neighbor=neighbor(2,i^d,igrid)

      if (ipole==0) then
        n_inc^d=-2*i^d+ic^d;
        if (ipe_neighbor==mype) then
          ixs^l=ixs_r_^l(iib^d,i^d);
          ixr^l=ixr_r_^l(iib^d,n_inc^d);
          psb(ineighbor)%w(ixr^s,nwhead:nwtail)=&
             psc(igrid)%w(ixs^s,nwhead:nwtail)
        else
          isend=isend+1
          itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
          call mpi_isend(psc(igrid)%w,1,type_send_r(iib^d,i^d), &
             ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
        end if
      else
        ixs^l=ixs_r_^l(iib^d,i^d);
        select case (ipole)
        {case (^d)
          n_inc^d=2*i^d+(3-ic^d)^d%n_inc^dd=-2*i^dd+ic^dd;\}
        end select
        if (ipe_neighbor==mype) then
          ixr^l=ixr_r_^l(iib^d,n_inc^d);
          call pole_copy(psb(ineighbor)%w,ixg^l,ixr^l,psc(igrid)%w,ixcog^l,ixs^l)
        else
          if (isend_buf(ipwbuf)/=0) then
            call mpi_wait(sendrequest(isend_buf(ipwbuf)), &
               sendstatus(:,isend_buf(ipwbuf)),ierrmpi)
            deallocate(pwbuf(ipwbuf)%w)
          end if
          allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
          call pole_buf(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psc(igrid)%w,ixcog^l,ixs^l)
          isend=isend+1
          isend_buf(ipwbuf)=isend
          itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
          isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
          call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
             ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
          ipwbuf=1+modulo(ipwbuf,npwbuf)
        end if
      end if

    end subroutine bc_send_restrict

    !> Send to finer neighbor
    subroutine bc_send_prolong
      integer :: ii^D

      {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
        inc^db=2*i^db+ic^db\}
        ixs^l=ixs_p_^l(iib^d,inc^d);

        ineighbor=neighbor_child(1,inc^d,igrid)
        ipe_neighbor=neighbor_child(2,inc^d,igrid)

        if (ipole==0) then
          n_i^d=-i^d;
          n_inc^d=ic^d+n_i^d;
          if (ipe_neighbor==mype) then
            ixr^l=ixr_p_^l(iib^d,n_inc^d);
            psc(ineighbor)%w(ixr^s,nwhead:nwtail) &
               =psb(igrid)%w(ixs^s,nwhead:nwtail)
          else
            isend=isend+1
            itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
            call mpi_isend(psb(igrid)%w,1,type_send_p(iib^d,inc^d), &
               ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
          end if
        else
          select case (ipole)
          {case (^d)
            n_inc^d=inc^d^d%n_inc^dd=ic^dd-i^dd;\}
          end select
          if (ipe_neighbor==mype) then
            ixr^l=ixr_p_^l(iib^d,n_inc^d);
            call pole_copy(psc(ineighbor)%w,ixcog^l,ixr^l,psb(igrid)%w,ixg^l,ixs^l)
          else
            if (isend_buf(ipwbuf)/=0) then
              call mpi_wait(sendrequest(isend_buf(ipwbuf)), &
                 sendstatus(:,isend_buf(ipwbuf)),ierrmpi)
              deallocate(pwbuf(ipwbuf)%w)
            end if
            allocate(pwbuf(ipwbuf)%w(ixs^s,nwhead:nwtail))
            call pole_buf(pwbuf(ipwbuf)%w,ixs^l,ixs^l,psb(igrid)%w,ixg^l,ixs^l)
            isend=isend+1
            isend_buf(ipwbuf)=isend
            itag=(3**^nd+4**^nd)*(ineighbor-1)+3**^nd+{n_inc^d*4**(^d-1)+}
            isizes={(ixsmax^d-ixsmin^d+1)*}*nwbc
            call mpi_isend(pwbuf(ipwbuf)%w,isizes,mpi_double_precision, &
               ipe_neighbor,itag,icomm,sendrequest(isend),ierrmpi)
            ipwbuf=1+modulo(ipwbuf,npwbuf)
          end if
        end if
      {end do\}

    end subroutine bc_send_prolong

    !> Receive from sibling at same refinement level
    subroutine bc_recv_srl

      ipe_neighbor=neighbor(2,i^d,igrid)
      if (ipe_neighbor/=mype) then
        irecv=irecv+1
        itag=(3**^nd+4**^nd)*(igrid-1)+{(i^d+1)*3**(^d-1)+}
        call mpi_irecv(psb(igrid)%w,1,type_recv_srl(iib^d,i^d), &
           ipe_neighbor,itag,icomm,recvrequest(irecv),ierrmpi)
      end if

    end subroutine bc_recv_srl

    !> Receive from fine neighbor
    subroutine bc_recv_restrict

      {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
        inc^db=2*i^db+ic^db\}
        ipe_neighbor=neighbor_child(2,inc^d,igrid)
        if (ipe_neighbor/=mype) then
          irecv=irecv+1
          itag=(3**^nd+4**^nd)*(igrid-1)+3**^nd+{inc^d*4**(^d-1)+}
          call mpi_irecv(psb(igrid)%w,1,type_recv_r(iib^d,inc^d), &
             ipe_neighbor,itag,icomm,recvrequest(irecv),ierrmpi)
        end if
      {end do\}

    end subroutine bc_recv_restrict

    !> Receive from coarse neighbor
    subroutine bc_recv_prolong

      ic^d=1+modulo(node(pig^d_,igrid)-1,2);
      if ({.not.(i^d==0.or.i^d==2*ic^d-3)|.or.}) return

      ipe_neighbor=neighbor(2,i^d,igrid)
      if (ipe_neighbor/=mype) then
        irecv=irecv+1
        inc^d=ic^d+i^d;
        itag=(3**^nd+4**^nd)*(igrid-1)+3**^nd+{inc^d*4**(^d-1)+}
        call mpi_irecv(psc(igrid)%w,1,type_recv_p(iib^d,inc^d), &
           ipe_neighbor,itag,icomm,recvrequest(irecv),ierrmpi)
      end if

    end subroutine bc_recv_prolong

    subroutine bc_prolong

      integer :: ixFi^L,ixCo^L,ii^D
      double precision :: dxFi^D, dxCo^D, xFimin^D, xComin^D, invdxCo^D

      ixfi^l=ixr_srl_^l(iib^d,i^d);
      dxfi^d=rnode(rpdx^d_,igrid);
      dxco^d=two*dxfi^d;
      invdxco^d=1.d0/dxco^d;

      ! compute the enlarged grid lower left corner coordinates
      ! these are true coordinates for an equidistant grid,
      ! but we can temporarily also use them for getting indices
      ! in stretched grids
      xfimin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxfi^d;
      xcomin^d=rnode(rpxmin^d_,igrid)-dble(nghostcells)*dxco^d;

      if(prolongprimitive) then
        ! following line again assumes equidistant grid, but
        ! just computes indices, so also ok for stretched case
        ! reason for +1-1 and +1+1: the coarse representation has
        ! also nghostcells at each side. During
        ! prolongation, we need cells to left and right, hence -1/+1
        ixcomin^d=int((xfimin^d+(dble(ixfimin^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1-1;
        ixcomax^d=int((xfimin^d+(dble(ixfimax^d)-half)*dxfi^d-xcomin^d)*invdxco^d)+1+1;
        call phys_to_primitive(ixcog^l,ixco^l,&
           psc(igrid)%w,psc(igrid)%x)
      endif

      select case (typeghostfill)
      case ("linear")
        call interpolation_linear(ixfi^l,dxfi^d,xfimin^d,dxco^d,invdxco^d,xcomin^d)
      case ("copy")
        call interpolation_copy(ixfi^l,dxfi^d,xfimin^d,dxco^d,invdxco^d,xcomin^d)
      case default
        write (unitterm,*) "Undefined typeghostfill ",typeghostfill
        call mpistop("Undefined typeghostfill")
      end select

      if(prolongprimitive) call phys_to_conserved(ixcog^l,ixco^l,&
         psc(igrid)%w,psc(igrid)%x)

    end subroutine bc_prolong

    subroutine interpolation_linear(ixFi^L,dxFi^D,xFimin^D, &
       dxCo^D,invdxCo^D,xComin^D)

      integer, intent(in) :: ixFi^L
      double precision, intent(in) :: dxFi^D, xFimin^D,dxCo^D, invdxCo^D, xComin^D

      integer :: ixCo^D, jxCo^D, hxCo^D, ixFi^D, ix^D, iw, idims, nwmin,nwmax
      double precision :: xCo^D, xFi^D, eta^D
      double precision :: slopeL, slopeR, slopeC, signC, signR
      double precision :: slope(1:nw,ndim)
      !!double precision :: local_invdxCo^D
      double precision :: signedfactorhalf^D
      !integer :: ixshift^D, icase

      !icase=mod(nghostcells,2)

      if(prolongprimitive) then
        nwmin=1
        nwmax=nw
      else
        nwmin=nwhead
        nwmax=nwtail
      end if

      {do ixfi^db = ixfi^lim^db
        ! cell-centered coordinates of fine grid point
        ! here we temporarily use an equidistant grid
        xfi^db=xfimin^db+(dble(ixfi^db)-half)*dxfi^db

        ! indices of coarse cell which contains the fine cell
        ! since we computed lower left corner earlier
        ! in equidistant fashion: also ok for stretched case
        ixco^db=int((xfi^db-xcomin^db)*invdxco^db)+1

        ! cell-centered coordinates of coarse grid point
        ! here we temporarily use an equidistant grid
        xco^db=xcomin^db+(dble(ixco^db)-half)*dxco^db \}

      !if(.not.slab) then
      !   ^D&local_invdxCo^D=1.d0/psc(igrid)%dx({ixCo^DD},^D)\
      !endif

      if(slab) then
        ! actual cell-centered coordinates of fine grid point
        !!^D&xFi^D=block%x({ixFi^DD},^D)\
        ! actual cell-centered coordinates of coarse grid point
        !!^D&xCo^D=psc(igrid)%x({ixCo^DD},^D)\
        ! normalized distance between fine/coarse cell center
        ! in coarse cell: ranges from -0.5 to 0.5 in each direction
        ! (origin is coarse cell center)
        ! this is essentially +1/4 or -1/4 on cartesian mesh
        eta^d=(xfi^d-xco^d)*invdxco^d;
      else
        !select case(icase)
        ! case(0)
        !{! here we assume an even number of ghostcells!!!
        !ixshift^D=2*(mod(ixFi^D,2)-1)+1
        !if(ixshift^D>0.0d0)then
        !   ! odd fine grid points
        !   eta^D=-0.5d0*(one-block%dvolume(ixFi^DD) &
        !     /sum(block%dvolume(ixFi^D:ixFi^D+1^D%ixFi^DD)))
        !else
        !   ! even fine grid points
        !   eta^D=+0.5d0*(one-block%dvolume(ixFi^DD) &
        !     /sum(block%dvolume(ixFi^D-1:ixFi^D^D%ixFi^DD)))
        !endif\}
        ! case(1)
        !{! here we assume an odd number of ghostcells!!!
        !ixshift^D=2*(mod(ixFi^D,2)-1)+1
        !if(ixshift^D>0.0d0)then
        !   ! odd fine grid points
        !   eta^D=+0.5d0*(one-block%dvolume(ixFi^DD) &
        !     /sum(block%dvolume(ixFi^D-1:ixFi^D^D%ixFi^DD)))
        !else
        !   ! even fine grid points
        !   eta^D=-0.5d0*(one-block%dvolume(ixFi^DD) &
        !     /sum(block%dvolume(ixFi^D:ixFi^D+1^D%ixFi^DD)))
        !endif\}
        ! case default
        !   call mpistop("no such case")
        !end select
        ! the different cases for even/odd numbers of ghost cells
        ! are automatically handled using the relative index to ixMlo
        ! as well as the pseudo-coordinates xFi and xCo
        ! these latter differ from actual cell centers when stretching is used
        ix^d=2*int((ixfi^d+ixmlo^d)/2)-ixmlo^d;
        {signedfactorhalf^d=(xfi^d-xco^d)*invdxco^d*two
        if(dabs(signedfactorhalf^d**2-1.0d0/4.0d0)>smalldouble) call mpistop("error in bc_prolong")
        eta^d=signedfactorhalf^d*(one-block%dvolume(ixfi^dd) &
          /sum(block%dvolume(ix^d:ix^d+1^d%ixFi^dd))) \}
        !{eta^D=(xFi^D-xCo^D)*invdxCo^D &
        !   *two*(one-block%dvolume(ixFi^DD) &
        !   /sum(block%dvolume(ix^D:ix^D+1^D%ixFi^DD))) \}
      end if

      do idims=1,ndim
        hxco^d=ixco^d-kr(^d,idims)\
        jxco^d=ixco^d+kr(^d,idims)\

        do iw=nwmin,nwmax
          slopel=psc(igrid)%w(ixco^d,iw)-psc(igrid)%w(hxco^d,iw)
          sloper=psc(igrid)%w(jxco^d,iw)-psc(igrid)%w(ixco^d,iw)
          slopec=half*(sloper+slopel)

          ! get limited slope
          signr=sign(one,sloper)
          signc=sign(one,slopec)
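          ! 'minmod' keeps the smaller one-sided slope when left and right
          ! slopes agree in sign (zero otherwise); 'woodward' doubles that
          ! bound but also limits by half the central slope; the default is
          ! a three-way minmod of the central, left and right slopes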
          select case(typeprolonglimit)
          case('unlimit')
            slope(iw,idims)=slopec
          case('minmod')
            slope(iw,idims)=signr*max(zero,min(dabs(sloper), &
               signr*slopel))
          case('woodward')
            slope(iw,idims)=two*signr*max(zero,min(dabs(sloper), &
               signr*slopel,signr*half*slopec))
          case('koren')
            slope(iw,idims)=signr*max(zero,min(two*signr*slopel, &
               (dabs(sloper)+two*slopel*signr)*third,two*dabs(sloper)))
          case default
            slope(iw,idims)=signc*max(zero,min(dabs(slopec), &
               signc*slopel,signc*sloper))
          end select
        end do
      end do

      ! Interpolate from coarse cell using limited slopes
      psb(igrid)%w(ixfi^d,nwmin:nwmax)=psc(igrid)%w(ixco^d,nwmin:nwmax)+&
         {(slope(nwmin:nwmax,^d)*eta^d)+}
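      ! i.e. w_fine(ixfi) = w_coarse(ixco) + sum over idims of
      ! slope(:,idims)*eta_idims, a multilinear reconstruction with
      ! eta^d in (-1/2,1/2)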

      {end do\}

      if(prolongprimitive) call phys_to_conserved(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)

    end subroutine interpolation_linear

    subroutine interpolation_copy(ixFi^L,dxFi^D,xFimin^D, &
       dxCo^D,invdxCo^D,xComin^D)

      integer, intent(in) :: ixFi^L
      double precision, intent(in) :: dxFi^D, xFimin^D,dxCo^D, invdxCo^D, xComin^D

      integer :: ixCo^D, ixFi^D, nwmin,nwmax
      double precision :: xFi^D

      if(prolongprimitive) then
        nwmin=1
        nwmax=nw
      else
        nwmin=nwhead
        nwmax=nwtail
      end if

      {do ixfi^db = ixfi^lim^db
        ! cell-centered coordinates of fine grid point
        xfi^db=xfimin^db+(dble(ixfi^db)-half)*dxfi^db

        ! indices of coarse cell which contains the fine cell
        ! note: this also works for stretched grids
        ixco^db=int((xfi^db-xcomin^db)*invdxco^db)+1\}

        ! Copy from coarse cell
        psb(igrid)%w(ixfi^d,nwmin:nwmax)=psc(igrid)%w(ixco^d,nwmin:nwmax)

      {end do\}

      if(prolongprimitive) call phys_to_conserved(ixg^ll,ixfi^l,psb(igrid)%w,psb(igrid)%x)

    end subroutine interpolation_copy

    subroutine pole_copy(wrecv,ixIR^L,ixR^L,wsend,ixIS^L,ixS^L)

      integer, intent(in) :: ixIR^L,ixR^L,ixIS^L,ixS^L
      double precision :: wrecv(ixir^s,1:nw), wsend(ixis^s,1:nw)

      integer :: iw, iB

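      ! ipole holds the dimension of the polar axis: the reversed range
      ! ixsmax^d:ixsmin^d:-1 mirrors the sent layer across the pole, and
      ! 'asymm' variables (e.g. vector components across the axis) flip sign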
      select case (ipole)
      {case (^d)
        iside=int((i^d+3)/2)
        ib=2*(^d-1)+iside
        do iw=nwhead,nwtail
          select case (typeboundary(iw,ib))
          case ("symm")
            wrecv(ixr^s,iw) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
          case ("asymm")
            wrecv(ixr^s,iw) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
          case default
            call mpistop("Pole boundary condition should be symm or asymm")
          end select
        end do \}
      end select

    end subroutine pole_copy

    subroutine pole_buf(wrecv,ixIR^L,ixR^L,wsend,ixIS^L,ixS^L)

      integer, intent(in) :: ixIR^L,ixR^L,ixIS^L,ixS^L
      double precision :: wrecv(ixir^s,nwhead:nwtail), wsend(ixis^s,1:nw)

      integer :: iw, iB

      select case (ipole)
      {case (^d)
        iside=int((i^d+3)/2)
        ib=2*(^d-1)+iside
        do iw=nwhead,nwtail
          select case (typeboundary(iw,ib))
          case ("symm")
            wrecv(ixr^s,iw) = wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
          case ("asymm")
            wrecv(ixr^s,iw) =-wsend(ixsmax^d:ixsmin^d:-1^d%ixS^s,iw)
          case default
            call mpistop("Pole boundary condition should be symm or asymm")
          end select
        end do \}
      end select

    end subroutine pole_buf

    subroutine fix_auxiliary

      integer :: ix^L

      do iigrid=1,igridstail; igrid=igrids(iigrid);
        saveigrid=igrid
        block=>ps(igrid)
        call identifyphysbound(ps(igrid),iib^d)

        {do i^db=-1,1\}
          if (skip_direction([ i^d ])) cycle

          ix^l=ixr_srl_^l(iib^d,i^d);
          call phys_get_aux(.true.,psb(igrid)%w,ps(igrid)%x,ixg^l,ix^l,"bc")
        {end do\}
      end do

    end subroutine fix_auxiliary

  end subroutine getbc

  subroutine identifyphysbound(s,iib^D)
    use mod_global_parameters

    type(state)          :: s
    integer, intent(out) :: iib^D

    {
    if(s%is_physical_boundary(2*^d) .and. &
       s%is_physical_boundary(2*^d-1)) then
      iib^d=2
    else if(s%is_physical_boundary(2*^d-1)) then
      iib^d=-1
    else if(s%is_physical_boundary(2*^d)) then
      iib^d=1
    else
      iib^d=0
    end if
    \}

  end subroutine identifyphysbound

end module mod_ghostcells_update