MPI-AMRVAC  3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_coarsen_refine.t
!> Module to coarsen and refine grids for AMR
module mod_coarsen_refine
  implicit none
  private
  !> MPI send/receive variables for AMR
  integer :: itag, irecv, isend
  integer, dimension(:), allocatable :: recvrequest, sendrequest
  integer, dimension(:,:), allocatable :: recvstatus, sendstatus
  !> MPI send/receive variables for staggered-variable AMR
  integer :: itag_stg
  integer, dimension(:), allocatable :: recvrequest_stg, sendrequest_stg
  integer, dimension(:,:), allocatable :: recvstatus_stg, sendstatus_stg
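  ! Note: the staggered counterparts are allocated with one entry per block
  ! and per dimension (max_blocks*^nd), since face-centered data are
  ! exchanged with a separate message per direction.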

  ! Public subroutines
  public :: amr_coarsen_refine

contains

  !> coarsen and refine blocks to update AMR grid
  subroutine amr_coarsen_refine
    use mod_forest
    use mod_global_parameters
    use mod_ghostcells_update
    use mod_usr_methods, only: usr_after_refine
    use mod_amr_fct
    use mod_selectgrids, only: selectgrids
    use mod_refine, only: refine_grids

    {^nooned
    use mod_multigrid_coupling
    }

    integer :: iigrid, igrid, ipe, igridco, ipeco, level, ic^d
    integer, dimension(2^D&) :: igridfi, ipefi
    integer :: n_coarsen, n_refine
    type(tree_node_ptr) :: tree, sibling
    logical :: active

    call proper_nesting

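    ! Before the mesh changes, store and communicate the face-centered fields
    ! at refinement boundaries (mod_amr_fct), so that staggered variables can
    ! later be prolonged consistently with the pre-existing fine faces and
    ! the divergence of the staggered field is conserved.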
    if(stagger_grid) then
      call store_faces
      call comm_faces
    end if

    n_coarsen = count(coarsen(:, :))
    n_refine = count(refine(:, :))

    ! to save memory: first coarsen then refine
    irecv=0
    isend=0
    allocate(recvstatus(mpi_status_size,max_blocks),recvrequest(max_blocks), &
       sendstatus(mpi_status_size,max_blocks),sendrequest(max_blocks))
    recvrequest=mpi_request_null
    sendrequest=mpi_request_null

    if(stagger_grid) then
      allocate(recvstatus_stg(mpi_status_size,max_blocks*^nd),recvrequest_stg(max_blocks*^nd), &
         sendstatus_stg(mpi_status_size,max_blocks*^nd),sendrequest_stg(max_blocks*^nd))
      recvrequest_stg=mpi_request_null
      sendrequest_stg=mpi_request_null
    end if

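    ! Coarsen: for each block flagged for coarsening, collect its 2^D siblings
    ! (the curly-brace constructs are VACPP/LASY preprocessor loops, expanded
    ! once per dimension) and merge them into a parent block obtained on the
    ! processor of the first child.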
    do ipe=0,npe-1
       do igrid=1,max_blocks
          if (coarsen(igrid,ipe)) then
             if (.not.associated(igrid_to_node(igrid,ipe)%node)) cycle

             tree%node => igrid_to_node(igrid,ipe)%node%parent%node
             {do ic^db=1,2\}
                sibling%node => tree%node%child(ic^d)%node
                ipefi(ic^d)=sibling%node%ipe
                igridfi(ic^d)=sibling%node%igrid
             {end do\}

             ipeco=ipefi(1^d&)
             igridco=getnode(ipeco)

             call coarsen_tree_leaf(igridco,ipeco,igridfi,ipefi,active)

             call coarsen_grid_siblings(igridco,ipeco,igridfi,ipefi,active)

             ! local coarsening done
             {do ic^db=1,2\}
                if (ipefi(ic^d)==ipeco) then
                   call putnode(igridfi(ic^d),ipefi(ic^d))
                   coarsen(igridfi(ic^d),ipefi(ic^d))=.false.
                end if
             {end do\}
          end if
       end do
    end do

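    ! Wait for all nonblocking transfers of coarsened child data to complete
    ! before releasing the request and status arrays.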
    if (irecv>0) then
       call mpi_waitall(irecv,recvrequest,recvstatus,ierrmpi)
       if(stagger_grid) call mpi_waitall(irecv,recvrequest_stg,recvstatus_stg,ierrmpi)
    end if
    if (isend>0) then
       call mpi_waitall(isend,sendrequest,sendstatus,ierrmpi)
       if(stagger_grid) call mpi_waitall(isend,sendrequest_stg,sendstatus_stg,ierrmpi)
    end if

    deallocate(recvstatus,recvrequest,sendstatus,sendrequest)
    if(stagger_grid) deallocate(recvstatus_stg,recvrequest_stg,sendstatus_stg,sendrequest_stg)

    ! non-local coarsening done
    do ipe=0,npe-1
       do igrid=1,max_blocks
          if (coarsen(igrid,ipe)) then
             !if (ipe==mype) call dealloc_node(igrid) ! do not deallocate node
             ! memory, preventing fragmentation of system memory as a result
             ! of frequently allocating and deallocating memory

             ! put the node (igrid number) into the unused list
             call putnode(igrid,ipe)
             coarsen(igrid,ipe)=.false.
          end if
       end do
    end do

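    ! Refine: each flagged block acquires 2^D new child grids on its own
    ! processor; the tree is updated on all processors, but the actual grid
    ! data are only filled on the owning processor.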
    do ipe=0,npe-1
       do igrid=1,max_blocks
          if (refine(igrid,ipe)) then

             {do ic^db=1,2\}
                igridfi(ic^d)=getnode(ipe)
                ipefi(ic^d)=ipe
             {end do\}

             call refine_tree_leaf(igridfi,ipefi,igrid,ipe,active)

             if (ipe==mype) call refine_grids(igridfi,ipefi,igrid,ipe,active)

             ! refinement done
             call putnode(igrid,ipe)
             refine(igrid,ipe)=.false.
          end if
       end do
    end do

    ! A crash occurs in a later MPI_WAITALL when the initial condition takes
    ! too long to fill the new blocks, with both the gfortran and Intel
    ! Fortran compilers. This barrier cures the problem.
    !TODO: find the reason
    if(.not.time_advance) call mpi_barrier(icomm,ierrmpi)

    if(stagger_grid) call end_comm_faces

    call get_level_range

    ! Update sfc array: igrid and ipe info in space filling curve
    call amr_morton_order()

    call load_balance

    ! Rebuild tree connectivity
    call getigrids
    call build_connectivity

    ! Update the list of active grids
    call selectgrids
    ! grid structure now complete again.

    ! since we only filled mesh values, and advance assumes filled
    ! ghost cells, do boundary filling for the new levels
    if (time_advance) then
       call getbc(global_time+dt,0.d0,ps,iwstart,nwgc)
    else
       call getbc(global_time,0.d0,ps,iwstart,nwgc)
    end if

    {^nooned
    if (use_multigrid) call mg_update_refinement(n_coarsen, n_refine)
    }

    if (associated(usr_after_refine)) then
       call usr_after_refine(n_coarsen, n_refine)
    end if

  end subroutine amr_coarsen_refine

  !> For all grids on all processors, do a check on refinement flags. Make
  !> sure that neighbors will not differ more than one level of refinement.
  subroutine proper_nesting
    use mod_forest
    use mod_global_parameters

    logical, dimension(:,:), allocatable :: refine2
    integer :: iigrid, igrid, level, ic^D, inp^D, i^D, my_neighbor_type, ipe
    logical :: coarsening, pole(ndim), sendbuf(max_blocks)
    type(tree_node_ptr) :: tree, p_neighbor, my_parent, sibling, my_neighbor, &
       neighborchild

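    ! With buffer cells around flagged regions (nbufferx>0) a processor may
    ! flag blocks it does not own, so the refine flags of all processors must
    ! be OR-combined; otherwise each processor only flags its own blocks and
    ! a simple allgather suffices.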
    if (nbufferx^d/=0|.or.) then
       allocate(refine2(max_blocks,npe))
       call mpi_allreduce(refine,refine2,max_blocks*npe,mpi_logical,mpi_lor, &
          icomm,ierrmpi)
       refine=refine2
    else
       sendbuf(:)=refine(:,mype)
       call mpi_allgather(sendbuf,max_blocks,mpi_logical,refine,max_blocks, &
          mpi_logical,icomm,ierrmpi)
    end if
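
    ! March the levels from fine to coarse: if a block flagged for refinement
    ! has a neighbor that is still a coarser leaf, flag that neighbor for
    ! refinement too, so no two adjacent blocks end up more than one level
    ! apart.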
    do level=min(levmax,refine_max_level-1),levmin+1,-1
       tree%node => level_head(level)%node
       do
          if (.not.associated(tree%node)) exit

          if (refine(tree%node%igrid,tree%node%ipe)) then
             ic^d=1+modulo(tree%node%ig^d-1,2);
             {do inp^db=ic^db-2,ic^db-1\}
                if (inp^d==0|.and.) cycle
                p_neighbor%node => tree%node%parent%node
                {if (inp^d/=0) then
                   p_neighbor%node => p_neighbor%node%neighbor(ic^d,^d)%node
                   if (.not.associated(p_neighbor%node)) cycle
                end if\}
                if (p_neighbor%node%leaf) then
                   refine(p_neighbor%node%igrid,p_neighbor%node%ipe)=.true.
                end if
             {end do\}
          end if

          tree%node => tree%node%next%node
       end do
    end do

    ! On each processor locally, check if grids set for coarsening are
    ! already set for refinement.
    do iigrid=1,igridstail; igrid=igrids(iigrid);
       if (refine(igrid,mype).and.coarsen(igrid,mype)) coarsen(igrid,mype)=.false.
    end do

    ! For all grids on all processors, do a check on the coarsening flags
    sendbuf(:)=coarsen(:,mype)
    call mpi_allgather(sendbuf,max_blocks,mpi_logical,coarsen,max_blocks, &
       mpi_logical,icomm,ierrmpi)

    do level=levmax,max(2,levmin),-1
       tree%node => level_head(level)%node
       do
          if (.not.associated(tree%node)) exit

          if (coarsen(tree%node%igrid,tree%node%ipe)) then
             coarsening=.true.
             my_parent%node => tree%node%parent%node

             ! are all siblings flagged for coarsening?
             check1: {do ic^db=1,2\}
                sibling%node => my_parent%node%child(ic^d)%node
                if (sibling%node%leaf) then
                   if (coarsen(sibling%node%igrid,sibling%node%ipe)) cycle
                end if
                call unflag_coarsen_siblings
                exit check1
             {end do\} check1

             ! Make sure that neighbors will not differ more than one level
             ! of refinement, otherwise unflag all siblings
             if (coarsening) then
                check2: {do ic^db=1,2\}
                   sibling%node => my_parent%node%child(ic^d)%node
                   {do i^db=ic^db-2,ic^db-1\}
                      if (i^d==0|.and.) cycle
                      call find_neighbor(my_neighbor,my_neighbor_type, &
                         sibling,i^d,pole)
                      select case (my_neighbor_type)
                      case (neighbor_sibling)
                         if (refine(my_neighbor%node%igrid, &
                              my_neighbor%node%ipe)) then
                            call unflag_coarsen_siblings
                            exit check2
                         else
                            cycle
                         end if
                      case (neighbor_fine)
                         neighborchild%node=>my_neighbor%node%child(1^d&)%node
                         if (neighborchild%node%leaf) then
                            if (coarsen(neighborchild%node%igrid, &
                                 neighborchild%node%ipe)) then
                               cycle
                            end if
                         end if
                         call unflag_coarsen_siblings
                         exit check2
                      end select
                   {end do\}
                {end do\} check2
             end if

          end if

          tree%node => tree%node%next%node
       end do
    end do

  contains

    !> unflag the whole sibling group for coarsening
    subroutine unflag_coarsen_siblings

      integer :: ic^D
      type(tree_node_ptr) :: sibling

      {do ic^db=1,2\}
         sibling%node => my_parent%node%child(ic^d)%node
         if (sibling%node%leaf) then
            coarsen(sibling%node%igrid,sibling%node%ipe)=.false.
         end if
      {end do\}
      coarsening=.false.

    end subroutine unflag_coarsen_siblings

  end subroutine proper_nesting

  !> coarsen sibling blocks into one block
  subroutine coarsen_grid_siblings(igrid,ipe,child_igrid,child_ipe,active)
    use mod_global_parameters
    use mod_coarsen, only: coarsen_grid

    integer, intent(in) :: igrid, ipe
    integer, dimension(2^D&), intent(in) :: child_igrid, child_ipe
    logical, intent(in) :: active

    integer :: igridFi, ipeFi, ixCo^L, ixCoG^L, ixCoM^L, ic^D, idir

    if (ipe==mype) call alloc_node(igrid)

    ! New passive cell, coarsen from initial condition:
    if (.not. active) then
       if (ipe == mype) then
          call initial_condition(igrid)
          {do ic^db=1,2\}
             igridfi=child_igrid(ic^d)
             ipefi=child_ipe(ic^d)
             !if (ipeFi==mype) then
             !   ! remove solution space of child
             !   call dealloc_node(igridFi)
             !end if
          {end do\}
       end if
       return
    end if

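    ! Active case: every child is coarsened on its own processor; if the
    ! parent lives elsewhere, the coarse data go through the psc buffer and a
    ! nonblocking send, matched by a receive posted on the parent's side.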
    {do ic^db=1,2\}
       igridfi=child_igrid(ic^d)
       ipefi=child_ipe(ic^d)

       if (ipefi==mype) then
          ^d&dxlevel(^d)=rnode(rpdx^d_,igridfi);
          if (ipe==mype) then
             ! child and parent are both local: coarsen the child directly
             ! into its quadrant/octant of the parent block
             ixcomin^d=ixmlo^d+(ic^d-1)*(ixmhi^d-ixmlo^d+1)/2;
             ixcomax^d=ixmhi^d+(ic^d-2)*(ixmhi^d-ixmlo^d+1)/2;

             call coarsen_grid(ps(igridfi),ixg^ll,ixm^ll,ps(igrid),ixg^ll,ixco^l)
             ! remove solution space of child
             !call dealloc_node(igridFi)
          else
             ! parent lives elsewhere: coarsen into the local buffer psc and
             ! send it to the parent's processor
             ixcogmin^d=1;
             ixcogmax^d=ixghi^d/2+nghostcells;
             ixcom^l=ixcog^l^lsubnghostcells;
             call coarsen_grid(ps(igridfi),ixg^ll,ixm^ll,psc(igridfi), &
                ixcog^l,ixcom^l)

             !itag=ipeFi*max_blocks+igridFi
             itag=ipefi+igridfi
             isend=isend+1
             call mpi_isend(psc(igridfi)%w,1,type_coarse_block,ipe,itag, &
                icomm,sendrequest(isend),ierrmpi)
             if(stagger_grid) then
                do idir=1,ndim
                   !itag_stg=(npe+ipeFi+1)*max_blocks+igridFi*(ndir-1+idir)
                   itag_stg=(npe+ipefi+1)+igridfi*(ndir-1+idir)
                   call mpi_isend(psc(igridfi)%ws,1,type_coarse_block_stg(idir,ic^d),ipe,itag_stg, &
                      icomm,sendrequest_stg(isend),ierrmpi)
                end do
             end if
          end if
       else
          if (ipe==mype) then
             ! child lives elsewhere: receive its coarsened data straight
             ! into the matching part of the parent block (tags mirror the
             ! sender side above)
             !itag=ipeFi*max_blocks+igridFi
             itag=ipefi+igridfi
             irecv=irecv+1
             call mpi_irecv(ps(igrid)%w,1,type_sub_block(ic^d),ipefi,itag, &
                icomm,recvrequest(irecv),ierrmpi)
             if(stagger_grid) then
                do idir=1,ndim
                   !itag_stg=(npe+ipeFi+1)*max_blocks+igridFi*(ndir-1+idir)
                   itag_stg=(npe+ipefi+1)+igridfi*(ndir-1+idir)
                   call mpi_irecv(ps(igrid)%ws,1,type_sub_block_stg(idir,ic^d),ipefi,itag_stg, &
                      icomm,recvrequest_stg(irecv),ierrmpi)
                end do
             end if
          end if
       end if
    {end do\}

  end subroutine coarsen_grid_siblings

end module mod_coarsen_refine