6 integer :: itag, irecv, isend
7 integer,
dimension(:),
allocatable :: recvrequest, sendrequest
8 integer,
dimension(:,:),
allocatable :: recvstatus, sendstatus
11 integer,
dimension(:),
allocatable :: recvrequest_stg, sendrequest_stg
12 integer,
dimension(:,:),
allocatable :: recvstatus_stg, sendstatus_stg
32 integer :: iigrid, igrid, ipe, igridco, ipeco, level, ic^
d
33 integer,
dimension(2^D&) :: igridfi, ipefi
34 integer :: n_coarsen, n_refine
46 n_coarsen = count(
coarsen(:, :))
47 n_refine = count(
refine(:, :))
54 recvrequest=mpi_request_null
55 sendrequest=mpi_request_null
60 recvrequest_stg=mpi_request_null
61 sendrequest_stg=mpi_request_null
71 sibling%node => tree%node%child(ic^
d)%node
72 ipefi(ic^
d)=sibling%node%ipe
73 igridfi(ic^
d)=sibling%node%igrid
81 call coarsen_grid_siblings(igridco,ipeco,igridfi,ipefi,active)
85 if (ipefi(ic^d)==ipeco)
then
86 call putnode(igridfi(ic^d),ipefi(ic^d))
87 coarsen(igridfi(ic^d),ipefi(ic^d))=.false.
95 call mpi_waitall(irecv,recvrequest,recvstatus,ierrmpi)
96 if(stagger_grid)
call mpi_waitall(irecv,recvrequest_stg,recvstatus_stg,ierrmpi)
99 call mpi_waitall(isend,sendrequest,sendstatus,ierrmpi)
100 if(stagger_grid)
call mpi_waitall(isend,sendrequest_stg,sendstatus_stg,ierrmpi)
103 deallocate(recvstatus,recvrequest,sendstatus,sendrequest)
104 if(stagger_grid)
deallocate(recvstatus_stg,recvrequest_stg,sendstatus_stg,sendrequest_stg)
108 do igrid=1,max_blocks
109 if (coarsen(igrid,ipe))
then
116 coarsen(igrid,ipe)=.false.
122 do igrid=1,max_blocks
123 if (refine(igrid,ipe))
then
132 if (ipe==mype)
call refine_grids(igridfi,ipefi,igrid,ipe,active)
136 refine(igrid,ipe)=.false.
145 if(.not.time_advance)
call mpi_barrier(icomm,ierrmpi)
147 if(stagger_grid)
call end_comm_faces
152 call amr_morton_order()
166 if (time_advance)
then
167 call getbc(global_time+dt,0.d0,ps,iwstart,nwgc)
169 call getbc(global_time,0.d0,ps,iwstart,nwgc)
173 if (use_multigrid)
call mg_update_refinement(n_coarsen, n_refine)
176 if (
associated(usr_after_refine))
then
177 call usr_after_refine(n_coarsen, n_refine)
188 logical,
dimension(:,:),
allocatable :: refine2
189 integer :: iigrid, igrid, level, ic^D, inp^D, i^D, my_neighbor_type,ipe
190 logical :: coarsening, pole(ndim), sendbuf(max_blocks)
191 type(
tree_node_ptr) :: tree, p_neighbor, my_parent, sibling, my_neighbor, &
195 allocate(refine2(max_blocks,
npe))
196 call mpi_allreduce(
refine,refine2,max_blocks*
npe,mpi_logical,mpi_lor, &
201 call mpi_allgather(sendbuf,max_blocks,mpi_logical,
refine,max_blocks, &
208 if (.not.
associated(tree%node))
exit
210 if (
refine(tree%node%igrid,tree%node%ipe))
then
211 ic^d=1+modulo(tree%node%ig^d-1,2);
212 {
do inp^db=ic^db-2,ic^db-1\}
213 if (inp^d==0|.and.) cycle
214 p_neighbor%node => tree%node%parent%node
216 p_neighbor%node => p_neighbor%node%neighbor(ic^d,^d)%node
217 if (.not.
associated(p_neighbor%node)) cycle
219 if (p_neighbor%node%leaf)
then
220 refine(p_neighbor%node%igrid,p_neighbor%node%ipe)=.true.
225 tree%node => tree%node%next%node
232 do iigrid=1,igridstail; igrid=igrids(iigrid);
233 if (refine(igrid,mype).and.coarsen(igrid,mype)) coarsen(igrid,mype)=.false.
237 sendbuf(:)=coarsen(:,mype)
238 call mpi_allgather(sendbuf,max_blocks,mpi_logical,coarsen,max_blocks, &
239 mpi_logical,icomm,ierrmpi)
241 do level=levmax,max(2,levmin),-1
242 tree%node => level_head(level)%node
244 if (.not.
associated(tree%node))
exit
246 if (coarsen(tree%node%igrid,tree%node%ipe))
then
248 my_parent%node => tree%node%parent%node
251 check1: {
do ic^db=1,2\}
252 sibling%node => my_parent%node%child(ic^d)%node
253 if (sibling%node%leaf)
then
254 if (coarsen(sibling%node%igrid,sibling%node%ipe)) cycle
263 check2: {
do ic^db=1,2\}
264 sibling%node => my_parent%node%child(ic^d)%node
265 {
do i^db=ic^db-2,ic^db-1\}
266 if (i^d==0|.and.) cycle
269 select case (my_neighbor_type)
270 case (neighbor_sibling)
271 if (refine(my_neighbor%node%igrid, &
272 my_neighbor%node%ipe))
then
279 neighborchild%node=>my_neighbor%node%child(1^d&)%node
280 if (neighborchild%node%leaf)
then
281 if (coarsen(neighborchild%node%igrid, &
282 neighborchild%node%ipe))
then
295 tree%node => tree%node%next%node
304 type(tree_node_ptr) :: sibling
307 sibling%node => my_parent%node%child(ic^d)%node
308 if (sibling%node%leaf)
then
309 coarsen(sibling%node%igrid,sibling%node%ipe)=.false.
319 subroutine coarsen_grid_siblings(igrid,ipe,child_igrid,child_ipe,active)
322 integer,
intent(in) :: igrid, ipe
323 integer,
dimension(2^D&),
intent(in) :: child_igrid, child_ipe
324 logical,
intent(in) :: active
326 integer :: igridFi, ipeFi, ixCo^L, ixCoG^L, ixCoM^L, ic^D, idir
331 if (.not. active)
then
332 if (ipe ==
mype)
then
335 igridfi=child_igrid(ic^d)
336 ipefi=child_ipe(ic^d)
347 igridfi=child_igrid(ic^d)
348 ipefi=child_ipe(ic^d)
350 if (ipefi==mype)
then
351 ^d&dxlevel(^d)=rnode(rpdx^d_,igridfi);
353 ixcomin^d=ixmlo^d+(ic^d-1)*(ixmhi^d-ixmlo^d+1)/2;
354 ixcomax^d=ixmhi^d+(ic^d-2)*(ixmhi^d-ixmlo^d+1)/2;
356 call coarsen_grid(ps(igridfi),ixg^ll,ixm^ll,ps(igrid),ixg^ll,ixco^l)
361 ixcogmax^d=ixghi^d/2+nghostcells;
362 ixcom^l=ixcog^l^lsubnghostcells;
363 call coarsen_grid(ps(igridfi),ixg^ll,ixm^ll,psc(igridfi), &
369 call mpi_isend(psc(igridfi)%w,1,type_coarse_block,ipe,itag, &
370 icomm,sendrequest(isend),ierrmpi)
371 if(stagger_grid)
then
374 itag_stg=(npe+ipefi+1)+igridfi*(ndir-1+idir)
375 call mpi_isend(psc(igridfi)%ws,1,type_coarse_block_stg(idir,ic^d),ipe,itag_stg, &
376 icomm,sendrequest_stg(isend),ierrmpi)
385 call mpi_irecv(ps(igrid)%w,1,type_sub_block(ic^d),ipefi,itag, &
386 icomm,recvrequest(irecv),ierrmpi)
387 if(stagger_grid)
then
390 itag_stg=(npe+ipefi+1)+igridfi*(ndir-1+idir)
391 call mpi_irecv(ps(igrid)%ws,1,type_sub_block_stg(idir,ic^d),ipefi,itag_stg, &
392 icomm,recvrequest_stg(irecv),ierrmpi)
399 end subroutine coarsen_grid_siblings
subroutine find_neighbor(my_neighbor, my_neighbor_type, tree, iD, pole)
find neighbors of all blocks
subroutine alloc_node(igrid)
allocate arrays on igrid node
integer function getnode(ipe)
Get first available igrid on processor ipe.
subroutine putnode(igrid, ipe)
subroutine initial_condition(igrid)
fill in initial condition
subroutine coarsen_grid(sFi, ixFiGL, ixFiL, sCo, ixCoGL, ixCoL)
coarsen one grid to its coarser representative
subroutine build_connectivity
subroutine get_level_range
subroutine coarsen_tree_leaf(igrid, ipe, child_igrid, child_ipe, active)
subroutine refine_tree_leaf(child_igrid, child_ipe, igrid, ipe, active)
subroutine unflag_coarsen_siblings
subroutine, public store_faces
To achieve consistency and thus conservation of divergence, when refining a block we take into account...
subroutine, public comm_faces
When refining a coarse block with fine neighbours, it is necessary prolong consistently with the alre...
Module to coarsen and refine grids for AMR.
subroutine proper_nesting
For all grids on all processors, do a check on refinement flags. Make sure that neighbors will not di...
subroutine, public amr_coarsen_refine
coarsen and refine blocks to update AMR grid
Module with basic grid data structures.
logical, dimension(:,:), allocatable, save refine
logical, dimension(:,:), allocatable, save coarsen
AMR flags and grids-in-use identifier per processor (igrid,ipe)
type(tree_node_ptr), dimension(:,:), allocatable, save igrid_to_node
Array to go from an [igrid, ipe] index to a node pointer.
type(tree_node_ptr), dimension(:), allocatable, save level_head
The head pointer of the linked list per refinement level.
update ghost cells of all blocks including physical boundaries
This module contains definitions of global parameters and variables and some generic functions/subrou...
logical stagger_grid
True for using stagger grid.
integer icomm
The MPI communicator.
integer mype
The rank of the current MPI task.
integer, dimension(:), allocatable, parameter d
integer ierrmpi
A global MPI error return code.
integer npe
The number of MPI tasks.
integer nbufferx
Number of cells as buffer zone.
integer refine_max_level
Maximal number of AMR levels.
integer max_blocks
The maximum number of grid blocks in a processor.
Module to couple the octree-mg library to AMRVAC. This file uses the VACPP preprocessor,...
Module with all the methods that users can customize in AMRVAC.
procedure(after_refine), pointer usr_after_refine
subroutine refine_grids(child_igrid, child_ipe, igrid, ipe, active)
refine one block to its children blocks