MPI-AMRVAC  3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_initialize.t
!> This module handles the initialization of various components of amrvac
module mod_initialize
  use mod_comm_lib, only: mpistop

  implicit none
  private

  logical :: initialized_already = .false.

  ! Public methods
  public :: initialize_amrvac

contains

  !> Initialize amrvac: read par files and initialize variables
  subroutine initialize_amrvac()
    use mod_global_parameters
    use mod_input_output, only: read_par_files
    use mod_physics, only: phys_check, phys_check_params
    use mod_usr_methods, only: usr_set_parameters
    use mod_bc_data, only: bc_data_init
    use mod_init_datafromfile, only: read_data_init
    use mod_comm_lib, only: init_comm_types

    if (initialized_already) return

    ! add auxiliary variable(s) to update boundary ghost cells
    nwgc=nwgc+nwaux

    ! Check whether the user has loaded a physics module
    call phys_check()

    ! Read input files
    call read_par_files()
    call initialize_vars()
    call init_comm_types()

    ! Possibly load boundary condition data or initial data
    call bc_data_init()
    call read_data_init()

    if(associated(usr_set_parameters)) call usr_set_parameters()

    call phys_check_params()

    initialized_already = .true.
  end subroutine initialize_amrvac
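
  ! Usage note (illustrative comment, not in the upstream file): a driver
  ! calls initialize_amrvac() once after MPI startup; any later call returns
  ! immediately through the initialized_already guard above. See the sketch
  ! after the module for a minimal example.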

  !> Initialize (and allocate) simulation and grid variables
  subroutine initialize_vars
    use mod_forest
    use mod_global_parameters
    use mod_ghostcells_update
    use mod_fix_conserve, only: pflux
    use mod_amr_fct, only: pface, fine_neighbors, old_neighbor
    use mod_geometry

    integer :: igrid, level, ipe, ig^D
    logical :: ok

    allocate(ps(max_blocks))
    allocate(ps1(max_blocks))
    allocate(ps2(max_blocks))
    allocate(ps3(max_blocks))
    allocate(ps4(max_blocks))
    allocate(psc(max_blocks))
    allocate(ps_sub(max_blocks))
    allocate(neighbor(2,-1:1^D&,max_blocks),neighbor_child(2,0:3^D&,max_blocks))
    allocate(neighbor_type(-1:1^D&,max_blocks),neighbor_active(-1:1^D&,max_blocks))
    allocate(neighbor_pole(-1:1^D&,max_blocks))
    allocate(igrids(max_blocks),igrids_active(max_blocks),igrids_passive(max_blocks))
    allocate(phyboundblock(max_blocks))
    allocate(node(nodehi,max_blocks),node_sub(nodehi,max_blocks),&
         rnode(rnodehi,max_blocks),rnode_sub(rnodehi,max_blocks))
    allocate(pflux(2,^ND,max_blocks))
    ! allocate mesh for particles
    if(use_particles) allocate(gridvars(max_blocks))
    if(stagger_grid) then
      allocate(pface(2,^ND,max_blocks),fine_neighbors(2^D&,^ND,max_blocks))
      allocate(old_neighbor(2,-1:1^D&,max_blocks))
    end if

    it=it_init
    global_time=time_init

    dt=zero

    ! no poles initially
    neighbor_pole=0

    ! check resolution
    if ({mod(ixGhi^D,2)/=0|.or.}) then
      call mpistop("mesh widths must give even number grid points")
    end if
    ixM^LL=ixG^LL^LSUBnghostcells;
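    ! e.g. in 1D the preprocessor expands the line above to
    !   ixMlo1=ixGlo1+nghostcells;ixMhi1=ixGhi1-nghostcells;
    ! i.e. the mesh range is the grid range shrunk by nghostcells on each side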

    if ({nbufferx^D>(ixMhi^D-ixMlo^D+1)|.or.}) then
      write(unitterm,*) "nbufferx^D bigger than mesh size makes no sense."
      write(unitterm,*) "Decrease nbufferx or increase mesh size"
      call mpistop("")
    end if

    ! initialize dx arrays on finer (>1) levels
    do level=2,refine_max_level
      {dx(^D,level) = dx(^D,level-1) * half\} ! refine ratio 2
    end do
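    ! e.g. in 2D the preprocessor expands the loop body above to
    !   dx(1,level) = dx(1,level-1) * half
    !   dx(2,level) = dx(2,level-1) * half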

    ! domain decomposition
    ! physical extent of a grid block at level 1, per dimension
    ^D&dg^D(1)=dx(^D,1)*dble(block_nx^D)\
    ! number of grid blocks at level 1 in simulation domain, per dimension
    ^D&ng^D(1)=nint((xprobmax^D-xprobmin^D)/dg^D(1))\
    ! total number of grid blocks at level 1
    nglev1={ng^D(1)*}

    do level=2,refine_max_level
      dg^D(level)=half*dg^D(level-1);
      ng^D(level)=ng^D(level-1)*2;
    end do
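    ! worked example (illustrative numbers): a 1D domain [0,1] with
    ! block_nx1=16 and dx(1,1)=1/64 gives dg1(1)=0.25, ng1(1)=4 and
    ! nglev1=4; on level 2, dg1(2)=0.125 and ng1(2)=8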

    ! check that the specified grid block size correctly divides the domain
    ok=({(abs(dble(ng^D(1))*dg^D(1)-(xprobmax^D-xprobmin^D))<=smalldouble)|.and.})
    if (.not.ok) then
      write(unitterm,*)"domain cannot be divided by meshes of given gridsize"
      call mpistop("domain cannot be divided by meshes of given gridsize")
    end if

    poleB=.false.
    if (.not.slab) call set_pole

    ! the number of level-1 grid blocks along a dimension that has neither a pole
    ! nor a periodic boundary must be larger than 1 for a rectangular AMR mesh
    if(({ng^D(1)/=1|.or.}).and.refine_max_level>1) then
      {
      if(ng^D(1)==1.and..not.poleB(1,^D).and.&
         .not.poleB(2,^D).and..not.periodB(^D).and..not.aperiodB(^D)) then
        write(unitterm,"(a,i2,a)") "number of grid blocks at level 1 in dimension",^D,&
          " must be larger than 1 for a rectangular AMR mesh!"
        write(unitterm,"(a,i1)") "increase domain_nx",^D
        call mpistop("")
      end if
      \}
    end if

    ! initialize connectivity data
    igridstail=0

    ! allocate memory for forest data structures
    allocate(level_head(nlevelshi),level_tail(nlevelshi))
    do level=1,refine_max_level
      nullify(level_head(level)%node,level_tail(level)%node)
    end do

    allocate(igrid_to_node(max_blocks,0:npe-1))
    do ipe=0,npe-1
      do igrid=1,max_blocks
        nullify(igrid_to_node(igrid,ipe)%node)
      end do
    end do

    ! space-filling curve: go from a Morton number to an igrid and processor index
    allocate(sfc(1:3,max_blocks*npe))

    ! go from a local grid index to its Morton number
    allocate(igrid_to_sfc(max_blocks))

    sfc=0
    allocate(morton_start(0:npe-1),morton_stop(0:npe-1))
    allocate(morton_sub_start(0:npe-1),morton_sub_stop(0:npe-1))

    allocate(nleafs_level(1:nlevelshi))

    ! AMR flags and grids-in-use identifiers per (igrid,ipe)
    allocate(coarsen(max_blocks,0:npe-1),refine(max_blocks,0:npe-1))
    coarsen=.false.
    refine=.false.
    if ({nbufferx^D/=0|.or.}) then
      allocate(buffer(max_blocks,0:npe-1))
      buffer=.false.
    end if
    allocate(igrid_inuse(max_blocks,0:npe-1))
    igrid_inuse=.false.

    ! pointers to the coarse (level-1) grid tree nodes
    allocate(tree_root(1:ng^D(1)))
    {do ig^DB=1,ng^DB(1)\}
      nullify(tree_root(ig^D)%node)
    {end do\}
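    ! in 2D the three lines above expand to nested loops:
    !   do ig2=1,ng2(1)
    !     do ig1=1,ng1(1)
    !       nullify(tree_root(ig1,ig2)%node)
    !     end do
    !   end do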

    ! define index ranges and MPI send/receive derived datatype for ghost-cell swap
    call init_bc()
    type_send_srl=>type_send_srl_f
    type_recv_srl=>type_recv_srl_f
    type_send_r=>type_send_r_f
    type_recv_r=>type_recv_r_f
    type_send_p=>type_send_p_f
    type_recv_p=>type_recv_p_f
    call create_bc_mpi_datatype(iwstart,nwgc)

  end subroutine initialize_vars

end module mod_initialize
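
To illustrate how this module is used, here is a minimal driver sketch. The MPI helpers comm_start and comm_finalize from mod_comm_lib are assumptions for illustration; only initialize_amrvac is defined in this file.

  program driver_sketch
    use mod_comm_lib, only: comm_start, comm_finalize  ! assumed MPI helpers
    use mod_initialize, only: initialize_amrvac
    implicit none

    call comm_start()          ! assumed: set up the MPI environment
    ! read par files, allocate grid/forest storage, build ghost-cell MPI types
    call initialize_amrvac()
    call initialize_amrvac()   ! a repeated call returns immediately
                               ! (initialized_already guard)
    ! ... set up and run the simulation ...
    call comm_finalize()       ! assumed: shut down MPI
  end program driver_sketch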