MPI-AMRVAC  2.0
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
setdt.t
Go to the documentation of this file.
1 !>setdt - set dt for all levels between levmin and levmax.
2 !> dtpar>0 --> use fixed dtpar for all level
3 !> dtpar<=0 --> determine CFL limited timestep
4 subroutine setdt()
6 use mod_physics
9 
10 integer :: iigrid, igrid, ncycle, ncycle2, ifile, idim
11 double precision :: dtnew, qdtnew, dtmin_mype, factor, dx^D, dxmin^D
12 
13 double precision :: dtmax, dxmin, cmax_mype, v(ixg^t)
14 !----------------------------------------------------------------------------
15 
! CFL-limited path: take the minimum of the Courant limit, the physics
! limit (phys_get_dt) and an optional user limit over all active grids.
16 if (dtpar<=zero) then
17  dtmin_mype=bigdouble
18  cmax_mype = zero
19 !$OMP PARALLEL DO PRIVATE(igrid,qdtnew,dtnew,dx^D)
20  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
21  dtnew=bigdouble
22  dx^d=rnode(rpdx^d_,igrid);
23  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
24  saveigrid = igrid
25  block=>ps(igrid)
26  block%iw0=0
27 
28  if (nwaux>0) then
29  call phys_get_aux(.true.,ps(igrid)%w,&
30  ps(igrid)%x,ixg^ll,ixm^ll,'setdt')
31  end if
32 
33  call getdt_courant(ps(igrid)%w,ixg^ll,ixm^ll,qdtnew,ps(igrid)%x)
34  dtnew=min(dtnew,qdtnew)
35 
36  call phys_get_dt(ps(igrid)%w,ixg^ll,ixm^ll,qdtnew,dx^d,ps(igrid)%x)
37  dtnew=min(dtnew,qdtnew)
38 
39  if (associated(usr_get_dt)) then
40  call usr_get_dt(ps(igrid)%w,ixg^ll,ixm^ll,qdtnew,dx^d,ps(igrid)%x)
41  end if
42 
43  dtnew = min(dtnew,qdtnew)
44  dtmin_mype = min(dtmin_mype,dtnew)
45  dt_grid(igrid) = dtnew
46  end do
47 !$OMP END PARALLEL DO
48 else
49  dtmin_mype=dtpar
50 end if
51 
! flag the run as crashed when the step drops below the user limit dtmin
52 if (dtmin_mype<dtmin) then
53  write(unitterm,*)"Error: Time step too small!", dtmin_mype
54  write(unitterm,*)"on processor:", mype, "at time:", global_time," step:", it
55  write(unitterm,*)"Lower limit of time step:", dtmin
56  crash=.true.
57 end if
58 
! startup ramp: in the first slowsteps-1 steps reduce dt by the factor
! 1-(1-step/slowsteps)**2, which grows smoothly towards 1
59 if (slowsteps>it-it_init+1) then
60  factor=one-(one-dble(it-it_init+1)/dble(slowsteps))**2
61  dtmin_mype=dtmin_mype*factor
62 end if
63 
64 
! never step past the end time of the simulation
65 dtmin_mype=min(dtmin_mype,time_max-global_time)
66 
! global minimum over all MPI ranks (not needed when dt is fixed by dtpar)
67 if (dtpar<=zero) then
68  call mpi_allreduce(dtmin_mype,dt,1,mpi_double_precision,mpi_min, &
69  icomm,ierrmpi)
70 else
71  dt=dtmin_mype
72 end if
73 
! clamp dt so that scheduled output times (dtsave intervals and tsave
! instants) are not overshot
74 if(any(dtsave(1:nfile)<bigdouble).or.any(tsave(isavet(1:nfile),1:nfile)<bigdouble))then
75  dtmax = minval(ceiling(global_time/dtsave(1:nfile))*dtsave(1:nfile))-global_time
76  do ifile=1,nfile
77  dtmax = min(tsave(isavet(ifile),ifile)-global_time,dtmax)
78  end do
79  if(dtmax > smalldouble)then
80  dt=min(dt,dtmax)
81  else
82  ! dtmax=0 means dtsave is divisible by global_time
83  dt=min(dt,minval(dtsave(1:nfile)))
84  end if
85 end if
86 
87 if(mype==0) then
88  if(any(dtsave(1:nfile)<dt)) then
89  write(unitterm,*) 'Warning: timesteps: ',dt,' exceeding output intervals ', dtsave(1:nfile)
90  endif
91 endif
92 
93 ! estimate time step of thermal conduction
94 if(associated(phys_getdt_heatconduct)) then
95  dtmin_mype=bigdouble
96 !$OMP PARALLEL DO PRIVATE(igrid,qdtnew,&
97 !$OMP& dx^D) REDUCTION(min:dtmin_mype)
98  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
99  dx^d=rnode(rpdx^d_,igrid);
100  saveigrid = igrid
101  block=>ps(igrid)
102  qdtnew=bigdouble
103  call phys_getdt_heatconduct(ps(igrid)%w,ixg^ll,ixm^ll,qdtnew,dx^d,ps(igrid)%x)
104  dtmin_mype=min(dtmin_mype,qdtnew)
105  end do
106 !$OMP END PARALLEL DO
107  call mpi_allreduce(dtmin_mype,dtnew,1,mpi_double_precision,mpi_min, &
108  icomm,ierrmpi)
109  if(all(flux_scheme=='nul')) dt=min(dt,dtnew)
! cap the number of conduction subcycles per fluid step at tc_ncycles
110  ncycle=ceiling(dt/dtnew)
111  if (ncycle>tc_ncycles) then
112  if(mype==0 .and. .false.) then
113  write(*,*) 'CFL time step is too many times larger than conduction time step',ncycle
114  write(*,*) 'reducing dt to',tc_ncycles,'times of dt_impl!!'
115  endif
116  dt=tc_ncycles*dtnew
117  endif
118  ! get number of sub-steps of supertime stepping (Meyer 2012 MNRAS 422,2102)
119  if(dt/dtnew< 0.5d0) then
120  s=1
121  else if(dt/dtnew< 2.d0) then
122  s=2
123  else
124  s=ceiling((dsqrt(9.d0+8.d0*dt/dtnew)-1.d0)/2.d0)
125  ! only use odd s number
126  s=s/2*2+1
127  endif
128  dt_tc=dt*0.5d0
129  if(mype==0 .and. .false.) write(*,*) 'supertime steps:',s,' normal subcycles:',&
130  ceiling(dt/dtnew/2.d0)
131 endif
132 
! broadcast the globally agreed dt to every active grid
133 !$OMP PARALLEL DO PRIVATE(igrid)
134 do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
135  dt_grid(igrid)=dt
136 end do
137 !$OMP END PARALLEL DO
138 
139 
140 ! global Lax-Friedrich finite difference flux splitting needs fastest wave-speed
141 ! so does GLM:
142 if(need_global_cmax) call mpi_allreduce(cmax_mype,cmax_global,1,&
143  mpi_double_precision,mpi_max,icomm,ierrmpi)
144 ! some scheme need maximal speed of flow
145 if(need_global_vmax) then
146  cmax_mype=0.d0
147  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
148  do idim=1,ndim
149  call phys_get_v_idim(ps(igrid)%w,ps(igrid)%x,ixg^ll,ixm^ll,idim,v)
150  cmax_mype=max(cmax_mype,maxval(abs(v(ixm^t))))
151  end do
152  end do
153  call mpi_allreduce(cmax_mype,vmax_global,1,&
154  mpi_double_precision,mpi_max,icomm,ierrmpi)
156 end if
157 
158 contains
159 
160  !> compute CFL limited dt (for variable time stepping)
161  subroutine getdt_courant(w,ixI^L,ixO^L,dtnew,x)
162 
164  use mod_physics, only: phys_get_cmax
165 
166  integer, intent(in) :: ixI^L, ixO^L
167  double precision, intent(in) :: x(ixi^s,1:ndim)
168  double precision, intent(inout) :: w(ixi^s,1:nw), dtnew
169 
170  integer :: idims
171  double precision :: courantmax, dxinv(1:ndim), courantmaxtot, courantmaxtots
172  double precision :: cmax(ixi^s), cmaxtot(ixi^s), tmp(ixi^s)
173  !-----------------------------------------------------------------------------
174  dtnew=bigdouble
175 
176  courantmax=zero
177  courantmaxtot=zero
178  courantmaxtots=zero
179 
180  ^d&dxinv(^d)=one/dx^d;
181 
182  cmaxtot(ixo^s)=zero
183 
! accumulate c/dx per direction; non-slab geometry uses the local cell
! size block%ds instead of the uniform dx of this level
184  do idims=1,ndim
185  call phys_get_cmax(w,x,ixi^l,ixo^l,idims,cmax)
186  if(need_global_cmax) cmax_mype = max(cmax_mype,maxval(cmax(ixo^s)))
187  if (.not.slab) then
188  tmp(ixo^s)=cmax(ixo^s)/block%ds(ixo^s,idims)
189  cmaxtot(ixo^s)=cmaxtot(ixo^s)+tmp(ixo^s)
190  courantmax=max(courantmax,maxval(tmp(ixo^s)))
191  else
192  cmaxtot(ixo^s)=cmaxtot(ixo^s)+cmax(ixo^s)*dxinv(idims)
193  courantmax=max(courantmax,maxval(cmax(ixo^s)*dxinv(idims)))
194  end if
195  courantmaxtot=courantmaxtot+courantmax
196  end do
197 
198  select case (typecourant)
199  case ('minimum')
200  ! courantmax='max(c/dx)'
201  if (courantmax>smalldouble) dtnew=min(dtnew,courantpar/courantmax)
202  case ('summax')
203  ! courantmaxtot='summed max(c/dx)'
204  if (courantmaxtot>smalldouble) dtnew=min(dtnew,courantpar/courantmaxtot)
205  case ('maxsum')
206  ! courantmaxtots='max(summed c/dx)'
207  courantmaxtots=max(courantmaxtots,maxval(cmaxtot(ixo^s)))
208  if (courantmaxtots>smalldouble) dtnew=min(dtnew,courantpar/courantmaxtots)
209  case default
210  write(unitterm,*)'Unknown typecourant=',typecourant
211  call mpistop("Error from getdt_courant: no such typecourant!")
212  end select
213 
214  end subroutine getdt_courant
215 
216 end subroutine setdt
This module contains definitions of global parameters and variables and some generic functions/subroutines.
double precision vmax_global
global fastest flow speed needed in glm method
procedure(get_dt), pointer usr_get_dt
double precision time_max
End time for the simulation.
logical need_global_cmax
need global maximal wave speed
integer, public s
Number of sub-steps of supertime stepping.
integer it_init
initial iteration count
double precision dtmin
Stop the simulation when the time step becomes smaller than this value.
double precision courantpar
The Courant (CFL) number used for the simulation.
character(len=std_len), dimension(:), allocatable flux_scheme
Which spatial discretization to use (per grid level)
double precision dt_tc
Time step of thermal conduction.
integer, dimension(nfile) isavet
Module with all the methods that users can customize in AMRVAC.
procedure(getdt_heatconduct), pointer phys_getdt_heatconduct
integer tc_ncycles
Maximal substeps of TC within one fluid time step to limit fluid time step.
Thermal conduction for HD and MHD.
integer ierrmpi
A global MPI error return code.
integer ixm
the mesh range (within a block with ghost cells)
double precision dtpar
If dtpar is positive, it sets the timestep dt directly; otherwise courantpar is used to limit the time step based on the CFL condition.
double precision, dimension(:), allocatable dt_grid
integer, parameter unitterm
Unit for standard output.
integer mype
The rank of the current MPI task.
double precision global_time
The global simulation time.
procedure(sub_get_dt), pointer phys_get_dt
Definition: mod_physics.t:42
double precision, dimension(nfile) dtsave
Repeatedly save output of type N when dtsave(N) simulation time has passed.
subroutine getdt_courant(w, ixIL, ixOL, dtnew, x)
compute CFL limited dt (for variable time stepping)
Definition: setdt.t:162
subroutine mpistop(message)
Exit MPI-AMRVAC with an error message.
Definition: comm_lib.t:160
double precision, dimension(ndim) dxlevel
double precision, dimension(nsavehi, nfile) tsave
Save output of type N on times tsave(:, N)
integer, parameter ndim
Number of spatial dimensions for grid variables.
logical crash
Save a snapshot before crash a run met unphysical values.
logical slab
Cartesian geometry or not.
integer slowsteps
If > 1, then in the first slowsteps-1 time steps dt is reduced by the factor 1 - (1 - step/slowsteps)**2, ramping smoothly up to the full time step.
integer icomm
The MPI communicator.
double precision cmax_global
global fastest wave speed needed in fd scheme and glm method
procedure(sub_get_v_idim), pointer phys_get_v_idim
Definition: mod_physics.t:43
procedure(sub_get_cmax), pointer phys_get_cmax
Definition: mod_physics.t:39
This module defines the procedures of a physics module. It contains function pointers for the various...
Definition: mod_physics.t:4
integer, parameter nfile
Number of output methods.
subroutine setdt()
setdt - set dt for all levels between levmin and levmax. dtpar>0 --> use fixed dtpar for all levels; dtpar<=0 --> determine CFL limited timestep.
Definition: setdt.t:5
procedure(sub_get_aux), pointer phys_get_aux
Definition: mod_physics.t:47
character(len=std_len) typecourant
How to compute the CFL-limited time step.
integer it
Number of time steps taken.
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
logical need_global_vmax
need global maximal flow speed