MPI-AMRVAC 2.2
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_advance.t
1 !> Module containing all the time stepping schemes
2 module mod_advance
3 
4  implicit none
5  private
6 
7  logical :: firstsweep, lastsweep
8 
9  !> Whether to conserve fluxes at the current sub-step
10  logical :: fix_conserve_at_step = .true.
11 
12  public :: advance
13  public :: process
14  public :: process_advanced
15 
16 contains
17 
18  !> Advance all the grids over one time step, including all sources
19  subroutine advance(iit)
21  use mod_particles, only: handle_particles
22  use mod_source, only: add_split_source
23 
24  integer, intent(in) :: iit
25 
26  integer :: iigrid, igrid, idimsplit
27 
28  ! old solution values at t_n-1 no longer needed: make copy of w(t_n)
29  !$OMP PARALLEL DO PRIVATE(igrid)
30  do iigrid=1,igridstail; igrid=igrids(iigrid);
31  pso(igrid)%w=ps(igrid)%w
32  if(stagger_grid) pso(igrid)%ws=ps(igrid)%ws
33  end do
34  !$OMP END PARALLEL DO
35 
36  {#IFDEF RAY
37  call update_rays
38  }
39 
40  ! split source addition
41  call add_split_source(prior=.true.)
42 
43  firstsweep=.true.
44  if (dimsplit) then
45  if ((iit/2)*2==iit .or. typedimsplit=='xy') then
46  ! do the sweeps in order of increasing idim,
47  do idimsplit=1,ndim
48  lastsweep= idimsplit==ndim
49  call advect(idimsplit,idimsplit)
50  end do
51  else
52  ! If the parity of "iit" is odd and typedimsplit=xyyx,
53  ! do sweeps backwards
54  do idimsplit=ndim,1,-1
55  lastsweep= idimsplit==1
56  call advect(idimsplit,idimsplit)
57  end do
58  end if
59  else
60  ! Add fluxes from all directions at once
61  lastsweep= .true.
62  call advect(1,ndim)
63  end if
64 
65  ! split source addition
66  call add_split_source(prior=.false.)
67 
68  if(use_particles) call handle_particles
69 
70  end subroutine advance
71 
72  !> Advance all grids over one time step, but without taking dimensional
73  !> splitting or split source terms into account
74  subroutine advect(idim^LIM)
79 
80  integer, intent(in) :: idim^LIM
81  integer :: iigrid, igrid
82 
83  call init_comm_fix_conserve(idim^lim,nwflux)
84  fix_conserve_at_step = time_advance .and. levmax>levmin
85 
86  ! copy w instead of wold because of potential use of dimsplit or sourcesplit
87  !$OMP PARALLEL DO PRIVATE(igrid)
88  do iigrid=1,igridstail; igrid=igrids(iigrid);
89  ps1(igrid)%w=ps(igrid)%w
90  if(stagger_grid) ps1(igrid)%ws=ps(igrid)%ws
91  end do
92  !$OMP END PARALLEL DO
93 
94  istep = 0
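 ! Notation for the scheme comments below (a sketch of the conventions,
 ! derived from the advect1/global_implicit_update interfaces further down):
 ! w^n is the solution at global_time, F_ex(.) is the explicit operator applied
 ! by advect1 (flux divergence plus unsplit sources) and F_im(.) is the implicit
 ! operator applied by global_implicit_update. A call
 !   advect1(method,beta,idim^lim,qtC,psa,qt,psb)
 ! accumulates psb <- psb + beta*dt*F_ex(psa), with psa evaluated at time qtC
 ! and psb holding the state at time qt.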
95 
96  select case (time_stepper)
97  case ("onestep")
98  select case (time_integrator)
99  case ("Forward_Euler")
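 ! forward Euler: w^{n+1} = w^n + dt*F_ex(w^n), with the fluxes evaluated
 ! on the copy ps1 of w^n and the update accumulated into ps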
100  call advect1(flux_scheme,one,idim^lim,global_time,ps1,global_time,ps)
101 
102  case ("IMEX_Euler")
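 ! IMEX Euler: explicit predictor w^* = w^n + dt*F_ex(w^n) into ps1,
 ! then the implicit solve w^{n+1} = w^* + dt*F_im(w^{n+1}) into ps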
103  call advect1(flux_scheme,one,idim^lim,global_time,ps,global_time,ps1)
104  call global_implicit_update(one,dt,global_time+dt,ps,ps1)
105 
106  case ("IMEX_SP")
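 ! IMEX_SP: implicit solve first, w^* = w^n + dt*F_im(w^*) into ps,
 ! then the explicit update w^{n+1} = w^* + dt*F_ex(w^n), with the fluxes
 ! taken from the unmodified copy ps1 of w^n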
107  call global_implicit_update(one,dt,global_time,ps,ps1)
108  call advect1(flux_scheme,one,idim^lim,global_time,ps1,global_time,ps)
109 
110  case default
111  write(unitterm,*) "time_integrator=",time_integrator,"time_stepper=",time_stepper
112  write(unitterm,*) "Error in advect: Unknown time integration method"
113  call mpistop("Correct time_integrator")
114  end select
115 
116  case ("twostep")
117  select case (time_integrator)
118  case ("Predictor_Corrector")
119  ! PC or explicit midpoint
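 ! explicit midpoint: predictor w^* = w^n + (dt/2)*F_ex(w^n) with the
 ! typepred1 scheme, corrector w^{n+1} = w^n + dt*F_ex(w^*) with w^*
 ! taken at t^n + dt/2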
120  ! predictor step
121  fix_conserve_at_step = .false.
122  call advect1(typepred1,half,idim^lim,global_time,ps,global_time,ps1)
123  ! corrector step
124  fix_conserve_at_step = time_advance .and. levmax>levmin
125  call advect1(flux_scheme,one,idim^lim,global_time+half*dt,ps1,global_time,ps)
126 
127  case ("RK2_alfa")
128  ! RK2 with alfa parameter, where rk_a21=alfa
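 ! generic two-stage, second-order RK: w^* = w^n + alfa*dt*F_ex(w^n),
 ! w^{n+1} = w^n + rk_b1*dt*F_ex(w^n) + rk_b2*dt*F_ex(w^*);
 ! second order requires rk_b1 + rk_b2 = 1 and rk_b2*alfa = 1/2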
129  call advect1(flux_scheme,rk_a21, idim^lim,global_time,ps,global_time,ps1)
130  !$OMP PARALLEL DO PRIVATE(igrid)
131  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
132  ps(igrid)%w = ps(igrid)%w+rk_b1*(ps1(igrid)%w-ps(igrid)%w)/rk_a21
133  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+(one-rk_b2)*(ps1(igrid)%ws-ps(igrid)%ws)/rk_a21
134  end do
135  !$OMP END PARALLEL DO
137 
138  case ("ssprk2")
139  ! ssprk2 or Heun's method
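 ! Heun / SSPRK(2,2): w^* = w^n + dt*F_ex(w^n),
 ! w^{n+1} = (w^n + w^*)/2 + (dt/2)*F_ex(w^*)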
140  call advect1(flux_scheme,one, idim^lim,global_time,ps,global_time,ps1)
141  !$OMP PARALLEL DO PRIVATE(igrid)
142  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
143  ps(igrid)%w = half*ps(igrid)%w+half*ps1(igrid)%w
144  if(stagger_grid) ps(igrid)%ws = half*ps(igrid)%ws+half*ps1(igrid)%ws
145  end do
146  !$OMP END PARALLEL DO
147  call advect1(flux_scheme,half,idim^lim,global_time+dt,ps1,global_time+half*dt,ps)
148 
149  case ("IMEX_Midpoint")
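 ! IMEX midpoint: the stage value
 ! w^(1/2) = w^n + (dt/2)*F_ex(w^n) + (dt/2)*F_im(w^(1/2)) is built in ps2,
 ! then w^{n+1} = w^n + dt*F_ex(w^(1/2)) + dt*F_im(w^(1/2))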
150  !$OMP PARALLEL DO PRIVATE(igrid)
151  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
152  ps2(igrid)%w = ps(igrid)%w
153  if(stagger_grid) ps2(igrid)%ws = ps(igrid)%ws
154  end do
155  !$OMP END PARALLEL DO
156  call advect1(flux_scheme,half, idim^lim,global_time,ps,global_time,ps1)
157  call global_implicit_update(half,dt,global_time+half*dt,ps2,ps1)
158  !$OMP PARALLEL DO PRIVATE(igrid)
159  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
160  ps(igrid)%w = ps(igrid)%w+2.0d0*(ps2(igrid)%w-ps1(igrid)%w)
161  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+2.0d0*(ps2(igrid)%ws-ps1(igrid)%ws)
162  end do
163  !$OMP END PARALLEL DO
164  call advect1(flux_scheme,one, idim^lim,global_time+half*dt,ps2,global_time,ps)
165 
166  case ("IMEX_Trapezoidal")
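 ! IMEX trapezoidal (Crank-Nicolson type): schematically
 ! w^{n+1} = w^n + (dt/2)*(F_ex(w^n)+F_ex(w^*)) + (dt/2)*(F_im(w^n)+F_im(w^{n+1})),
 ! with w^* an explicit Euler predictor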
167  call advect1(flux_scheme,one, idim^lim,global_time,ps,global_time,ps1)
168  !$OMP PARALLEL DO PRIVATE(igrid)
169  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
170  ps2(igrid)%w = half*(ps(igrid)%w+ps1(igrid)%w)
171  if(stagger_grid) ps2(igrid)%ws = half*(ps(igrid)%ws+ps1(igrid)%ws)
172  end do
173  !$OMP END PARALLEL DO
175  !$OMP PARALLEL DO PRIVATE(igrid)
176  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
177  ps1(igrid)%w = ps1(igrid)%w+half*dt*ps(igrid)%w
178  if(stagger_grid) ps1(igrid)%ws = ps1(igrid)%ws+half*dt*ps(igrid)%ws
179  end do
180  !$OMP END PARALLEL DO
181  !$OMP PARALLEL DO PRIVATE(igrid)
182  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
183  ps(igrid)%w = ps2(igrid)%w+half*dt*ps(igrid)%w
184  if(stagger_grid) ps(igrid)%ws = ps2(igrid)%ws+half*dt*ps(igrid)%ws
185  end do
186  !$OMP END PARALLEL DO
187  call getbc(global_time+dt,dt,ps1,1,nwflux+nwaux,phys_req_diagonal)
188  call global_implicit_update(half,dt,global_time+dt,ps2,ps1)
189  !$OMP PARALLEL DO PRIVATE(igrid)
190  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
191  ps(igrid)%w = ps(igrid)%w+ps2(igrid)%w-ps1(igrid)%w
192  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+ps2(igrid)%ws-ps1(igrid)%ws
193  end do
194  !$OMP END PARALLEL DO
195  call advect1(flux_scheme,half, idim^lim,global_time+dt,ps2,global_time+half*dt,ps)
196 
197  case default
198  write(unitterm,*) "time_integrator=",time_integrator,"time_stepper=",time_stepper
199  write(unitterm,*) "Error in advect: Unknown time integration method"
200  call mpistop("Correct time_integrator")
201  end select
202 
203  case ("threestep")
204  select case (time_integrator)
205  case ("ssprk3")
206  ! this is SSPRK(3,3) Gottlieb-Shu 1998 or SSP(3,2) depending on ssprk_order (3 vs 2)
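 ! for ssprk_order=3 the coefficients reduce to the classical Shu-Osher form:
 ! w^1 = w^n + dt*F_ex(w^n)
 ! w^2 = (3/4)*w^n + (1/4)*(w^1 + dt*F_ex(w^1))
 ! w^{n+1} = (1/3)*w^n + (2/3)*(w^2 + dt*F_ex(w^2))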
207  call advect1(flux_scheme,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
208  !$OMP PARALLEL DO PRIVATE(igrid)
209  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
210  ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
211  if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
212  end do
213  !$OMP END PARALLEL DO
215  !$OMP PARALLEL DO PRIVATE(igrid)
216  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
217  ps(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
218  if(stagger_grid) ps(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
219  end do
220  !$OMP END PARALLEL DO
221  call advect1(flux_scheme,rk_beta33, idim^lim,global_time+rk_c3*dt,ps2,global_time+(1.0d0-rk_beta33)*dt,ps)
222 
223  case ("RK3_BT")
224  ! this is a general threestep RK according to its Butcher Table
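 ! with stages k1 = F_ex(w^n), k2 = F_ex(w^n + rk3_a21*dt*k1),
 ! k3 = F_ex(w^n + dt*(rk3_a31*k1 + rk3_a32*k2)), the update reads
 ! w^{n+1} = w^n + dt*(rk3_b1*k1 + rk3_b2*k2 + rk3_b3*k3)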
225  call advect1(flux_scheme,rk3_a21, idim^lim,global_time,ps,global_time,ps1)
226  !$OMP PARALLEL DO PRIVATE(igrid)
227  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
228  ps3(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/rk3_a21
229  if(stagger_grid) ps3(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/rk3_a21
230  end do
231  !$OMP END PARALLEL DO
232  !$OMP PARALLEL DO PRIVATE(igrid)
233  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
234  ps2(igrid)%w=ps(igrid)%w+rk3_a31*ps3(igrid)%w
235  if(stagger_grid) ps2(igrid)%ws=ps(igrid)%ws+rk3_a31*ps3(igrid)%ws
236  end do
237  !$OMP END PARALLEL DO
239  !$OMP PARALLEL DO PRIVATE(igrid)
240  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
241  ps(igrid)%w=ps(igrid)%w+rk3_b1*ps3(igrid)%w &
242  +rk3_b2*(ps2(igrid)%w-(ps(igrid)%w+rk3_a31*ps3(igrid)%w))/rk3_a32
243  if(stagger_grid)then
244  ps(igrid)%ws=ps(igrid)%ws+rk3_b1*ps3(igrid)%ws &
245  +rk3_b2*(ps2(igrid)%ws-(ps(igrid)%ws+rk3_a31*ps3(igrid)%ws))/rk3_a32
246  endif
247  end do
248  !$OMP END PARALLEL DO
249  call advect1(flux_scheme,rk3_b3, idim^lim,global_time+rk3_c3*dt,ps2,global_time+(1.0d0-rk3_b3)*dt,ps)
250 
251  case ("IMEX_ARS3")
252  ! this is IMEX scheme ARS3
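 ! presumably the ARS(2,3,3) combination of Ascher, Ruuth & Spiteri:
 ! two implicit and three explicit stages, third-order accurate,
 ! parametrised by ars_gamma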
253  call advect1(flux_scheme,ars_gamma, idim^lim,global_time,ps,global_time,ps1)
254  !$OMP PARALLEL DO PRIVATE(igrid)
255  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
256  ps4(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/ars_gamma
257  if(stagger_grid) ps4(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/ars_gamma
258  end do
259  !$OMP END PARALLEL DO
261  !$OMP PARALLEL DO PRIVATE(igrid)
262  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
263  ps1(igrid)%w=(ps2(igrid)%w-ps1(igrid)%w)/ars_gamma
264  if(stagger_grid) ps1(igrid)%ws=(ps2(igrid)%ws-ps1(igrid)%ws)/ars_gamma
265  end do
266  !$OMP END PARALLEL DO
267  !$OMP PARALLEL DO PRIVATE(igrid)
268  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
269  ps3(igrid)%w=ps(igrid)%w+(ars_gamma-1.0d0)*ps4(igrid)%w+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%w
270  if(stagger_grid) then
271  ps3(igrid)%ws=ps(igrid)%ws+(ars_gamma-1.0d0)*ps4(igrid)%ws+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%ws
272  endif
273  end do
274  !$OMP END PARALLEL DO
275  call advect1(flux_scheme,2.0d0*(1.0d0-ars_gamma), idim^lim,global_time+ars_gamma*dt,ps2,global_time+(ars_gamma-1.0d0)*dt,ps3)
276  !$OMP PARALLEL DO PRIVATE(igrid)
277  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
278  ps2(igrid)%w=ps1(igrid)%w+(ps3(igrid)%w-(ps(igrid)%w+ &
279  (ars_gamma-1.0d0)*ps4(igrid)%w+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%w))/(2.0d0*(1.0d0-ars_gamma))
280  if(stagger_grid) then
281  ps2(igrid)%ws=ps1(igrid)%ws+(ps3(igrid)%ws-(ps(igrid)%ws+ &
282  (ars_gamma-1.0d0)*ps4(igrid)%ws+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%ws))/(2.0d0*(1.0d0-ars_gamma))
283  endif
284  end do
285  !$OMP END PARALLEL DO
287  !$OMP PARALLEL DO PRIVATE(igrid)
288  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
289  ps(igrid)%w=ps(igrid)%w+half*ps2(igrid)%w &
290  +half*(ps4(igrid)%w-ps3(igrid)%w)/ars_gamma
291  if(stagger_grid) then
292  ps(igrid)%ws=ps(igrid)%ws+half*ps2(igrid)%ws &
293  +half*(ps4(igrid)%ws-ps3(igrid)%ws)/ars_gamma
294  endif
295  end do
296  !$OMP END PARALLEL DO
297  call advect1(flux_scheme,half, idim^lim,global_time+(1.0d0-ars_gamma)*dt,ps4,global_time+half*dt,ps)
298 
299  case ("IMEX_232")
300  ! this is IMEX_ARK(2,3,2) or IMEX_SSP(2,3,2)
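 ! second-order IMEX scheme combining the explicit coefficients imex_a21,
 ! imex_a31, imex_a32 and weights imex_b1..imex_b3 with the implicit
 ! coefficients imex_ha21, imex_ha22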
301  call advect1(flux_scheme,imex_a21, idim^lim,global_time,ps,global_time,ps1)
302  !$OMP PARALLEL DO PRIVATE(igrid)
303  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
304  ps4(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/imex_a21
305  ps3(igrid)%w=ps(igrid)%w
306  if(stagger_grid) then
307  ps4(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/imex_a21
308  ps3(igrid)%ws=ps(igrid)%ws
309  endif
310  end do
311  !$OMP END PARALLEL DO
313  !$OMP PARALLEL DO PRIVATE(igrid)
314  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
315  ps1(igrid)%w=ps1(igrid)%w+imex_ha21*dt*ps3(igrid)%w
316  if(stagger_grid) ps1(igrid)%ws=ps1(igrid)%ws+imex_ha21*dt*ps3(igrid)%ws
317  end do
318  !$OMP END PARALLEL DO
319  call getbc(global_time+imex_a21*dt,dt,ps1,1,nwflux+nwaux,phys_req_diagonal)
321  !$OMP PARALLEL DO PRIVATE(igrid)
322  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
323  ps(igrid)%w=ps(igrid)%w+imex_a31*ps4(igrid)%w &
324  +imex_b1*dt*ps3(igrid)%w+imex_b2*(ps2(igrid)%w-ps1(igrid)%w)/imex_ha22
325  if(stagger_grid) then
326  ps(igrid)%ws=ps(igrid)%ws+imex_a31*ps4(igrid)%ws &
327  +imex_b1*dt*ps3(igrid)%ws+imex_b2*(ps2(igrid)%ws-ps1(igrid)%ws)/imex_ha22
328  endif
329  end do
330  !$OMP END PARALLEL DO
331  !$OMP PARALLEL DO PRIVATE(igrid)
332  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
333  ps3(igrid)%w=ps1(igrid)%w-imex_a21*ps4(igrid)%w &
334  -imex_ha21*dt*ps3(igrid)%w+imex_b1*dt*ps3(igrid)%w
335  if(stagger_grid) then
336  ps3(igrid)%ws=ps1(igrid)%ws-imex_a21*ps4(igrid)%ws &
337  -imex_ha21*dt*ps3(igrid)%ws+imex_b1*dt*ps3(igrid)%ws
338  endif
339  end do
340  !$OMP END PARALLEL DO
342  !$OMP PARALLEL DO PRIVATE(igrid)
343  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
344  ps2(igrid)%w=(ps(igrid)%w-ps3(igrid)%w-imex_a31*ps4(igrid)%w)/imex_a32 &
345  +(1.0d0-imex_b2/imex_a32)*(ps2(igrid)%w-ps1(igrid)%w)/imex_ha22
346  if(stagger_grid) then
347  ps2(igrid)%ws=(ps(igrid)%ws-ps3(igrid)%ws-imex_a31*ps4(igrid)%ws)/imex_a32 &
348  +(1.0d0-imex_b2/imex_a32)*(ps2(igrid)%ws-ps1(igrid)%ws)/imex_ha22
349  endif
350  end do
351  !$OMP END PARALLEL DO
352  !$OMP PARALLEL DO PRIVATE(igrid)
353  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
354  ps1(igrid)%w=ps3(igrid)%w+imex_b1*ps4(igrid)%w+imex_b2*ps2(igrid)%w
355  if(stagger_grid) then
356  ps1(igrid)%ws=ps3(igrid)%ws+imex_b1*ps4(igrid)%ws+imex_b2*ps2(igrid)%ws
357  endif
358  end do
359  !$OMP END PARALLEL DO
361  !$OMP PARALLEL DO PRIVATE(igrid)
362  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
363  ps(igrid)%w=ps1(igrid)%w+ps2(igrid)%w-ps(igrid)%w
364  if(stagger_grid) then
365  ps(igrid)%ws=ps1(igrid)%ws+ps2(igrid)%ws-ps(igrid)%ws
366  endif
367  end do
368  !$OMP END PARALLEL DO
369  call advect1(flux_scheme,imex_b3, idim^lim,global_time+imex_c3*dt,ps2,global_time+(1.0d0-imex_b3)*dt,ps)
370 
371  case default
372  write(unitterm,*) "time_integrator=",time_integrator,"time_stepper=",time_stepper
373  write(unitterm,*) "Error in advect: Unknown time integration method"
374  call mpistop("Correct time_integrator")
375  end select
376 
377  case ("fourstep")
378  select case (time_integrator)
379  case ("ssprk4")
380  ! SSPRK(4,3) or SSP(4,2) depending on ssprk_order (3 vs 2)
381  ! ssprk43: strong-stability-preserving four-stage, third-order RK by Ruuth and Spiteri,
382  ! Ruuth & Spiteri, J. Sci. Comput. 17 (2002), p. 211-220
383  ! supposed to be stable up to CFL=2.
384  ! ssp42: stable up to CFL=3
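 ! for ssprk_order=3 the classical SSPRK(4,3) stages are
 ! w^1 = w^n + (dt/2)*F_ex(w^n),  w^2 = w^1 + (dt/2)*F_ex(w^1),
 ! w^3 = (2/3)*w^n + (1/3)*(w^2 + (dt/2)*F_ex(w^2)),
 ! w^{n+1} = w^3 + (dt/2)*F_ex(w^3)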
385  call advect1(flux_scheme,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
386  !$OMP PARALLEL DO PRIVATE(igrid)
387  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
388  ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
389  if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
390  end do
391  !$OMP END PARALLEL DO
393  !$OMP PARALLEL DO PRIVATE(igrid)
394  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
395  ps1(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
396  if(stagger_grid) ps1(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
397  end do
398  !$OMP END PARALLEL DO
400  !$OMP PARALLEL DO PRIVATE(igrid)
401  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
402  ps(igrid)%w=rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w
403  if(stagger_grid) ps(igrid)%ws=rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws
404  end do
405  !$OMP END PARALLEL DO
406  call advect1(flux_scheme,rk_beta44, idim^lim,global_time+rk_c4*dt,ps1,global_time+(1.0d0-rk_beta44)*dt,ps)
407 
408  case ("rk4")
409  ! the standard RK(4,4) method
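 ! classical RK4: with k1 = F_ex(w^n), k2 = F_ex(w^n + (dt/2)*k1),
 ! k3 = F_ex(w^n + (dt/2)*k2), k4 = F_ex(w^n + dt*k3),
 ! w^{n+1} = w^n + (dt/6)*(k1 + 2*k2 + 2*k3 + k4),
 ! rearranged below so that only the states ps1, ps2, ps3 are stored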
410  !$OMP PARALLEL DO PRIVATE(igrid)
411  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
412  ps2(igrid)%w=ps(igrid)%w
413  ps3(igrid)%w=ps(igrid)%w
414  if(stagger_grid) then
415  ps2(igrid)%ws=ps(igrid)%ws
416  ps3(igrid)%ws=ps(igrid)%ws
417  endif
418  end do
419  !$OMP END PARALLEL DO
420  call advect1(flux_scheme,half, idim^lim,global_time,ps,global_time,ps1)
421  call advect1(flux_scheme,half, idim^lim,global_time+half*dt,ps1,global_time,ps2)
422  call advect1(flux_scheme,1.0d0, idim^lim,global_time+half*dt,ps2,global_time,ps3)
423  !$OMP PARALLEL DO PRIVATE(igrid)
424  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
425  ps(igrid)%w=(1.0d0/3.0d0)*(-ps(igrid)%w+ps1(igrid)%w+2.0d0*ps2(igrid)%w+ps3(igrid)%w)
426  if(stagger_grid) ps(igrid)%ws=(1.0d0/3.0d0) &
427  *(-ps(igrid)%ws+ps1(igrid)%ws+2.0d0*ps2(igrid)%ws+ps3(igrid)%ws)
428  end do
429  !$OMP END PARALLEL DO
430  call advect1(flux_scheme,1.0d0/6.0d0, idim^lim,global_time+dt,ps3,global_time+dt*5.0d0/6.0d0,ps)
431 
432  case ("jameson")
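 ! Jameson-type four-stage scheme: w^k = w^n + (dt/alpha_k)*F_ex(w^{k-1})
 ! with alpha_k = 4, 3, 2, 1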
433  !$OMP PARALLEL DO PRIVATE(igrid)
434  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
435  ps2(igrid)%w=ps(igrid)%w
436  if(stagger_grid) ps2(igrid)%ws=ps(igrid)%ws
437  end do
438  !$OMP END PARALLEL DO
439  call advect1(flux_scheme,1.0d0/4.0d0, idim^lim,global_time,ps,global_time,ps1)
440  call advect1(flux_scheme,1.0d0/3.0d0, idim^lim,global_time+dt/4.0d0,ps1,global_time,ps2)
441  !$OMP PARALLEL DO PRIVATE(igrid)
442  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
443  ps1(igrid)%w=ps(igrid)%w
444  if(stagger_grid) ps1(igrid)%ws=ps(igrid)%ws
445  end do
446  !$OMP END PARALLEL DO
447  call advect1(flux_scheme,half, idim^lim,global_time+dt/3.0d0,ps2,global_time,ps1)
448  call advect1(flux_scheme,1.0d0, idim^lim,global_time+half*dt,ps1,global_time,ps)
449 
450  case default
451  write(unitterm,*) "time_integrator=",time_integrator,"time_stepper=",time_stepper
452  write(unitterm,*) "Error in advect: Unknown time integration method"
453  call mpistop("Correct time_integrator")
454  end select
455 
456  case ("fivestep")
457  select case (time_integrator)
458  case ("ssprk5")
459  ! SSPRK(5,4) by Ruuth and Spiteri
460  call advect1(flux_scheme,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
461  !$OMP PARALLEL DO PRIVATE(igrid)
462  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
463  ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
464  if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
465  end do
466  !$OMP END PARALLEL DO
468  !$OMP PARALLEL DO PRIVATE(igrid)
469  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
470  ps1(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
471  if(stagger_grid) ps1(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
472  end do
473  !$OMP END PARALLEL DO
475  !$OMP PARALLEL DO PRIVATE(igrid)
476  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
477  ps3(igrid)%w=rk_alfa53*ps2(igrid)%w+rk_alfa54*ps1(igrid)%w
478  if(stagger_grid) ps3(igrid)%ws=rk_alfa53*ps2(igrid)%ws+rk_alfa54*ps1(igrid)%ws
479  end do
480  !$OMP END PARALLEL DO
481  !$OMP PARALLEL DO PRIVATE(igrid)
482  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
483  ps2(igrid)%w=rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w
484  if(stagger_grid) ps2(igrid)%ws=rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws
485  end do
486  !$OMP END PARALLEL DO
488  !$OMP PARALLEL DO PRIVATE(igrid)
489  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
490  ps(igrid)%w=ps3(igrid)%w+rk_alfa55*ps2(igrid)%w &
491  +(rk_beta54/rk_beta44)*(ps2(igrid)%w-(rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w))
492  if(stagger_grid) then
493  ps(igrid)%ws=ps3(igrid)%ws+rk_alfa55*ps2(igrid)%ws &
494  +(rk_beta54/rk_beta44)*(ps2(igrid)%ws-(rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws))
495  endif
496  end do
497  !$OMP END PARALLEL DO
498  call advect1(flux_scheme,rk_beta55, idim^lim,global_time+rk_c5*dt,ps2,global_time+(1.0d0-rk_beta55)*dt,ps)
499 
500  case default
501  write(unitterm,*) "time_integrator=",time_integrator,"time_stepper=",time_stepper
502  write(unitterm,*) "Error in advect: Unknown time integration method"
503  call mpistop("Correct time_integrator")
504  end select
505 
506  case default
507  write(unitterm,*) "time_stepper=",time_stepper
508  write(unitterm,*) "Error in advect: Unknown time stepping method"
509  call mpistop("Correct time_stepper")
510  end select
511 
512  firstsweep=.false.
513  end subroutine advect
514 
515  !> Implicit global update step within IMEX schemes, advance psa=psb+dtfactor*qdt*F_im(psa)
516  subroutine global_implicit_update(dtfactor,qdt,qtC,psa,psb)
520 
521  type(state), target :: psa(max_blocks) !< Compute implicit part from this state and update it
522  type(state), target :: psb(max_blocks) !< Will be unchanged, as on entry
523  double precision, intent(in) :: qdt !< overall time step dt
524  double precision, intent(in) :: qtC !< Both states psa and psb at this time level
525  double precision, intent(in) :: dtfactor !< Advance psa=psb+dtfactor*qdt*F_im(psa)
526 
527  if (associated(phys_implicit_update)) then
528  call phys_implicit_update(dtfactor,qdt,qtc,psa,psb)
529  end if
530 
531  ! enforce boundary conditions for psa
532  call getbc(qtc,0.d0,psa,1,nwflux+nwaux,phys_req_diagonal)
533 
534  end subroutine global_implicit_update
535 
536  !> Evaluate Implicit part in place, i.e. psa==>F_im(psa)
537  subroutine evaluate_implicit(qtC,psa)
540 
541  type(state), target :: psa(max_blocks) !< Compute implicit part from this state and update it
542  double precision, intent(in) :: qtC !< psa at this time level
543 
544  if (associated(phys_evaluate_implicit)) then
545  call phys_evaluate_implicit(qtc,psa)
546  end if
547 
548  end subroutine evaluate_implicit
549 
550  !> Integrate all grids by one partial step
551  subroutine advect1(method,dtfactor,idim^LIM,qtC,psa,qt,psb)
554  use mod_fix_conserve
555  use mod_physics
556 
557  integer, intent(in) :: idim^LIM
558  type(state), target :: psa(max_blocks) !< Compute fluxes based on this state
559  type(state), target :: psb(max_blocks) !< Update solution on this state
560  double precision, intent(in) :: dtfactor !< Advance over dtfactor * dt
561  double precision, intent(in) :: qtC
562  double precision, intent(in) :: qt
563  character(len=*), intent(in) :: method(nlevelshi)
564 
565  double precision :: qdt
566  integer :: iigrid, igrid, level
567 
568  logical :: setigrid
569 
570  istep = istep+1
571 
572  ! opedit: Just advance the active grids:
573  !$OMP PARALLEL DO PRIVATE(igrid,level,qdt)
574  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
575  level=node(plevel_,igrid)
576  qdt=dtfactor*dt_grid(igrid)
577 
578  call process1_grid(method(level),igrid,qdt,ixg^ll,idim^lim,qtc,&
579  psa(igrid),qt,psb(igrid),pso(igrid))
580  end do
581  !$OMP END PARALLEL DO
582 
583  ! opedit: Send flux for all grids, expects sends for all
584  ! nsend_fc(^D), set in connectivity.t.
585 
586  if (fix_conserve_global .and. fix_conserve_at_step) then
587  call recvflux(idim^lim)
588  call sendflux(idim^lim)
589  call fix_conserve(psb,idim^lim,1,nwflux)
590  if(stagger_grid) call fix_edges(psb,idim^lim)
591  end if
592 
593  if(stagger_grid) then
594  ! Now fill the cell-center values for the staggered variables
595  !$OMP PARALLEL DO PRIVATE(igrid)
596  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
597  call phys_face_to_center(ixg^ll,psb(igrid))
598  end do
599  !$OMP END PARALLEL DO
600  end if
601 
602  ! For all grids: fill ghost cells
603  qdt = dtfactor*dt
604  call getbc(qt+qdt,qdt,psb,1,nwflux+nwaux,phys_req_diagonal)
605 
606  end subroutine advect1
607 
608  !> Prepare to advance a single grid over one partial time step
609  subroutine process1_grid(method,igrid,qdt,ixI^L,idim^LIM,qtC,sCT,qt,s,sold)
611  use mod_fix_conserve
612 
613  character(len=*), intent(in) :: method
614  integer, intent(in) :: igrid, ixI^L, idim^LIM
615  double precision, intent(in) :: qdt, qtC, qt
616  type(state), target :: sCT, s, sold
617 
618  double precision :: dx^D
619  ! cell face flux
620  double precision :: fC(ixi^s,1:nwflux,1:ndim)
621  ! cell edge flux
622  double precision :: fE(ixi^s,7-2*ndim:3)
623 
624  dx^d=rnode(rpdx^d_,igrid);
625  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
626  saveigrid=igrid
627 
628  block0=>sct
629  block=>s
632 
633  call advect1_grid(method,qdt,ixi^l,idim^lim,qtc,sct,qt,s,sold,fc,fe,dx^d, &
634  ps(igrid)%x)
635 
636 
637  ! opedit: Obviously, flux is stored only for active grids.
638  ! but we know in fix_conserve whether there is a passive neighbor
640  ! via neighbor_active(i^D,igrid) and thus we skip the correction for those.
641  ! This violates strict conservation when the active/passive interface
642  ! coincides with a coarse/fine interface.
643  if (fix_conserve_global .and. fix_conserve_at_step) then
644  call store_flux(igrid,fc,idim^lim,nwflux)
645  if(stagger_grid) call store_edge(igrid,ixi^l,fe,idim^lim)
646  end if
647 
648  end subroutine process1_grid
649 
650  !> Advance a single grid over one partial time step
651  subroutine advect1_grid(method,qdt,ixI^L,idim^LIM,qtC,sCT,qt,s,sold,fC,fE,dx^D,x)
653  ! integrate one grid by one partial step
656  use mod_tvd
657  use mod_source, only: addsource2
659 
660  character(len=*), intent(in) :: method
661  integer, intent(in) :: ixI^L, idim^LIM
662  double precision, intent(in) :: qdt, qtC, qt, dx^D, x(ixi^s,1:ndim)
663  type(state), target :: sCT, s, sold
664  double precision :: fC(ixi^s,1:nwflux,1:ndim)
665  double precision :: fE(ixi^s,7-2*ndim:3)
666 
667  integer :: ixO^L
668 
669  ixo^l=ixi^l^lsubnghostcells;
670  select case (method)
671  case ('tvdmu','tvdlf','hll','hllc','hllcd','hlld')
672  call finite_volume(method,qdt,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,sold,fc,fe,dx^d,x)
673  case ('cd','cd4')
674  call centdiff(method,qdt,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dx^d,x)
675  case ('hancock')
676  call hancock(qdt,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,dx^d,x)
677  case ('fd')
678  call fd(method,qdt,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dx^d,x)
679  case ('tvd')
680  call centdiff('cd',qdt,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dx^d,x)
681  call tvdlimit(method,qdt,ixi^l,ixo^l,idim^lim,sct,qt+qdt,s,fc,dx^d,x)
682  case ('source')
683  call addsource2(qdt*dble(idimmax-idimmin+1)/dble(ndim),&
684  ixi^l,ixo^l,1,nw,qtc,sct%w,qt,s%w,x,.false.)
685  case ('nul')
686  ! There is nothing to do
687  case default
688  write(unitterm,*)'Error in advect1_grid:',method,' is unknown!'
689  call mpistop("")
690  end select
691 
692  end subroutine advect1_grid
693 
694  !> process is a user entry point in the time loop, called before output and advance;
695  !> it allows modifying the solution, adding extra variables, etc.
696  !> Warning: CFL dt already determined (and is not recomputed)!
697  subroutine process(iit,qt)
701  use mod_physics, only: phys_req_diagonal
702  ! .. scalars ..
703  integer,intent(in) :: iit
704  double precision, intent(in):: qt
705 
706  integer:: iigrid, igrid,level
707 
708  if (associated(usr_process_global)) then
709  call usr_process_global(iit,qt)
710  end if
711 
712  if (associated(usr_process_grid)) then
713  !$OMP PARALLEL DO PRIVATE(igrid,level)
714  do iigrid=1,igridstail; igrid=igrids(iigrid);
715  level=node(plevel_,igrid)
716  ! next few lines ensure correct usage of routines like divvector etc
717  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
718  block=>ps(igrid)
721  call usr_process_grid(igrid,level,ixg^ll,ixm^ll,qt,ps(igrid)%w,ps(igrid)%x)
722  end do
723  !$OMP END PARALLEL DO
724  call getbc(qt,dt,ps,1,nwflux+nwaux,phys_req_diagonal)
725  end if
726  end subroutine process
727 
728  !> process_advanced is a user entry point in the time loop, called just after advance;
729  !> it allows modifying the solution, adding extra variables, etc.
730  !> It was added for handling two-way coupled PIC-MHD.
731  !> Warning: w is now at global_time^(n+1), while the global time and iteration counter are still at global_time^n, it^n
732  subroutine process_advanced(iit,qt)
737  use mod_physics, only: phys_req_diagonal
738  ! .. scalars ..
739  integer,intent(in) :: iit
740  double precision, intent(in):: qt
741 
742  integer:: iigrid, igrid,level
743 
744  if (associated(usr_process_adv_global)) then
745  call usr_process_adv_global(iit,qt)
746  end if
747 
748  if (associated(usr_process_adv_grid)) then
749  !$OMP PARALLEL DO PRIVATE(igrid,level)
750  do iigrid=1,igridstail; igrid=igrids(iigrid);
751  level=node(plevel_,igrid)
752  ! next few lines ensure correct usage of routines like divvector etc
753  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
754  block=>ps(igrid)
757 
758  call usr_process_adv_grid(igrid,level,ixg^ll,ixm^ll, &
759  qt,ps(igrid)%w,ps(igrid)%x)
760  end do
761  !$OMP END PARALLEL DO
762  call getbc(qt,dt,ps,1,nwflux+nwaux,phys_req_diagonal)
763  end if
764  end subroutine process_advanced
765 
766 end module mod_advance