MPI-AMRVAC 3.0
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
amrvac.t
1 !> AMRVAC solves a set of hyperbolic equations
2 !> \f$\vec{u}_t + \nabla_x \cdot \vec{f}(\vec{u}) = \vec{s}\f$
3 !> using adaptive mesh refinement.
4 program amrvac
5 
6  use mod_global_parameters
7  use mod_input_output
8  use mod_usr_methods
9  use mod_ghostcells_update
10  use mod_usr
11  use mod_initialize
12  use mod_particles
13  use mod_fix_conserve
14  use mod_advance, only: process
15  use mod_multigrid_coupling
16  use mod_convert, only: init_convert
17  use mod_physics
18 
19  double precision :: time0, time_in
20  logical,save :: part_file_exists=.false.
21 
22 
23  call comm_start()
24 
25  time0 = mpi_wtime()
26  time_advance = .false.
27  time_bc = zero
28 
29  ! read command line arguments first
30  call read_arguments()
31 
32  ! init_convert is called before usr_init as the user might associate a convert method
33  call init_convert()
34  ! the usr_init routine should load a physics module
35  call usr_init()
36 
37  call initialize_amrvac()
38 
39  if (restart_from_file /= undefined) then
40  ! restart from previous file or dat file conversion
41  ! get input data from previous AMRVAC run
42 
43  ! read in dat file
44  call read_snapshot()
45 
46  ! rewrite the it=0 snapshot when restarting from an it=0 state
47  if(it==0.and.itsave(1,2)==0) snapshotnext=snapshotnext-1
48 
49  if (reset_time) then
50  ! reset it and global_time to their original values
51  it = it_init
52  global_time = time_init
53  ! reset snapshot number
54  snapshotnext=0
55  end if
56 
57  if (reset_it) then
58  ! reset it to original value
59  it = it_init
60  end if
61 
62  ! modify initial condition
63  if (firstprocess) call modify_ic
64 
65  ! select active grids
66  call selectgrids
67 
68  ! update ghost cells for all need-boundary variables
69  call getbc(global_time,0.d0,ps,1,nwflux+nwaux)
70 
71  ! reset AMR grid
72  if (reset_grid) then
73  call settree
74  else
75  ! set up boundary flux conservation arrays
76  if (levmax>levmin) call allocatebflux
77  end if
78 
79  if(use_particles) then
80  call read_particles_snapshot(part_file_exists)
81  if (.not. part_file_exists) call particles_create()
82  if(convert) then
83  call handle_particles()
84  call time_spent_on_particles()
85  call comm_finalize
86  stop
87  end if
88  end if
89 
91 
92  if (convert) then
93  if (npe/=1.and.(.not.(index(convert_type,'mpi')>=1)) &
94  .and. convert_type .ne. 'user') &
95  call mpistop("non-mpi conversion only uses 1 cpu")
96 
97  ! Optionally call a user method that can modify the grid variables
98  ! before saving the converted data
99  if (associated(usr_process_grid) .or. &
100  associated(usr_process_global)) then
101  call process(it,global_time)
102  end if
103  !here requires -1 snapshot
105 
106  if(associated(phys_special_advance)) then
107  ! e.g. calculate the magnetofrictional (MF) velocity from the magnetic field
109  end if
110 
111  call generate_plotfile
112  call comm_finalize
113  stop
114  end if
115 
116  else
117 
118  ! form and initialize all grids at level one
119  call initlevelone
120 
121  ! set up and initialize finer level grids, if needed
122  call settree
123 
125 
126  {^nooned
127  ! improve initial condition
128  call improve_initial_condition()
129  }
130 
131  ! select active grids
132  call selectgrids
133 
134  if (use_particles) call particles_create()
135 
136  end if
137 
138  ! initialize quantities that depend on the AMR tree information
139  call initialize_after_settree
140 
141  if (mype==0) then
142  print*,'-------------------------------------------------------------------------------'
143  write(*,'(a,f17.3,a)')' Startup phase took : ',mpi_wtime()-time0,' sec'
144  print*,'-------------------------------------------------------------------------------'
145  end if
146 
147  ! an interface to allow the user to do special things before the main loop
148  if (associated(usr_before_main_loop)) &
149  call usr_before_main_loop()
150 
151  ! do time integration of all grids on all levels
152  call timeintegration()
153 
154  if (mype==0) then
155  print*,'-------------------------------------------------------------------------------'
156  write(*,'(a,f17.3,a)')' Finished AMRVAC in : ',mpi_wtime()-time0,' sec'
157  print*,'-------------------------------------------------------------------------------'
158  end if
159 
160  call comm_finalize
161 
162 contains
163 
164  subroutine timeintegration()
165  use mod_timing
167  use mod_forest, only: nleafs_active
168  use mod_global_parameters
169  use mod_input_output, only: saveamrfile
170  use mod_advance, only: advance, process_advanced
171 
172  integer :: level, ifile, fixcount, ncells_block, igrid, iigrid
173  integer(kind=8) ncells_update
174  logical :: crashall
175  double precision :: time_last_print, time_write0, time_write, time_before_advance, dt_loop
176 
177  time_in=mpi_wtime()
178  time_last_print = -bigdouble
179  fixcount=1
180 
182 
183  do ifile=nfile,1,-1
184  if(resume_previous_run) then
185  tsavelast(ifile)=aint((global_time+smalldouble)/dtsave(ifile))*dtsave(ifile)
186  itsavelast(ifile)=it/ditsave(ifile)*ditsave(ifile)
187  else
188  tsavelast(ifile)=global_time
189  itsavelast(ifile)=it
190  end if
191  end do
192 
193  ! the next two are used to keep track of the performance during runtime:
194  ittimelast=it
195  timelast=mpi_wtime()
196 
197  ! ------ start of integration loop. ------------------
198  if (mype==0) then
199  write(*, '(A,ES9.2,A)') ' Start integrating, print status every ', &
200  time_between_print, ' seconds'
201  write(*, '(A4,A10,A12,A12,A12)') ' #', 'it', 'time', 'dt', 'wc-time(s)'
202  end if
203 
204  timeloop0=mpi_wtime()
205  time_bc=0.d0
206  time_write=0.d0
207  ncells_block={(ixghi^d-2*nghostcells)*}
208  ncells_update=0
209  dt_loop=0.d0
210 
211  time_advance=.true.
212 
213  time_evol : do
214 
215  time_before_advance=mpi_wtime()
216  ! set time step
217  call setdt()
218 
219  ! Optionally call a user method that can modify the grid variables at the
220  ! beginning of a time step
221  if (associated(usr_process_grid) .or. &
222  associated(usr_process_global)) then
223  call process(it,global_time)
224  end if
225 
226  ! Check if output needs to be written
227  do ifile=nfile,1,-1
228  save_file(ifile) = timetosave(ifile)
229  end do
230 
231  timeio0=mpi_wtime()
232 
233  if (timeio0 - time_last_print > time_between_print) then
234  time_last_print = timeio0
235  if (mype == 0) then
236  write(*, '(A4,I10,ES12.4,ES12.4,ES12.4)') " #", &
237  it, global_time, dt, timeio0 - time_in
238  end if
239  end if
240 
241  ! output data
242  if (any(save_file)) then
243  if(associated(usr_modify_output)) then
244  ! Users can modify or set variables before output is written
245  do iigrid=1,igridstail; igrid=igrids(iigrid);
246  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
247  block=>ps(igrid)
248  call usr_modify_output(ixg^ll,ixm^ll,global_time,ps(igrid)%w,ps(igrid)%x)
249  end do
250  end if
251  time_write=0.d0
252  do ifile=nfile,1,-1
253  if (save_file(ifile)) then
254  time_write0=mpi_wtime()
255  call saveamrfile(ifile)
256  time_write=time_write+mpi_wtime()-time_write0
257  end if
258  end do
259  end if
260 
261 
262  ! output a snapshot when the user writes a file named 'savenow' in the same
263  ! folder as the amrvac executable
264  if (mype==0) inquire(file='savenow',exist=save_now)
265  if (npe>1) call mpi_bcast(save_now,1,mpi_logical,0,icomm,ierrmpi)
266 
267  if (save_now) then
268  if(mype==0) write(*,'(a,i7,a,i7,a,es12.4)') ' save a snapshot No.',&
269  snapshotnext,' at it=',it,' global_time=',global_time
270  call saveamrfile(1)
271  call saveamrfile(2)
272  call mpi_file_delete('savenow',mpi_info_null,ierrmpi)
273  end if
274  timeio_tot=timeio_tot+mpi_wtime()-timeio0
275 
276  pass_wall_time=mpi_wtime()-time0+dt_loop+4.d0*time_write >=wall_time_max
277 
278  ! exit time loop if time is up
279  if (it>=it_max .or. global_time>=time_max .or. pass_wall_time .or. final_dt_exit) exit time_evol
280 
281  ! advance the equations by one time step
282  call advance(it)
283 
284  ! if unphysical values were encountered, output the last good state and stop the run
285  call mpi_allreduce(crash,crashall,1,mpi_logical,mpi_lor,icomm,ierrmpi)
286  if (crashall) then
287  do iigrid=1,igridstail; igrid=igrids(iigrid);
288  ps(igrid)%w=pso(igrid)%w
289  end do
290  call saveamrfile(1)
291  call saveamrfile(2)
292  if(mype==0) write(*,*) "Error: small value encountered, run crash."
293  call mpi_abort(icomm, iigrid, ierrmpi)
294  end if
295 
296  ! Optionally call a user method that can modify the grid variables at the
297  ! end of a time step, e.g. for two-way coupling to PIC
298  if (associated(usr_process_adv_grid) .or. &
299  associated(usr_process_adv_global)) then
300  call process_advanced(it,global_time)
301  end if
302 
303  ! update AMR mesh and tree
304  timegr0=mpi_wtime()
305  if(ditregrid>1) then
306  if(fixcount<ditregrid) then
307  fixcount=fixcount+1
308  else
309  if (refine_max_level>1 .and. .not.(fixgrid())) call resettree
310  fixcount=1
311  end if
312  else
313  if (refine_max_level>1 .and. .not.(fixgrid())) call resettree
314  end if
315  timegr_tot=timegr_tot+(mpi_wtime()-timegr0)
316 
317  ! update time variables
318  it = it + 1
319  global_time = global_time + dt
320 
321  if(it>9000000)then
322  it = slowsteps+10
323  itsavelast(:)=0
324  end if
325 
326  ! count updated cells
327  ncells_update=ncells_update+ncells_block*nleafs_active
328 
329  ! wall-clock time elapsed in this loop iteration
330  dt_loop=mpi_wtime()-time_before_advance
331  end do time_evol
332 
333  time_advance=.false.
334 
335  timeloop=mpi_wtime()-timeloop0
336 
337  if (mype==0) then
338  write(*,'(a,f12.3,a)')' Total timeloop took : ',timeloop,' sec'
339  write(*,'(a,f12.3,a)')' Time spent on AMR : ',timegr_tot,' sec'
340  write(*,'(a,f12.2,a)')' Percentage: ',100.0*timegr_tot/timeloop,' %'
341  write(*,'(a,f12.3,a)')' Time spent on IO in loop : ',timeio_tot,' sec'
342  write(*,'(a,f12.2,a)')' Percentage: ',100.0*timeio_tot/timeloop,' %'
343  write(*,'(a,f12.3,a)')' Time spent on ghost cells : ',time_bc,' sec'
344  write(*,'(a,f12.2,a)')' Percentage: ',100.0*time_bc/timeloop,' %'
345  write(*,'(a,f12.3,a)')' Time spent on computing : ',timeloop-timeio_tot-timegr_tot-time_bc,' sec'
346  write(*,'(a,f12.2,a)')' Percentage: ',100.0*(timeloop-timeio_tot-timegr_tot-time_bc)/timeloop,' %'
347  write(*,'(a,es12.3 )')' Cells updated / proc / sec : ',dble(ncells_update)*dble(nstep)/dble(npe)/timeloop
348  end if
349 
350  ! output end state
351  timeio0=mpi_wtime()
352  do ifile=nfile,1,-1
353  if(itsavelast(ifile)<it) call saveamrfile(ifile)
354  end do
355  if (mype==0) call mpi_file_close(log_fh,ierrmpi)
356  timeio_tot=timeio_tot+(mpi_wtime()-timeio0)
357 
358  if (mype==0) then
359  write(*,'(a,f12.3,a)')' Total time spent on IO : ',timeio_tot,' sec'
360  write(*,'(a,f12.3,a)')' Total timeintegration took : ',mpi_wtime()-time_in,' sec'
361  write(*, '(A4,I10,ES12.3,ES12.3,ES12.3)') " #", &
362  it, global_time, dt, mpi_wtime() - time_in
363  end if
364 
365  {#IFDEF RAY
366  call time_spent_on_rays
367  }
368 
369  if(use_particles) call time_spent_on_particles
370 
371  if (use_multigrid) call mg_timers_show(mg)
372  end subroutine timeintegration
373 
374  !> Save times are defined by either tsave(isavet(ifile),ifile) or
375  !> itsave(isaveit(ifile),ifile) or dtsave(ifile) or ditsave(ifile)
376  !> tsavestart(ifile) determines the first output time. This only affects
377  !> output times determined by dtsave(ifile).
378  !> Other conditions may be included.
379  logical function timetosave(ifile)
381 
382  integer:: ifile
383  logical:: oksave
384 
385  oksave=.false.
386  if (it==itsave(isaveit(ifile),ifile)) then
387  oksave=.true.
388  isaveit(ifile)=isaveit(ifile)+1
389  end if
390  if (it==itsavelast(ifile)+ditsave(ifile)) oksave=.true.
391 
392  if (global_time>=tsave(isavet(ifile),ifile).and.global_time-dt<tsave(isavet(ifile),ifile)) then
393  oksave=.true.
394  isavet(ifile)=isavet(ifile)+1
395  end if
396 
397  if(global_time>=tsavestart(ifile)-smalldouble)then
398  if (global_time>=tsavelast(ifile)+dtsave(ifile)-smalldouble)then
399  oksave=.true.
400  n_saves(ifile) = n_saves(ifile) + 1
401  endif
402  endif
403 
404  if (oksave) then
405  tsavelast(ifile) =global_time
406  itsavelast(ifile)=it
407  end if
408  timetosave=oksave
409 
410  return
411  end function timetosave
412 
413  !> Return true if the AMR grid should not be adapted any more. This is
414  !> controlled by tfixgrid or itfixgrid. Other conditions may be included.
415  logical function fixgrid()
417 
418  fixgrid = it>=itfixgrid .or. global_time>=tfixgrid
419  end function fixgrid
420 
421 end program amrvac
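The save and stop criteria tested above (timetosave together with the it_max, time_max and wall_time_max checks in the time loop) are normally set through the &savelist and &stoplist namelists of the .par file read at startup. The following is a minimal sketch of such a setup; the group and parameter names follow common MPI-AMRVAC usage, but the concrete values and the dtsave_log / dtsave_dat spellings are illustrative assumptions rather than a verbatim excerpt from any test problem.

 &savelist
   itsave(1,1) = 0        ! write a log entry at it=0
   itsave(1,2) = 0        ! write a .dat snapshot at it=0
   dtsave_log  = 1.0d-2   ! log output every 0.01 units of simulation time
   dtsave_dat  = 1.0d-1   ! .dat snapshot every 0.1 units of simulation time
 /

 &stoplist
   time_max      = 1.0d0    ! stop once global_time reaches this value
   it_max        = 100000   ! ... or after this many time steps
   wall_time_max = 23.5d0   ! ... or after this much wall-clock time (hours)
 /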
subroutine initialize_after_settree
Definition: amrgrid.t:95
subroutine settree
Build up AMR.
Definition: amrgrid.t:3
subroutine resettree
reset AMR and (de)allocate boundary flux storage at level changes
Definition: amrgrid.t:49
subroutine initlevelone
Generate and initialize all grids at the coarsest level (level one)
Definition: amrini.t:3
subroutine modify_ic
modify initial condition
Definition: amrini.t:59
subroutine improve_initial_condition()
improve initial condition after initialization
Definition: amrini.t:80
logical function fixgrid()
Return true if the AMR grid should not be adapted any more. This is controlled by tfixgrid or itfixgrid.
Definition: amrvac.t:416
logical function timetosave(ifile)
Save times are defined by either tsave(isavet(ifile),ifile) or itsave(isaveit(ifile),...
Definition: amrvac.t:380
program amrvac
AMRVAC solves a set of hyperbolic equations using adaptive mesh refinement.
Definition: amrvac.t:4
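As a concrete instance of the hyperbolic system \f$\vec{u}_t + \nabla_x \cdot \vec{f}(\vec{u}) = \vec{s}\f$ solved here, the scalar transport (advection) physics module corresponds to \f$\vec{u}=(\rho)\f$, \f$\vec{f}(\rho)=\vec{v}\,\rho\f$ and \f$\vec{s}=0\f$, i.e. \f$\rho_t + \nabla\cdot(\vec{v}\,\rho)=0\f$; modules such as HD and MHD supply the corresponding larger sets of conserved variables, fluxes and sources.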
subroutine timeintegration()
Definition: amrvac.t:165
subroutine mpistop(message)
Exit MPI-AMRVAC with an error message.
Definition: comm_lib.t:194
subroutine comm_start
Initialize the MPI environment.
Definition: comm_lib.t:3
subroutine comm_finalize
Finalize (or shutdown) the MPI environment.
Definition: comm_lib.t:35
subroutine generate_plotfile
Definition: convert.t:3
Module containing all the time stepping schemes.
Definition: mod_advance.t:2
subroutine, public process_advanced(iit, qt)
process_advanced is a user entry in the time loop, just after advance; it allows modifying the solution, ...
Definition: mod_advance.t:802
subroutine, public advance(iit)
Advance all the grids over one time step, including all sources.
Definition: mod_advance.t:18
subroutine, public process(iit, qt)
process is a user entry in the time loop, before output and advance; it allows modifying the solution, ...
Definition: mod_advance.t:769
subroutine init_convert()
Definition: mod_convert.t:33
Module for flux conservation near refinement boundaries.
subroutine, public allocatebflux
Module with basic grid data structures.
Definition: mod_forest.t:2
integer nleafs_active
Definition: mod_forest.t:78
update ghost cells of all blocks including physical boundaries
subroutine getbc(time, qdt, psb, nwstart, nwbc, req_diag)
do update ghost cells of all blocks including physical boundaries
This module contains definitions of global parameters and variables and some generic functions/subroutines.
double precision, dimension(nfile) tsavelast
type(state), pointer block
Block pointer for using one block and its previous state.
integer nstep
How many sub-steps the time integrator takes.
integer it_max
Stop the simulation after this many time steps have been taken.
logical reset_it
If true, reset iteration count to 0.
integer ixghi
Upper index of grid block arrays.
logical, dimension(nfile) save_file
whether or not to save an output file
logical resume_previous_run
If true, restart a previous run from the latest snapshot.
double precision global_time
The global simulation time.
integer, dimension(nsavehi, nfile) itsave
Save output of type N on iterations itsave(:, N)
double precision time_max
End time for the simulation.
double precision time_init
Start time for the simulation.
logical firstprocess
If true, call initonegrid_usr upon restarting.
integer snapshotini
Resume from the snapshot with this index.
integer it
Number of time steps taken.
integer it_init
initial iteration count
integer ditregrid
Reconstruct the AMR grid once every ditregrid iteration(s)
character(len=std_len) convert_type
Which format to use when converting.
integer itfixgrid
Fix the AMR grid after this many time steps.
logical use_particles
Use particles module or not.
integer icomm
The MPI communicator.
logical reset_time
If true, reset iteration count and global_time to original values, and start writing snapshots at index zero.
integer mype
The rank of the current MPI task.
integer, dimension(1:nfile) n_saves
Number of saved files of each type.
double precision, dimension(nfile) tsavestart
Start of read out (not counting specified read outs)
integer, dimension(nfile) ditsave
Repeatedly save output of type N when ditsave(N) time steps have passed.
integer, dimension(:), allocatable, parameter d
double precision wall_time_max
Ending wall time (in hours) for the simulation.
integer ierrmpi
A global MPI error return code.
logical autoconvert
If true, already convert to output format during the run.
integer slowsteps
If > 1, then in the first slowsteps-1 time steps dt is reduced by a factor .
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
integer npe
The number of MPI tasks.
integer, dimension(nfile) itsavelast
double precision time_between_print
to monitor timeintegration loop at given wall-clock time intervals
double precision, dimension(nfile) dtsave
Repeatedly save output of type N when dtsave(N) simulation time has passed.
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
integer, parameter filelog_
Constant indicating log output.
integer, parameter fileout_
Constant indicating regular output.
double precision time_bc
accumulated wall-clock time spent on boundary conditions
double precision tfixgrid
Fix the AMR grid after this time.
integer nghostcells
Number of ghost cells surrounding a grid.
character(len= *), parameter undefined
double precision, dimension(nsavehi, nfile) tsave
Save output of type N on times tsave(:, N)
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
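A restart-and-convert run of the kind handled by the convert branch of the main program is typically requested through the &filelist namelist. A minimal sketch (parameter names as commonly used in MPI-AMRVAC par files; the file names and the vtuBCCmpi choice are purely illustrative):

 &filelist
   base_filename     = 'output/test_'
   restart_from_file = 'output/test_0010.dat'
   convert           = .true.
   convert_type      = 'vtuBCCmpi'   ! binary cell-centered VTU, written in parallel
 /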
logical reset_grid
If true, rebuild the AMR grid upon restarting.
logical crash
Save a snapshot before crashing when a run meets unphysical values.
logical use_multigrid
Use multigrid (only available in 2D and 3D)
integer refine_max_level
Maximal number of AMR levels.
integer, parameter nfile
Number of output methods.
logical pass_wall_time
If true, wall time is up, modify snapshotnext for later overwrite.
logical final_dt_exit
Force timeloop exit when final dt < dtmin.
integer, dimension(nfile) isaveit
double precision, dimension(ndim) dxlevel
integer, dimension(nfile) isavet
integer log_fh
MPI file handle for logfile.
This module handles the initialization of various components of amrvac.
Definition: mod_initialize.t:2
subroutine, public initialize_amrvac()
Initialize amrvac: read par files and initialize variables.
Module for reading input and writing output.
subroutine saveamrfile(ifile)
logical save_now
whether a manually inserted snapshot is saved
subroutine read_arguments()
Read the command line arguments passed to amrvac.
subroutine read_snapshot
Routine to read in snapshots (.dat files). When it cannot recognize the file version,...
Module to couple the octree-mg library to AMRVAC. This file uses the VACPP preprocessor,...
type(mg_t) mg
Data structure containing the multigrid tree.
subroutine mg_setup_multigrid()
Setup multigrid for usage.
Module containing all the particle routines.
Definition: mod_particles.t:2
subroutine particles_create()
Create initial particles.
Definition: mod_particles.t:42
This module defines the procedures of a physics module. It contains function pointers for the various...
Definition: mod_physics.t:4
procedure(sub_special_advance), pointer phys_special_advance
Definition: mod_physics.t:73
integer ittimelast
Definition: mod_timing.t:15
double precision timegr_tot
Definition: mod_timing.t:11
double precision timeio_tot
Definition: mod_timing.t:10
double precision time_in
Definition: mod_timing.t:10
double precision timelast
Definition: mod_timing.t:16
double precision timeio0
Definition: mod_timing.t:10
double precision timeloop
Definition: mod_timing.t:11
double precision timeloop0
Definition: mod_timing.t:11
double precision timegr0
Definition: mod_timing.t:11
Module with all the methods that users can customize in AMRVAC.
procedure(process_grid), pointer usr_process_grid
procedure(process_adv_grid), pointer usr_process_adv_grid
procedure(sub_modify_io), pointer usr_modify_output
procedure(p_no_args), pointer usr_before_main_loop
procedure(process_global), pointer usr_process_global
procedure(process_adv_global), pointer usr_process_adv_global
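The usr_process_grid / usr_process_global (and usr_process_adv_*) pointers tested in the main loop are associated in the user module's usr_init. A minimal sketch of such a hook is shown below; my_process_global is an illustrative name, and the dummy-argument interface (iteration count and time) is assumed to match the process_global interface of mod_usr_methods.

   subroutine usr_init()
     use mod_usr_methods
     ! register a global processing hook, called before output and advance
     usr_process_global => my_process_global
   end subroutine usr_init

   subroutine my_process_global(iit,qt)
     use mod_global_parameters
     integer, intent(in)          :: iit   ! current iteration number
     double precision, intent(in) :: qt    ! current simulation time
     ! example action: print a progress line every 100 steps on rank 0
     if (mype==0 .and. mod(iit,100)==0) print *, 'step', iit, 'time', qt
   end subroutine my_process_global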
subroutine selectgrids
Definition: selectgrids.t:3
subroutine setdt()
setdt - set dt for all levels between levmin and levmax. dtpar>0 --> use fixed dtpar for all level dt...
Definition: setdt.t:5