MPI-AMRVAC  2.2
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_input_output.t
Go to the documentation of this file.
1 !> Module for reading input and writing output
3 
4  implicit none
5  public
6 
7  !> Version number of the .dat file output
 ! Bump this when the .dat layout changes, and keep compatible_versions in sync.
8  integer, parameter :: version_number = 5
9 
10  !> List of compatible versions
 ! Snapshots written with any of these format versions can still be read back.
11  integer, parameter :: compatible_versions(3) = [3, 4, 5]
12 
13  !> number of w found in dat files
 ! NOTE(review): presumably set while reading a snapshot and may differ from
 ! the current nw if the physics configuration changed - confirm at read site
 ! (the reader is not visible in this listing).
14  integer :: nw_found
15 
16  !> tag for MPI message
 ! Module-private scratch tag, presumably shared by matched MPI send/receive
 ! pairs elsewhere in this module (usage not visible in this chunk).
17  integer, private :: itag
18 
19  ! Formats used in output
 ! Fortran edit descriptors used when formatting log/terminal output.
20  character(len=*), parameter :: fmt_r = 'es16.8' ! Default precision
21  character(len=*), parameter :: fmt_r2 = 'es10.2' ! Two digits
22  character(len=*), parameter :: fmt_i = 'i8' ! Integer format
23 
24 contains
25 
!> Read the command line arguments passed to amrvac.
!>
!> Parses the options with the kracken command-line parser, fills the global
!> list of parameter files (par_files), and sets the restart/convert/resume
!> flags plus the manual next-output indices. Prints usage and terminates the
!> MPI run when --help is given or no par file is specified.
!>
!> NOTE(review): mype, std_len, undefined, par_files, restart_from_file,
!> convert, resume_previous_run, snapshotnext, collapsenext and ierrmpi come
!> from a use statement elided in this listing (presumably
!> mod_global_parameters) - confirm against the full source.
subroutine read_arguments()
  use mod_slice, only: slicenext

  ! 'length' renamed from 'len' to avoid shadowing the intrinsic len().
  ! Unused locals n, ibegin, iterm and resume removed.
  integer :: length, ier
  integer, parameter :: max_files = 20 ! Maximum number of par files
  integer :: n_par_files
  character(len=max_files*std_len) :: all_par_files
  character(len=std_len) :: tmp_files(max_files)

  ! Start-up banner, printed once by the root rank only
  if (mype == 0) then
     print *, '-----------------------------------------------------------------------------'
     print *, '-----------------------------------------------------------------------------'
     print *, '| __ __ ____ ___ _ __ __ ______ ___ ____ |'
     print *, '| | \/ | _ \_ _| / \ | \/ | _ \ \ / / \ / ___| |'
     print *, '| | |\/| | |_) | |_____ / _ \ | |\/| | |_) \ \ / / _ \| | |'
     print *, '| | | | | __/| |_____/ ___ \| | | | _ < \ V / ___ \ |___ |'
     print *, '| |_| |_|_| |___| /_/ \_\_| |_|_| \_\ \_/_/ \_\____| |'
     print *, '-----------------------------------------------------------------------------'
     print *, '-----------------------------------------------------------------------------'
  end if

  ! Register the recognized options and their default values with kracken
  call kracken('cmd','-i amrvac.par -if ' // undefined // &
       ' -slicenext -1 -collapsenext -1 -snapshotnext -1' // &
       ' --help .false. -convert .false. -resume .false.')

  ! Get the par file(s) given with -i (space/comma/quote/tab separated)
  call retrev('cmd_i', all_par_files, length, ier)

  ! Show the usage if the help flag was given, or no par file was specified.
  ! Fix: the original listed -resume twice with conflicting descriptions;
  ! the entries below are deduplicated.
  if (lget('cmd_-help') .or. length == 0) then
     if (mype == 0) then
        print *, 'Usage example:'
        print *, 'mpirun -np 4 ./amrvac -i file.par [file2.par ...]'
        print *, '         (later .par files override earlier ones)'
        print *, ''
        print *, 'Optional arguments:'
        print *, '-convert             Convert snapshot files'
        print *, '-if file0001.dat     Use this snapshot to restart from'
        print *, '                     (you can modify e.g. output names)'
        print *, '-resume              Automatically resume previous run'
        print *, '                     (useful for long runs on HPC systems)'
        print *, '-snapshotnext N      Manual index for next snapshot'
        print *, '-slicenext N         Manual index for next slice output'
        print *, '-collapsenext N      Manual index for next collapsed output'
        print *, ''
     end if
     ! Shut down MPI cleanly before stopping on all ranks
     call mpi_finalize(ierrmpi)
     stop
  end if

  ! Split the input files, in case multiple were given
  call get_fields_string(all_par_files, " ,'"""//char(9), max_files, &
       tmp_files, n_par_files)

  allocate(par_files(n_par_files))
  par_files = tmp_files(1:n_par_files)

  ! Read in the other command line arguments
  call retrev('cmd_if', restart_from_file, length, ier)

  ! Manual indices for the next outputs, and the convert/resume flags
  slicenext    = iget('cmd_slicenext')
  collapsenext = iget('cmd_collapsenext')
  snapshotnext = iget('cmd_snapshotnext')
  convert      = lget('cmd_convert') ! -convert present?
  resume_previous_run = lget('cmd_resume') ! -resume present?

end subroutine read_arguments
102 
103  !> Read in the user-supplied parameter-file
104  subroutine read_par_files()
107  use mod_small_values
108  use mod_limiter
109  use mod_slice
110  use mod_geometry
111 
112  logical :: fileopen, file_exists
113  integer :: i, j, k, ifile, io_state
114  integer :: iB, isave, iw, level, idim, islice
115  integer :: nx_vec(^nd), block_nx_vec(^nd)
116  integer :: my_unit, iostate
117  integer :: ilev
118  double precision :: dx_vec(^nd)
119 
120  character :: c_ndim
121  character(len=80) :: fmt_string
122  character(len=std_len) :: err_msg, restart_from_file_arg
123  character(len=std_len) :: basename_full, basename_prev, dummy_file
124  character(len=std_len), dimension(:), allocatable :: &
125  typeboundary_min^D, typeboundary_max^D
126  character(len=std_len), allocatable :: limiter(:)
127  character(len=std_len), allocatable :: gradient_limiter(:)
128  character(len=name_len) :: stretch_dim(ndim)
129 
130  double precision, dimension(nsavehi) :: tsave_log, tsave_dat, tsave_slice, &
131  tsave_collapsed, tsave_custom
132  double precision :: dtsave_log, dtsave_dat, dtsave_slice, &
133  dtsave_collapsed, dtsave_custom
134  integer :: ditsave_log, ditsave_dat, ditsave_slice, &
135  ditsave_collapsed, ditsave_custom
136  double precision :: tsavestart_log, tsavestart_dat, tsavestart_slice, &
137  tsavestart_collapsed, tsavestart_custom
138  integer :: windex, ipower
139  double precision :: sizeuniformpart^D
140  double precision :: im_delta,im_nu,rka54,rka51,rkb54,rka55
141 
142  namelist /filelist/ base_filename,restart_from_file, &
150 
151  namelist /savelist/ tsave,itsave,dtsave,ditsave,nslices,slicedir, &
153  tsave_log, tsave_dat, tsave_slice, tsave_collapsed, tsave_custom, &
154  dtsave_log, dtsave_dat, dtsave_slice, dtsave_collapsed, dtsave_custom, &
155  ditsave_log, ditsave_dat, ditsave_slice, ditsave_collapsed, ditsave_custom,&
156  tsavestart_log, tsavestart_dat, tsavestart_slice, tsavestart_collapsed,&
157  tsavestart_custom, tsavestart
158 
161 
162  namelist /methodlist/ time_stepper,time_integrator, &
166  limiter,gradient_limiter,cada3_radius,&
171  flatcd,flatsh,&
176  schmid_rad^d, trac
177 
180 
181  namelist /meshlist/ refine_max_level,nbufferx^d,refine_threshold,&
183  stretch_dim, stretch_uncentered, &
189  namelist /paramlist/ courantpar, dtpar, dtdiffpar, &
191 
192  namelist /euvlist/ filename_euv,image,spectrum,wavelength,direction_los,&
194 
195  ! default maximum number of grid blocks in a processor
196  max_blocks=4000
197 
198  ! allocate cell size of all levels
199  allocate(dx(ndim,nlevelshi))
200  {allocate(dg^d(nlevelshi))\}
201  {allocate(ng^d(nlevelshi))\}
202 
203  ! default block size excluding ghost cells
204  {block_nx^d = 16\}
205 
206  ! default resolution of level-1 mesh (full domain)
207  {domain_nx^d = 32\}
208 
209 
210 
211  ! defaults for boundary treatments
212  typeghostfill = 'linear'
213 
214  ! default number of ghost-cell layers at each boundary of a block
215  nghostcells = 2
216 
217  ! Allocate boundary conditions arrays in new and old style
218  {
219  allocate(typeboundary_min^d(nwfluxbc))
220  allocate(typeboundary_max^d(nwfluxbc))
221  typeboundary_min^d = undefined
222  typeboundary_max^d = undefined
223  }
224 
225  allocate(typeboundary(nwflux, 2 * ndim))
226  typeboundary(:, :) = undefined
227 
228  ! not save physical boundary in dat files by default
229  save_physical_boundary = .false.
230 
231  internalboundary = .false.
232 
233  ! defaults for specific options
234  typegrad = 'central'
235  typediv = 'central'
236  typecurl = 'central'
237 
238  ! defaults for smallest physical values allowed
239  small_temperature = 0.d0
240  small_pressure = 0.d0
241  small_density = 0.d0
242 
243  allocate(small_values_fix_iw(nw))
244  small_values_fix_iw(:) = .true.
245 
246  ! defaults for convert behavior
247 
248  ! store the -if value from argument in command line
249  restart_from_file_arg = restart_from_file
250  nwauxio = 0
251  nocartesian = .false.
252  saveprim = .false.
253  autoconvert = .false.
254  convert_type = 'vtuBCCmpi'
255  slice_type = 'vtuCC'
256  collapse_type = 'vti'
257  allocate(w_write(nw))
258  w_write(1:nw) = .true.
259  allocate(writelevel(nlevelshi))
260  writelevel(1:nlevelshi) = .true.
261  writespshift(1:ndim,1:2) = zero
262  level_io = -1
263  level_io_min = 1
265  ! endianness: littleendian (default) is 1, bigendian otherwise
266  type_endian = 1
267 
268  ! normalization of primitive variables: only for output
269  ! note that length_convert_factor is for length
270  ! this scaling is optional, and must be set consistently if used
271  allocate(w_convert_factor(nw))
272  w_convert_factor(:) = 1.0d0
273  time_convert_factor = 1.0d0
274  length_convert_factor = 1.0d0
275 
276  ! AMR related defaults
277  refine_max_level = 1
278  {nbufferx^d = 0\}
279  allocate(refine_threshold(nlevelshi))
280  refine_threshold(1:nlevelshi) = 0.1d0
281  allocate(derefine_ratio(nlevelshi))
282  derefine_ratio(1:nlevelshi) = 1.0d0/8.0d0
283  prolongation_method = 'linear'
284  typeprolonglimit = 'default'
285  refine_criterion = 3
286  allocate(w_refine_weight(nw+1))
287  w_refine_weight = 0.d0
288  allocate(logflag(nw+1))
289  logflag = .false.
290  allocate(amr_wavefilter(nlevelshi))
291  amr_wavefilter(1:nlevelshi) = 1.0d-2
292  tfixgrid = bigdouble
293  itfixgrid = biginteger
294  ditregrid = 1
295 
296  ! Grid stretching defaults
297  stretch_uncentered = .true.
298  stretch_dim(1:ndim) = undefined
299  qstretch_baselevel(1:ndim) = bigdouble
301 
302  ! IO defaults
303  it_init = 0
304  it_max = biginteger
305  time_init = 0.d0
306  time_max = bigdouble
307  wall_time_max = bigdouble
308  final_dt_reduction=.true.
309  final_dt_exit=.false.
310  dtmin = 1.0d-10
311  nslices = 0
312  collapse = .false.
313  collapselevel = 1
314  time_between_print = 30.0d0 ! Print status every 30 seconds
315 
316  do ifile=1,nfile
317  do isave=1,nsavehi
318  tsave(isave,ifile) = bigdouble ! global_time of saves into the output files
319  itsave(isave,ifile) = biginteger ! it of saves into the output files
320  end do
321  dtsave(ifile) = bigdouble ! time between saves
322  ditsave(ifile) = biginteger ! timesteps between saves
323  isavet(ifile) = 1 ! index for saves by global_time
324  isaveit(ifile) = 1 ! index for saves by it
325  tsavestart(ifile) = 0.0d0
326  end do
327 
328  tsave_log = bigdouble
329  tsave_dat = bigdouble
330  tsave_slice = bigdouble
331  tsave_collapsed = bigdouble
332  tsave_custom = bigdouble
333 
334  dtsave_log = bigdouble
335  dtsave_dat = bigdouble
336  dtsave_slice = bigdouble
337  dtsave_collapsed = bigdouble
338  dtsave_custom = bigdouble
339 
340  ditsave_log = biginteger
341  ditsave_dat = biginteger
342  ditsave_slice = biginteger
343  ditsave_collapsed = biginteger
344  ditsave_custom = biginteger
345 
346  tsavestart_log = bigdouble
347  tsavestart_dat = bigdouble
348  tsavestart_slice = bigdouble
349  tsavestart_collapsed = bigdouble
350  tsavestart_custom = bigdouble
351 
352  typefilelog = 'default'
353 
354  ! defaults for input
355  reset_time = .false.
356  reset_it = .false.
357  firstprocess = .false.
358  reset_grid = .false.
359  base_filename = 'data'
360  usr_filename = ''
361 
362 
363  ! Defaults for discretization methods
364  typeaverage = 'default'
365  tvdlfeps = one
366  nxdiffusehllc = 0
367  flathllc = .false.
368  slowsteps = -1
369  courantpar = 0.8d0
370  typecourant = 'maxsum'
371  dimsplit = .false.
372  typedimsplit = 'default'
373  if(physics_type=='mhd') then
374  cada3_radius = 0.5d0
375  else
376  cada3_radius = 0.1d0
377  end if
378  {schmid_rad^d = 1.d0\}
379  typetvd = 'roe'
380  typeboundspeed = 'Einfeldt'
381  source_split_usr= .false.
382  time_stepper = 'twostep'
383  time_integrator = 'default'
384  angmomfix = .false.
385  ! default PC or explicit midpoint, hence alfa=0.5
386  rk2_alfa = half
387  ! default SSPRK(3,3) or Gottlieb-Shu 1998 for threestep
388  ! default SSPRK(4,3) or Spireti-Ruuth for fourstep
389  ! default SSPRK(5,4) using Gottlieb coeffs
390  ssprk_order = 3
391  ! default RK3 butcher table: Heun 3rd order
392  rk3_switch = 3
393  ! default IMEX threestep is IMEX_ARK(232)
394  imex_switch = 1
395 
397  allocate(limiter(nlevelshi),gradient_limiter(nlevelshi))
398  do level=1,nlevelshi
399  flux_scheme(level) = 'tvdlf'
400  typepred1(level) = 'default'
401  limiter(level) = 'minmod'
402  gradient_limiter(level) = 'minmod'
403  end do
404 
405  flatcd = .false.
406  flatsh = .false.
407  typesourcesplit = 'sfs'
408  allocate(loglimit(nw))
409  loglimit(1:nw) = .false.
410 
411  allocate(typeentropy(nw))
412 
413  do iw=1,nw
414  typeentropy(iw)='nul' ! Entropy fix type
415  end do
416 
417  dtdiffpar = 0.5d0
418  dtpar = -1.d0
419 
420  ! problem setup defaults
421  iprob = 1
422 
423  ! end defaults
424 
425  ! Initialize Kronecker delta, and Levi-Civita tensor
426  do i=1,3
427  do j=1,3
428  if(i==j)then
429  kr(i,j)=1
430  else
431  kr(i,j)=0
432  endif
433  do k=1,3
434  if(i==j.or.j==k.or.k==i)then
435  lvc(i,j,k)=0
436  else if(i+1==j.or.i-2==j)then
437  lvc(i,j,k)=1
438  else
439  lvc(i,j,k)=-1
440  endif
441  enddo
442  enddo
443  enddo
444 
445  ! These are used to construct file and log names from multiple par files
446  basename_full = ''
447  basename_prev = ''
448 
449  do i = 1, size(par_files)
450  if (mype == 0) print *, "Reading " // trim(par_files(i))
451 
452  ! Check whether the file exists
453  inquire(file=trim(par_files(i)), exist=file_exists)
454 
455  if (.not. file_exists) then
456  write(err_msg, *) "The parameter file " // trim(par_files(i)) // &
457  " does not exist"
458  call mpistop(trim(err_msg))
459  end if
460 
461  open(unitpar, file=trim(par_files(i)), status='old')
462 
463  ! Try to read in the namelists. They can be absent or in a different
464  ! order, since we rewind before each read.
465  rewind(unitpar)
466  read(unitpar, filelist, end=101)
467 
468 101 rewind(unitpar)
469  read(unitpar, savelist, end=102)
470 
471 102 rewind(unitpar)
472  read(unitpar, stoplist, end=103)
473 
474 103 rewind(unitpar)
475  read(unitpar, methodlist, end=104)
476 
477 104 rewind(unitpar)
478  read(unitpar, boundlist, end=105)
479 
480 105 rewind(unitpar)
481  read(unitpar, meshlist, end=106)
482 
483 106 rewind(unitpar)
484  read(unitpar, paramlist, end=107)
485 
486 107 rewind(unitpar)
487  read(unitpar, euvlist, end=108)
488 
489 108 close(unitpar)
490 
491  ! Append the log and file names given in the par files
492  if (base_filename /= basename_prev) &
493  basename_full = trim(basename_full) // trim(base_filename)
494  basename_prev = base_filename
495  end do
496 
497  base_filename = basename_full
498 
499  ! Check whether output directory is writable
500  if(mype==0) then
501  dummy_file = trim(base_filename)//"DUMMY"
502  open(newunit=my_unit, file=trim(dummy_file), iostat=iostate)
503  if (iostate /= 0) then
504  call mpistop("Can't write to output directory (" // &
505  trim(base_filename) // ")")
506  else
507  close(my_unit, status='delete')
508  end if
509  end if
510 
511  ! restart filename from command line overwrites the one in par file
512  if(restart_from_file_arg /= undefined) &
513  restart_from_file=restart_from_file_arg
514 
515  if (resume_previous_run) then
516  ! Root process will search snapshot
517  if (mype == 0) then
518  do i = -1, 9998
519  ! Check if the next snapshot is missing
520  if (.not. snapshot_exists(i+1)) exit
521  end do
522 
523  if (i == -1) then
524  ! If initial data is missing (e.g. moved due to lack of space),
525  ! search file with highest index
526  do i = 9999, 0, -1
527  if (snapshot_exists(i)) exit
528  end do
529  end if
530  end if
531  call mpi_bcast(i, 1, mpi_integer, 0, icomm, ierrmpi)
532 
533  if (i == -1) then
534  if(mype==0) write(*,*) "No snapshots found to resume from, start a new run..."
535  else
536  !call mpistop("No snapshots found to resume from")
537  ! Set file name to restart from
538  write(restart_from_file, "(a,i4.4,a)") trim(base_filename), i, ".dat"
539  end if
540  end if
541 
542  if (restart_from_file == undefined) then
543  snapshotnext = 0
544  slicenext = 0
545  collapsenext = 0
546  if (firstprocess) &
547  call mpistop("Please restart from a snapshot when firstprocess=T")
548  if (convert) &
549  call mpistop('Change convert to .false. for a new run!')
550  end if
551 
552  if (small_pressure < 0.d0) call mpistop("small_pressure should be positive.")
553  if (small_density < 0.d0) call mpistop("small_density should be positive.")
554  ! Give priority to non-zero small temperature
556 
557  if(convert) autoconvert=.false.
558 
559  where (tsave_log < bigdouble) tsave(:, 1) = tsave_log
560  where (tsave_dat < bigdouble) tsave(:, 2) = tsave_dat
561  where (tsave_slice < bigdouble) tsave(:, 3) = tsave_slice
562  where (tsave_collapsed < bigdouble) tsave(:, 4) = tsave_collapsed
563  where (tsave_custom < bigdouble) tsave(:, 5) = tsave_custom
564 
565  if (dtsave_log < bigdouble) dtsave(1) = dtsave_log
566  if (dtsave_dat < bigdouble) dtsave(2) = dtsave_dat
567  if (dtsave_slice < bigdouble) dtsave(3) = dtsave_slice
568  if (dtsave_collapsed < bigdouble) dtsave(4) = dtsave_collapsed
569  if (dtsave_custom < bigdouble) dtsave(5) = dtsave_custom
570 
571  if (tsavestart_log < bigdouble) tsavestart(1) = tsavestart_log
572  if (tsavestart_dat < bigdouble) tsavestart(2) = tsavestart_dat
573  if (tsavestart_slice < bigdouble) tsavestart(3) = tsavestart_slice
574  if (tsavestart_collapsed < bigdouble) tsavestart(4) = tsavestart_collapsed
575  if (tsavestart_custom < bigdouble) tsavestart(5) = tsavestart_custom
576 
577  if (ditsave_log < bigdouble) ditsave(1) = ditsave_log
578  if (ditsave_dat < bigdouble) ditsave(2) = ditsave_dat
579  if (ditsave_slice < bigdouble) ditsave(3) = ditsave_slice
580  if (ditsave_collapsed < bigdouble) ditsave(4) = ditsave_collapsed
581  if (ditsave_custom < bigdouble) ditsave(5) = ditsave_custom
582  ! convert hours to seconds for ending wall time
583  if (wall_time_max < bigdouble) wall_time_max=wall_time_max*3600.d0
584 
585  if (mype == 0) then
586  write(unitterm, *) ''
587  write(unitterm, *) 'Output type | tsavestart | dtsave | ditsave | itsave(1) | tsave(1)'
588  write(fmt_string, *) '(A12," | ",E9.3E2," | ",E9.3E2," | ",I6," | "'//&
589  ',I6, " | ",E9.3E2)'
590  end if
591 
592  do ifile = 1, nfile
593  if (mype == 0) write(unitterm, fmt_string) trim(output_names(ifile)), &
594  tsavestart(ifile), dtsave(ifile), ditsave(ifile), itsave(1, ifile), tsave(1, ifile)
595  end do
596 
597  if (mype == 0) write(unitterm, *) ''
598 
599  do islice=1,nslices
600  if(slicedir(islice) > ndim) &
601  write(uniterr,*)'Warning in read_par_files: ', &
602  'Slice ', islice,' direction',slicedir(islice),'larger than ndim=',ndim
603  if(slicedir(islice) < 1) &
604  write(uniterr,*)'Warning in read_par_files: ', &
605  'Slice ', islice,' direction',slicedir(islice),'too small, should be [',1,ndim,']'
606  end do
607 
608  if(it_max==biginteger .and. time_max==bigdouble.and.mype==0) write(uniterr,*) &
609  'Warning in read_par_files: it_max or time_max not given!'
610 
611  do level=1,nlevelshi
612  if(flux_scheme(level)=='tvd'.and.time_stepper/='onestep') &
613  call mpistop(" tvd is onestep method, reset time_stepper='onestep'")
614  if(flux_scheme(level)=='tvd')then
615  if(mype==0.and.(.not.dimsplit)) write(unitterm,*) &
616  'Warning: setting dimsplit=T for tvd, as used for level=',level
617  dimsplit=.true.
618  endif
619  if(flux_scheme(level)=='hlld'.and.physics_type/='mhd') &
620  call mpistop("Cannot use hlld flux if not using MHD physics!")
621 
622  if(flux_scheme(level)=='hllc'.and.physics_type=='mf') &
623  call mpistop("Cannot use hllc flux if using magnetofriction physics!")
624 
625  if(flux_scheme(level)=='tvd'.and.physics_type=='mf') &
626  call mpistop("Cannot use tvd flux if using magnetofriction physics!")
627 
628  if(flux_scheme(level)=='tvdmu'.and.physics_type=='mf') &
629  call mpistop("Cannot use tvdmu flux if using magnetofriction physics!")
630 
631  if (typepred1(level)=='default') then
632  select case (flux_scheme(level))
633  case ('cd')
634  typepred1(level)='cd'
635  case ('cd4')
636  typepred1(level)='cd4'
637  case ('fd')
638  typepred1(level)='fd'
639  case ('tvdlf','tvdmu')
640  typepred1(level)='hancock'
641  case ('hll')
642  typepred1(level)='hll'
643  case ('hllc')
644  typepred1(level)='hllc'
645  case ('hllcd')
646  typepred1(level)='hllcd'
647  case ('hlld')
648  typepred1(level)='hlld'
649  case ('nul','source','tvd')
650  typepred1(level)='nul'
651  case default
652  call mpistop("No default predictor for this full step")
653  end select
654  end if
655  end do
656 
657  ! finite difference scheme fd need global maximal speed
658  if(any(flux_scheme=='fd')) need_global_cmax=.true.
659  if(any(limiter=='schmid1')) need_global_a2max=.true.
660 
661  select case (time_stepper)
662  case ("onestep")
663  nstep=1
664  if (time_integrator=='default') then
665  time_integrator='Forward_Euler'
666  endif
667  use_imex_scheme=(time_integrator=='IMEX_Euler'.or.time_integrator=='IMEX_SP')
668  if ((time_integrator/='Forward_Euler'.and.&
669  time_integrator/='IMEX_Euler').and.&
670  time_integrator/='IMEX_SP') then
671  call mpistop("No such time_integrator for onestep")
672  endif
673  case ("twostep")
674  nstep=2
675  if (time_integrator=='default') then
676  time_integrator='Predictor_Corrector'
677  endif
678  if (time_integrator=='RK2_alfa') then
679  if(rk2_alfa<smalldouble.or.rk2_alfa>one)call mpistop("set rk2_alfa within ]0,1]")
681  rk_b2=1.0d0/(2.0d0*rk2_alfa)
682  rk_b1=1.0d0-rk_b2
683  endif
684  use_imex_scheme=(time_integrator=='IMEX_Midpoint'.or.time_integrator=='IMEX_Trapezoidal')
685  if (((time_integrator/='Predictor_Corrector'.and.&
686  time_integrator/='RK2_alfa').and.&
687  (time_integrator/='ssprk2'.and.&
688  time_integrator/='IMEX_Midpoint')).and.&
689  time_integrator/='IMEX_Trapezoidal') then
690  call mpistop("No such time_integrator for twostep")
691  endif
692  case ("threestep")
693  nstep=3
694  if (time_integrator=='default') then
695  time_integrator='ssprk3'
696  endif
697  if(time_integrator=='RK3_BT') then
698  select case(rk3_switch)
699  case(1)
700  ! we code up Ralston 3rd order here
701  rk3_a21=1.0d0/2.0d0
702  rk3_a31=0.0d0
703  rk3_a32=3.0d0/4.0d0
704  rk3_b1=2.0d0/9.0d0
705  rk3_b2=1.0d0/3.0d0
706  case(2)
707  ! we code up RK-Wray 3rd order here
708  rk3_a21=8.0d0/15.0d0
709  rk3_a31=1.0d0/4.0d0
710  rk3_a32=5.0d0/12.0d0
711  rk3_b1=1.0d0/4.0d0
712  rk3_b2=0.0d0
713  case(3)
714  ! we code up Heun 3rd order here
715  rk3_a21=1.0d0/3.0d0
716  rk3_a31=0.0d0
717  rk3_a32=2.0d0/3.0d0
718  rk3_b1=1.0d0/4.0d0
719  rk3_b2=0.0d0
720  case(4)
721  ! we code up Nystrom 3rd order here
722  rk3_a21=2.0d0/3.0d0
723  rk3_a31=0.0d0
724  rk3_a32=2.0d0/3.0d0
725  rk3_b1=1.0d0/4.0d0
726  rk3_b2=3.0d0/8.0d0
727  case default
728  call mpistop("Unknown rk3_switch")
729  end select
730  ! the rest is fixed from above
731  rk3_b3=1.0d0-rk3_b1-rk3_b2
734  endif
735  if(time_integrator=='ssprk3') then
736  select case(ssprk_order)
737  case(3) ! this is SSPRK(3,3) Gottlieb-Shu
738  rk_beta11=1.0d0
739  rk_beta22=1.0d0/4.0d0
740  rk_beta33=2.0d0/3.0d0
741  rk_alfa21=3.0d0/4.0d0
742  rk_alfa31=1.0d0/3.0d0
743  rk_c2=1.0d0
744  rk_c3=1.0d0/2.0d0
745  case(2) ! this is SSP(3,2)
746  rk_beta11=1.0d0/2.0d0
747  rk_beta22=1.0d0/2.0d0
748  rk_beta33=1.0d0/3.0d0
749  rk_alfa21=0.0d0
750  rk_alfa31=1.0d0/3.0d0
751  rk_c2=1.0d0/2.0d0
752  rk_c3=1.0d0
753  case default
754  call mpistop("Unknown ssprk3_order")
755  end select
756  rk_alfa22=1.0d0-rk_alfa21
757  rk_alfa33=1.0d0-rk_alfa31
758  endif
759  if(time_integrator=='IMEX_ARS3') then
760  ars_gamma=(3.0d0+dsqrt(3.0d0))/6.0d0
761  endif
762  if(time_integrator=='IMEX_232') then
763  select case(imex_switch)
764  case(1) ! this is IMEX_ARK(232)
765  im_delta=1.0d0-1.0d0/dsqrt(2.0d0)
766  im_nu=(3.0d0+2.0d0*dsqrt(2.0d0))/6.0d0
767  imex_a21=2.0d0*im_delta
768  imex_a31=1.0d0-im_nu
769  imex_a32=im_nu
770  imex_b1=1.0d0/(2.0d0*dsqrt(2.0d0))
771  imex_b2=1.0d0/(2.0d0*dsqrt(2.0d0))
772  imex_ha21=im_delta
773  imex_ha22=im_delta
774  case(2) ! this is IMEX_SSP(232)
775  ! doi 10.1002/2017MS001065 Rokhzadi et al
776  imex_a21=0.711664700366941d0
777  imex_a31=0.077338168947683d0
778  imex_a32=0.917273367886007d0
779  imex_b1=0.398930808264688d0
780  imex_b2=0.345755244189623d0
781  imex_ha21=0.353842865099275d0
782  imex_ha22=0.353842865099275d0
783  case default
784  call mpistop("Unknown imex_siwtch")
785  end select
788  imex_b3=1.0d0-imex_b1-imex_b2
789  endif
790  use_imex_scheme=(time_integrator=='IMEX_ARS3'.or.time_integrator=='IMEX_232')
791  if ((time_integrator/='ssprk3'.and.&
792  time_integrator/='RK3_BT').and.&
793  (time_integrator/='IMEX_ARS3'.and.&
794  time_integrator/='IMEX_232')) then
795  call mpistop("No such time_integrator for threestep")
796  endif
797  case ("fourstep")
798  nstep=4
799  if (time_integrator=='default') then
800  time_integrator="ssprk4"
801  endif
802  if(time_integrator=='ssprk4') then
803  select case(ssprk_order)
804  case(3) ! this is SSPRK(4,3) Spireti-Ruuth
805  rk_beta11=1.0d0/2.0d0
806  rk_beta22=1.0d0/2.0d0
807  rk_beta33=1.0d0/6.0d0
808  rk_beta44=1.0d0/2.0d0
809  rk_alfa21=0.0d0
810  rk_alfa31=2.0d0/3.0d0
811  rk_alfa41=0.0d0
812  rk_c2=1.0d0/2.0d0
813  rk_c3=1.0d0
814  rk_c4=1.0d0/2.0d0
815  case(2) ! this is SSP(4,2)
816  rk_beta11=1.0d0/3.0d0
817  rk_beta22=1.0d0/3.0d0
818  rk_beta33=1.0d0/3.0d0
819  rk_beta44=1.0d0/4.0d0
820  rk_alfa21=0.0d0
821  rk_alfa31=0.0d0
822  rk_alfa41=1.0d0/4.0d0
823  rk_c2=1.0d0/3.0d0
824  rk_c3=2.0d0/3.0d0
825  rk_c4=1.0d0
826  case default
827  call mpistop("Unknown ssprk_order")
828  end select
829  rk_alfa22=1.0d0-rk_alfa21
830  rk_alfa33=1.0d0-rk_alfa31
831  rk_alfa44=1.0d0-rk_alfa41
832  endif
833  use_imex_scheme=.false.
834  if ((time_integrator/='ssprk4'.and.&
835  time_integrator/='rk4').and.&
836  time_integrator/='jameson') then
837  call mpistop("No such time_integrator for fourstep")
838  endif
839  case ("fivestep")
840  nstep=5
841  if (time_integrator=='default') then
842  time_integrator="ssprk5"
843  endif
844  if(time_integrator=='ssprk5') then
845  select case(ssprk_order)
846  ! we use ssprk_order to intercompare the different coefficient choices
847  case(3) ! From Gottlieb 2005
848  rk_beta11=0.391752226571890d0
849  rk_beta22=0.368410593050371d0
850  rk_beta33=0.251891774271694d0
851  rk_beta44=0.544974750228521d0
852  rk_beta54=0.063692468666290d0
853  rk_beta55=0.226007483236906d0
854  rk_alfa21=0.444370493651235d0
855  rk_alfa31=0.620101851488403d0
856  rk_alfa41=0.178079954393132d0
857  rk_alfa53=0.517231671970585d0
858  rk_alfa54=0.096059710526147d0
859 
860  rk_alfa22=0.555629506348765d0
861  rk_alfa33=0.379898148511597d0
862  rk_alfa44=0.821920045606868d0
863  rk_alfa55=0.386708617503269d0
864  rk_alfa22=1.0d0-rk_alfa21
865  rk_alfa33=1.0d0-rk_alfa31
866  rk_alfa44=1.0d0-rk_alfa41
872  case(2) ! From Spireti-Ruuth
873  rk_beta11=0.39175222700392d0
874  rk_beta22=0.36841059262959d0
875  rk_beta33=0.25189177424738d0
876  rk_beta44=0.54497475021237d0
877  !rk_beta54=0.06369246925946d0
878  !rk_beta55=0.22600748319395d0
879  rk_alfa21=0.44437049406734d0
880  rk_alfa31=0.62010185138540d0
881  rk_alfa41=0.17807995410773d0
882  rk_alfa53=0.51723167208978d0
883  !rk_alfa54=0.09605971145044d0
884 
885  rk_alfa22=1.0d0-rk_alfa21
886  rk_alfa33=1.0d0-rk_alfa31
887  rk_alfa44=1.0d0-rk_alfa41
888 
889  rka51=0.00683325884039d0
890  rka54=0.12759831133288d0
891  rkb54=0.08460416338212d0
892  rk_beta54=rkb54-rk_beta44*rka51/rk_alfa41
893  rk_alfa54=rka54-rk_alfa44*rka51/rk_alfa41
894 
901  case default
902  call mpistop("Unknown ssprk_order")
903  end select
904  ! the following combinations must be unity
905  !print *,rk_beta55+rk_beta54+rk_alfa53*rk_c3+rk_alfa54*rk_c4+rk_alfa55*rk_c5
906  !print *,rk_alfa22+rk_alfa21
907  !print *,rk_alfa33+rk_alfa31
908  !print *,rk_alfa44+rk_alfa41
909  !print *,rk_alfa55+rk_alfa53+rk_alfa54
910  endif
911  use_imex_scheme=.false.
912  if (time_integrator/='ssprk5')then
913  call mpistop("No such time_integrator for fivestep")
914  endif
915  case default
916  call mpistop("Unknown time_stepper")
917  end select
918 
919  do i = 1, ndim
920  select case (stretch_dim(i))
921  case (undefined, 'none')
923  stretched_dim(i) = .false.
924  case ('uni','uniform')
926  stretched_dim(i) = .true.
927  case ('symm', 'symmetric')
929  stretched_dim(i) = .true.
930  case default
932  stretched_dim(i) = .false.
933  if (mype == 0) print *, 'Got stretch_type = ', stretch_type(i)
934  call mpistop('Unknown stretch type')
935  end select
936  end do
937 
938  ! Harmonize the parameters for dimensional splitting and source splitting
939  if(typedimsplit =='default'.and. dimsplit) typedimsplit='xyyx'
940  if(typedimsplit =='default'.and..not.dimsplit) typedimsplit='unsplit'
941  dimsplit = typedimsplit /='unsplit'
942 
943  if(coordinate==-1) then
945  if(mype==0) then
946  write(*,*) 'Warning: coordinate system is not specified!'
947  write(*,*) 'call set_coordinate_system in usr_init in mod_usr.t'
948  write(*,*) 'Now use Cartesian coordinate'
949  end if
950  end if
951 
952  if(coordinate==cartesian) then
953  slab=.true.
954  slab_uniform=.true.
955  if(any(stretched_dim)) then
957  slab_uniform=.false.
958  end if
959  else
960  slab=.false.
961  slab_uniform=.false.
962  end if
963 
964  if(coordinate==spherical) then
965  if(dimsplit) then
966  if(mype==0)print *,'Warning: spherical symmetry needs dimsplit=F, resetting'
967  dimsplit=.false.
968  end if
969  end if
970 
971  if (ndim==1) dimsplit=.false.
972 
973  ! Type limiter is of integer type for performance
974  allocate(type_limiter(nlevelshi))
976 
977  do level=1,nlevelshi
978  type_limiter(level) = limiter_type(limiter(level))
979  type_gradient_limiter(level) = limiter_type(gradient_limiter(level))
980  end do
981 
982  if (any(limiter(1:nlevelshi)== 'ppm')&
983  .and.(flatsh.and.physics_type=='rho')) then
984  call mpistop(" PPM with flatsh=.true. can not be used with physics_type='rho'!")
985  end if
986 
987  ! Copy boundary conditions to typeboundary, which is used internally
988  {
989  if (any(typeboundary_min^d /= undefined)) then
990  typeboundary(1:nwfluxbc, 2*^d-1) = typeboundary_min^d(1:nwfluxbc)
991  end if
992 
993  if (any(typeboundary_max^d /= undefined)) then
994  typeboundary(1:nwfluxbc, 2*^d) = typeboundary_max^d(1:nwfluxbc)
995  end if
996  }
997 
998  ! psi, tracers take the same boundary type as the first variable
999  if (nwfluxbc<nwflux) then
1000  do iw = nwfluxbc+1, nwflux
1001  typeboundary(iw,:) = typeboundary(1, :)
1002  end do
1003  end if
1004 
1005  if (any(typeboundary == undefined)) then
1006  call mpistop("Not all boundary conditions have been defined")
1007  end if
1008 
1009  do idim=1,ndim
1010  periodb(idim)=(any(typeboundary(:,2*idim-1:2*idim)=='periodic'))
1011  aperiodb(idim)=(any(typeboundary(:,2*idim-1:2*idim)=='aperiodic'))
1012  if (periodb(idim).or.aperiodb(idim)) then
1013  do iw=1,nwflux
1014  if (typeboundary(iw,2*idim-1) .ne. typeboundary(iw,2*idim)) &
1015  call mpistop("Wrong counterpart in periodic boundary")
1016 
1017  if (typeboundary(iw,2*idim-1) /= 'periodic' .and. &
1018  typeboundary(iw,2*idim-1) /= 'aperiodic') then
1019  call mpistop("Each dimension should either have all "//&
1020  "or no variables periodic, some can be aperiodic")
1021  end if
1022  end do
1023  end if
1024  end do
1025  {^nooned
1026  do idim=1,ndim
1027  if(any(typeboundary(:,2*idim-1)=='pole')) then
1028  if(any(typeboundary(:,2*idim-1)/='pole')) typeboundary(:,2*idim-1)='pole'
1029  if(phys_energy) then
1030  windex=2
1031  else
1032  windex=1
1033  end if
1034  typeboundary(:,2*idim-1)='symm'
1035  if(physics_type/='rho') then
1036  select case(coordinate)
1037  case(cylindrical)
1038  typeboundary(phi_+1,2*idim-1)='asymm'
1039  if(physics_type=='mhd') typeboundary(ndir+windex+phi_,2*idim-1)='asymm'
1040  case(spherical)
1041  typeboundary(3:ndir+1,2*idim-1)='asymm'
1042  if(physics_type=='mhd') typeboundary(ndir+windex+2:ndir+windex+ndir,2*idim-1)='asymm'
1043  case default
1044  call mpistop('Pole is in cylindrical, polar, spherical coordinates!')
1045  end select
1046  end if
1047  end if
1048  if(any(typeboundary(:,2*idim)=='pole')) then
1049  if(any(typeboundary(:,2*idim)/='pole')) typeboundary(:,2*idim)='pole'
1050  if(phys_energy) then
1051  windex=2
1052  else
1053  windex=1
1054  end if
1055  typeboundary(:,2*idim)='symm'
1056  if(physics_type/='rho') then
1057  select case(coordinate)
1058  case(cylindrical)
1059  typeboundary(phi_+1,2*idim)='asymm'
1060  if(physics_type=='mhd') typeboundary(ndir+windex+phi_,2*idim)='asymm'
1061  case(spherical)
1062  typeboundary(3:ndir+1,2*idim)='asymm'
1063  if(physics_type=='mhd') typeboundary(ndir+windex+2:ndir+windex+ndir,2*idim)='asymm'
1064  case default
1065  call mpistop('Pole is in cylindrical, polar, spherical coordinates!')
1066  end select
1067  end if
1068  end if
1069  end do
1070  }
1071 
1072  if(any(limiter(1:nlevelshi)=='mp5')) then
1073  nghostcells=3
1074  end if
1075 
1076  if(any(limiter(1:nlevelshi)=='weno5')) then
1077  nghostcells=3
1078  end if
1079 
1080  if(any(limiter(1:nlevelshi)=='weno5nm')) then
1081  nghostcells=3
1082  end if
1083 
1084  if(any(limiter(1:nlevelshi)=='wenoz5')) then
1085  nghostcells=3
1086  end if
1087 
1088  if(any(limiter(1:nlevelshi)=='wenoz5nm')) then
1089  nghostcells=3
1090  end if
1091 
1092  if(any(limiter(1:nlevelshi)=='wenozp5')) then
1093  nghostcells=3
1094  end if
1095 
1096  if(any(limiter(1:nlevelshi)=='wenozp5nm')) then
1097  nghostcells=3
1098  end if
1099 
1100  if(any(limiter(1:nlevelshi)=='weno5cu6')) then
1101  nghostcells=3
1102  end if
1103 
1104  if(any(limiter(1:nlevelshi)=='ppm')) then
1105  nghostcells=4
1106  end if
1107 
1108  if(any(limiter(1:nlevelshi)=='weno7')) then
1109  nghostcells=4
1110  end if
1111 
1112  if(any(limiter(1:nlevelshi)=='mpweno7')) then
1113  nghostcells=4
1114  end if
1115 
1116  if(any(limiter(1:nlevelshi)=='exeno7')) then
1117  nghostcells=4
1118  end if
1119 
1120  ! prolongation in AMR for constrained transport MHD needs even number ghosts
1121  if(stagger_grid .and. refine_max_level>1 .and. mod(nghostcells,2)/=0) then
1123  end if
1124 
1125  ! If a wider stencil is used, extend the number of ghost cells
1127 
1128  select case (coordinate)
1129  {^nooned
1130  case (spherical)
1131  xprob^lim^de=xprob^lim^de*two*dpi;
1132  \}
1133  case (cylindrical)
1134  {
1135  if (^d==phi_) then
1136  xprob^lim^d=xprob^lim^d*two*dpi;
1137  end if
1138  \}
1139  end select
1140 
1141  ! full block size including ghostcells
1142  {ixghi^d = block_nx^d + 2*nghostcells\}
1143  {ixgshi^d = ixghi^d\}
1144 
1145  nx_vec = [{domain_nx^d|, }]
1146  block_nx_vec = [{block_nx^d|, }]
1147 
1148  if (any(nx_vec < 4) .or. any(mod(nx_vec, 2) == 1)) &
1149  call mpistop('Grid size (domain_nx^D) has to be even and >= 4')
1150 
1151  if (any(block_nx_vec < 4) .or. any(mod(block_nx_vec, 2) == 1)) &
1152  call mpistop('Block size (block_nx^D) has to be even and >= 4')
1153 
1154  if (any([ domain_nx^d/block_nx^d ] == 1) .and. mype == 0) then
1155  print *, "TODO: possible bug when domain_nx^D/block_nx^D == 1"
1156  end if
1157 
1158  { if(mod(domain_nx^d,block_nx^d)/=0) &
1159  call mpistop('Grid (domain_nx^D) and block (block_nx^D) must be consistent') \}
1160 
1162  write(unitterm,*)'Error: refine_max_level',refine_max_level,'>nlevelshi ',nlevelshi
1163  call mpistop("Reset nlevelshi and recompile!")
1164  endif
1165 
1166  if (any(stretched_dim)) then
1167  allocate(qstretch(0:nlevelshi,1:ndim),dxfirst(0:nlevelshi,1:ndim),&
1169  allocate(nstretchedblocks(1:nlevelshi,1:ndim))
1170  qstretch(0:nlevelshi,1:ndim)=0.0d0
1171  dxfirst(0:nlevelshi,1:ndim)=0.0d0
1173  {if (stretch_type(^d) == stretch_uni) then
1174  ! first some sanity checks
1175  if(qstretch_baselevel(^d)<1.0d0.or.qstretch_baselevel(^d)==bigdouble) then
1176  if(mype==0) then
1177  write(*,*) 'stretched grid needs finite qstretch_baselevel>1'
1178  write(*,*) 'will try default value for qstretch_baselevel in dimension', ^d
1179  endif
1180  if(xprobmin^d>smalldouble)then
1181  qstretch_baselevel(^d)=(xprobmax^d/xprobmin^d)**(1.d0/dble(domain_nx^d))
1182  else
1183  call mpistop("can not set qstretch_baselevel automatically")
1184  endif
1185  endif
1186  if(mod(block_nx^d,2)==1) &
1187  call mpistop("stretched grid needs even block size block_nxD")
1188  if(mod(domain_nx^d/block_nx^d,2)/=0) &
1189  call mpistop("number level 1 blocks in D must be even")
1190  qstretch(1,^d)=qstretch_baselevel(^d)
1191  dxfirst(1,^d)=(xprobmax^d-xprobmin^d) &
1192  *(1.0d0-qstretch(1,^d))/(1.0d0-qstretch(1,^d)**domain_nx^d)
1193  qstretch(0,^d)=qstretch(1,^d)**2
1194  dxfirst(0,^d)=dxfirst(1,^d)*(1.0d0+qstretch(1,^d))
1195  if(refine_max_level>1)then
1196  do ilev=2,refine_max_level
1197  qstretch(ilev,^d)=dsqrt(qstretch(ilev-1,^d))
1198  dxfirst(ilev,^d)=dxfirst(ilev-1,^d) &
1199  /(1.0d0+dsqrt(qstretch(ilev-1,^d)))
1200  enddo
1201  endif
1202  endif \}
1203  if(mype==0) then
1204  {if(stretch_type(^d) == stretch_uni) then
1205  write(*,*) 'Stretched dimension ', ^d
1206  write(*,*) 'Using stretched grid with qs=',qstretch(0:refine_max_level,^d)
1207  write(*,*) ' and first cell sizes=',dxfirst(0:refine_max_level,^d)
1208  endif\}
1209  end if
1210  {if(stretch_type(^d) == stretch_symm) then
1211  if(mype==0) then
1212  write(*,*) 'will apply symmetric stretch in dimension', ^d
1213  endif
1214  if(mod(block_nx^d,2)==1) &
1215  call mpistop("stretched grid needs even block size block_nxD")
1216  ! checks on the input variable nstretchedblocks_baselevel
1217  if(nstretchedblocks_baselevel(^d)==0) &
1218  call mpistop("need finite even number of stretched blocks at baselevel")
1219  if(mod(nstretchedblocks_baselevel(^d),2)==1) &
1220  call mpistop("need even number of stretched blocks at baselevel")
1221  if(qstretch_baselevel(^d)<1.0d0.or.qstretch_baselevel(^d)==bigdouble) &
1222  call mpistop('stretched grid needs finite qstretch_baselevel>1')
1223  ! compute stretched part to ensure uniform center
1224  ipower=(nstretchedblocks_baselevel(^d)/2)*block_nx^d
1225  if(nstretchedblocks_baselevel(^d)==domain_nx^d/block_nx^d)then
1226  xstretch^d=0.5d0*(xprobmax^d-xprobmin^d)
1227  else
1228  xstretch^d=(xprobmax^d-xprobmin^d) &
1229  /(2.0d0+dble(domain_nx^d-nstretchedblocks_baselevel(^d)*block_nx^d) &
1230  *(1.0d0-qstretch_baselevel(^d))/(1.0d0-qstretch_baselevel(^d)**ipower))
1231  endif
1232  if(xstretch^d>(xprobmax^d-xprobmin^d)*0.5d0) &
1233  call mpistop(" stretched grid part should not exceed full domain")
1234  dxfirst(1,^d)=xstretch^d*(1.0d0-qstretch_baselevel(^d)) &
1235  /(1.0d0-qstretch_baselevel(^d)**ipower)
1236  nstretchedblocks(1,^d)=nstretchedblocks_baselevel(^d)
1237  qstretch(1,^d)=qstretch_baselevel(^d)
1238  qstretch(0,^d)=qstretch(1,^d)**2
1239  dxfirst(0,^d)=dxfirst(1,^d)*(1.0d0+qstretch(1,^d))
1240  dxmid(1,^d)=dxfirst(1,^d)
1241  dxmid(0,^d)=dxfirst(1,^d)*2.0d0
1242  if(refine_max_level>1)then
1243  do ilev=2,refine_max_level
1244  nstretchedblocks(ilev,^d)=2*nstretchedblocks(ilev-1,^d)
1245  qstretch(ilev,^d)=dsqrt(qstretch(ilev-1,^d))
1246  dxfirst(ilev,^d)=dxfirst(ilev-1,^d) &
1247  /(1.0d0+dsqrt(qstretch(ilev-1,^d)))
1248  dxmid(ilev,^d)=dxmid(ilev-1,^d)/2.0d0
1249  enddo
1250  endif
1251  ! sanity check on total domain size:
1252  sizeuniformpart^d=dxfirst(1,^d) &
1253  *(domain_nx^d-nstretchedblocks_baselevel(^d)*block_nx^d)
1254  if(mype==0) then
1255  print *,'uniform part of size=',sizeuniformpart^d
1256  print *,'setting of domain is then=',2*xstretch^d+sizeuniformpart^d
1257  print *,'versus=',xprobmax^d-xprobmin^d
1258  endif
1259  if(dabs(xprobmax^d-xprobmin^d-2*xstretch^d-sizeuniformpart^d)>smalldouble) then
1260  call mpistop('mismatch in domain size!')
1261  endif
1262  endif \}
1263  dxfirst_1mq(0:refine_max_level,1:ndim)=dxfirst(0:refine_max_level,1:ndim) &
1264  /(1.0d0-qstretch(0:refine_max_level,1:ndim))
1265  end if
1266 
1267  dx_vec = [{xprobmax^d-xprobmin^d|, }] / nx_vec
1268 
1269  if (mype==0) then
1270  write(c_ndim, '(I1)') ^nd
1271  write(unitterm, '(A30,' // c_ndim // '(I0," "))') &
1272  ' Domain size (cells): ', nx_vec
1273  write(unitterm, '(A30,' // c_ndim // '(E9.3," "))') &
1274  ' Level one dx: ', dx_vec
1275  end if
1276 
1277  if (any(dx_vec < smalldouble)) &
1278  call mpistop("Incorrect domain size (too small grid spacing)")
1279 
1280  dx(:, 1) = dx_vec
1281 
1282  if(sum(w_refine_weight(:))==0) w_refine_weight(1) = 1.d0
1283  if(dabs(sum(w_refine_weight(:))-1.d0)>smalldouble) then
1284  write(unitterm,*) "Sum of all elements in w_refine_weight be 1.d0"
1285  call mpistop("Reset w_refine_weight so the sum is 1.d0")
1286  end if
1287 
1288  ! TRAC is only for temperature (energy)
1289  if(.not.phys_energy .and. trac) then
1290  if (mype==0) write(unitterm, '(A)') "Warning, TRAC is not for energy-independent problems, change trac to false."
1291  trac=.false.
1292  end if
1293 
1294  if (mype==0) write(unitterm, '(A30)', advance='no') 'Refine estimation: '
1295 
1296  select case (refine_criterion)
1297  case (0)
1298  if (mype==0) write(unitterm, '(A)') "user defined"
1299  case (1)
1300  if (mype==0) write(unitterm, '(A)') "relative error"
1301  case (2)
1302  if (mype==0) write(unitterm, '(A)') "Lohner's original scheme"
1303  case (3)
1304  if (mype==0) write(unitterm, '(A)') "Lohner's scheme"
1305  case default
1306  call mpistop("Unknown error estimator, change refine_criterion")
1307  end select
1308 
1309  if (tfixgrid<bigdouble/2.0d0) then
1310  if(mype==0)print*,'Warning, at time=',tfixgrid,'the grid will be fixed'
1311  end if
1312  if (itfixgrid<biginteger/2) then
1313  if(mype==0)print*,'Warning, at iteration=',itfixgrid,'the grid will be fixed'
1314  end if
1315  if (ditregrid>1) then
1316  if(mype==0)print*,'Note, Grid is reconstructed once every',ditregrid,'iterations'
1317  end if
1318 
1319 
1320  do islice=1,nslices
1321  select case(slicedir(islice))
1322  {case(^d)
1323  if(slicecoord(islice)<xprobmin^d.or.slicecoord(islice)>xprobmax^d) &
1324  write(uniterr,*)'Warning in read_par_files: ', &
1325  'Slice ', islice, ' coordinate',slicecoord(islice),'out of bounds for dimension ',slicedir(islice)
1326  \}
1327  end select
1328  end do
1329 
1330  if (mype==0) then
1331  write(unitterm, '(A30,A,A)') 'restart_from_file: ', ' ', trim(restart_from_file)
1332  write(unitterm, '(A30,L1)') 'converting: ', convert
1333  write(unitterm, '(A)') ''
1334  endif
1335 
1336  end subroutine read_par_files
1337 
1338  !> Routine to find entries in a string
!> Split a line into at most n_max fields, where a field is any maximal run
!> of characters containing none of the given delimiters.
subroutine get_fields_string(line, delims, n_max, fields, n_found, fully_read)
  !> The line from which we want to read
  character(len=*), intent(in)    :: line
  !> A string with delimiters. For example delims = " ,'"""//char(9)
  character(len=*), intent(in)    :: delims
  !> Maximum number of entries to read in
  integer, intent(in)             :: n_max
  !> Number of entries found
  integer, intent(inout)          :: n_found
  !> Fields in the strings
  character(len=*), intent(inout) :: fields(n_max)
  !> Present and .true. when only delimiters remain after the last field
  logical, intent(out), optional  :: fully_read

  integer :: field_start, field_end, pos, cursor

  n_found = 0
  cursor  = 0

  split_loop: do
    if (n_found >= n_max) exit split_loop

    ! Locate the first non-delimiter character after the cursor
    pos = verify(line(cursor+1:), delims)
    if (pos == 0) exit split_loop          ! only delimiters remain

    n_found     = n_found + 1
    field_start = cursor + pos             ! absolute position in 'line'

    ! Find the delimiter terminating this field (searching after its start)
    pos = scan(line(field_start+1:), delims)
    if (pos == 0) then
      field_end = len(line)                ! field extends to end of line
    else
      field_end = field_start + pos - 1
    end if

    fields(n_found) = line(field_start:field_end)
    cursor = field_end                     ! resume the search from here
  end do split_loop

  if (present(fully_read)) then
    ! Report whether the remainder of the line holds only delimiters
    pos = verify(line(cursor+1:), delims)
    fully_read = (pos == 0)
  end if

end subroutine get_fields_string
1386 
1387  subroutine saveamrfile(ifile)
 !> Dispatcher for all output products: a .dat snapshot (plus optional
 !> particle state), a slice, a collapsed view, a log entry, or a
 !> user-defined analysis, selected by the integer file-type code ifile.
 !  NOTE(review): this rendered listing has gaps (doxygen lines 1388-1390,
 !  1402, 1406 and 1416 are absent, e.g. the formatted-convert call and the
 !  regression-test log call); confirm against the repository source.
1391  use mod_particles, only: write_particles_snapshot
1392  use mod_slice, only: write_slice
1393  use mod_collapse, only: write_collapsed
 !  ifile: output-type code (fileout_, fileslice_, filecollapse_, filelog_,
 !  fileanalysis_)
1394  integer:: ifile
1395 
1396  select case (ifile)
1397  case (fileout_)
1398  ! Write .dat snapshot
1399  call write_snapshot()
1400 
1401  ! Generate formatted output (e.g., VTK)
1403 
 ! Optionally dump the particle state belonging to this snapshot
1404  if(use_particles) call write_particles_snapshot()
1405 
1407  case (fileslice_)
1408  call write_slice
1409  case (filecollapse_)
1410  call write_collapsed
1411  case (filelog_)
 ! Log format is user-selectable through the typefilelog setting
1412  select case (typefilelog)
1413  case ('default')
1414  call printlog_default
1415  case ('regression_test')
1417  case ('special')
 ! 'special' requires the user to have associated usr_print_log
1418  if (.not. associated(usr_print_log)) then
1419  call mpistop("usr_print_log not defined")
1420  else
1421  call usr_print_log()
1422  end if
1423  case default
1424  call mpistop("Error in SaveFile: Unknown typefilelog")
1425  end select
1426  case (fileanalysis_)
 ! Optional user analysis hook; silently skipped when not associated
1427  if (associated(usr_write_analysis)) then
1428  call usr_write_analysis()
1429  end if
1430  case default
1431  write(*,*) 'No save method is defined for ifile=',ifile
1432  call mpistop("")
1433  end select
1434 
1435  ! opedit: Flush stdout and stderr from time to time.
1436  flush(unit=unitterm)
1437 
1438  end subroutine saveamrfile
1439 
1440  !> Standard method for creating a new output file
1441  subroutine create_output_file(fh, ix, extension, suffix)
 !  Builds the output name as base_filename [// suffix] // NNNN // extension
 !  (NNNN = zero-padded 4-digit index ix), deletes any existing file of that
 !  name, and opens a fresh one for writing through MPI I/O.
 !  NOTE(review): doxygen line 1442 (presumably a use statement) is missing
 !  from this listing.
1443  integer, intent(out) :: fh !< File handle
1444  integer, intent(in) :: ix !< Index of file
1445  character(len=*), intent(in) :: extension !< Extension of file
1446  character(len=*), intent(in), optional :: suffix !< Optional suffix
1447  character(len=std_len) :: filename
1448  integer :: amode
1449 
 ! The i4.4 edit descriptor below only supports indices 0..9999
1450  if (ix >= 10000) then
1451  call mpistop("Number of output files is limited to 10000 (0...9999)")
1452  end if
1453 
1454  if (present(suffix)) then
1455  write(filename,"(a,a,i4.4,a)") trim(base_filename), &
1456  trim(suffix), ix, extension
1457  else
1458  write(filename,"(a,i4.4,a)") trim(base_filename), ix, extension
1459  end if
1460 
1461  ! MPI cannot easily replace existing files
 ! ... so remove any pre-existing file via ordinary Fortran open/close first
1462  open(unit=unitsnapshot,file=filename,status='replace')
1463  close(unitsnapshot, status='delete')
1464 
 ! Create the file write-only, private to this rank (MPI_COMM_SELF)
1465  amode = ior(mpi_mode_create, mpi_mode_wronly)
1466  call mpi_file_open(mpi_comm_self,filename,amode, &
1467  mpi_info_null, fh, ierrmpi)
1468 
1469  if (ierrmpi /= 0) then
1470  print *, "Error, cannot create file ", trim(filename)
1471  call mpistop("Fatal error")
1472  end if
1473 
1474  end subroutine create_output_file
1475 
1476  ! Check if a snapshot exists
1477  logical function snapshot_exists(ix)
 !  Returns .true. when the snapshot file base_filename//NNNN//".dat"
 !  (NNNN = zero-padded 4-digit index ix) exists on disk.
 !  NOTE(review): doxygen line 1478 (presumably a use statement) is missing
 !  from this listing.
1479  integer, intent(in) :: ix !< Index of snapshot
1480  character(len=std_len) :: filename
1481 
1482  write(filename, "(a,i4.4,a)") trim(base_filename), ix, ".dat"
1483  inquire(file=trim(filename), exist=snapshot_exists)
1484  end function snapshot_exists
1485 
!> Extract the snapshot index from a snapshot file name.
!>
!> Assumes the trimmed name ends in "NNNN.dat" (e.g. basename0042.dat); the
!> four digits directly preceding the extension are read as the index.
integer function get_snapshot_index(filename) result(idx)
  character(len=*), intent(in) :: filename
  integer :: first_digit

  ! The last 8 significant characters are "NNNN.dat", so the index field
  ! starts 7 characters before the end of the trimmed name.
  first_digit = len_trim(filename) - 7
  read(filename(first_digit:first_digit+3), '(i4)') idx
end function get_snapshot_index
1494 
1495  !> Write header for a snapshot
1496  !>
1497  !> If you edit the header, don't forget to update: snapshot_write_header(),
1498  !> snapshot_read_header(), doc/fileformat.md, tools/python/dat_reader.py
1500  subroutine snapshot_write_header(fh, offset_tree, offset_block)
 !  Writes the fixed-layout .dat header: version, section offsets, grid and
 !  physics metadata, variable names, and restart counters. Must stay in
 !  sync with snapshot_read_header(), doc/fileformat.md and
 !  tools/python/dat_reader.py.
 !  NOTE(review): doxygen lines 1499 and 1502 (use statements) are missing
 !  from this listing.
1501  use mod_physics
1503  use mod_slice, only: slicenext
1504  integer, intent(in) :: fh !< File handle
1505  integer(kind=MPI_OFFSET_KIND), intent(in) :: offset_tree !< Offset of tree info
1506  integer(kind=MPI_OFFSET_KIND), intent(in) :: offset_block !< Offset of block data
1507  integer, dimension(MPI_STATUS_SIZE) :: st
1508  integer :: iw, er
1509 
1510  call mpi_file_write(fh, version_number, 1, mpi_integer, st, er)
 ! Offsets are stored as default (4-byte) integers in the header.
 ! NOTE(review): int() truncates MPI_OFFSET_KIND — assumes the offsets fit
 ! in a default integer; confirm against the reader's expectation.
1511  call mpi_file_write(fh, int(offset_tree), 1, mpi_integer, st, er)
1512  call mpi_file_write(fh, int(offset_block), 1, mpi_integer, st, er)
1513  call mpi_file_write(fh, nw, 1, mpi_integer, st, er)
1514  call mpi_file_write(fh, ndir, 1, mpi_integer, st, er)
1515  call mpi_file_write(fh, ndim, 1, mpi_integer, st, er)
1516  call mpi_file_write(fh, levmax, 1, mpi_integer, st, er)
1517  call mpi_file_write(fh, nleafs, 1, mpi_integer, st, er)
1518  call mpi_file_write(fh, nparents, 1, mpi_integer, st, er)
1519  call mpi_file_write(fh, it, 1, mpi_integer, st, er)
1520  ! Note: It is nice when this double has an even number of 4 byte
1521  ! integers before it (for alignment)
1522  call mpi_file_write(fh, global_time, 1, mpi_double_precision, st, er)
1523 
 ! [ xprobmin^d ] etc. are AMRVAC macros expanding to one entry per dimension
1524  call mpi_file_write(fh, [ xprobmin^d ], ndim, &
1525  mpi_double_precision, st, er)
1526  call mpi_file_write(fh, [ xprobmax^d ], ndim, &
1527  mpi_double_precision, st, er)
1528  call mpi_file_write(fh, [ domain_nx^d ], ndim, &
1529  mpi_integer, st, er)
1530  call mpi_file_write(fh, [ block_nx^d ], ndim, &
1531  mpi_integer, st, er)
1532 
1533  ! Periodicity (assume all variables are periodic if one is)
1534  call mpi_file_write(fh, periodb, ndim, mpi_logical, st, er)
1535 
1536  ! Geometry
1537  call mpi_file_write(fh, geometry_name(1:name_len), &
1538  name_len, mpi_character, st, er)
1539 
1540  ! Write stagger grid mark
1541  call mpi_file_write(fh, stagger_grid, 1, mpi_logical, st, er)
1542 
 ! One fixed-width name record per conserved variable
1543  do iw = 1, nw
1544  call mpi_file_write(fh, cons_wnames(iw), name_len, mpi_character, st, er)
1545  end do
1546 
1547  ! Physics related information
1548  call mpi_file_write(fh, physics_type, name_len, mpi_character, st, er)
1549 
1550  ! Format:
1551  ! integer :: n_par
1552  ! double precision :: values(n_par)
1553  ! character(n_par * name_len) :: names
1554  call phys_write_info(fh)
1555 
1556  ! Write snapshotnext etc., which is useful for restarting.
1557  ! Note we add one, since snapshotnext is updated *after* this procedure
1558  if(pass_wall_time) then
1559  call mpi_file_write(fh, snapshotnext, 1, mpi_integer, st, er)
1560  else
1561  call mpi_file_write(fh, snapshotnext+1, 1, mpi_integer, st, er)
1562  end if
1563  call mpi_file_write(fh, slicenext, 1, mpi_integer, st, er)
1564  call mpi_file_write(fh, collapsenext, 1, mpi_integer, st, er)
1565 
1566  end subroutine snapshot_write_header
1567 
1568  !> Read header for a snapshot
1569  !>
1570  !> If you edit the header, don't forget to update: snapshot_write_header(),
1571  !> snapshot_read_header(), doc/fileformat.md, tools/python/dat_reader.py
1572  subroutine snapshot_read_header(fh, offset_tree, offset_block)
 !  Reads the .dat header written by snapshot_write_header, sequentially and
 !  in the same order, validating each entry against the current run's
 !  settings and aborting via mpistop on any incompatible mismatch.
 !  NOTE(review): doxygen lines 1573-1574 (use statements) are missing from
 !  this listing.
1575  use mod_physics, only: physics_type
1576  use mod_slice, only: slicenext
1577  integer, intent(in) :: fh !< File handle
1578  integer(MPI_OFFSET_KIND), intent(out) :: offset_tree !< Offset of tree info
1579  integer(MPI_OFFSET_KIND), intent(out) :: offset_block !< Offset of block data
1580  integer :: i, version
1581  integer :: ibuf(ndim), iw
1582  double precision :: rbuf(ndim)
1583  integer, dimension(MPI_STATUS_SIZE) :: st
1584  character(len=name_len), allocatable :: var_names(:), param_names(:)
1585  double precision, allocatable :: params(:)
1586  character(len=name_len) :: phys_name, geom_name
1587  integer :: er, n_par, tmp_int
1588  logical :: stagger_mark_dat, periodic(ndim)
1589 
1590  ! Version number
1591  call mpi_file_read(fh, version, 1, mpi_integer, st, er)
1592  if (all(compatible_versions /= version)) then
1593  call mpistop("Incompatible file version (maybe old format?)")
1594  end if
1595 
1596  ! offset_tree
1597  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1598  offset_tree = ibuf(1)
1599 
1600  ! offset_block
1601  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1602  offset_block = ibuf(1)
1603 
1604  ! nw
1605  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1606  nw_found=ibuf(1)
 ! A differing variable count is tolerated with a warning only
1607  if (nw /= ibuf(1)) then
1608  write(*,*) "nw=",nw," and nw found in restart file=",ibuf(1)
1609  write(*,*) "Please be aware of changes in w at restart."
1610  !call mpistop("currently, changing nw at restart is not allowed")
1611  end if
1612 
1613  ! ndir
1614  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1615  if (ibuf(1) /= ndir) then
1616  write(*,*) "ndir in restart file = ",ibuf(1)
1617  write(*,*) "ndir = ",ndir
1618  call mpistop("reset ndir to ndir in restart file")
1619  end if
1620 
1621  ! ndim
1622  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1623  if (ibuf(1) /= ndim) then
1624  write(*,*) "ndim in restart file = ",ibuf(1)
1625  write(*,*) "ndim = ",ndim
1626  call mpistop("reset ndim to ndim in restart file")
1627  end if
1628 
1629  ! levmax
1630  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1631  if (ibuf(1) > refine_max_level) then
1632  write(*,*) "number of levels in restart file = ",ibuf(1)
1633  write(*,*) "refine_max_level = ",refine_max_level
1634  call mpistop("refine_max_level < num. levels in restart file")
1635  end if
1636 
1637  ! nleafs
1638  call mpi_file_read(fh, nleafs, 1, mpi_integer, st, er)
1639 
1640  ! nparents
1641  call mpi_file_read(fh, nparents, 1, mpi_integer, st, er)
1642 
1643  ! it
1644  call mpi_file_read(fh, it, 1, mpi_integer, st, er)
1645 
1646  ! global time
1647  call mpi_file_read(fh, global_time, 1, mpi_double_precision, st, er)
1648 
1649  ! xprobmin^D
 ! Exact (bitwise) agreement of the domain bounds is required
1650  call mpi_file_read(fh,rbuf(1:ndim),ndim,mpi_double_precision,st,er)
1651  if (maxval(abs(rbuf(1:ndim) - [ xprobmin^d ])) > 0) then
1652  write(*,*) "Error: xprobmin differs from restart data: ", rbuf(1:ndim)
1653  call mpistop("change xprobmin^D in par file")
1654  end if
1655 
1656  ! xprobmax^D
1657  call mpi_file_read(fh,rbuf(1:ndim),ndim,mpi_double_precision,st,er)
1658  if (maxval(abs(rbuf(1:ndim) - [ xprobmax^d ])) > 0) then
1659  write(*,*) "Error: xprobmax differs from restart data: ", rbuf(1:ndim)
1660  call mpistop("change xprobmax^D in par file")
1661  end if
1662 
1663  ! domain_nx^D
1664  call mpi_file_read(fh,ibuf(1:ndim), ndim, mpi_integer,st,er)
1665  if (any(ibuf(1:ndim) /= [ domain_nx^d ])) then
1666  write(*,*) "Error: mesh size differs from restart data: ", ibuf(1:ndim)
1667  call mpistop("change domain_nx^D in par file")
1668  end if
1669 
1670  ! block_nx^D
1671  call mpi_file_read(fh,ibuf(1:ndim), ndim, mpi_integer,st,er)
1672  if (any(ibuf(1:ndim) /= [ block_nx^d ])) then
1673  write(*,*) "Error: block size differs from restart data:", ibuf(1:ndim)
1674  call mpistop("change block_nx^D in par file")
1675  end if
1676 
1677  ! From version 5, read more info about the grid
1678  if (version > 4) then
1679  call mpi_file_read(fh, periodic, ndim, mpi_logical, st, er)
1680  if (any(periodic .neqv. periodb)) &
1681  call mpistop("change in periodicity in par file")
1682 
1683  call mpi_file_read(fh, geom_name, name_len, mpi_character, st, er)
1684 
1685  if (geom_name /= geometry_name(1:name_len)) then
1686  write(*,*) "type of coordinates in data is: ", geom_name
1687  call mpistop("select the correct coordinates in mod_usr.t file")
1688  end if
1689 
1690  call mpi_file_read(fh, stagger_mark_dat, 1, mpi_logical, st, er)
1691  if (stagger_grid .neqv. stagger_mark_dat) then
1692  write(*,*) "Error: stagger grid flag differs from restart data:", stagger_mark_dat
1693  call mpistop("change parameter to use stagger grid")
1694  end if
1695  end if
1696 
1697  ! From version 4 onwards, the later parts of the header must be present
1698  if (version > 3) then
1699  ! w_names (not used here)
1700  allocate(var_names(nw_found))
1701  do iw = 1, nw_found
1702  call mpi_file_read(fh, var_names(iw), name_len, mpi_character, st, er)
1703  end do
1704 
1705  ! Physics related information
1706  call mpi_file_read(fh, phys_name, name_len, mpi_character, st, er)
1707 
 ! Mismatching physics type is currently tolerated (check disabled)
1708  if (phys_name /= physics_type) then
1709 ! call mpistop("Cannot restart with a different physics type")
1710  end if
1711 
1712  call mpi_file_read(fh, n_par, 1, mpi_integer, st, er)
1713  allocate(params(n_par))
1714  allocate(param_names(n_par))
1715  call mpi_file_read(fh, params, n_par, mpi_double_precision, st, er)
1716  call mpi_file_read(fh, param_names, name_len * n_par, mpi_character, st, er)
1717 
1718  ! Read snapshotnext etc. for restarting
1719  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1720 
1721  ! Only set snapshotnext if the user hasn't specified it
1722  if (snapshotnext == -1) snapshotnext = tmp_int
1723 
1724  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1725  if (slicenext == -1) slicenext = tmp_int
1726 
1727  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1728  if (collapsenext == -1) collapsenext = tmp_int
1729  else
1730  ! Guess snapshotnext from file name if not set
1731  if (snapshotnext == -1) &
 !  NOTE(review): the continuation line (doxygen line 1732, presumably
 !  snapshotnext = get_snapshot_index(...)) is absent from this listing
1733  ! Set slicenext and collapsenext if not set
1734  if (slicenext == -1) slicenext = 0
1735  if (collapsenext == -1) collapsenext = 0
1736  end if
1737 
1738  ! Still used in convert
 !  NOTE(review): an assignment (doxygen line 1739) is absent from this
 !  listing; confirm against the repository source
1740 
1741  end subroutine snapshot_read_header
1742 
1743  !> Compute number of elements in index range
1744  pure integer function count_ix(ixO^L)
 !  ^L and ^d are AMRVAC dimension-preprocessor macros: ixO^L expands to the
 !  per-dimension bounds (ixOmin1,...,ixOmax1,...), so this returns the
 !  total number of cells in the (hyper)rectangular index range, i.e. the
 !  product over dimensions of (max - min + 1).
1745  integer, intent(in) :: ixO^L
1746 
1747  count_ix = product([ ixomax^d ] - [ ixomin^d ] + 1)
1748  end function count_ix
1749 
1750  !> Determine the shape of a block for output (whether to include ghost cells,
1751  !> and on which sides)
1752  subroutine block_shape_io(igrid, n_ghost, ixO^L, n_values)
 !  Computes the index range and value count used when writing block igrid
 !  to a snapshot, optionally including physical-boundary ghost layers.
 !  NOTE(review): doxygen line 1753 (presumably a use statement) is missing
 !  from this listing.
1754 
1755  integer, intent(in) :: igrid
1756  !> nghost(1:ndim) contains the number of ghost cells on the block's minimum
1757  !> boundaries, and nghost(ndim+1:2*ndim) on the block's maximum boundaries
1758  integer, intent(out) :: n_ghost(2*ndim)
1759  !> Index range on output block
1760  integer, intent(out) :: ixO^L
1761  !> Number of cells/values in output
1762  integer, intent(out) :: n_values
1763 
1764  integer :: idim
1765 
1766  n_ghost(:) = 0
1767 
 ! Ghost layers are only written for faces on the physical domain boundary,
 ! and only when the user requested save_physical_boundary
1768  if(save_physical_boundary) then
1769  do idim=1,ndim
1770  ! Include ghost cells on lower boundary
1771  if(ps(igrid)%is_physical_boundary(2*idim-1)) n_ghost(idim)=nghostcells
1772  ! Include ghost cells on upper boundary
1773  if(ps(igrid)%is_physical_boundary(2*idim)) n_ghost(ndim+idim)=nghostcells
1774  end do
1775  end if
1776 
 ! Extend the interior mesh range by the selected ghost layers per side
 ! ({...\} is the AMRVAC per-dimension expansion macro)
1777  {ixomin^d = ixmlo^d - n_ghost(^d)\}
1778  {ixomax^d = ixmhi^d + n_ghost(ndim+^d)\}
1779 
 ! Total number of doubles for this block: cell count times nw variables
1780  n_values = count_ix(ixo^l) * nw
1781 
1782  end subroutine block_shape_io
1783 
1786  subroutine write_snapshot
 !  Writes a complete .dat snapshot: rank 0 writes the header, forest and
 !  per-block metadata, then all ranks send their block data to rank 0,
 !  which streams it to the file and finally rewrites header/offsets.
 !  NOTE(review): use statements preceding "use mod_physics" (stripped
 !  doxygen lines) are absent from this listing.
1787  use mod_physics
1788 
1789  integer :: file_handle, igrid, Morton_no, iwrite
1790  integer :: ipe, ix_buffer(2*ndim+1), n_values
1791  integer :: ixO^L, n_ghost(2*ndim)
1792  integer :: ixOs^L,n_values_stagger
1793  integer :: iorecvstatus(mpi_status_size)
1794  integer :: ioastatus(mpi_status_size)
1795  integer :: igrecvstatus(mpi_status_size)
1796  integer :: istatus(mpi_status_size)
1797  type(tree_node), pointer :: pnode
1798  integer(kind=MPI_OFFSET_KIND) :: offset_tree_info
1799  integer(kind=MPI_OFFSET_KIND) :: offset_block_data
1800  integer(kind=MPI_OFFSET_KIND) :: offset_offsets
1801  double precision, allocatable :: w_buffer(:)
1802 
1803  integer, allocatable :: block_ig(:, :)
1804  integer, allocatable :: block_lvl(:)
1805  integer(kind=MPI_OFFSET_KIND), allocatable :: block_offset(:)
1806 
1807  call mpi_barrier(icomm, ierrmpi)
1808 
1809  ! Allocate send/receive buffer
 ! Sized for a full block including ghost cells (worst case), plus the
 ! staggered-face variables when a staggered grid is in use
1810  n_values = count_ix(ixg^ll) * nw
1811  if(stagger_grid) then
1812  n_values = n_values + count_ix(ixgs^ll) * nws
1813  end if
1814  allocate(w_buffer(n_values))
1815 
1816  ! Allocate arrays with information about grid blocks
1817  allocate(block_ig(ndim, nleafs))
1818  allocate(block_lvl(nleafs))
1819  allocate(block_offset(nleafs+1))
1820 
1821  ! master processor
1822  if (mype==0) then
1823  call create_output_file(file_handle, snapshotnext, ".dat")
1824 
1825  ! Don't know offsets yet, we will write header again later
1826  offset_tree_info = -1
1827  offset_block_data = -1
1828  call snapshot_write_header(file_handle, offset_tree_info, &
1829  offset_block_data)
1830 
1831  call mpi_file_get_position(file_handle, offset_tree_info, ierrmpi)
1832 
1833  call write_forest(file_handle)
1834 
1835  ! Collect information about the spatial index (ig^D) and refinement level
1836  ! of leaves
1837  do morton_no = morton_start(0), morton_stop(npe-1)
1838  igrid = sfc(1, morton_no)
1839  ipe = sfc(2, morton_no)
1840  pnode => igrid_to_node(igrid, ipe)%node
1841 
1842  block_ig(:, morton_no) = [ pnode%ig^d ]
1843  block_lvl(morton_no) = pnode%level
1844  block_offset(morton_no) = 0 ! Will be determined later
1845  end do
1846 
1847  call mpi_file_write(file_handle, block_lvl, size(block_lvl), &
1848  mpi_integer, istatus, ierrmpi)
1849 
1850  call mpi_file_write(file_handle, block_ig, size(block_ig), &
1851  mpi_integer, istatus, ierrmpi)
1852 
1853  ! Block offsets are currently unknown, but will be overwritten later
1854  call mpi_file_get_position(file_handle, offset_offsets, ierrmpi)
1855  call mpi_file_write(file_handle, block_offset(1:nleafs), nleafs, &
1856  mpi_offset, istatus, ierrmpi)
1857 
1858  call mpi_file_get_position(file_handle, offset_block_data, ierrmpi)
1859 
1860  ! Check whether data was written as expected
1861  if (offset_block_data - offset_tree_info /= &
1862  (nleafs + nparents) * size_logical + &
1863  nleafs * ((1+ndim) * size_int + 2 * size_int)) then
1864  if (mype == 0) then
1865  print *, "Warning: MPI_OFFSET type /= 8 bytes"
1866  print *, "This *could* cause problems when reading .dat files"
1867  end if
1868  end if
1869 
1870  block_offset(1) = offset_block_data
1871  iwrite = 0
1872  end if
1873 
 ! Every rank packs its own leaf blocks; rank 0 writes directly, other
 ! ranks send to rank 0 (tagged with the Morton number)
1874  do morton_no=morton_start(mype), morton_stop(mype)
1875  igrid = sfc_to_igrid(morton_no)
1876  itag = morton_no
1877 
1878  if (nwaux>0) then
1879  ! extra layer around mesh only for later averaging in convert
1880  ! set dxlevel value for use in gradient subroutine,
1881  ! which might be used in getaux
1882  saveigrid=igrid
1883  ^d&dxlevel(^d)=rnode(rpdx^d_, igrid);
1884  block=>ps(igrid)
1885  call phys_get_aux(.true., ps(igrid)%w, ps(igrid)%x, ixg^ll, &
1886  ixm^ll^ladd1, "write_snapshot")
1887  endif
1888 
1889  call block_shape_io(igrid, n_ghost, ixo^l, n_values)
1890  if(stagger_grid) then
 ! Append the staggered (face-centred) variables after the cell data;
 ! the staggered range extends one cell below the output range
1891  w_buffer(1:n_values) = pack(ps(igrid)%w(ixo^s, 1:nw), .true.)
1892  {ixosmin^d = ixomin^d -1\}
1893  {ixosmax^d = ixomax^d \}
1894  n_values_stagger= count_ix(ixos^l)*nws
1895  w_buffer(n_values+1:n_values+n_values_stagger) = pack(ps(igrid)%ws(ixos^s, 1:nws), .true.)
1896  n_values=n_values+n_values_stagger
1897  else
1898  w_buffer(1:n_values) = pack(ps(igrid)%w(ixo^s, 1:nw), .true.)
1899  end if
 ! ix_buffer(1) carries the value count, ix_buffer(2:) the ghost layout
1900  ix_buffer(1) = n_values
1901  ix_buffer(2:) = n_ghost
1902 
1903  if (mype /= 0) then
1904  call mpi_send(ix_buffer, 2*ndim+1, &
1905  mpi_integer, 0, itag, icomm, ierrmpi)
1906  call mpi_send(w_buffer, n_values, &
1907  mpi_double_precision, 0, itag, icomm, ierrmpi)
1908  else
1909  iwrite = iwrite+1
1910  call mpi_file_write(file_handle, ix_buffer(2:), &
1911  2*ndim, mpi_integer, istatus, ierrmpi)
1912  call mpi_file_write(file_handle, w_buffer, &
1913  n_values, mpi_double_precision, istatus, ierrmpi)
1914 
1915  ! Set offset of next block
1916  block_offset(iwrite+1) = block_offset(iwrite) + &
1917  int(n_values, mpi_offset_kind) * size_double + &
1918  2 * ndim * size_int
1919  end if
1920  end do
1921 
1922  ! Write data communicated from other processors
1923  if (mype == 0) then
1924  do ipe = 1, npe-1
1925  do morton_no=morton_start(ipe), morton_stop(ipe)
1926  iwrite=iwrite+1
1927  itag=morton_no
1928 
1929  call mpi_recv(ix_buffer, 2*ndim+1, mpi_integer, ipe, itag, icomm,&
1930  igrecvstatus, ierrmpi)
1931  n_values = ix_buffer(1)
1932 
1933  call mpi_recv(w_buffer, n_values, mpi_double_precision,&
1934  ipe, itag, icomm, iorecvstatus, ierrmpi)
1935 
1936  call mpi_file_write(file_handle, ix_buffer(2:), &
1937  2*ndim, mpi_integer, istatus, ierrmpi)
1938  call mpi_file_write(file_handle, w_buffer, &
1939  n_values, mpi_double_precision, istatus, ierrmpi)
1940 
1941  ! Set offset of next block
1942  block_offset(iwrite+1) = block_offset(iwrite) + &
1943  int(n_values, mpi_offset_kind) * size_double + &
1944  2 * ndim * size_int
1945  end do
1946  end do
1947 
1948  ! Write block offsets (now we know them)
1949  call mpi_file_seek(file_handle, offset_offsets, mpi_seek_set, ierrmpi)
1950  call mpi_file_write(file_handle, block_offset(1:nleafs), nleafs, &
1951  mpi_offset, istatus, ierrmpi)
1952 
1953  ! Write header again, now with correct offsets
1954  call mpi_file_seek(file_handle, 0_mpi_offset_kind, mpi_seek_set, ierrmpi)
1955  call snapshot_write_header(file_handle, offset_tree_info, &
1956  offset_block_data)
1957 
1958  call mpi_file_close(file_handle, ierrmpi)
1959  end if
1960 
1961  call mpi_barrier(icomm, ierrmpi)
1962 
1963  end subroutine write_snapshot
1964 
1965  !> Routine to read in snapshots (.dat files). When it cannot recognize the
1966  !> file version, it will automatically try the 'old' reader.
 1967  subroutine read_snapshot
 1969  use mod_forest
 1971  use mod_slice, only: slicenext
 1972 
  ! Scratch buffers: ix_buffer carries per-block ghost-cell counts plus the
  ! payload size; w_buffer is the flattened block data read from file.
 1973  integer :: ix_buffer(2*ndim+1), n_values, n_values_stagger
 1974  integer :: ixO^L, ixOs^L
 1975  integer :: file_handle, amode, igrid, Morton_no, iread
 1976  integer :: istatus(mpi_status_size)
 1977  integer :: iorecvstatus(mpi_status_size)
 1978  integer :: ipe,inrecv,nrecv, file_version
 1979  logical :: fexist
 1980  integer(MPI_OFFSET_KIND) :: offset_tree_info
 1981  integer(MPI_OFFSET_KIND) :: offset_block_data
 1982  double precision, allocatable :: w_buffer(:)
 1983  double precision, dimension(:^D&,:), allocatable :: w, ws
 1984 
  ! Rank 0 checks that the restart file exists and reads its version tag.
 1985  if (mype==0) then
 1986  inquire(file=trim(restart_from_file), exist=fexist)
 1987  if (.not.fexist) call mpistop(trim(restart_from_file)//" not found!")
 1988 
 1989  call mpi_file_open(mpi_comm_self,restart_from_file,mpi_mode_rdonly, &
 1990  mpi_info_null,file_handle,ierrmpi)
 1991  call mpi_file_read(file_handle, file_version, 1, mpi_integer, &
 1992  istatus, ierrmpi)
 1993  end if
 1994 
  ! All ranks must agree on the version before choosing a reader.
 1995  call mpi_bcast(file_version,1,mpi_integer,0,icomm,ierrmpi)
 1996 
  ! Unknown version: fall back to the legacy (pre-versioned) reader.
 1997  if (all(compatible_versions /= file_version)) then
 1998  if (mype == 0) print *, "Unknown version, trying old snapshot reader..."
 1999  call mpi_file_close(file_handle,ierrmpi)
 2000  call read_snapshot_old()
 2001 
 2002  ! Guess snapshotnext from file name if not set
 2003  if (snapshotnext == -1) &
  ! NOTE(review): the continuation line of the statement above (the
  ! snapshotnext guess, original line 2004) is missing from this listing
  ! -- confirm against the repository source.
 2005  ! Set slicenext and collapsenext if not set
 2006  if (slicenext == -1) slicenext = 0
 2007  if (collapsenext == -1) collapsenext = 0
 2008 
 2009  ! Still used in convert
  ! NOTE(review): the statement that belonged here (original line 2010)
  ! is missing from this listing -- confirm against the repository source.
 2011 
 2012  return ! Leave this routine
 2013  else if (mype == 0) then
  ! Recognized version: rewind and parse the full header on rank 0.
 2014  call mpi_file_seek(file_handle, 0_mpi_offset_kind, mpi_seek_set, ierrmpi)
 2015  call snapshot_read_header(file_handle, offset_tree_info, &
 2016  offset_block_data)
 2017  end if
 2018 
 2019  ! Share information about restart file
 2020  call mpi_bcast(nw_found,1,mpi_integer,0,icomm,ierrmpi)
 2021  call mpi_bcast(nleafs,1,mpi_integer,0,icomm,ierrmpi)
 2022  call mpi_bcast(nparents,1,mpi_integer,0,icomm,ierrmpi)
 2023  call mpi_bcast(it,1,mpi_integer,0,icomm,ierrmpi)
 2024  call mpi_bcast(global_time,1,mpi_double_precision,0,icomm,ierrmpi)
 2025 
 2026  call mpi_bcast(snapshotnext,1,mpi_integer,0,icomm,ierrmpi)
 2027  call mpi_bcast(slicenext,1,mpi_integer,0,icomm,ierrmpi)
 2028  call mpi_bcast(collapsenext,1,mpi_integer,0,icomm,ierrmpi)
 2029 
 2030  ! Allocate send/receive buffer
  ! Buffer is sized for a whole block including ghost cells; staggered
  ! face-centered fields (ws) add their own contribution when present.
 2031  n_values = count_ix(ixg^ll) * nw_found
 2032  if(stagger_grid) then
 2033  n_values = n_values + count_ix(ixgs^ll) * nws
 2034  allocate(ws(ixgs^t,1:nws))
 2035  end if
 2036  allocate(w_buffer(n_values))
 2037  allocate(w(ixg^t,1:nw_found))
 2038 
 2040 
 2041  if (mype == 0) then
 2042  call mpi_file_seek(file_handle, offset_tree_info, &
 2043  mpi_seek_set, ierrmpi)
 2044  end if
 2045 
  ! Rebuild the AMR tree (leaf/parent structure) from the file.
 2046  call read_forest(file_handle)
 2047 
  ! Allocate the grid blocks this rank owns according to the space-filling
  ! curve distribution.
 2048  do morton_no=morton_start(mype),morton_stop(mype)
 2049  igrid=sfc_to_igrid(morton_no)
 2050  call alloc_node(igrid)
 2051  end do
 2052 
  ! Rank 0 streams all block data from the file; blocks owned by other
  ! ranks are forwarded via point-to-point messages tagged by Morton number.
 2053  if (mype==0) then
 2054  call mpi_file_seek(file_handle, offset_block_data, mpi_seek_set, ierrmpi)
 2055 
 2056  iread = 0
 2057  do ipe = 0, npe-1
 2058  do morton_no=morton_start(ipe),morton_stop(ipe)
 2059  iread=iread+1
 2060  itag=morton_no
 2061 
 2062  call mpi_file_read(file_handle,ix_buffer(1:2*ndim), 2*ndim, &
 2063  mpi_integer, istatus,ierrmpi)
 2064 
 2065  ! Construct ixO^L array from number of ghost cells
 2066  {ixomin^d = ixmlo^d - ix_buffer(^d)\}
 2067  {ixomax^d = ixmhi^d + ix_buffer(ndim+^d)\}
 2068  n_values = count_ix(ixo^l) * nw_found
 2069  if(stagger_grid) then
 2070  {ixosmin^d = ixomin^d - 1\}
 2071  {ixosmax^d = ixomax^d\}
 2072  n_values_stagger = n_values
 2073  n_values = n_values + count_ix(ixos^l) * nws
 2074  end if
 2075 
 2076  call mpi_file_read(file_handle, w_buffer, n_values, &
 2077  mpi_double_precision, istatus, ierrmpi)
 2078 
 2079  if (mype == ipe) then ! Root task
  ! Local block: unpack straight into the state arrays.
 2080  igrid=sfc_to_igrid(morton_no)
 2081  block=>ps(igrid)
 2082  if(stagger_grid) then
 2083  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values_stagger), &
 2084  shape(w(ixo^s, 1:nw_found)))
 2085  ps(igrid)%ws(ixos^s,1:nws)=reshape(w_buffer(n_values_stagger+1:n_values), &
 2086  shape(ws(ixos^s, 1:nws)))
 2087  else
 2088  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values), &
 2089  shape(w(ixo^s, 1:nw_found)))
 2090  end if
  ! Reconcile a differing variable count in the file (nw_found) with
  ! the compiled-in nw, optionally via the user hook usr_transform_w.
 2091  if (nw_found<nw) then
 2092  if (associated(usr_transform_w)) then
 2093  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
 2094  else
 2095  ps(igrid)%w(ixo^s,1:nw_found)=w(ixo^s,1:nw_found)
 2096  end if
 2097  else if (nw_found>nw) then
 2098  if (associated(usr_transform_w)) then
 2099  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
 2100  else
 2101  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
 2102  end if
 2103  else
 2104  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
 2105  end if
 2106  else
  ! Remote block: forward index metadata and payload to its owner.
 2107  call mpi_send([ ixo^l, n_values ], 2*ndim+1, &
 2108  mpi_integer, ipe, itag, icomm, ierrmpi)
 2109  call mpi_send(w_buffer, n_values, &
 2110  mpi_double_precision, ipe, itag, icomm, ierrmpi)
 2111  end if
 2112  end do
 2113  end do
 2114 
 2115  call mpi_file_close(file_handle,ierrmpi)
 2116 
 2117  else ! mype > 0
 2118 
  ! Non-root ranks receive their blocks from rank 0 and unpack them with
  ! the same nw_found/nw reconciliation as above.
 2119  do morton_no=morton_start(mype),morton_stop(mype)
 2120  igrid=sfc_to_igrid(morton_no)
 2121  block=>ps(igrid)
 2122  itag=morton_no
 2123 
 2124  call mpi_recv(ix_buffer, 2*ndim+1, mpi_integer, 0, itag, icomm,&
 2125  iorecvstatus, ierrmpi)
 2126  {ixomin^d = ix_buffer(^d)\}
 2127  {ixomax^d = ix_buffer(ndim+^d)\}
 2128  n_values = ix_buffer(2*ndim+1)
 2129 
 2130  call mpi_recv(w_buffer, n_values, mpi_double_precision,&
 2131  0, itag, icomm, iorecvstatus, ierrmpi)
 2132 
 2133  if(stagger_grid) then
 2134  n_values_stagger = count_ix(ixo^l) * nw_found
 2135  {ixosmin^d = ixomin^d - 1\}
 2136  {ixosmax^d = ixomax^d\}
 2137  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values_stagger), &
 2138  shape(w(ixo^s, 1:nw_found)))
 2139  ps(igrid)%ws(ixos^s,1:nws)=reshape(w_buffer(n_values_stagger+1:n_values), &
 2140  shape(ws(ixos^s, 1:nws)))
 2141  else
 2142  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values), &
 2143  shape(w(ixo^s, 1:nw_found)))
 2144  end if
 2145  if (nw_found<nw) then
 2146  if (associated(usr_transform_w)) then
 2147  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
 2148  else
 2149  ps(igrid)%w(ixo^s,1:nw_found)=w(ixo^s,1:nw_found)
 2150  end if
 2151  else if (nw_found>nw) then
 2152  if (associated(usr_transform_w)) then
 2153  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
 2154  else
 2155  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
 2156  end if
 2157  else
 2158  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
 2159  end if
 2160  end do
 2161  end if
 2162 
 2163  call mpi_barrier(icomm,ierrmpi)
 2164 
 2165  end subroutine read_snapshot
2166 
 2167  subroutine read_snapshot_old()
 2170 
  ! Scratch copy of one block's conservative variables for forwarding.
 2171  double precision :: wio(ixg^t,1:nw)
 2172  integer :: fh, igrid, Morton_no, iread
 2173  integer :: levmaxini, ndimini, ndirini
 2174  integer :: nwini, neqparini, nxini^D
 2175  integer(kind=MPI_OFFSET_KIND) :: offset
 2176  integer :: istatus(mpi_status_size)
 2177  integer, allocatable :: iorecvstatus(:,:)
 2178  integer :: ipe,inrecv,nrecv
 2179  integer :: sendini(7+^nd)
 2180  character(len=80) :: filename
 2181  logical :: fexist
 2182  double precision :: eqpar_dummy(100)
 2183 
  ! The legacy format stores its header at the END of the file; rank 0
  ! seeks backwards from MPI_SEEK_END to read the trailer fields.
 2184  if (mype==0) then
 2185  call mpi_file_open(mpi_comm_self,trim(restart_from_file), &
 2186  mpi_mode_rdonly,mpi_info_null,fh,ierrmpi)
 2187 
 2188  offset=-int(7*size_int+size_double,kind=mpi_offset_kind)
 2189  call mpi_file_seek(fh,offset,mpi_seek_end,ierrmpi)
 2190 
 2191  call mpi_file_read(fh,nleafs,1,mpi_integer,istatus,ierrmpi)
  ! NOTE(review): one read (original line 2192) is missing from this
  ! listing -- confirm against the repository source.
 2193  call mpi_file_read(fh,levmaxini,1,mpi_integer,istatus,ierrmpi)
 2194  call mpi_file_read(fh,ndimini,1,mpi_integer,istatus,ierrmpi)
 2195  call mpi_file_read(fh,ndirini,1,mpi_integer,istatus,ierrmpi)
 2196  call mpi_file_read(fh,nwini,1,mpi_integer,istatus,ierrmpi)
 2197  call mpi_file_read(fh,neqparini,1,mpi_integer,istatus,ierrmpi)
 2198  call mpi_file_read(fh,it,1,mpi_integer,istatus,ierrmpi)
 2199  call mpi_file_read(fh,global_time,1,mpi_double_precision,istatus,ierrmpi)
 2200 
 2201  ! check if settings are suitable for restart
 2202  if (levmaxini>refine_max_level) then
 2203  write(*,*) "number of levels in restart file = ",levmaxini
 2204  write(*,*) "refine_max_level = ",refine_max_level
 2205  call mpistop("refine_max_level < number of levels in restart file")
 2206  end if
 2207  if (ndimini/=ndim) then
 2208  write(*,*) "ndim in restart file = ",ndimini
 2209  write(*,*) "ndim = ",ndim
 2210  call mpistop("reset ndim to ndim in restart file")
 2211  end if
 2212  if (ndirini/=ndir) then
 2213  write(*,*) "ndir in restart file = ",ndirini
 2214  write(*,*) "ndir = ",ndir
 2215  call mpistop("reset ndir to ndir in restart file")
 2216  end if
 2217  if (nw/=nwini) then
 2218  write(*,*) "nw=",nw," and nw in restart file=",nwini
 2219  call mpistop("currently, changing nw at restart is not allowed")
 2220  end if
 2221 
  ! Step further back past block-size and equation-parameter records.
 2222  offset=offset-int(ndimini*size_int+neqparini*size_double,kind=mpi_offset_kind)
 2223  call mpi_file_seek(fh,offset,mpi_seek_end,ierrmpi)
 2224 
 2225  {call mpi_file_read(fh,nxini^d,1,mpi_integer,istatus,ierrmpi)\}
 2226  if (ixghi^d/=nxini^d+2*nghostcells|.or.) then
 2227  write(*,*) "Error: reset resolution to ",nxini^d+2*nghostcells
 2228  call mpistop("change with setamrvac")
 2229  end if
 2230 
  ! Equation parameters are read but discarded (legacy field).
 2231  call mpi_file_read(fh,eqpar_dummy,neqparini, &
 2232  mpi_double_precision,istatus,ierrmpi)
 2233  end if
 2234 
 2235  ! broadcast the global parameters first
 2236  if (npe>1) then
 2237  if (mype==0) then
 2238  sendini=(/nleafs,levmaxini,ndimini,ndirini,nwini,neqparini,it ,^d&nxini^d /)
 2239  end if
 2240  call mpi_bcast(sendini,7+^nd,mpi_integer,0,icomm,ierrmpi)
 2241  nleafs=sendini(1);levmaxini=sendini(2);ndimini=sendini(3);
 2242  ndirini=sendini(4);nwini=sendini(5);
 2243  neqparini=sendini(6);it=sendini(7);
 2244  nxini^d=sendini(7+^d);
 2246  call mpi_bcast(global_time,1,mpi_double_precision,0,icomm,ierrmpi)
 2247  end if
 2248 
  ! Position rank 0 at the forest metadata, stored after all block data.
 2249  if (mype == 0) then
 2250  offset = int(size_block_io,kind=mpi_offset_kind) * &
 2251  int(nleafs,kind=mpi_offset_kind)
 2252  call mpi_file_seek(fh,offset,mpi_seek_set,ierrmpi)
 2253  end if
 2254 
 2255  call read_forest(fh)
 2256 
  ! Allocate the blocks this rank owns.
 2257  do morton_no=morton_start(mype),morton_stop(mype)
 2258  igrid=sfc_to_igrid(morton_no)
 2259  call alloc_node(igrid)
 2260  end do
 2261 
  ! Rank 0 reads every block at its fixed-size offset; its own blocks go
  ! straight into ps(igrid)%w, others are sent to their owning rank.
 2262  if (mype==0)then
 2263  iread=0
 2264 
 2265  do morton_no=morton_start(0),morton_stop(0)
 2266  igrid=sfc_to_igrid(morton_no)
 2267  iread=iread+1
 2268  offset=int(size_block_io,kind=mpi_offset_kind) &
 2269  *int(morton_no-1,kind=mpi_offset_kind)
 2270  call mpi_file_read_at(fh,offset,ps(igrid)%w,1,type_block_io, &
 2271  istatus,ierrmpi)
 2272  end do
 2273  if (npe>1) then
 2274  do ipe=1,npe-1
 2275  do morton_no=morton_start(ipe),morton_stop(ipe)
 2276  iread=iread+1
 2277  itag=morton_no
 2278  offset=int(size_block_io,kind=mpi_offset_kind)&
 2279  *int(morton_no-1,kind=mpi_offset_kind)
 2280  call mpi_file_read_at(fh,offset,wio,1,type_block_io,&
 2281  istatus,ierrmpi)
 2282  call mpi_send(wio,1,type_block_io,ipe,itag,icomm,ierrmpi)
 2283  end do
 2284  end do
 2285  end if
 2286  call mpi_file_close(fh,ierrmpi)
 2287  else
  ! Non-root ranks receive their own blocks, tagged by Morton number.
 2288  nrecv=(morton_stop(mype)-morton_start(mype)+1)
 2289  allocate(iorecvstatus(mpi_status_size,nrecv))
 2290  inrecv=0
 2291  do morton_no=morton_start(mype),morton_stop(mype)
 2292  igrid=sfc_to_igrid(morton_no)
 2293  itag=morton_no
 2294  inrecv=inrecv+1
 2295  call mpi_recv(ps(igrid)%w,1,type_block_io,0,itag,icomm,&
 2296  iorecvstatus(:,inrecv),ierrmpi)
 2297  end do
 2298  deallocate(iorecvstatus)
 2299  end if
 2300 
 2301  call mpi_barrier(icomm,ierrmpi)
 2302 
 2303  end subroutine read_snapshot_old
2304 
2305  !> Write volume-averaged values and other information to the log file
 2306  subroutine printlog_default
 2308  use mod_timing
 2311 
 2312  logical :: fileopen
 2313  integer :: i, iw, level
 2314  double precision :: wmean(1:nw), total_volume
 2315  double precision :: volume_coverage(refine_max_level)
 2316  integer :: nx^D, nc, ncells, dit
 2317  double precision :: dtTimeLast, now, cellupdatesPerSecond
 2318  double precision :: activeBlocksPerCore, wctPerCodeTime, timeToFinish
 2319  character(len=40) :: fmt_string
 2320  character(len=80) :: filename
 2321  character(len=2048) :: line
  ! 'opened' is saved across calls: the log file is opened once on the
  ! first invocation and kept open afterwards.
 2322  logical, save :: opened = .false.
 2323  integer :: amode, istatus(mpi_status_size)
 2324  integer, parameter :: my_unit = 20
 2325 
 2326  ! Compute the volume-average of w**1 = w
 2327  call get_volume_average(1, wmean, total_volume)
 2328 
 2329  ! Compute the volume coverage
 2330  call get_volume_coverage(volume_coverage)
 2331 
  ! Only rank 0 computes the timing diagnostics and writes the log line.
 2332  if (mype == 0) then
 2333 
 2334  ! To compute cell updates per second, we do the following:
 2335  nx^d=ixmhi^d-ixmlo^d+1;
 2336  nc={nx^d*}
 2337  ncells = nc * nleafs_active
 2338 
 2339  ! assumes the number of active leafs haven't changed since last compute.
 2340  now = mpi_wtime()
 2341  dit = it - ittimelast
 2342  dttimelast = now - timelast
 2343  ittimelast = it
 2344  timelast = now
 2345  cellupdatespersecond = dble(ncells) * dble(nstep) * &
 2346  dble(dit) / (dttimelast * dble(npe))
 2347 
 2348  ! blocks per core:
 2349  activeblockspercore = dble(nleafs_active) / dble(npe)
 2350 
 2351  ! Wall clock time per code time unit in seconds:
  ! epsilon() guards against division by zero when dit*dt is zero.
 2352  wctpercodetime = dttimelast / max(dit * dt, epsilon(1.0d0))
 2353 
 2354  ! Wall clock time to finish in hours:
 2355  timetofinish = (time_max - global_time) * wctpercodetime / 3600.0d0
 2356 
 2357  ! On first entry, open the file and generate the header
 2358  if (.not. opened) then
 2359 
 2360  filename = trim(base_filename) // ".log"
 2361 
 2362  ! Delete the log when not doing a restart run
 2363  if (restart_from_file == undefined) then
 2364  open(unit=my_unit,file=trim(filename),status='replace')
 2365  close(my_unit, status='delete')
 2366  end if
 2367 
  ! Open in append mode so a restarted run continues the existing log.
 2368  amode = ior(mpi_mode_create,mpi_mode_wronly)
 2369  amode = ior(amode,mpi_mode_append)
 2370 
 2371  call mpi_file_open(mpi_comm_self, filename, amode, &
 2372  mpi_info_null, log_fh, ierrmpi)
 2373 
 2374  opened = .true.
 2375 
 2376  ! Start of file header
 2377  line = "it global_time dt"
 2378  do level=1,nw
 2379  i = len_trim(line) + 2
 2380  write(line(i:),"(a,a)") trim(cons_wnames(level)), " "
 2381  end do
 2382 
 2383  ! Volume coverage per level
 2384  do level = 1, refine_max_level
 2385  i = len_trim(line) + 2
 2386  write(line(i:), "(a,i0)") "c", level
 2387  end do
 2388 
 2389  ! Cell counts per level
 2390  do level=1,refine_max_level
 2391  i = len_trim(line) + 2
 2392  write(line(i:), "(a,i0)") "n", level
 2393  end do
 2394 
 2395  ! Rest of file header
 2396  line = trim(line) // " | Xload Xmemory 'Cell_Updates /second/core'"
 2397  line = trim(line) // " 'Active_Blocks/Core' 'Wct Per Code Time [s]'"
 2398  line = trim(line) // " 'TimeToFinish [hrs]'"
 2399 
 2400  ! Only write header if not restarting
 2401  if (restart_from_file == undefined) then
 2402  call mpi_file_write(log_fh, trim(line) // new_line('a'), &
 2403  len_trim(line)+1, mpi_character, istatus, ierrmpi)
 2404  end if
 2405  end if
 2406 
 2407  ! Construct the line to be added to the log
 2408 
 2409  fmt_string = '(' // fmt_i // ',2' // fmt_r // ')'
 2410  write(line, fmt_string) it, global_time, dt
 2411  i = len_trim(line) + 2
 2412 
 2413  write(fmt_string, '(a,i0,a)') '(', nw, fmt_r // ')'
 2414  write(line(i:), fmt_string) wmean(1:nw)
 2415  i = len_trim(line) + 2
 2416 
 2417  write(fmt_string, '(a,i0,a)') '(', refine_max_level, fmt_r // ')'
 2418  write(line(i:), fmt_string) volume_coverage(1:refine_max_level)
 2419  i = len_trim(line) + 2
 2420 
 2421  write(fmt_string, '(a,i0,a)') '(', refine_max_level, fmt_i // ')'
 2422  write(line(i:), fmt_string) nleafs_level(1:refine_max_level)
 2423  i = len_trim(line) + 2
 2424 
 2425  fmt_string = '(a,6' // fmt_r2 // ')'
 2426  write(line(i:), fmt_string) '| ', xload, xmemory, cellupdatespersecond, &
 2427  activeblockspercore, wctpercodetime, timetofinish
 2428 
 2429  call mpi_file_write(log_fh, trim(line) // new_line('a') , &
 2430  len_trim(line)+1, mpi_character, istatus, ierrmpi)
 2431  end if
 2432 
 2433  end subroutine printlog_default
2434 
2435  !> Print a log that can be used to check whether the code still produces the
2436  !> same output (regression test)
2437  subroutine printlog_regression_test()
2439 
2440  integer, parameter :: n_modes = 2
2441  integer, parameter :: my_unit = 123
2442  character(len=40) :: fmt_string
2443  logical, save :: file_open = .false.
2444  integer :: power
2445  double precision :: modes(nw, n_modes), volume
2446 
2447  do power = 1, n_modes
2448  call get_volume_average(power, modes(:, power), volume)
2449  end do
2450 
2451  if (mype == 0) then
2452  if (.not. file_open) then
2453  open(my_unit, file = trim(base_filename) // ".log")
2454  file_open = .true.
2455 
2456  write(my_unit, *) "# time mean(w) mean(w**2)"
2457  end if
2458 
2459  write(fmt_string, "(a,i0,a)") "(", nw * n_modes + 1, fmt_r // ")"
2460  write(my_unit, fmt_string) global_time, modes
2461  end if
2462  end subroutine printlog_regression_test
2463 
2464  !> Compute mean(w**power) over the leaves of the grid. The first mode
2465  !> (power=1) corresponds to the mean, the second to the mean squared values
2466  !> and so on.
2467  subroutine get_volume_average(power, mode, volume)
2469 
2470  integer, intent(in) :: power !< Which mode to compute
2471  double precision, intent(out) :: mode(nw) !< The computed mode
2472  double precision, intent(out) :: volume !< The total grid volume
2473  integer :: iigrid, igrid, iw
2474  double precision :: wsum(nw+1)
2475  double precision :: dsum_recv(1:nw+1)
2476 
2477  wsum(:) = 0
2478 
2479  ! Loop over all the grids
2480  do iigrid = 1, igridstail
2481  igrid = igrids(iigrid)
2482 
2483  ! Store total volume in last element
2484  wsum(nw+1) = wsum(nw+1) + sum(ps(igrid)%dvolume(ixm^t))
2485 
2486  ! Compute the modes of the cell-centered variables, weighted by volume
2487  do iw = 1, nw
2488  wsum(iw) = wsum(iw) + &
2489  sum(ps(igrid)%dvolume(ixm^t)*ps(igrid)%w(ixm^t,iw)**power)
2490  end do
2491  end do
2492 
2493  ! Make the information available on all tasks
2494  call mpi_allreduce(wsum, dsum_recv, nw+1, mpi_double_precision, &
2495  mpi_sum, icomm, ierrmpi)
2496 
2497  ! Set the volume and the average
2498  volume = dsum_recv(nw+1)
2499  mode = dsum_recv(1:nw) / volume
2500 
2501  end subroutine get_volume_average
2502 
2503  !> Compute how much of the domain is covered by each grid level. This routine
2504  !> does not take a non-Cartesian geometry into account.
2505  subroutine get_volume_coverage(vol_cov)
2507 
2508  double precision, intent(out) :: vol_cov(1:refine_max_level)
2509  double precision :: dsum_recv(1:refine_max_level)
2510  integer :: iigrid, igrid, iw, level
2511 
2512  ! First determine the total 'flat' volume in each level
2513  vol_cov(1:refine_max_level)=zero
2514 
2515  do iigrid = 1, igridstail
2516  igrid = igrids(iigrid);
2517  level = node(plevel_,igrid)
2518  vol_cov(level) = vol_cov(level)+ &
2519  {(rnode(rpxmax^d_,igrid)-rnode(rpxmin^d_,igrid))|*}
2520  end do
2521 
2522  ! Make the information available on all tasks
2523  call mpi_allreduce(vol_cov, dsum_recv, refine_max_level, mpi_double_precision, &
2524  mpi_sum, icomm, ierrmpi)
2525 
2526  ! Normalize
2527  vol_cov = dsum_recv / sum(dsum_recv)
2528  end subroutine get_volume_coverage
2529 
2530  !> Compute the volume average of func(w) over the leaves of the grid.
2531  subroutine get_volume_average_func(func, f_avg, volume)
2533 
2534  interface
2535  pure function func(w_vec, w_size) result(val)
2536  integer, intent(in) :: w_size
2537  double precision, intent(in) :: w_vec(w_size)
2538  double precision :: val
2539  end function func
2540  end interface
2541  double precision, intent(out) :: f_avg !< The volume average of func
2542  double precision, intent(out) :: volume !< The total grid volume
2543  integer :: iigrid, igrid, i^D
2544  double precision :: wsum(2)
2545  double precision :: dsum_recv(2)
2546 
2547  wsum(:) = 0
2548 
2549  ! Loop over all the grids
2550  do iigrid = 1, igridstail
2551  igrid = igrids(iigrid)
2552 
2553  ! Store total volume in last element
2554  wsum(2) = wsum(2) + sum(ps(igrid)%dvolume(ixm^t))
2555 
2556  ! Compute the modes of the cell-centered variables, weighted by volume
2557  {do i^d = ixmlo^d, ixmhi^d\}
2558  wsum(1) = wsum(1) + ps(igrid)%dvolume(i^d) * &
2559  func(ps(igrid)%w(i^d, :), nw)
2560  {end do\}
2561  end do
2562 
2563  ! Make the information available on all tasks
2564  call mpi_allreduce(wsum, dsum_recv, 2, mpi_double_precision, &
2565  mpi_sum, icomm, ierrmpi)
2566 
2567  ! Set the volume and the average
2568  volume = dsum_recv(2)
2569  f_avg = dsum_recv(1) / volume
2570 
2571  end subroutine get_volume_average_func
2572 
2573  !> Compute global maxima of iw variables over the leaves of the grid.
2574  subroutine get_global_maxima(wmax)
2576 
2577  double precision, intent(out) :: wmax(nw) !< The global maxima
2578 
2579  integer :: iigrid, igrid, iw
2580  double precision :: wmax_mype(nw),wmax_recv(nw)
2581 
2582  wmax_mype(1:nw) = -bigdouble
2583 
2584  ! Loop over all the grids
2585  do iigrid = 1, igridstail
2586  igrid = igrids(iigrid)
2587  do iw = 1, nw
2588  wmax_mype(iw)=max(wmax_mype(iw),maxval(ps(igrid)%w(ixm^t,iw)))
2589  end do
2590  end do
2591 
2592  ! Make the information available on all tasks
2593  call mpi_allreduce(wmax_mype, wmax_recv, nw, mpi_double_precision, &
2594  mpi_max, icomm, ierrmpi)
2595 
2596  wmax(1:nw)=wmax_recv(1:nw)
2597 
2598  end subroutine get_global_maxima
2599 
2600  !> Compute global minima of iw variables over the leaves of the grid.
2601  subroutine get_global_minima(wmin)
2603 
2604  double precision, intent(out) :: wmin(nw) !< The global maxima
2605 
2606  integer :: iigrid, igrid, iw
2607  double precision :: wmin_mype(nw),wmin_recv(nw)
2608 
2609  wmin_mype(1:nw) = bigdouble
2610 
2611  ! Loop over all the grids
2612  do iigrid = 1, igridstail
2613  igrid = igrids(iigrid)
2614  do iw = 1, nw
2615  wmin_mype(iw)=min(wmin_mype(iw),minval(ps(igrid)%w(ixm^t,iw)))
2616  end do
2617  end do
2618 
2619  ! Make the information available on all tasks
2620  call mpi_allreduce(wmin_mype, wmin_recv, nw, mpi_double_precision, &
2621  mpi_min, icomm, ierrmpi)
2622 
2623  wmin(1:nw)=wmin_recv(1:nw)
2624 
2625  end subroutine get_global_minima
2626 
2627 
2628 
2629 end module mod_input_output
integer, parameter cylindrical
Definition: mod_geometry.t:9
This module contains definitions of global parameters and variables and some generic functions/subrou...
character(len=std_len) prolongation_method
integer, parameter cartesian_stretched
Definition: mod_geometry.t:8
integer direction_los
direction of light of sight
logical need_global_a2max
global value for schmid scheme
character(len=std_len) time_integrator
Which time integrator to use.
Module for reading input and writing output.
character(len=std_len) time_stepper
Which time stepper to use.
double precision ars_gamma
IMEX_ARS3 parameter ars_gamma.
double precision, dimension(:), allocatable refine_threshold
Error tolerance for refinement decision.
character(len=std_len), dimension(:), allocatable typepred1
The spatial discretization for the predictor step when using a two step PC method.
character(len= *), parameter fmt_r
logical, dimension(:), allocatable w_write
integer domain_nx
number of cells for each dimension in level-one mesh
double precision time_max
End time for the simulation.
double precision, dimension(:), allocatable w_refine_weight
Weights of variables used to calculate error for mesh refinement.
character(len=std_len) typetvd
Which type of TVD method to use.
logical saveprim
If true, convert from conservative to primitive variables in output.
double precision, dimension(ndim) qstretch_baselevel
stretch factor between cells at AMR level 1, per dimension
integer max_blocks
The maximum number of grid blocks in a processor.
The data structure that contains information about a tree node/grid block.
Definition: mod_forest.t:11
type(tree_node_ptr), dimension(:,:), allocatable, save igrid_to_node
Array to go from an [igrid, ipe] index to a node pointer.
Definition: mod_forest.t:32
character(len=name_len) physics_type
String describing the physics type of the simulation.
Definition: mod_physics.t:14
logical slab_uniform
uniform Cartesian geometry or not (stretched Cartesian)
integer, parameter stretch_uni
Unidirectional stretching from a side.
logical, dimension(ndim) collapse
If collapse(DIM) is true, generate output integrated over DIM.
double precision, dimension(:,:), allocatable dxmid
integer, parameter filecollapse_
Constant indicating collapsed output.
subroutine get_global_maxima(wmax)
Compute global maxima of iw variables over the leaves of the grid.
integer, dimension(:), allocatable type_gradient_limiter
Type of slope limiter used for computing gradients or divergences, when typegrad or typediv are set t...
double precision xprob
minimum and maximum domain boundaries for each dimension
double precision, dimension(:), allocatable dg
extent of grid blocks in domain per dimension, in array over levels
double precision, dimension(ndim, 2) writespshift
double precision time_convert_factor
Conversion factor for time unit.
logical need_global_cmax
need global maximal wave speed
character(len=std_len) convert_type
Which format to use when converting.
Module with geometry-related routines (e.g., divergence, curl)
Definition: mod_geometry.t:2
integer collapselevel
The level at which to produce line-integrated / collapsed output.
integer, parameter plevel_
subroutine, public kracken(verb, string)
Definition: mod_kracken.t:198
integer ndir
Number of spatial dimensions (components) for vector variables.
character(len=std_len) collapse_type
Writes D-1 slice, can do so in various formats, depending on slice_type.
Definition: mod_slice.t:2
character(len=std_len) typedimsplit
integer nstep
How many sub-steps the time integrator takes.
integer coordinate
Definition: mod_geometry.t:6
integer, dimension(:), allocatable type_limiter
Type of slope limiter used for reconstructing variables on cell edges.
double precision, dimension(:), allocatable amr_wavefilter
refinement: lohner estimate wavefilter setting
logical resume_previous_run
If true, restart a previous run from the latest snapshot.
subroutine read_arguments()
Read the command line arguments passed to amrvac.
double precision, dimension(:), allocatable entropycoef
double precision wall_time_max
Ending wall time (in hours) for the simulation.
integer it_init
initial iteration count
integer it_max
Stop the simulation after this many time steps have been taken.
double precision dtmin
Stop the simulation when the time step becomes smaller than this value.
character(len=std_len) base_filename
Base file name for simulation output, which will be followed by a number.
integer direction_slit
direction of slit (for spectrum)
Module with basic grid data structures.
Definition: mod_forest.t:2
character(len= *), parameter undefined
character(len=std_len) usr_filename
User parameter file.
integer, parameter nsavehi
Maximum number of saves that can be defined by tsave or itsave.
character(len=std_len) typeboundspeed
Which type of TVDLF method to use.
character(len=std_len) typeprolonglimit
Limiter used for prolongation to refined grids and ghost cells.
subroutine snapshot_read_header(fh, offset_tree, offset_block)
Read header for a snapshot.
subroutine, public retrev(name, val, len, ier)
Definition: mod_kracken.t:56
subroutine block_shape_io(igrid, n_ghost, ixOL, n_values)
Determine the shape of a block for output (whether to include ghost cells, and on which sides) ...
Collapses D-dimensional output to D-1 view by line-of-sight integration.
Definition: mod_collapse.t:4
double precision timelast
Definition: mod_timing.t:16
integer npe
The number of MPI tasks.
logical, dimension(:), allocatable writelevel
double precision courantpar
The Courant (CFL) number used for the simulation.
integer, dimension(:,:), allocatable, save sfc
Array to go from a Morton number to an igrid and processor index. Sfc(1:3, MN) contains [igrid...
Definition: mod_forest.t:43
character(len=std_len), dimension(:), allocatable flux_scheme
Which spatial discretization to use (per grid level)
integer, parameter fileanalysis_
Constant indicating analysis output (see Writing a custom analysis subroutine)
integer, dimension(3, 3, 3) lvc
Levi-Civita tensor.
double precision small_density
logical, dimension(ndim) stretched_dim
True if a dimension is stretched.
logical, dimension(ndim) aperiodb
True for dimensions with aperiodic boundaries.
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
integer, public small_values_daverage
Average over this many cells in each direction.
subroutine read_snapshot
Routine to read in snapshots (.dat files). When it cannot recognize the file version, it will automatically try the 'old' reader.
integer, dimension(:), allocatable ng
number of grid blocks in domain per dimension, in array over levels
integer, dimension(ndim) stretch_type
What kind of stretching is used per dimension.
integer, dimension(nfile) isavet
integer nw_found
number of w found in dat files
Module with slope/flux limiters.
Definition: mod_limiter.t:2
subroutine get_volume_average(power, mode, volume)
Compute mean(w**power) over the leaves of the grid. The first mode (power=1) corresponds to the mean...
subroutine write_forest(file_handle)
Definition: forest.t:283
logical pass_wall_time
If true, wall time is up, modify snapshotnext for later overwrite.
integer nghostcells
Number of ghost cells surrounding a grid.
integer nslices
Number of slices to output.
Definition: mod_slice.t:16
double precision dtdiffpar
For resistive MHD, the time step is also limited by the diffusion time: .
procedure(p_no_args), pointer usr_print_log
Module with all the methods that users can customize in AMRVAC.
integer, dimension(:), allocatable, save sfc_to_igrid
Go from a Morton number to an igrid index (for a single processor)
Definition: mod_forest.t:53
logical, dimension(:), allocatable loglimit
integer ixghi
Upper index of grid block arrays.
integer type_block_io
MPI type for IO: block excluding ghost cells.
integer function limiter_type(namelim)
Definition: mod_limiter.t:45
character(len=std_len) typeghostfill
double precision xload
Stores the memory and load imbalance, used in printlog.
character(len=std_len) filename_euv
Base file name for synthetic EUV emission output.
integer ditregrid
Reconstruct the AMR grid once every ditregrid iteration(s)
character(len=std_len) geometry_name
integer rk3_switch
RK3 Butcher table.
double precision cada3_radius
radius of the asymptotic region [0.001, 10], larger means more accurate in smooth region but more ove...
Definition: mod_limiter.t:13
logical angmomfix
Enable to strictly conserve the angular momentum (works both in cylindrical and spherical coordinates...
double precision small_pressure
character(len=std_len) typesourcesplit
How to apply dimensional splitting to the source terms, see Discretization.
integer, parameter fileslice_
Constant indicating slice output.
subroutine printlog_default
Write volume-averaged values and other information to the log file.
logical stagger_grid
True for using stagger grid.
subroutine write_slice
Definition: mod_slice.t:30
logical save_physical_boundary
True for save physical boundary cells in dat files.
integer, parameter cartesian
Definition: mod_geometry.t:7
character(len=std_len) resolution_euv
resolution of the output
integer ssprk_order
SSPRK choice of methods (both threestep and fourstep, Shu-Osher 2N* implementation) also fivestep SSP...
logical nocartesian
IO switches for conversion.
integer ierrmpi
A global MPI error return code.
integer ixm
the mesh range (within a block with ghost cells)
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
character(len=std_len), dimension(:, :), allocatable typeboundary
Array indicating the type of boundary condition per variable and per physical boundary.
subroutine get_global_minima(wmin)
Compute global minima of iw variables over the leaves of the grid.
character(len=std_len), dimension(:), allocatable typeentropy
Which type of entropy fix to use with Riemann-type solvers.
double precision dtpar
If dtpar is positive, it sets the timestep dt, otherwise courantpar is used to limit the time step ba...
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
logical image
output image
double precision, dimension(:), allocatable w_convert_factor
Conversion factors for the primitive variables.
character(len=std_len) typefilelog
Which type of log to write: 'normal', 'special', 'regression_test'.
integer, dimension(:), allocatable, save morton_start
First Morton number per processor.
Definition: mod_forest.t:62
procedure(p_no_args), pointer usr_write_analysis
subroutine alloc_node(igrid)
allocate arrays on igrid node
logical final_dt_reduction
If true, allow final dt reduction for matching time_max on output.
integer, parameter unitterm
Unit for standard output.
integer, parameter filelog_
Constant indicating log output.
integer, parameter unitpar
integer nbufferx
Number of cells as buffer zone.
integer, dimension(3, 3) kr
Kronecker delta tensor.
double precision, dimension(:,:), allocatable dx
integer nleafs_active
Definition: mod_forest.t:78
character(len=std_len) typegrad
logical check_small_values
check and optionally fix unphysical small values (density, gas pressure)
double precision time_between_print
to monitor timeintegration loop at given wall-clock time intervals
integer, dimension(:), allocatable, parameter d
integer, dimension(:), allocatable, save morton_stop
Last Morton number per processor.
Definition: mod_forest.t:65
logical reset_it
If true, reset iteration count to 0.
subroutine write_collapsed
Definition: mod_collapse.t:11
character(len=40), dimension(nfile), parameter output_names
Names of the output methods.
integer mype
The rank of the current MPI task.
subroutine read_forest(file_handle)
Definition: forest.t:318
double precision global_time
The global simulation time.
integer, dimension(:,:), allocatable nstretchedblocks
(even) number of (symmetrically) stretched blocks per level and dimension
integer block_nx
number of cells for each dimension in grid block excluding ghostcells
subroutine saveamrfile(ifile)
subroutine read_par_files()
Read in the user-supplied parameter-file.
integer, parameter stretch_symm
Symmetric stretching around the center.
logical reset_time
If true, reset iteration count and global_time to original values, and start writing snapshots at ind...
integer, parameter unitsnapshot
logical function, public lget(keyword)
Definition: mod_kracken.t:161
character(len=std_len), dimension(:), allocatable par_files
Which par files are used as input.
double precision, dimension(nfile) dtsave
Repeatedly save output of type N when dtsave(N) simulation time has passed.
character(len=std_len) slice_type
choose data type of slice: vtu, vtuCC, dat, or csv
Definition: mod_slice.t:22
Module for handling problematic values in simulations, such as negative pressures.
logical use_particles
Use particles module or not.
integer, dimension(:), allocatable, save nleafs_level
How many leaves are present per refinement level.
Definition: mod_forest.t:81
double precision, dimension(nfile) tsavestart
Start of read out (not counting specified read outs)
double precision, dimension(:,:), allocatable qstretch
Stretching factors and first cell size for each AMR level and dimension.
integer, parameter uniterr
Unit for error messages.
subroutine get_fields_string(line, delims, n_max, fields, n_found, fully_read)
Routine to find entries in a string.
Module containing all the particle routines.
Definition: mod_particles.t:2
subroutine mpistop(message)
Exit MPI-AMRVAC with an error message.
Definition: comm_lib.t:198
double precision, dimension(:,:), allocatable dxfirst_1mq
logical firstprocess
If true, call initonegrid_usr upon restarting.
double precision tfixgrid
Fix the AMR grid after this time.
character(len= *), parameter fmt_i
character(len= *), parameter fmt_r2
double precision length_convert_factor
integer iprob
problem switch allowing different setups in same usr_mod.t
double precision, dimension(ndim) dxlevel
double precision, dimension(nsavehi, nfile) tsave
Save output of type N on times tsave(:, N)
subroutine get_volume_average_func(func, f_avg, volume)
Compute the volume average of func(w) over the leaves of the grid.
integer, dimension(nfile) isaveit
integer snapshotini
Resume from the snapshot with this index.
integer function, public iget(keyword)
Definition: mod_kracken.t:141
integer, dimension(nslicemax) slicedir
The slice direction for each slice.
Definition: mod_slice.t:19
integer, parameter ndim
Number of spatial dimensions for grid variables.
subroutine read_snapshot_old()
integer ittimelast
Definition: mod_timing.t:15
integer phys_wider_stencil
To use wider stencils in flux calculations. A value of 1 will extend it by one cell in both direction...
Definition: mod_physics.t:18
logical stretch_uncentered
If true, adjust mod_geometry routines to account for grid stretching (but the flux computation will n...
integer imex_switch
IMEX_232 choice and parameters.
subroutine generate_plotfile
Definition: convert.t:3
double precision schmid_rad
Definition: mod_limiter.t:14
logical slab
Cartesian geometry or not.
double precision, dimension(nslicemax) slicecoord
Slice coordinates, see Slice output.
Definition: mod_slice.t:10
integer function get_snapshot_index(filename)
integer slowsteps
If > 1, then in the first slowsteps-1 time steps dt is reduced by a factor .
integer, parameter stretch_none
No stretching.
logical use_imex_scheme
whether IMEX in use or not
procedure(transform_w), pointer usr_transform_w
integer nwauxio
Number of auxiliary variables that are only included in output.
logical, dimension(:), allocatable logflag
integer icomm
The MPI communicator.
double precision small_temperature
error handling
logical, public trace_small_values
trace small values in the source file using traceback flag of compiler
logical, dimension(:), allocatable, public small_values_fix_iw
Whether to apply small value fixes to certain variables.
character(len=std_len) typediv
integer, parameter nlevelshi
The maximum number of levels in the grid refinement.
logical phys_energy
Solve energy equation or not.
Definition: mod_physics.t:25
integer, save nparents
Number of parent blocks.
Definition: mod_forest.t:73
integer refine_max_level
Maximal number of AMR levels.
This module defines the procedures of a physics module. It contains function pointers for the various...
Definition: mod_physics.t:4
logical, dimension(ndim) periodb
True for dimensions with periodic boundaries.
subroutine snapshot_write_header(fh, offset_tree, offset_block)
Write header for a snapshot.
integer, parameter nfile
Number of output methods.
integer log_fh
MPI file handle for logfile.
double precision location_slit
location of the slit
subroutine write_snapshot
logical reset_grid
If true, rebuild the AMR grid upon restarting.
logical source_split_usr
Use split or unsplit way to add user's source terms, default: unsplit.
integer, dimension(:,:), allocatable node
procedure(sub_get_aux), pointer phys_get_aux
Definition: mod_physics.t:77
integer, parameter spherical
Definition: mod_geometry.t:10
integer itfixgrid
Fix the AMR grid after this many time steps.
character(len=std_len) typecourant
How to compute the CFL-limited time step.
double precision, dimension(:,:), allocatable dxfirst
logical function snapshot_exists(ix)
integer, save nleafs
Number of leaf blocks.
Definition: mod_forest.t:76
logical trac
Use TRAC (Johnston 2019 ApJL, 873, L22) for MHD or 1D HD.
subroutine get_volume_coverage(vol_cov)
Compute how much of the domain is covered by each grid level. This routine does not take a non-Cartes...
integer, dimension(3), parameter compatible_versions
List of compatible versions.
integer it
Number of time steps taken.
integer, dimension(nfile) ditsave
Repeatedly save output of type N when ditsave(N) time steps have passed.
subroutine printlog_regression_test()
Print a log that can be used to check whether the code still produces the same output (regression tes...
subroutine create_output_file(fh, ix, extension, suffix)
Standard method for creating a new output file.
character(len=20), public small_values_method
How to handle small values.
double precision rk2_alfa
RK2(alfa) method parameters from Butcher tableau.
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
double precision, dimension(:), allocatable derefine_ratio
integer, dimension(ndim) nstretchedblocks_baselevel
(even) number of (symmetrically) stretched blocks at AMR level 1, per dimension
double precision time_init
Start time for the simulation.
character(len=std_len) typeaverage
integer ixgshi
Upper index of stagger grid block arrays.
Module for reading command line arguments.
Definition: mod_kracken.t:7
integer, parameter fileout_
Constant indicating regular output.
logical final_dt_exit
Force timeloop exit when final dt < dtmin.
integer, dimension(nsavehi, nfile) itsave
Save output of type N on iterations itsave(:, N)
pure integer function count_ix(ixOL)
Compute number of elements in index range.
integer slicenext
the file number of slices
Definition: mod_slice.t:13
logical spectrum
output spectrum
logical autoconvert
If true, already convert to output format during the run.
character(len=std_len) typecurl
procedure(sub_write_info), pointer phys_write_info
Definition: mod_physics.t:81
integer, parameter version_number
Version number of the .dat file output.