MPI-AMRVAC  2.0
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_input_output.t
Go to the documentation of this file.
1 !> Module for reading input and writing output
3 
! Module-level declarations for the input/output module.
! NOTE(review): the enclosing 'module mod_input_output' statement (original
! source line 2) is elided from this listing - confirm against upstream.
4  implicit none
5  public
6 
7  !> Version number of the .dat file output
8  integer, parameter :: version_number = 4
9 
10  !> List of compatible versions
! Snapshots written with any of these .dat versions can still be read.
11  integer, parameter :: compatible_versions(2) = [3, 4]
12 
13  !> number of w found in dat files
! Set while reading a snapshot; compared against the current nw elsewhere
! (assumption - the reader code is not visible in this chunk, TODO confirm).
14  integer :: nw_found
15 
16  ! Formats used in output
17  character(len=*), parameter :: fmt_r = 'es16.8' ! Default precision
18  character(len=*), parameter :: fmt_r2 = 'es10.2' ! Two digits
19  character(len=*), parameter :: fmt_i = 'i8' ! Integer format
20 
21 contains
22 
23  !> Read the command line arguments passed to amrvac
24  subroutine read_arguments()
! Parse amrvac's command line: collect the .par file list (-i), an optional
! restart snapshot (-if), manual output indices, and the -convert/-resume
! flags, storing the results in module/global variables.
! NOTE(review): one or more 'use' statements (original lines 25-26,
! presumably 'use mod_global_parameters' or similar providing mype, std_len,
! undefined, ierrmpi, ...) are elided from this listing - confirm upstream.
27  use mod_slice, only: slicenext
28 
! NOTE(review): locals ibegin, iterm, resume and n are never referenced in
! the visible body - candidates for removal in a code-changing pass.
29  integer :: len, ier, n
30  integer, parameter :: max_files = 20 ! Maximum number of par files
31  integer :: n_par_files
32  integer :: ibegin(max_files)
33  integer :: iterm(max_files)
34  character(len=max_files*std_len) :: all_par_files
35  character(len=std_len) :: tmp_files(max_files)
36  logical :: resume
37 
! Print the startup banner once, on the root rank only.
38  if (mype == 0) then
39  print *, '-----------------------------------------------------------------------------'
40  print *, '-----------------------------------------------------------------------------'
41  print *, '| __ __ ____ ___ _ __ __ ______ ___ ____ |'
42  print *, '| | \/ | _ \_ _| / \ | \/ | _ \ \ / / \ / ___| |'
43  print *, '| | |\/| | |_) | |_____ / _ \ | |\/| | |_) \ \ / / _ \| | |'
44  print *, '| | | | | __/| |_____/ ___ \| | | | _ < \ V / ___ \ |___ |'
45  print *, '| |_| |_|_| |___| /_/ \_\_| |_|_| \_\ \_/_/ \_\____| |'
46  print *, '-----------------------------------------------------------------------------'
47  print *, '-----------------------------------------------------------------------------'
48  end if
49 
50  ! Specify the options and their default values
! kracken/retrev/lget/iget appear to be the 'kracken' command-line parsing
! package (verb 'cmd'); values are fetched below by option name - TODO confirm.
51  call kracken('cmd','-i amrvac.par -if ' // undefined // &
52  ' -slicenext -1 -collapsenext -1 -snapshotnext -1' // &
53  ' --help .false. -convert .false. -resume .false.')
54 
55  ! Get the par file(s)
! 'len' receives the length of the retrieved -i string (0 if absent).
56  call retrev('cmd_i', all_par_files, len, ier)
57 
58  ! Show the usage if the help flag was given, or no par file was specified
59  if (lget('cmd_-help') .or. len == 0) then
60  if (mype == 0) then
61  print *, 'Usage example:'
62  print *, 'mpirun -np 4 ./amrvac -i file.par [file2.par ...]'
63  print *, ' (later .par files override earlier ones)'
64  print *, ''
65  print *, 'Optional arguments:'
! NOTE(review): '-resume' is listed twice in this help text (here and
! below) - likely an editing leftover; strings untouched in this pass.
66  print *, '-resume Resume previous run'
67  print *, '-convert Convert snapshot files'
68  print *, '-if file0001.dat Use this snapshot to restart from'
69  print *, ' (you can modify e.g. output names)'
70  print *, '-resume Automatically resume previous run'
71  print *, ' (useful for long runs on HPC systems)'
72  print *, '-snapshotnext N Manual index for next snapshot'
73  print *, '-slicenext N Manual index for next slice output'
74  print *, '-collapsenext N Manual index for next collapsed output'
75  print *, ''
76  end if
! Shut MPI down cleanly on every rank before exiting the help path.
77  call mpi_finalize(ierrmpi)
78  stop
79  end if
80 
81  ! Split the input files, in case multiple were given
! Delimiters: space, comma, single/double quote and TAB (char(9)).
82  call get_fields_string(all_par_files, " ,'"""//char(9), max_files, &
83  tmp_files, n_par_files)
84 
85  allocate(par_files(n_par_files))
86  par_files = tmp_files(1:n_par_files)
87 
88  ! Read in the other command line arguments
89  call retrev('cmd_if', restart_from_file, len, ier)
90 
91  !> \todo Document these command line options
92  slicenext = iget('cmd_slicenext')
93  collapsenext = iget('cmd_collapsenext')
94  snapshotnext = iget('cmd_snapshotnext')
95  convert = lget('cmd_convert') ! -convert present?
96  resume_previous_run = lget('cmd_resume') ! -resume present?
97 
98  end subroutine read_arguments
99 
100  !> Read in the user-supplied parameter-file
101  subroutine read_par_files()
! Read all user-supplied .par namelist files (in order, later files
! overriding earlier ones), after first installing defaults for every
! settable parameter; then validate and harmonize the resulting settings
! (boundary types, AMR levels, grid stretching, output schedules, ...).
! NOTE(review): this listing is a doxygen scrape with many interior lines
! elided (namelist continuations, some 'use' lines, an 'if' guard near the
! nlevelshi check, an allocate continuation in the stretching section).
! Code below is kept byte-identical; elisions are flagged where visible.
104  use mod_small_values
105  use mod_limiter
106  use mod_slice
107 
108  logical :: fileopen, file_exists
109  integer :: i, j, k, ifile, io_state
110  integer :: iB, isave, iw, level, idim, islice
! ^nd / ^D / {...\} appear to be AMRVAC's LASY preprocessor notation,
! expanded per spatial dimension at build time - TODO confirm with the
! AMRVAC preprocessor documentation.
111  integer :: nx_vec(^nd), block_nx_vec(^nd)
112  integer :: my_unit, iostate
113  integer :: ilev
114  double precision :: dx_vec(^nd)
115 
116  character :: c_ndim
117  character(len=80) :: fmt_string
118  character(len=std_len) :: err_msg, restart_from_file_arg
119  character(len=std_len) :: basename_full, basename_prev, dummy_file
120  character(len=std_len), dimension(:), allocatable :: &
121  typeboundary_min^D, typeboundary_max^D
122  character(len=std_len), allocatable :: limiter(:)
123  character(len=std_len), allocatable :: gradient_limiter(:)
124  character(len=name_len) :: stretch_dim(ndim)
125 
! Per-output-type schedule scratch variables, folded into tsave/dtsave/
! ditsave arrays after the namelists are read.
126  double precision, dimension(nsavehi) :: tsave_log, tsave_dat, tsave_slice, &
127  tsave_collapsed, tsave_custom
128  double precision :: dtsave_log, dtsave_dat, dtsave_slice, &
129  dtsave_collapsed, dtsave_custom
130  integer :: ditsave_log, ditsave_dat, ditsave_slice, &
131  ditsave_collapsed, ditsave_custom
132  integer :: windex, ipower
133  double precision :: sizeuniformpart^D
134 
! NOTE(review): the namelist group definitions below are truncated in this
! listing - continuation lines (original lines 136-141, 144, 149-151,
! 153-168, 171, 173-177, 179, including the whole /stoplist/ and
! /boundlist/ groups read later) are elided. Do not edit from this view.
135  namelist /filelist/ base_filename,restart_from_file, &
142 
143  namelist /savelist/ tsave,itsave,dtsave,ditsave,nslices,slicedir, &
145  tsave_log, tsave_dat, tsave_slice, tsave_collapsed, tsave_custom, &
146  dtsave_log, dtsave_dat, dtsave_slice, dtsave_collapsed, dtsave_custom, &
147  ditsave_log, ditsave_dat, ditsave_slice, ditsave_collapsed, ditsave_custom
148 
151 
152  namelist /methodlist/ time_integrator, &
156  limiter,gradient_limiter,cada3_radius,&
161  flatcd,flatsh,&
166 
169 
170  namelist /meshlist/ refine_max_level,nbufferx^d,refine_threshold,&
172  stretch_dim, stretch_uncentered, &
178  namelist /paramlist/ courantpar, dtpar, dtdiffpar, &
180 
181  ! default maximum number of grid blocks in a processor
182  max_blocks=4000
183 
184  ! allocate cell size of all levels
185  allocate(dx(ndim,nlevelshi))
186  {allocate(dg^d(nlevelshi))\}
187  {allocate(ng^d(nlevelshi))\}
188 
189  ! default block size excluding ghost cells
190  {block_nx^d = 16\}
191 
192  ! default resolution of level-1 mesh (full domain)
193  {domain_nx^d = 32\}
194 
195  ! defaults for boundary treatments
196  typeghostfill = 'linear'
197 
198  ! default number of ghost-cell layers at each boundary of a block
199  nghostcells = 2
200 
201  ! Allocate boundary conditions arrays in new and old style
202  {
203  allocate(typeboundary_min^d(nwfluxbc))
204  allocate(typeboundary_max^d(nwfluxbc))
205  typeboundary_min^d = undefined
206  typeboundary_max^d = undefined
207  }
208 
209  allocate(typeboundary(nwflux, 2 * ndim))
210  typeboundary(:, :) = undefined
211 
212  ! not save physical boundary in dat files by default
213  save_physical_boundary = .false.
214 
215  internalboundary = .false.
216 
217  ! defaults for specific options
218  typegrad = 'central'
219  typediv = 'central'
220  typecurl = 'central'
221 
222  ! defaults for smallest physical values allowed
223  small_temperature = 0.d0
224  small_pressure = 0.d0
225  small_density = 0.d0
226 
227  allocate(small_values_fix_iw(nw))
228  small_values_fix_iw(:) = .true.
229 
230  ! defaults for convert behavior
231 
232  ! store the -if value from argument in command line
! read_arguments() has already run and may have set restart_from_file;
! it is saved here so it can override the par-file value after reading.
233  restart_from_file_arg = restart_from_file
234  nwauxio = 0
235  nocartesian = .false.
236  saveprim = .false.
237  autoconvert = .false.
238  convert_type = 'vtuBCCmpi'
239  slice_type = 'vtuCC'
240  collapse_type = 'vti'
241  allocate(w_write(nw))
242  w_write(1:nw) = .true.
243  allocate(writelevel(nlevelshi))
244  writelevel(1:nlevelshi) = .true.
245  writespshift(1:ndim,1:2) = zero
246  level_io = -1
247  level_io_min = 1
249 
250  ! normalization of primitive variables: only for output
251  ! note that length_convert_factor is for length
252  ! this scaling is optional, and must be set consistently if used
253  allocate(w_convert_factor(nw))
254  w_convert_factor(:) = 1.0d0
255  time_convert_factor = 1.0d0
256  length_convert_factor = 1.0d0
257 
258  ! AMR related defaults
259  refine_max_level = 1
260  {nbufferx^d = 0\}
261  allocate(refine_threshold(nlevelshi))
262  refine_threshold(1:nlevelshi) = 0.1d0
263  allocate(derefine_ratio(nlevelshi))
264  derefine_ratio(1:nlevelshi) = 1.0d0/8.0d0
265  prolongation_method = 'linear'
266  coarsenprimitive = .false.
267  prolongprimitive = .false.
268  typeprolonglimit = 'default'
269  refine_criterion = 3
270  allocate(w_refine_weight(nw+1))
271  w_refine_weight = 0.d0
272  allocate(logflag(nw+1))
273  logflag = .false.
274  allocate(amr_wavefilter(nlevelshi))
275  amr_wavefilter(1:nlevelshi) = 1.0d-2
276  tfixgrid = bigdouble
277  itfixgrid = biginteger
278  ditregrid = 1
279 
280  ! Grid stretching defaults
281  stretch_uncentered = .true.
282  stretch_dim(1:ndim) = undefined
283  qstretch_baselevel(1:ndim) = bigdouble
285 
286  ! IO defaults
287  it_init = 0
288  it_max = biginteger
289  time_init = 0.d0
290  time_max = bigdouble
291  wall_time_max = bigdouble
292  dtmin = 1.0d-10
293  nslices = 0
294  collapse = .false.
295  collapselevel = 1
296  time_between_print = 30.0d0 ! Print status every 30 seconds
297 
298  do ifile=1,nfile
299  do isave=1,nsavehi
300  tsave(isave,ifile) = bigdouble ! global_time of saves into the output files
301  itsave(isave,ifile) = biginteger ! it of saves into the output files
302  end do
303  dtsave(ifile) = bigdouble ! time between saves
304  ditsave(ifile) = biginteger ! timesteps between saves
305  isavet(ifile) = 1 ! index for saves by global_time
306  isaveit(ifile) = 1 ! index for saves by it
307  end do
308 
! bigdouble/biginteger act as "unset" sentinels for the per-type schedules.
309  tsave_log = bigdouble
310  tsave_dat = bigdouble
311  tsave_slice = bigdouble
312  tsave_collapsed = bigdouble
313  tsave_custom = bigdouble
314 
315  dtsave_log = bigdouble
316  dtsave_dat = bigdouble
317  dtsave_slice = bigdouble
318  dtsave_collapsed = bigdouble
319  dtsave_custom = bigdouble
320 
321  ditsave_log = biginteger
322  ditsave_dat = biginteger
323  ditsave_slice = biginteger
324  ditsave_collapsed = biginteger
325  ditsave_custom = biginteger
326 
327  typefilelog = 'default'
328 
329  ! defaults for input
330  reset_time = .false.
331  reset_it = .false.
332  firstprocess = .false.
333  reset_grid = .false.
334  base_filename = 'data'
335 
336  ! Defaults for discretization methods
337  typeaverage = 'default'
338  tvdlfeps = one
339  nxdiffusehllc = 0
340  flathllc = .false.
341  slowsteps = -1
342  courantpar = 0.8d0
343  typecourant = 'maxsum'
344  dimsplit = .false.
345  typedimsplit = 'default'
346  typelimited = 'predictor'
347  if(physics_type=='mhd') then
348  cada3_radius = 0.5d0
349  else
350  cada3_radius = 0.1d0
351  end if
352  typetvd = 'roe'
353  typeboundspeed = 'cmaxmean'
354  source_split_usr= .false.
355  time_integrator = 'twostep'
356  solve_internal_e= .false.
357  angmomfix = .false.
358 
360  allocate(limiter(nlevelshi),gradient_limiter(nlevelshi))
361  do level=1,nlevelshi
362  flux_scheme(level) = 'tvdlf'
363  typepred1(level) = 'default'
364  limiter(level) = 'minmod'
365  gradient_limiter(level) = 'minmod'
366  end do
367 
368  flatcd = .false.
369  flatsh = .false.
370  typesourcesplit = 'sfs'
371  allocate(loglimit(nw))
372  loglimit(1:nw) = .false.
373 
374  allocate(typeentropy(nw))
375 
376  do iw=1,nw
377  typeentropy(iw)='nul' ! Entropy fix type
378  end do
379 
380  dtdiffpar = 0.5d0
381  dtpar = -1.d0
382 
383  ! problem setup defaults
384  iprob = 1
385 
386  ! end defaults
387 
388  ! Initialize Kronecker delta, and Levi-Civita tensor
389  do i=1,3
390  do j=1,3
391  if(i==j)then
392  kr(i,j)=1
393  else
394  kr(i,j)=0
395  endif
396  do k=1,3
397  if(i==j.or.j==k.or.k==i)then
398  lvc(i,j,k)=0
! (i,j,k) a cyclic permutation of (1,2,3) -> +1, otherwise -1.
399  else if(i+1==j.or.i-2==j)then
400  lvc(i,j,k)=1
401  else
402  lvc(i,j,k)=-1
403  endif
404  enddo
405  enddo
406  enddo
407 
408  ! These are used to construct file and log names from multiple par files
409  basename_full = ''
410  basename_prev = ''
411 
412  do i = 1, size(par_files)
413  if (mype == 0) print *, "Reading " // trim(par_files(i))
414 
415  ! Check whether the file exists
416  inquire(file=trim(par_files(i)), exist=file_exists)
417 
418  if (.not. file_exists) then
419  write(err_msg, *) "The parameter file " // trim(par_files(i)) // &
420  " does not exist"
421  call mpistop(trim(err_msg))
422  end if
423 
424  open(unitpar, file=trim(par_files(i)), status='old')
425 
426  ! Try to read in the namelists. They can be absent or in a different
427  ! order, since we rewind before each read.
! Each 'end=' label jumps to the next rewind when a group is absent, so a
! missing namelist is silently skipped rather than being an error.
428  rewind(unitpar)
429  read(unitpar, filelist, end=101)
430 
431 101 rewind(unitpar)
432  read(unitpar, savelist, end=102)
433 
434 102 rewind(unitpar)
435  read(unitpar, stoplist, end=103)
436 
437 103 rewind(unitpar)
438  read(unitpar, methodlist, end=104)
439 
440 104 rewind(unitpar)
441  read(unitpar, boundlist, end=105)
442 
443 105 rewind(unitpar)
444  read(unitpar, meshlist, end=106)
445 
446 106 rewind(unitpar)
447  read(unitpar, paramlist, end=107)
448 
449 107 close(unitpar)
450 
451  ! Append the log and file names given in the par files
452  if (base_filename /= basename_prev) &
453  basename_full = trim(basename_full) // trim(base_filename)
454  basename_prev = base_filename
455  end do
456 
457  base_filename = basename_full
458 
459  ! Check whether output directory is writable
! Rank 0 creates and immediately deletes a probe file next to the output.
460  if(mype==0) then
461  dummy_file = trim(base_filename)//"DUMMY"
462  open(newunit=my_unit, file=trim(dummy_file), iostat=iostate)
463  if (iostate /= 0) then
464  call mpistop("Can't write to output directory (" // &
465  trim(base_filename) // ")")
466  else
467  close(my_unit, status='delete')
468  end if
469  end if
470 
471  ! restart filename from command line overwrites the one in par file
472  if(restart_from_file_arg /= undefined) &
473  restart_from_file=restart_from_file_arg
474 
475  if (resume_previous_run) then
476  ! Root process will search snapshot
477  if (mype == 0) then
478  do i = -1, 9998
479  ! Check if the next snapshot is missing
480  if (.not. snapshot_exists(i+1)) exit
481  end do
482 
483  if (i == -1) then
484  ! If initial data is missing (e.g. moved due to lack of space),
485  ! search file with highest index
486  do i = 9999, 0, -1
487  if (snapshot_exists(i)) exit
488  end do
489  end if
490  end if
! Broadcast the index found by rank 0 so all ranks restart consistently.
491  call mpi_bcast(i, 1, mpi_integer, 0, icomm, ierrmpi)
492 
493  if (i == -1) then
494  if(mype==0) write(*,*) "No snapshots found to resume from, start a new run..."
495  else
496  !call mpistop("No snapshots found to resume from")
497  ! Set file name to restart from
498  write(restart_from_file, "(a,i4.4,a)") trim(base_filename), i, ".dat"
499  end if
500  end if
501 
502  if (restart_from_file == undefined) then
503  snapshotnext = 0
504  slicenext = 0
505  collapsenext = 0
506  if (firstprocess) &
507  call mpistop("Please restart from a snapshot when firstprocess=T")
508  if (convert) &
509  call mpistop('Change convert to .false. for a new run!')
510  end if
511 
512  if (small_pressure < 0.d0) call mpistop("small_pressure should be positive.")
513  if (small_density < 0.d0) call mpistop("small_density should be positive.")
514  ! Give priority to non-zero small temperature
! NOTE(review): the statement implementing the comment above (original
! line 515) is elided from this listing.
516 
517  if(convert) autoconvert=.false.
518 
! Fold the per-type scratch schedules into the shared arrays; sentinel
! value bigdouble/biginteger means "user did not set this".
519  where (tsave_log < bigdouble) tsave(:, 1) = tsave_log
520  where (tsave_dat < bigdouble) tsave(:, 2) = tsave_dat
521  where (tsave_slice < bigdouble) tsave(:, 3) = tsave_slice
522  where (tsave_collapsed < bigdouble) tsave(:, 4) = tsave_collapsed
523  where (tsave_custom < bigdouble) tsave(:, 5) = tsave_custom
524 
525  if (dtsave_log < bigdouble) dtsave(1) = dtsave_log
526  if (dtsave_dat < bigdouble) dtsave(2) = dtsave_dat
527  if (dtsave_slice < bigdouble) dtsave(3) = dtsave_slice
528  if (dtsave_collapsed < bigdouble) dtsave(4) = dtsave_collapsed
529  if (dtsave_custom < bigdouble) dtsave(5) = dtsave_custom
530 
531  if (ditsave_log < bigdouble) ditsave(1) = ditsave_log
532  if (ditsave_dat < bigdouble) ditsave(2) = ditsave_dat
533  if (ditsave_slice < bigdouble) ditsave(3) = ditsave_slice
534  if (ditsave_collapsed < bigdouble) ditsave(4) = ditsave_collapsed
535  if (ditsave_custom < bigdouble) ditsave(5) = ditsave_custom
536  ! convert hours to seconds for ending wall time
537  if (wall_time_max < bigdouble) wall_time_max=wall_time_max*3600.d0
538 
539  if (mype == 0) then
540  write(unitterm, *) ''
541  write(unitterm, *) 'Output type | dtsave | ditsave | itsave(1) | tsave(1)'
542  write(fmt_string, *) '(A12," | ",E9.3E2," | ",I6," | "'//&
543  ',I6, " | ",E9.3E2)'
544  end if
545 
546  do ifile = 1, nfile
547  if (mype == 0) write(unitterm, fmt_string) trim(output_names(ifile)), &
548  dtsave(ifile), ditsave(ifile), itsave(1, ifile), tsave(1, ifile)
549  end do
550 
551  if (mype == 0) write(unitterm, *) ''
552 
! Warn (do not abort) on slice directions outside [1, ndim].
553  do islice=1,nslices
554  if(slicedir(islice) > ndim) &
555  write(uniterr,*)'Warning in read_par_files: ', &
556  'Slice ', islice,' direction',slicedir(islice),'larger than ndim=',ndim
557  if(slicedir(islice) < 1) &
558  write(uniterr,*)'Warning in read_par_files: ', &
559  'Slice ', islice,' direction',slicedir(islice),'too small, should be [',1,ndim,']'
560  end do
561 
562  if(it_max==biginteger .and. time_max==bigdouble.and.mype==0) write(uniterr,*) &
563  'Warning in read_par_files: it_max or time_max not given!'
564 
! Per-level scheme validation, and resolution of 'default' predictors.
565  do level=1,nlevelshi
566  if(flux_scheme(level)=='tvd'.and.time_integrator/='onestep') &
567  call mpistop(" tvd is onestep method, reset time_integrator='onestep'")
568  if(flux_scheme(level)=='tvd')then
569  if(mype==0.and.(.not.dimsplit)) write(unitterm,*) &
570  'Warning: setting dimsplit=T for tvd, as used for level=',level
571  dimsplit=.true.
572  endif
573  if(flux_scheme(level)=='hlld'.and.physics_type/='mhd') &
574  call mpistop("Cannot use hlld flux if not using MHD physics!")
575 
576  if (typepred1(level)=='default') then
577  select case (flux_scheme(level))
578  case ('cd')
579  typepred1(level)='cd'
580  case ('cd4')
581  typepred1(level)='cd4'
582  case ('fd')
583  typepred1(level)='fd'
584  case ('tvdlf','tvdmu')
585  typepred1(level)='hancock'
586  case ('hll')
587  typepred1(level)='hll'
588  case ('hllc')
589  typepred1(level)='hllc'
590  case ('hllcd')
591  typepred1(level)='hllcd'
592  case ('hlld')
593  typepred1(level)='hlld'
594  case ('nul','source','tvd')
595  typepred1(level)='nul'
596  case default
597  call mpistop("No default predictor for this full step")
598  end select
599  end if
600  end do
601 
602  ! finite difference scheme fd need global maximal speed
603  if(any(flux_scheme=='fd')) need_global_cmax=.true.
604 
! Map the integrator name to its number of sub-steps.
605  select case (time_integrator)
606  case ("onestep")
607  nstep=1
608  case ("twostep", "twostep_trapezoidal")
609  nstep=2
610  case ("threestep")
611  nstep=3
612  case ("fourstep","rk4","jameson","ssprk43")
613  nstep=4
614  case ("ssprk54")
615  nstep=5
616  case default
617  call mpistop("Unknown time_integrator")
618  end select
619 
! NOTE(review): inside this select, the assignments to stretch_type(i)
! (original lines 623, 626, 629, 632) are elided from this listing; only
! the stretched_dim(i) assignments remain visible.
620  do i = 1, ndim
621  select case (stretch_dim(i))
622  case (undefined, 'none')
624  stretched_dim(i) = .false.
625  case ('uni','uniform')
627  stretched_dim(i) = .true.
628  case ('symm', 'symmetric')
630  stretched_dim(i) = .true.
631  case default
633  stretched_dim(i) = .false.
634  if (mype == 0) print *, 'Got stretch_type = ', stretch_type(i)
635  call mpistop('Unknown stretch type')
636  end select
637  end do
638 
639  ! Harmonize the parameters for dimensional splitting and source splitting
640  if(typedimsplit =='default'.and. dimsplit) typedimsplit='xyyx'
641  if(typedimsplit =='default'.and..not.dimsplit) typedimsplit='unsplit'
642  dimsplit = typedimsplit /='unsplit'
643 
644  if(typeaxial=='default') then
645  typeaxial='slab'
646  if(mype==0) then
647  write(*,*) 'Warning: coordinate system is not specified!'
648  write(*,*) 'call set_coordinate_system in usr_init in mod_usr.t'
649  write(*,*) 'Now use Cartesian coordinate'
650  end if
651  end if
652 
653  if(typeaxial=="slab") then
654  if(any(stretched_dim)) then
655  typeaxial="slabstretch"
656  slab=.false.
657  slab_stretched=.true.
658  else
659  slab=.true.
660  end if
661  else
662  slab=.false.
663  end if
664 
665  if(typeaxial=='spherical') then
666  if(dimsplit) then
667  if(mype==0)print *,'Warning: spherical symmetry needs dimsplit=F, resetting'
668  dimsplit=.false.
669  end if
670  end if
671 
672  if (ndim==1) dimsplit=.false.
673  if (.not.dimsplit.and.ndim>1) then
674  select case (time_integrator)
675  case ("ssprk54","ssprk43","fourstep", "rk4", "threestep", "twostep")
676  ! Runge-Kutta needs predictor
677  typelimited="predictor"
678  if (mype==0) write(unitterm, '(A30,A)') 'typelimited: ', typelimited
679  end select
680  end if
681 
682  ! Type limiter is of integer type for performance
! NOTE(review): the matching allocate of type_gradient_limiter (original
! line 684) is elided from this listing.
683  allocate(type_limiter(nlevelshi))
685 
686  do level=1,nlevelshi
687  type_limiter(level) = limiter_type(limiter(level))
688  type_gradient_limiter(level) = limiter_type(gradient_limiter(level))
689  end do
690 
691  if (any(limiter(1:nlevelshi)== 'ppm')&
692  .and.(flatsh.and.physics_type=='rho')) then
693  call mpistop(" PPM with flatsh=.true. can not be used with physics_type='rho'!")
694  end if
695 
696  ! Copy boundary conditions to typeboundary, which is used internally
697  {
698  if (any(typeboundary_min^d /= undefined)) then
699  typeboundary(1:nwfluxbc, 2*^d-1) = typeboundary_min^d(1:nwfluxbc)
700  end if
701 
702  if (any(typeboundary_max^d /= undefined)) then
703  typeboundary(1:nwfluxbc, 2*^d) = typeboundary_max^d(1:nwfluxbc)
704  end if
705  }
706 
707  ! psi, tracers take the same boundary type as density
708  if (nwfluxbc<nwflux) then
709  do iw = nwfluxbc+1, nwflux
710  typeboundary(iw,:) = typeboundary(iw_rho, :)
711  end do
712  end if
713 
714  if (any(typeboundary == undefined)) then
715  call mpistop("Not all boundary conditions have been defined")
716  end if
717 
! Periodicity must hold for matched boundary pairs and for all variables
! of a dimension at once (aperiodic allowed per-variable).
718  do idim=1,ndim
719  periodb(idim)=(any(typeboundary(:,2*idim-1:2*idim)=='periodic'))
720  aperiodb(idim)=(any(typeboundary(:,2*idim-1:2*idim)=='aperiodic'))
721  if (periodb(idim).or.aperiodb(idim)) then
722  do iw=1,nwflux
723  if (typeboundary(iw,2*idim-1) .ne. typeboundary(iw,2*idim)) &
724  call mpistop("Wrong counterpart in periodic boundary")
725 
726  if (typeboundary(iw,2*idim-1) /= 'periodic' .and. &
727  typeboundary(iw,2*idim-1) /= 'aperiodic') then
728  call mpistop("Each dimension should either have all "//&
729  "or no variables periodic, some can be aperiodic")
730  end if
731  end do
732  end if
733  end do
! Pole handling: replace 'pole' with symm/asymm per variable (>1D only).
734  {^nooned
735  do idim=1,ndim
736  if(any(typeboundary(:,2*idim-1)=='pole')) then
737  if(any(typeboundary(:,2*idim-1)/='pole')) typeboundary(:,2*idim-1)='pole'
738  if(phys_energy) then
739  windex=2
740  else
741  windex=1
742  end if
743  typeboundary(:,2*idim-1)='symm'
744  if(physics_type/='rho') then
745  select case(typeaxial)
746  case('cylindrical')
747  typeboundary(phi_+1,2*idim-1)='asymm'
748  if(physics_type=='mhd') typeboundary(ndir+windex+phi_,2*idim-1)='asymm'
749  case('spherical')
750  typeboundary(3:ndir+1,2*idim-1)='asymm'
751  if(physics_type=='mhd') typeboundary(ndir+windex+2:ndir+windex+ndir,2*idim-1)='asymm'
752  case default
753  call mpistop('Pole is in cylindrical, polar, spherical coordinates!')
754  end select
755  end if
756  end if
757  if(any(typeboundary(:,2*idim)=='pole')) then
758  if(any(typeboundary(:,2*idim)/='pole')) typeboundary(:,2*idim)='pole'
759  if(phys_energy) then
760  windex=2
761  else
762  windex=1
763  end if
764  typeboundary(:,2*idim)='symm'
765  if(physics_type/='rho') then
766  select case(typeaxial)
767  case('cylindrical')
768  typeboundary(phi_+1,2*idim)='asymm'
769  if(physics_type=='mhd') typeboundary(ndir+windex+phi_,2*idim)='asymm'
770  case('spherical')
771  typeboundary(3:ndir+1,2*idim)='asymm'
772  if(physics_type=='mhd') typeboundary(ndir+windex+2:ndir+windex+ndir,2*idim)='asymm'
773  case default
774  call mpistop('Pole is in cylindrical, polar, spherical coordinates!')
775  end select
776  end if
777  end if
778  end do
779  }
780 
! Wide-stencil limiters need extra ghost layers (default is 2).
781  if(any(limiter(1:nlevelshi)=='mp5')) then
782  nghostcells=3
783  end if
784 
785  if(any(limiter(1:nlevelshi)=='ppm')) then
786  nghostcells=4
787  end if
788 
789  ! If a wider stencil is used, extend the number of ghost cells
791 
! Angular coordinates span the full circle [0, 2*pi].
792  select case (typeaxial)
793  {^nooned
794  case ("spherical")
795  xprob^lim^de=xprob^lim^de*two*dpi;
796  \}
797  case ("cylindrical")
798  {
799  if (^d==phi_) then
800  xprob^lim^d=xprob^lim^d*two*dpi;
801  end if
802  \}
803  end select
804 
805  ! full block size including ghostcells
806  {ixghi^d = block_nx^d + 2*nghostcells\}
807  {ixgshi^d = ixghi^d\}
808 
809  nx_vec = [{domain_nx^d|, }]
810  block_nx_vec = [{block_nx^d|, }]
811 
812  if (any(nx_vec < 4) .or. any(mod(nx_vec, 2) == 1)) &
813  call mpistop('Grid size (domain_nx^D) has to be even and >= 4')
814 
815  if (any(block_nx_vec < 4) .or. any(mod(block_nx_vec, 2) == 1)) &
816  call mpistop('Block size (block_nx^D) has to be even and >= 4')
817 
818  if (any([ domain_nx^d/block_nx^d ] == 1) .and. mype == 0) then
819  print *, "TODO: possible bug when domain_nx^D/block_nx^D == 1"
820  end if
821 
822  { if(mod(domain_nx^d,block_nx^d)/=0) &
823  call mpistop('Grid (domain_nx^D) and block (block_nx^D) must be consistent') \}
824 
! NOTE(review): the guarding 'if(refine_max_level>nlevelshi) then'
! (original line 825) is elided from this listing - the endif below
! belongs to it.
826  write(unitterm,*)'Error: refine_max_level',refine_max_level,'>nlevelshi ',nlevelshi
827  call mpistop("Reset nlevelshi and recompile!")
828  endif
829 
! Precompute per-level stretching factors and first-cell sizes for
! unidirectional ('uni') and symmetric ('symm') stretching.
830  if (any(stretched_dim)) then
! NOTE(review): the continuation of this allocate (original line 832,
! presumably dxmid and related arrays) is elided from this listing.
831  allocate(qstretch(0:nlevelshi,1:ndim),dxfirst(0:nlevelshi,1:ndim),&
833  allocate(nstretchedblocks(1:nlevelshi,1:ndim))
834  qstretch(0:nlevelshi,1:ndim)=0.0d0
835  dxfirst(0:nlevelshi,1:ndim)=0.0d0
837  {if (stretch_type(^d) == stretch_uni) then
838  ! first some sanity checks
839  if(qstretch_baselevel(^d)<1.0d0.or.qstretch_baselevel(^d)==bigdouble) then
840  if(mype==0) then
841  write(*,*) 'stretched grid needs finite qstretch_baselevel>1'
842  write(*,*) 'will try default value for qstretch_baselevel in dimension', ^d
843  endif
844  if(xprobmin^d>smalldouble)then
845  qstretch_baselevel(^d)=(xprobmax^d/xprobmin^d)**(1.d0/dble(domain_nx^d))
846  else
847  call mpistop("can not set qstretch_baselevel automatically")
848  endif
849  endif
850  if(mod(block_nx^d,2)==1) &
851  call mpistop("stretched grid needs even block size block_nxD")
852  if(mod(domain_nx^d/block_nx^d,2)/=0) &
853  call mpistop("number level 1 blocks in D must be even")
854  qstretch(1,^d)=qstretch_baselevel(^d)
! Geometric-series identity: sum of dxfirst*q^k over the level-1 cells
! equals the domain extent.
855  dxfirst(1,^d)=(xprobmax^d-xprobmin^d) &
856  *(1.0d0-qstretch(1,^d))/(1.0d0-qstretch(1,^d)**domain_nx^d)
857  qstretch(0,^d)=qstretch(1,^d)**2
858  dxfirst(0,^d)=dxfirst(1,^d)*(1.0d0+qstretch(1,^d))
859  if(refine_max_level>1)then
860  do ilev=2,refine_max_level
861  qstretch(ilev,^d)=dsqrt(qstretch(ilev-1,^d))
862  dxfirst(ilev,^d)=dxfirst(ilev-1,^d) &
863  /(1.0d0+dsqrt(qstretch(ilev-1,^d)))
864  enddo
865  endif
866  endif \}
867  if(mype==0) then
868  {if(stretch_type(^d) == stretch_uni) then
869  write(*,*) 'Stretched dimension ', ^d
870  write(*,*) 'Using stretched grid with qs=',qstretch(0:refine_max_level,^d)
871  write(*,*) ' and first cell sizes=',dxfirst(0:refine_max_level,^d)
872  endif\}
873  end if
874  {if(stretch_type(^d) == stretch_symm) then
875  if(mype==0) then
876  write(*,*) 'will apply symmetric stretch in dimension', ^d
877  endif
878  if(mod(block_nx^d,2)==1) &
879  call mpistop("stretched grid needs even block size block_nxD")
880  ! checks on the input variable nstretchedblocks_baselevel
881  if(nstretchedblocks_baselevel(^d)==0) &
882  call mpistop("need finite even number of stretched blocks at baselevel")
883  if(mod(nstretchedblocks_baselevel(^d),2)==1) &
884  call mpistop("need even number of stretched blocks at baselevel")
885  if(qstretch_baselevel(^d)<1.0d0.or.qstretch_baselevel(^d)==bigdouble) &
886  call mpistop('stretched grid needs finite qstretch_baselevel>1')
887  ! compute stretched part to ensure uniform center
888  ipower=(nstretchedblocks_baselevel(^d)/2)*block_nx^d
889  if(nstretchedblocks_baselevel(^d)==domain_nx^d/block_nx^d)then
890  xstretch^d=0.5d0*(xprobmax^d-xprobmin^d)
891  else
892  xstretch^d=(xprobmax^d-xprobmin^d) &
893  /(2.0d0+dble(domain_nx^d-nstretchedblocks_baselevel(^d)*block_nx^d) &
894  *(1.0d0-qstretch_baselevel(^d))/(1.0d0-qstretch_baselevel(^d)**ipower))
895  endif
896  if(xstretch^d>(xprobmax^d-xprobmin^d)*0.5d0) &
897  call mpistop(" stretched grid part should not exceed full domain")
898  dxfirst(1,^d)=xstretch^d*(1.0d0-qstretch_baselevel(^d)) &
899  /(1.0d0-qstretch_baselevel(^d)**ipower)
900  nstretchedblocks(1,^d)=nstretchedblocks_baselevel(^d)
901  qstretch(1,^d)=qstretch_baselevel(^d)
902  qstretch(0,^d)=qstretch(1,^d)**2
903  dxfirst(0,^d)=dxfirst(1,^d)*(1.0d0+qstretch(1,^d))
904  dxmid(1,^d)=dxfirst(1,^d)
905  dxmid(0,^d)=dxfirst(1,^d)*2.0d0
906  if(refine_max_level>1)then
907  do ilev=2,refine_max_level
908  nstretchedblocks(ilev,^d)=2*nstretchedblocks(ilev-1,^d)
909  qstretch(ilev,^d)=dsqrt(qstretch(ilev-1,^d))
910  dxfirst(ilev,^d)=dxfirst(ilev-1,^d) &
911  /(1.0d0+dsqrt(qstretch(ilev-1,^d)))
912  dxmid(ilev,^d)=dxmid(ilev-1,^d)/2.0d0
913  enddo
914  endif
915  ! sanity check on total domain size:
916  sizeuniformpart^d=dxfirst(1,^d) &
917  *(domain_nx^d-nstretchedblocks_baselevel(^d)*block_nx^d)
918  if(mype==0) then
919  print *,'uniform part of size=',sizeuniformpart^d
920  print *,'setting of domain is then=',2*xstretch^d+sizeuniformpart^d
921  print *,'versus=',xprobmax^d-xprobmin^d
922  endif
923  if(dabs(xprobmax^d-xprobmin^d-2*xstretch^d-sizeuniformpart^d)>smalldouble) then
924  call mpistop('mismatch in domain size!')
925  endif
926  endif \}
927  dxfirst_1mq(0:refine_max_level,1:ndim)=dxfirst(0:refine_max_level,1:ndim) &
928  /(1.0d0-qstretch(0:refine_max_level,1:ndim))
929  end if
930 
931  dx_vec = [{xprobmax^d-xprobmin^d|, }] / nx_vec
932 
933  if (mype==0) then
934  write(c_ndim, '(I1)') ^nd
935  write(unitterm, '(A30,' // c_ndim // '(I0," "))') &
936  ' Domain size (cells): ', nx_vec
937  write(unitterm, '(A30,' // c_ndim // '(E9.3," "))') &
938  ' Level one dx: ', dx_vec
939  end if
940 
941  if (any(dx_vec < smalldouble)) &
942  call mpistop("Incorrect domain size (too small grid spacing)")
943 
944  dx(:, 1) = dx_vec
945 
946  if(sum(w_refine_weight(:))==0) w_refine_weight(1) = 1.d0
! NOTE(review): the message below is missing a word ("...must be 1.d0");
! it is a runtime string and is left untouched in this doc-only pass.
947  if(dabs(sum(w_refine_weight(:))-1.d0)>smalldouble) then
948  write(unitterm,*) "Sum of all elements in w_refine_weight be 1.d0"
949  call mpistop("Reset w_refine_weight so the sum is 1.d0")
950  end if
951 
952  if (mype==0) write(unitterm, '(A30)', advance='no') 'Refine estimation: '
953 
954  select case (refine_criterion)
955  case (0)
956  if (mype==0) write(unitterm, '(A)') "user defined"
957  case (1)
958  if (mype==0) write(unitterm, '(A)') "relative error"
959  case (2)
960  if (mype==0) write(unitterm, '(A)') "Lohner's original scheme"
961  case (3)
962  if (mype==0) write(unitterm, '(A)') "Lohner's scheme"
963  case default
964  call mpistop("Unknown error estimator, change refine_criterion")
965  end select
966 
967  if (tfixgrid<bigdouble/2.0d0) then
968  if(mype==0)print*,'Warning, at time=',tfixgrid,'the grid will be fixed'
969  end if
970  if (itfixgrid<biginteger/2) then
971  if(mype==0)print*,'Warning, at iteration=',itfixgrid,'the grid will be fixed'
972  end if
973  if (ditregrid>1) then
974  if(mype==0)print*,'Note, Grid is reconstructed once every',ditregrid,'iterations'
975  end if
976 
977 
! Warn if a slice coordinate lies outside the domain in its direction.
978  do islice=1,nslices
979  select case(slicedir(islice))
980  {case(^d)
981  if(slicecoord(islice)<xprobmin^d.or.slicecoord(islice)>xprobmax^d) &
982  write(uniterr,*)'Warning in read_par_files: ', &
983  'Slice ', islice, ' coordinate',slicecoord(islice),'out of bounds for dimension ',slicedir(islice)
984  \}
985  end select
986  end do
987 
988  if (mype==0) then
989  write(unitterm, '(A30,A,A)') 'restart_from_file: ', ' ', trim(restart_from_file)
990  write(unitterm, '(A30,L1)') 'converting: ', convert
991  write(unitterm, '(A)') ''
992  endif
993 
994  end subroutine read_par_files
995 
996  !> Routine to find entries in a string
997  subroutine get_fields_string(line, delims, n_max, fields, n_found, fully_read)
998  !> The line from which we want to read
999  character(len=*), intent(in) :: line
1000  !> A string with delimiters. For example delims = " ,'"""//char(9)
1001  character(len=*), intent(in) :: delims
1002  !> Maximum number of entries to read in
1003  integer, intent(in) :: n_max
1004  !> Number of entries found
1005  integer, intent(inout) :: n_found
1006  !> Fields in the strings
1007  character(len=*), intent(inout) :: fields(n_max)
1008  logical, intent(out), optional :: fully_read
1009 
1010  integer :: ixs_start(n_max)
1011  integer :: ixs_end(n_max)
1012  integer :: ix, ix_prev
1013 
1014  ix_prev = 0
1015  n_found = 0
1016 
1017  do while (n_found < n_max)
1018  ! Find the starting point of the next entry (a non-delimiter value)
1019  ix = verify(line(ix_prev+1:), delims)
1020  if (ix == 0) exit
1021 
1022  n_found = n_found + 1
1023  ixs_start(n_found) = ix_prev + ix ! This is the absolute position in 'line'
1024 
1025  ! Get the end point of the current entry (next delimiter index minus one)
1026  ix = scan(line(ixs_start(n_found)+1:), delims) - 1
1027 
1028  if (ix == -1) then ! If there is no last delimiter,
1029  ixs_end(n_found) = len(line) ! the end of the line is the endpoint
1030  else
1031  ixs_end(n_found) = ixs_start(n_found) + ix
1032  end if
1033 
1034  fields(n_found) = line(ixs_start(n_found):ixs_end(n_found))
1035  ix_prev = ixs_end(n_found) ! We continue to search from here
1036  end do
1037 
1038  if (present(fully_read)) then
1039  ix = verify(line(ix_prev+1:), delims)
1040  fully_read = (ix == 0) ! Are there only delimiters?
1041  end if
1042 
1043  end subroutine get_fields_string
1044 
 !> Write one output file selected by ifile (snapshot, slice, collapsed
 !> view, log, or analysis), then flush the terminal unit.
 subroutine saveamrfile(ifile)
 ! NOTE(review): this listing elides some lines right after the subroutine
 ! statement (presumably 'use' statements, e.g. mod_global_parameters) --
 ! confirm against the repository source.
 use mod_particles, only: write_particles_snapshot
 use mod_slice, only: write_slice
 use mod_collapse, only: write_collapsed
 !> File type to write: fileout_, fileslice_, filecollapse_, filelog_
 !> or fileanalysis_
 integer:: ifile

 select case (ifile)
 case (fileout_)
 ! Write .dat snapshot
 call write_snapshot()

 ! Generate formatted output (e.g., VTK)
 ! NOTE(review): a line appears to be elided here in this listing
 ! (presumably the call performing the conversion) -- confirm upstream.

 if(use_particles) call write_particles_snapshot()

 case (fileslice_)
 call write_slice
 case (filecollapse_)
 call write_collapsed
 case (filelog_)
 ! Dispatch on the configured log style
 select case (typefilelog)
 case ('default')
 call printlog_default
 case ('regression_test')
 ! NOTE(review): no call is visible for this case in this listing
 ! (presumably the regression-test log routine was elided) -- confirm.
 case ('special')
 ! User-supplied log routine; must have been associated by the user
 if (.not. associated(usr_print_log)) then
 call mpistop("usr_print_log not defined")
 else
 call usr_print_log()
 end if
 case default
 call mpistop("Error in SaveFile: Unknown typefilelog")
 end select
 case (fileanalysis_)
 ! Optional user-supplied analysis hook; silently skipped if absent
 if (associated(usr_write_analysis)) then
 call usr_write_analysis()
 end if
 case default
 write(*,*) 'No save method is defined for ifile=',ifile
 call mpistop("")
 end select

 ! opedit: Flush stdout and stderr from time to time.
 flush(unit=unitterm)

 end subroutine saveamrfile
1097 
1098  !> Standard method for creating a new output file
1099  subroutine create_output_file(fh, ix, extension, suffix)
1101  integer, intent(out) :: fh !< File handle
1102  integer, intent(in) :: ix !< Index of file
1103  character(len=*), intent(in) :: extension !< Extension of file
1104  character(len=*), intent(in), optional :: suffix !< Optional suffix
1105  character(len=std_len) :: filename
1106  integer :: amode
1107 
1108  if (ix >= 10000) then
1109  call mpistop("Number of output files is limited to 10000 (0...9999)")
1110  end if
1111 
1112  if (present(suffix)) then
1113  write(filename,"(a,a,i4.4,a)") trim(base_filename), &
1114  trim(suffix), ix, extension
1115  else
1116  write(filename,"(a,i4.4,a)") trim(base_filename), ix, extension
1117  end if
1118 
1119  ! MPI cannot easily replace existing files
1120  open(unit=unitsnapshot,file=filename,status='replace')
1121  close(unitsnapshot, status='delete')
1122 
1123  amode = ior(mpi_mode_create, mpi_mode_wronly)
1124  call mpi_file_open(mpi_comm_self,filename,amode, &
1125  mpi_info_null, fh, ierrmpi)
1126 
1127  if (ierrmpi /= 0) then
1128  print *, "Error, cannot create file ", trim(filename)
1129  call mpistop("Fatal error")
1130  end if
1131 
1132  end subroutine create_output_file
1133 
1134  ! Check if a snapshot exists
1135  logical function snapshot_exists(ix)
1137  integer, intent(in) :: ix !< Index of snapshot
1138  character(len=std_len) :: filename
1139 
1140  write(filename, "(a,i4.4,a)") trim(base_filename), ix, ".dat"
1141  inquire(file=trim(filename), exist=snapshot_exists)
1142  end function snapshot_exists
1143 
1144  integer function get_snapshot_index(filename)
1145  character(len=*), intent(in) :: filename
1146  integer :: i
1147 
1148  ! Try to parse index in restart_from_file string (e.g. basename0000.dat)
1149  i = len_trim(filename) - 7
1150  read(filename(i:i+3), '(I4)') get_snapshot_index
1151  end function get_snapshot_index
1152 
 !> Write header for a snapshot
 !>
 !> Written twice per snapshot (see write_snapshot): first with dummy
 !> offsets, then again once the true offsets of the tree info and block
 !> data are known. The MPI status/error arguments are not checked here.
 subroutine snapshot_write_header(fh, offset_tree, offset_block)
 use mod_physics
 use mod_slice, only: slicenext
 integer, intent(in) :: fh !< File handle
 integer(kind=MPI_OFFSET_KIND), intent(in) :: offset_tree !< Offset of tree info
 integer(kind=MPI_OFFSET_KIND), intent(in) :: offset_block !< Offset of block data
 integer, dimension(MPI_STATUS_SIZE) :: st
 integer :: iw, er

 ! Fixed integer part of the header. The two offsets are truncated to
 ! default-kind integers via int(...), so they must fit in 4 bytes.
 call mpi_file_write(fh, version_number, 1, mpi_integer, st, er)
 call mpi_file_write(fh, int(offset_tree), 1, mpi_integer, st, er)
 call mpi_file_write(fh, int(offset_block), 1, mpi_integer, st, er)
 call mpi_file_write(fh, nw, 1, mpi_integer, st, er)
 call mpi_file_write(fh, ndir, 1, mpi_integer, st, er)
 call mpi_file_write(fh, ndim, 1, mpi_integer, st, er)
 call mpi_file_write(fh, levmax, 1, mpi_integer, st, er)
 call mpi_file_write(fh, nleafs, 1, mpi_integer, st, er)
 call mpi_file_write(fh, nparents, 1, mpi_integer, st, er)
 call mpi_file_write(fh, it, 1, mpi_integer, st, er)
 ! Note: It is nice when this double has an even number of 4 byte
 ! integers before it (for alignment)
 call mpi_file_write(fh, global_time, 1, mpi_double_precision, st, er)

 ! Domain extent and resolution, one entry per dimension
 call mpi_file_write(fh, [ xprobmin^d ], ndim, &
 mpi_double_precision, st, er)
 call mpi_file_write(fh, [ xprobmax^d ], ndim, &
 mpi_double_precision, st, er)
 call mpi_file_write(fh, [ domain_nx^d ], ndim, &
 mpi_integer, st, er)
 call mpi_file_write(fh, [ block_nx^d ], ndim, &
 mpi_integer, st, er)
 ! Names of the conserved variables, as fixed-width character records
 do iw = 1, nw
 call mpi_file_write(fh, cons_wnames(iw), name_len, mpi_character, st, er)
 end do

 ! TODO: write geometry info

 ! Physics related information
 call mpi_file_write(fh, physics_type, name_len, mpi_character, st, er)

 ! Format:
 ! integer :: n_par
 ! double precision :: values(n_par)
 ! character(n_par * name_len) :: names
 call phys_write_info(fh)

 ! Write snapshotnext etc., which is useful for restarting.
 ! Note we add one, since snapshotnext is updated *after* this procedure
 if(pass_wall_time) then
 call mpi_file_write(fh, snapshotnext, 1, mpi_integer, st, er)
 else
 call mpi_file_write(fh, snapshotnext+1, 1, mpi_integer, st, er)
 end if
 call mpi_file_write(fh, slicenext, 1, mpi_integer, st, er)
 call mpi_file_write(fh, collapsenext, 1, mpi_integer, st, er)
 ! Write stagger grid mark
 ! NOTE(review): snapshot_read_header only consumes this logical for file
 ! versions > 4, while version_number is 4, so readers of this file never
 ! see it -- confirm the intended version handling upstream.
 call mpi_file_write(fh, stagger_grid, 1, mpi_logical, st, er)

 end subroutine snapshot_write_header
1215 
1216  subroutine snapshot_read_header(fh, offset_tree, offset_block)
1219  use mod_physics, only: physics_type
1220  use mod_slice, only: slicenext
1221  integer, intent(in) :: fh !< File handle
1222  integer(MPI_OFFSET_KIND), intent(out) :: offset_tree !< Offset of tree info
1223  integer(MPI_OFFSET_KIND), intent(out) :: offset_block !< Offset of block data
1224  integer :: i, version
1225  integer :: ibuf(ndim), iw
1226  double precision :: rbuf(ndim)
1227  integer, dimension(MPI_STATUS_SIZE) :: st
1228  character(len=name_len), allocatable :: var_names(:), param_names(:)
1229  double precision, allocatable :: params(:)
1230  character(len=name_len) :: phys_name
1231  integer :: er, n_par, tmp_int
1232  logical :: stagger_mark_dat
1233 
1234  ! Version number
1235  call mpi_file_read(fh, version, 1, mpi_integer, st, er)
1236  if (all(compatible_versions /= version)) then
1237  call mpistop("Incompatible file version (maybe old format?)")
1238  end if
1239 
1240  ! offset_tree
1241  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1242  offset_tree = ibuf(1)
1243 
1244  ! offset_block
1245  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1246  offset_block = ibuf(1)
1247 
1248  ! nw
1249  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1250  nw_found=ibuf(1)
1251  if (nw /= ibuf(1)) then
1252  write(*,*) "nw=",nw," and nw found in restart file=",ibuf(1)
1253  write(*,*) "Please be aware of changes in w at restart."
1254  !call mpistop("currently, changing nw at restart is not allowed")
1255  end if
1256 
1257  ! ndir
1258  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1259  if (ibuf(1) /= ndir) then
1260  write(*,*) "ndir in restart file = ",ibuf(1)
1261  write(*,*) "ndir = ",ndir
1262  call mpistop("reset ndir to ndir in restart file")
1263  end if
1264 
1265  ! ndim
1266  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1267  if (ibuf(1) /= ndim) then
1268  write(*,*) "ndim in restart file = ",ibuf(1)
1269  write(*,*) "ndim = ",ndim
1270  call mpistop("reset ndim to ndim in restart file")
1271  end if
1272 
1273  ! levmax
1274  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1275  if (ibuf(1) > refine_max_level) then
1276  write(*,*) "number of levels in restart file = ",ibuf(1)
1277  write(*,*) "refine_max_level = ",refine_max_level
1278  call mpistop("refine_max_level < num. levels in restart file")
1279  end if
1280 
1281  ! nleafs
1282  call mpi_file_read(fh, nleafs, 1, mpi_integer, st, er)
1283 
1284  ! nparents
1285  call mpi_file_read(fh, nparents, 1, mpi_integer, st, er)
1286 
1287  ! it
1288  call mpi_file_read(fh, it, 1, mpi_integer, st, er)
1289 
1290  ! global time
1291  call mpi_file_read(fh, global_time, 1, mpi_double_precision, st, er)
1292 
1293  ! xprobmin^D
1294  call mpi_file_read(fh,rbuf(1:ndim),ndim,mpi_double_precision,st,er)
1295  if (maxval(abs(rbuf(1:ndim) - [ xprobmin^d ])) > 0) then
1296  write(*,*) "Error: xprobmin differs from restart data: ", rbuf(1:ndim)
1297  call mpistop("change xprobmin^D in par file")
1298  end if
1299 
1300  ! xprobmax^D
1301  call mpi_file_read(fh,rbuf(1:ndim),ndim,mpi_double_precision,st,er)
1302  if (maxval(abs(rbuf(1:ndim) - [ xprobmax^d ])) > 0) then
1303  write(*,*) "Error: xprobmax differs from restart data: ", rbuf(1:ndim)
1304  call mpistop("change xprobmax^D in par file")
1305  end if
1306 
1307  ! domain_nx^D
1308  call mpi_file_read(fh,ibuf(1:ndim), ndim, mpi_integer,st,er)
1309  if (any(ibuf(1:ndim) /= [ domain_nx^d ])) then
1310  write(*,*) "Error: mesh size differs from restart data: ", ibuf(1:ndim)
1311  call mpistop("change domain_nx^D in par file")
1312  end if
1313 
1314  ! block_nx^D
1315  call mpi_file_read(fh,ibuf(1:ndim), ndim, mpi_integer,st,er)
1316  if (any(ibuf(1:ndim) /= [ block_nx^d ])) then
1317  write(*,*) "Error: block size differs from restart data:", ibuf(1:ndim)
1318  call mpistop("change block_nx^D in par file")
1319  end if
1320 
1321  ! From version 4 onwards, the later parts of the header must be present
1322  if (version > 3) then
1323  ! w_names (not used here)
1324  allocate(var_names(nw_found))
1325  do iw = 1, nw_found
1326  call mpi_file_read(fh, var_names(iw), name_len, mpi_character, st, er)
1327  end do
1328 
1329  ! Physics related information
1330  call mpi_file_read(fh, phys_name, name_len, mpi_character, st, er)
1331 
1332  if (phys_name /= physics_type) then
1333  call mpistop("Cannot restart with a different physics type")
1334  end if
1335 
1336  call mpi_file_read(fh, n_par, 1, mpi_integer, st, er)
1337  allocate(params(n_par))
1338  allocate(param_names(n_par))
1339  call mpi_file_read(fh, params, n_par, mpi_double_precision, st, er)
1340  call mpi_file_read(fh, param_names, name_len * n_par, mpi_character, st, er)
1341 
1342  ! Read snapshotnext etc. for restarting
1343  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1344 
1345  ! Only set snapshotnext if the user hasn't specified it
1346  if (snapshotnext == -1) snapshotnext = tmp_int
1347 
1348  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1349  if (slicenext == -1) slicenext = tmp_int
1350 
1351  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1352  if (collapsenext == -1) collapsenext = tmp_int
1353  else
1354  ! Guess snapshotnext from file name if not set
1355  if (snapshotnext == -1) &
1357  ! Set slicenext and collapsenext if not set
1358  if (slicenext == -1) slicenext = 0
1359  if (collapsenext == -1) collapsenext = 0
1360  end if
1361 
1362  ! Still used in convert
1364 
1365  if (version > 4) then
1366  call mpi_file_read(fh, stagger_mark_dat, 1, mpi_logical, st, er)
1367  if (stagger_grid .neqv. stagger_mark_dat) then
1368  write(*,*) "Error: stagger grid flag differs from restart data:", stagger_mark_dat
1369  call mpistop("change parameter to use stagger grid")
1370  end if
1371  end if
1372 
1373  end subroutine snapshot_read_header
1374 
 !> Compute number of elements in index range
 pure integer function count_ix(ixO^L)
 integer, intent(in) :: ixO^L

 ! Product over dimensions of (max index - min index + 1)
 count_ix = product([ ixomax^d ] - [ ixomin^d ] + 1)
 end function count_ix
1381 
 !> Determine the shape of a block for output (whether to include ghost cells,
 !> and on which sides)
 subroutine block_shape_io(igrid, n_ghost, ixO^L, n_values)

 integer, intent(in) :: igrid
 !> nghost(1:ndim) contains the number of ghost cells on the block's minimum
 !> boundaries, and nghost(ndim+1:2*ndim) on the block's maximum boundaries
 integer, intent(out) :: n_ghost(2*ndim)
 !> Index range on output block
 integer, intent(out) :: ixO^L
 !> Number of cells/values in output
 integer, intent(out) :: n_values

 integer :: idim

 ! By default no ghost cells are included in the output
 n_ghost(:) = 0

 if(save_physical_boundary) then
 do idim=1,ndim
 ! Include ghost cells on lower boundary
 if(ps(igrid)%is_physical_boundary(2*idim-1)) n_ghost(idim)=nghostcells
 ! Include ghost cells on upper boundary
 if(ps(igrid)%is_physical_boundary(2*idim)) n_ghost(ndim+idim)=nghostcells
 end do
 end if

 ! Extend the mesh index range by the selected ghost layers per side
 {ixomin^d = ixmlo^d - n_ghost(^d)\}
 {ixomax^d = ixmhi^d + n_ghost(ndim+^d)\}

 ! Cell-centered values; staggered (face) values add one extra layer
 ! per dimension, hence the "+ 2" instead of "+ 1"
 n_values = count_ix(ixo^l) * nw
 if(stagger_grid) then
 n_values=n_values+ product([ ixomax^d ] - [ ixomin^d ] + 2)*nws
 end if
 end subroutine block_shape_io
1417 
 !> Write a .dat snapshot of the current state: header, forest, per-block
 !> metadata (levels, spatial indices, byte offsets) and the cell data of
 !> every leaf block. Rank 0 performs all file I/O; the other ranks send
 !> their blocks to rank 0 over MPI. The header and the offset table are
 !> written twice: first as placeholders, then again with the true values.
 subroutine write_snapshot
 ! NOTE(review): this listing elides some lines before 'use mod_physics'
 ! (presumably further 'use' statements) -- confirm upstream.
 use mod_physics

 integer :: file_handle, igrid, Morton_no, iwrite
 integer :: ipe, ix_buffer(2*ndim+1), n_values
 integer :: ixO^L, n_ghost(2*ndim), n_values_stagger
 integer :: iorecvstatus(mpi_status_size)
 integer :: ioastatus(mpi_status_size)
 integer :: igrecvstatus(mpi_status_size)
 integer :: istatus(mpi_status_size)
 type(tree_node), pointer :: pnode
 integer(kind=MPI_OFFSET_KIND) :: offset_tree_info
 integer(kind=MPI_OFFSET_KIND) :: offset_block_data
 integer(kind=MPI_OFFSET_KIND) :: offset_offsets
 double precision, allocatable :: w_buffer(:)

 integer, allocatable :: block_ig(:, :)
 integer, allocatable :: block_lvl(:)
 integer(kind=MPI_OFFSET_KIND), allocatable :: block_offset(:)

 call mpi_barrier(icomm, ierrmpi)

 ! Allocate send/receive buffer, sized for a full block including ghosts
 n_values = count_ix(ixg^ll) * nw
 if(stagger_grid) then
 n_values = n_values + count_ix(ixgs^ll) * nws
 end if
 allocate(w_buffer(n_values))

 ! Allocate arrays with information about grid blocks
 allocate(block_ig(ndim, nleafs))
 allocate(block_lvl(nleafs))
 allocate(block_offset(nleafs+1))

 ! master processor
 if (mype==0) then
 call create_output_file(file_handle, snapshotnext, ".dat")

 ! Don't know offsets yet, we will write header again later
 offset_tree_info = -1
 offset_block_data = -1
 call snapshot_write_header(file_handle, offset_tree_info, &
 offset_block_data)

 call mpi_file_get_position(file_handle, offset_tree_info, ierrmpi)

 call write_forest(file_handle)

 ! Collect information about the spatial index (ig^D) and refinement level
 ! of leaves
 do morton_no = morton_start(0), morton_stop(npe-1)
 igrid = sfc(1, morton_no)
 ipe = sfc(2, morton_no)
 pnode => igrid_to_node(igrid, ipe)%node

 block_ig(:, morton_no) = [ pnode%ig^d ]
 block_lvl(morton_no) = pnode%level
 block_offset(morton_no) = 0 ! Will be determined later
 end do

 call mpi_file_write(file_handle, block_lvl, size(block_lvl), &
 mpi_integer, istatus, ierrmpi)

 call mpi_file_write(file_handle, block_ig, size(block_ig), &
 mpi_integer, istatus, ierrmpi)

 ! Block offsets are currently unknown, but will be overwritten later
 call mpi_file_get_position(file_handle, offset_offsets, ierrmpi)
 call mpi_file_write(file_handle, block_offset(1:nleafs), nleafs, &
 mpi_offset, istatus, ierrmpi)

 call mpi_file_get_position(file_handle, offset_block_data, ierrmpi)

 ! Check whether data was written as expected
 if (offset_block_data - offset_tree_info /= &
 (nleafs + nparents) * size_logical + &
 nleafs * ((1+ndim) * size_int + 2 * size_int)) then
 if (mype == 0) then
 print *, "Warning: MPI_OFFSET type /= 8 bytes"
 print *, "This *could* cause problems when reading .dat files"
 end if
 end if

 block_offset(1) = offset_block_data
 iwrite = 0
 end if

 ! Every rank packs its own blocks; rank 0 writes directly, the others
 ! send to rank 0 (tagged with the Morton number)
 do morton_no=morton_start(mype), morton_stop(mype)
 igrid = sfc_to_igrid(morton_no)
 itag = morton_no

 if (nwaux>0) then
 ! extra layer around mesh only for later averaging in convert
 ! set dxlevel value for use in gradient subroutine,
 ! which might be used in getaux
 saveigrid=igrid
 ^d&dxlevel(^d)=rnode(rpdx^d_, igrid);
 block=>ps(igrid)
 call phys_get_aux(.true., ps(igrid)%w, ps(igrid)%x, ixg^ll, &
 ixm^ll^ladd1, "write_snapshot")
 endif

 ! ix_buffer carries the value count plus the per-side ghost counts
 call block_shape_io(igrid, n_ghost, ixo^l, n_values)
 ix_buffer(1) = n_values
 ix_buffer(2:) = n_ghost
 if(stagger_grid) then
 ! Cell-centered part first, then the staggered part (one extra layer)
 n_values_stagger = n_values - product([ ixomax^d ] - [ ixomin^d ] + 2)*nws
 w_buffer(1:n_values_stagger) = pack(ps(igrid)%w(ixo^s, 1:nw), .true.)
 {ixomin^d = ixmlo^d - n_ghost(^d) - 1\}
 {ixomax^d = ixmhi^d + n_ghost(ndim+^d)\}
 w_buffer(n_values_stagger+1:n_values) = pack(ps(igrid)%ws(ixo^s, 1:nws), .true.)
 else
 w_buffer(1:n_values) = pack(ps(igrid)%w(ixo^s, 1:nw), .true.)
 end if

 if (mype /= 0) then
 call mpi_send(ix_buffer, 2*ndim+1, &
 mpi_integer, 0, itag, icomm, ierrmpi)
 call mpi_send(w_buffer, n_values, &
 mpi_double_precision, 0, itag, icomm, ierrmpi)
 else
 iwrite = iwrite+1
 call mpi_file_write(file_handle, ix_buffer(2:), &
 2*ndim, mpi_integer, istatus, ierrmpi)
 call mpi_file_write(file_handle, w_buffer, &
 n_values, mpi_double_precision, istatus, ierrmpi)

 ! Set offset of next block
 block_offset(iwrite+1) = block_offset(iwrite) + &
 int(n_values, mpi_offset_kind) * size_double + &
 2 * ndim * size_int
 end if
 end do

 ! Write data communicated from other processors
 if (mype == 0) then
 do ipe = 1, npe-1
 do morton_no=morton_start(ipe), morton_stop(ipe)
 iwrite=iwrite+1
 itag=morton_no

 call mpi_recv(ix_buffer, 2*ndim+1, mpi_integer, ipe, itag, icomm,&
 igrecvstatus, ierrmpi)
 n_values = ix_buffer(1)

 call mpi_recv(w_buffer, n_values, mpi_double_precision,&
 ipe, itag, icomm, iorecvstatus, ierrmpi)

 call mpi_file_write(file_handle, ix_buffer(2:), &
 2*ndim, mpi_integer, istatus, ierrmpi)
 call mpi_file_write(file_handle, w_buffer, &
 n_values, mpi_double_precision, istatus, ierrmpi)

 ! Set offset of next block
 block_offset(iwrite+1) = block_offset(iwrite) + &
 int(n_values, mpi_offset_kind) * size_double + &
 2 * ndim * size_int
 end do
 end do

 ! Write block offsets (now we know them)
 call mpi_file_seek(file_handle, offset_offsets, mpi_seek_set, ierrmpi)
 call mpi_file_write(file_handle, block_offset(1:nleafs), nleafs, &
 mpi_offset, istatus, ierrmpi)

 ! Write header again, now with correct offsets
 call mpi_file_seek(file_handle, 0_mpi_offset_kind, mpi_seek_set, ierrmpi)
 call snapshot_write_header(file_handle, offset_tree_info, &
 offset_block_data)

 call mpi_file_close(file_handle, ierrmpi)
 end if

 call mpi_barrier(icomm, ierrmpi)

 end subroutine write_snapshot
1596 
1597  !> Routine to read in snapshots (.dat files). When it cannot recognize the
1598  !> file version, it will automatically try the 'old' reader.
1599  subroutine read_snapshot
1601  use mod_forest
1603  use mod_slice, only: slicenext
1604 
1605  integer :: ix_buffer(2*ndim+1), n_values, n_values_stagger
1606  integer :: ixO^L, ixOs^L
1607  integer :: file_handle, amode, igrid, Morton_no, iread
1608  integer :: istatus(mpi_status_size)
1609  integer :: iorecvstatus(mpi_status_size)
1610  integer :: ipe,inrecv,nrecv, file_version
1611  logical :: fexist
1612  integer(MPI_OFFSET_KIND) :: offset_tree_info
1613  integer(MPI_OFFSET_KIND) :: offset_block_data
1614  double precision, allocatable :: w_buffer(:)
1615  double precision, dimension(:^D&,:), allocatable :: w, ws
1616 
1617  if (mype==0) then
1618  inquire(file=trim(restart_from_file), exist=fexist)
1619  if (.not.fexist) call mpistop(trim(restart_from_file)//" not found!")
1620 
1621  call mpi_file_open(mpi_comm_self,restart_from_file,mpi_mode_rdonly, &
1622  mpi_info_null,file_handle,ierrmpi)
1623  call mpi_file_read(file_handle, file_version, 1, mpi_integer, &
1624  istatus, ierrmpi)
1625  end if
1626 
1627  call mpi_bcast(file_version,1,mpi_integer,0,icomm,ierrmpi)
1628 
1629  if (all(compatible_versions /= file_version)) then
1630  if (mype == 0) print *, "Unknown version, trying old snapshot reader..."
1631  call mpi_file_close(file_handle,ierrmpi)
1632  call read_snapshot_old()
1633 
1634  ! Guess snapshotnext from file name if not set
1635  if (snapshotnext == -1) &
1637  ! Set slicenext and collapsenext if not set
1638  if (slicenext == -1) slicenext = 0
1639  if (collapsenext == -1) collapsenext = 0
1640 
1641  ! Still used in convert
1643 
1644  return ! Leave this routine
1645  else if (mype == 0) then
1646  call mpi_file_seek(file_handle, 0_mpi_offset_kind, mpi_seek_set, ierrmpi)
1647  call snapshot_read_header(file_handle, offset_tree_info, &
1648  offset_block_data)
1649  end if
1650 
1651  ! Share information about restart file
1652  call mpi_bcast(nw_found,1,mpi_integer,0,icomm,ierrmpi)
1653  call mpi_bcast(nleafs,1,mpi_integer,0,icomm,ierrmpi)
1654  call mpi_bcast(nparents,1,mpi_integer,0,icomm,ierrmpi)
1655  call mpi_bcast(it,1,mpi_integer,0,icomm,ierrmpi)
1656  call mpi_bcast(global_time,1,mpi_double_precision,0,icomm,ierrmpi)
1657 
1658  call mpi_bcast(snapshotnext,1,mpi_integer,0,icomm,ierrmpi)
1659  call mpi_bcast(slicenext,1,mpi_integer,0,icomm,ierrmpi)
1660  call mpi_bcast(collapsenext,1,mpi_integer,0,icomm,ierrmpi)
1661 
1662  ! Allocate send/receive buffer
1663  n_values = count_ix(ixg^ll) * nw_found
1664  if(stagger_grid) then
1665  n_values = n_values + count_ix(ixgs^ll) * nws
1666  allocate(ws(ixgs^t,1:nws))
1667  end if
1668  allocate(w_buffer(n_values))
1669  allocate(w(ixg^t,1:nw_found))
1670 
1672 
1673  if (mype == 0) then
1674  call mpi_file_seek(file_handle, offset_tree_info, &
1675  mpi_seek_set, ierrmpi)
1676  end if
1677 
1678  call read_forest(file_handle)
1679 
1680  do morton_no=morton_start(mype),morton_stop(mype)
1681  igrid=sfc_to_igrid(morton_no)
1682  call alloc_node(igrid)
1683  end do
1684 
1685  if (mype==0) then
1686  call mpi_file_seek(file_handle, offset_block_data, mpi_seek_set, ierrmpi)
1687 
1688  iread = 0
1689  do ipe = 0, npe-1
1690  do morton_no=morton_start(ipe),morton_stop(ipe)
1691  iread=iread+1
1692  itag=morton_no
1693 
1694  call mpi_file_read(file_handle,ix_buffer(1:2*ndim), 2*ndim, &
1695  mpi_integer, istatus,ierrmpi)
1696 
1697  ! Construct ixO^L array from number of ghost cells
1698  {ixomin^d = ixmlo^d - ix_buffer(^d)\}
1699  {ixomax^d = ixmhi^d + ix_buffer(ndim+^d)\}
1700  n_values = count_ix(ixo^l) * nw_found
1701  if(stagger_grid) then
1702  {ixosmin^d = ixomin^d - 1\}
1703  {ixosmax^d = ixomax^d\}
1704  n_values_stagger = n_values
1705  n_values = n_values + count_ix(ixos^l) * nws
1706  end if
1707 
1708  call mpi_file_read(file_handle, w_buffer, n_values, &
1709  mpi_double_precision, istatus, ierrmpi)
1710 
1711  if (mype == ipe) then ! Root task
1712  igrid=sfc_to_igrid(morton_no)
1713  if(stagger_grid) then
1714  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values_stagger), &
1715  shape(w(ixo^s, 1:nw_found)))
1716  ps(igrid)%ws(ixos^s,1:nws)=reshape(w_buffer(n_values_stagger+1:n_values), &
1717  shape(ws(ixos^s, 1:nws)))
1718  else
1719  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values), &
1720  shape(w(ixo^s, 1:nw_found)))
1721  end if
1722  if (nw_found<nw) then
1723  if (associated(usr_transform_w)) then
1724  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1725  else
1726  ps(igrid)%w(ixo^s,1:nw_found)=w(ixo^s,1:nw_found)
1727  end if
1728  else if (nw_found>nw) then
1729  if (associated(usr_transform_w)) then
1730  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1731  else
1732  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1733  end if
1734  else
1735  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1736  end if
1737  else
1738  call mpi_send([ ixo^l, n_values ], 2*ndim+1, &
1739  mpi_integer, ipe, itag, icomm, ierrmpi)
1740  call mpi_send(w_buffer, n_values, &
1741  mpi_double_precision, ipe, itag, icomm, ierrmpi)
1742  end if
1743  end do
1744  end do
1745 
1746  call mpi_file_close(file_handle,ierrmpi)
1747 
1748  else ! mype > 0
1749 
1750  do morton_no=morton_start(mype),morton_stop(mype)
1751  igrid=sfc_to_igrid(morton_no)
1752  itag=morton_no
1753 
1754  call mpi_recv(ix_buffer, 2*ndim+1, mpi_integer, 0, itag, icomm,&
1755  iorecvstatus, ierrmpi)
1756  {ixomin^d = ix_buffer(^d)\}
1757  {ixomax^d = ix_buffer(ndim+^d)\}
1758  n_values = ix_buffer(2*ndim+1)
1759 
1760  call mpi_recv(w_buffer, n_values, mpi_double_precision,&
1761  0, itag, icomm, iorecvstatus, ierrmpi)
1762 
1763  if(stagger_grid) then
1764  n_values_stagger = n_values - count_ix(ixo^l) * nw_found
1765  {ixosmin^d = ixomin^d - 1\}
1766  {ixosmax^d = ixomax^d\}
1767  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values_stagger), &
1768  shape(w(ixo^s, 1:nw_found)))
1769  ps(igrid)%ws(ixos^s,1:nws)=reshape(w_buffer(n_values_stagger+1:n_values), &
1770  shape(ws(ixos^s, 1:nws)))
1771  else
1772  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values), &
1773  shape(w(ixo^s, 1:nw_found)))
1774  end if
1775  if (nw_found<nw) then
1776  if (associated(usr_transform_w)) then
1777  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1778  else
1779  ps(igrid)%w(ixo^s,1:nw_found)=w(ixo^s,1:nw_found)
1780  end if
1781  else if (nw_found>nw) then
1782  if (associated(usr_transform_w)) then
1783  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1784  else
1785  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1786  end if
1787  else
1788  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1789  end if
1790  end do
1791  end if
1792 
1793  call mpi_barrier(icomm,ierrmpi)
1794 
1795  end subroutine read_snapshot
1796 
 !> Reader for the legacy (pre-version-3) snapshot format: parses the
 !> footer metadata, checks compatibility with the current settings,
 !> restores the forest and distributes the fixed-size blocks (rank 0
 !> reads, other ranks receive over MPI).
 subroutine read_snapshot_old()

 double precision :: wio(ixg^t,1:nw)
 integer :: fh, igrid, Morton_no, iread
 integer :: levmaxini, ndimini, ndirini
 integer :: nwini, neqparini, nxini^D
 integer(kind=MPI_OFFSET_KIND) :: offset
 integer :: istatus(mpi_status_size)
 integer, allocatable :: iorecvstatus(:,:)
 integer :: ipe,inrecv,nrecv
 integer :: sendini(7+^nd)
 character(len=80) :: filename
 logical :: fexist
 double precision :: eqpar_dummy(100)

 if (mype==0) then
 call mpi_file_open(mpi_comm_self,trim(restart_from_file), &
 mpi_mode_rdonly,mpi_info_null,fh,ierrmpi)

 ! The old format keeps its metadata in a footer: seek to
 ! 7 integers + 1 double before the end of the file
 offset=-int(7*size_int+size_double,kind=mpi_offset_kind)
 call mpi_file_seek(fh,offset,mpi_seek_end,ierrmpi)

 call mpi_file_read(fh,nleafs,1,mpi_integer,istatus,ierrmpi)
 ! NOTE(review): one line is elided here in this listing (between the
 ! nleafs and levmaxini reads) -- confirm against the repository source.
 call mpi_file_read(fh,levmaxini,1,mpi_integer,istatus,ierrmpi)
 call mpi_file_read(fh,ndimini,1,mpi_integer,istatus,ierrmpi)
 call mpi_file_read(fh,ndirini,1,mpi_integer,istatus,ierrmpi)
 call mpi_file_read(fh,nwini,1,mpi_integer,istatus,ierrmpi)
 call mpi_file_read(fh,neqparini,1,mpi_integer,istatus,ierrmpi)
 call mpi_file_read(fh,it,1,mpi_integer,istatus,ierrmpi)
 call mpi_file_read(fh,global_time,1,mpi_double_precision,istatus,ierrmpi)

 ! check if settings are suitable for restart
 if (levmaxini>refine_max_level) then
 write(*,*) "number of levels in restart file = ",levmaxini
 write(*,*) "refine_max_level = ",refine_max_level
 call mpistop("refine_max_level < number of levels in restart file")
 end if
 if (ndimini/=ndim) then
 write(*,*) "ndim in restart file = ",ndimini
 write(*,*) "ndim = ",ndim
 call mpistop("reset ndim to ndim in restart file")
 end if
 if (ndirini/=ndir) then
 write(*,*) "ndir in restart file = ",ndirini
 write(*,*) "ndir = ",ndir
 call mpistop("reset ndir to ndir in restart file")
 end if
 if (nw/=nwini) then
 write(*,*) "nw=",nw," and nw in restart file=",nwini
 call mpistop("currently, changing nw at restart is not allowed")
 end if

 ! Seek further back to the per-dimension resolution and the (unused)
 ! equation parameters that precede the footer
 offset=offset-int(ndimini*size_int+neqparini*size_double,kind=mpi_offset_kind)
 call mpi_file_seek(fh,offset,mpi_seek_end,ierrmpi)

 {call mpi_file_read(fh,nxini^d,1,mpi_integer,istatus,ierrmpi)\}
 if (ixghi^d/=nxini^d+2*nghostcells|.or.) then
 write(*,*) "Error: reset resolution to ",nxini^d+2*nghostcells
 call mpistop("change with setamrvac")
 end if

 ! Equation parameters are read but discarded
 call mpi_file_read(fh,eqpar_dummy,neqparini, &
 mpi_double_precision,istatus,ierrmpi)
 end if

 ! broadcast the global parameters first
 if (npe>1) then
 if (mype==0) then
 sendini=(/nleafs,levmaxini,ndimini,ndirini,nwini,neqparini,it ,^d&nxini^d /)
 end if
 call mpi_bcast(sendini,7+^nd,mpi_integer,0,icomm,ierrmpi)
 nleafs=sendini(1);levmaxini=sendini(2);ndimini=sendini(3);
 ndirini=sendini(4);nwini=sendini(5);
 neqparini=sendini(6);it=sendini(7);
 nxini^d=sendini(7+^d);
 call mpi_bcast(global_time,1,mpi_double_precision,0,icomm,ierrmpi)
 end if

 ! The forest data starts right after the nleafs fixed-size blocks
 if (mype == 0) then
 offset = int(size_block_io,kind=mpi_offset_kind) * &
 int(nleafs,kind=mpi_offset_kind)
 call mpi_file_seek(fh,offset,mpi_seek_set,ierrmpi)
 end if

 call read_forest(fh)

 ! Allocate the grid blocks owned by this rank
 do morton_no=morton_start(mype),morton_stop(mype)
 igrid=sfc_to_igrid(morton_no)
 call alloc_node(igrid)
 end do

 if (mype==0)then
 iread=0

 ! Rank 0 reads its own blocks directly at their fixed offsets
 do morton_no=morton_start(0),morton_stop(0)
 igrid=sfc_to_igrid(morton_no)
 iread=iread+1
 offset=int(size_block_io,kind=mpi_offset_kind) &
 *int(morton_no-1,kind=mpi_offset_kind)
 call mpi_file_read_at(fh,offset,ps(igrid)%w,1,type_block_io, &
 istatus,ierrmpi)
 end do
 if (npe>1) then
 ! ... then reads and forwards every other rank's blocks
 do ipe=1,npe-1
 do morton_no=morton_start(ipe),morton_stop(ipe)
 iread=iread+1
 itag=morton_no
 offset=int(size_block_io,kind=mpi_offset_kind)&
 *int(morton_no-1,kind=mpi_offset_kind)
 call mpi_file_read_at(fh,offset,wio,1,type_block_io,&
 istatus,ierrmpi)
 call mpi_send(wio,1,type_block_io,ipe,itag,icomm,ierrmpi)
 end do
 end do
 end if
 call mpi_file_close(fh,ierrmpi)
 else
 ! Non-root ranks receive their blocks from rank 0
 nrecv=(morton_stop(mype)-morton_start(mype)+1)
 allocate(iorecvstatus(mpi_status_size,nrecv))
 inrecv=0
 do morton_no=morton_start(mype),morton_stop(mype)
 igrid=sfc_to_igrid(morton_no)
 itag=morton_no
 inrecv=inrecv+1
 call mpi_recv(ps(igrid)%w,1,type_block_io,0,itag,icomm,&
 iorecvstatus(:,inrecv),ierrmpi)
 end do
 deallocate(iorecvstatus)
 end if

 call mpi_barrier(icomm,ierrmpi)

 end subroutine read_snapshot_old
1934 
  !> Write volume-averaged values and other information to the log file
  subroutine printlog_default
    use mod_timing

    logical :: fileopen
    integer :: i, iw, level
    ! Volume-weighted mean of each w variable, and the total mesh volume
    double precision :: wmean(1:nw), total_volume
    ! Fraction of the domain covered by each refinement level
    double precision :: volume_coverage(refine_max_level)
    ! Mesh extent per dimension, cells per block, total cells, iterations done
    integer :: nx^D, nc, ncells, dit
    double precision :: dtTimeLast, now, cellupdatesPerSecond
    double precision :: activeBlocksPerCore, wctPerCodeTime, timeToFinish
    character(len=40) :: fmt_string
    character(len=80) :: filename
    character(len=2048) :: line
    ! .true. once rank 0 has opened the log file; persists between calls
    logical, save :: opened = .false.
    integer :: amode, istatus(mpi_status_size)
    integer, parameter :: my_unit = 20

    ! Compute the volume-average of w**1 = w
    call get_volume_average(1, wmean, total_volume)

    ! Compute the volume coverage
    call get_volume_coverage(volume_coverage)

    ! Only the master rank opens and writes the log file
    if (mype == 0) then

       ! To compute cell updates per second, we do the following:
       nx^d=ixmhi^d-ixmlo^d+1;
       nc={nx^d*}
       ncells = nc * nleafs_active

       ! assumes the number of active leafs hasn't changed since last compute.
       now = mpi_wtime()
       dit = it - ittimelast
       dttimelast = now - timelast
       ittimelast = it
       timelast = now
       cellupdatespersecond = dble(ncells) * dble(nstep) * &
            dble(dit) / (dttimelast * dble(npe))

       ! blocks per core:
       activeblockspercore = dble(nleafs_active) / dble(npe)

       ! Wall clock time per code time unit in seconds:
       ! (epsilon guards against division by zero on the very first call)
       wctpercodetime = dttimelast / max(dit * dt, epsilon(1.0d0))

       ! Wall clock time to finish in hours:
       timetofinish = (time_max - global_time) * wctpercodetime / 3600.0d0

       ! On first entry, open the file and generate the header
       if (.not. opened) then

          filename = trim(base_filename) // ".log"

          ! Delete the log when not doing a restart run
          if (restart_from_file == undefined) then
             open(unit=my_unit,file=trim(filename),status='replace')
             close(my_unit, status='delete')
          end if

          ! Open in append mode through MPI-IO, on this rank's own communicator
          amode = ior(mpi_mode_create,mpi_mode_wronly)
          amode = ior(amode,mpi_mode_append)

          call mpi_file_open(mpi_comm_self, filename, amode, &
               mpi_info_null, log_fh, ierrmpi)

          opened = .true.

          ! Start of file header
          line = "it global_time dt"
          ! One column label per conserved variable
          do level=1,nw
             i = len_trim(line) + 2
             write(line(i:),"(a,a)") trim(cons_wnames(level)), " "
          end do

          ! Volume coverage per level
          do level = 1, refine_max_level
             i = len_trim(line) + 2
             write(line(i:), "(a,i0)") "c", level
          end do

          ! Cell counts per level
          do level=1,refine_max_level
             i = len_trim(line) + 2
             write(line(i:), "(a,i0)") "n", level
          end do

          ! Rest of file header
          line = trim(line) // " | Xload Xmemory 'Cell_Updates /second/core'"
          line = trim(line) // " 'Active_Blocks/Core' 'Wct Per Code Time [s]'"
          line = trim(line) // " 'TimeToFinish [hrs]'"

          ! Only write header if not restarting
          if (restart_from_file == undefined) then
             call mpi_file_write(log_fh, trim(line) // new_line('a'), &
                  len_trim(line)+1, mpi_character, istatus, ierrmpi)
          end if
       end if

       ! Construct the line to be added to the log

       ! it, time, dt
       fmt_string = '(' // fmt_i // ',2' // fmt_r // ')'
       write(line, fmt_string) it, global_time, dt
       i = len_trim(line) + 2

       ! Volume-averaged means of all variables
       write(fmt_string, '(a,i0,a)') '(', nw, fmt_r // ')'
       write(line(i:), fmt_string) wmean(1:nw)
       i = len_trim(line) + 2

       ! Coverage fraction per refinement level
       write(fmt_string, '(a,i0,a)') '(', refine_max_level, fmt_r // ')'
       write(line(i:), fmt_string) volume_coverage(1:refine_max_level)
       i = len_trim(line) + 2

       ! Leaf-block count per refinement level
       write(fmt_string, '(a,i0,a)') '(', refine_max_level, fmt_i // ')'
       write(line(i:), fmt_string) nleafs_level(1:refine_max_level)
       i = len_trim(line) + 2

       ! Load/memory imbalance and timing diagnostics
       fmt_string = '(a,6' // fmt_r2 // ')'
       write(line(i:), fmt_string) '| ', xload, xmemory, cellupdatespersecond, &
            activeblockspercore, wctpercodetime, timetofinish

       call mpi_file_write(log_fh, trim(line) // new_line('a') , &
            len_trim(line)+1, mpi_character, istatus, ierrmpi)
    end if

  end subroutine printlog_default
2064 
2065  !> Print a log that can be used to check whether the code still produces the
2066  !> same output (regression test)
2067  subroutine printlog_regression_test()
2069 
2070  integer, parameter :: n_modes = 2
2071  integer, parameter :: my_unit = 123
2072  character(len=40) :: fmt_string
2073  logical, save :: file_open = .false.
2074  integer :: power
2075  double precision :: modes(nw, n_modes), volume
2076 
2077  do power = 1, n_modes
2078  call get_volume_average(power, modes(:, power), volume)
2079  end do
2080 
2081  if (mype == 0) then
2082  if (.not. file_open) then
2083  open(my_unit, file = trim(base_filename) // ".log")
2084  file_open = .true.
2085 
2086  write(my_unit, *) "# time mean(w) mean(w**2)"
2087  end if
2088 
2089  write(fmt_string, "(a,i0,a)") "(", nw * n_modes + 1, fmt_r // ")"
2090  write(my_unit, fmt_string) global_time, modes
2091  end if
2092  end subroutine printlog_regression_test
2093 
  !> Compute mean(w**power) over the leaves of the grid. The first mode
  !> (power=1) corresponds to the mean, the second to the mean squared values
  !> and so on.
  subroutine get_volume_average(power, mode, volume)

    integer, intent(in)           :: power    !< Which mode to compute
    double precision, intent(out) :: mode(nw) !< The computed mode
    double precision, intent(out) :: volume   !< The total grid volume
    integer :: iigrid, igrid, iw
    ! Local accumulator: wsum(1:nw) holds sum(dV * w**power) per variable,
    ! wsum(nw+1) holds the local volume, so one allreduce covers both
    double precision :: wsum(nw+1)
    double precision :: dvolume(ixg^t)
    double precision :: dsum_recv(1:nw+1)

    wsum(:) = 0

    ! Loop over all the grids (leaf blocks owned by this MPI rank)
    do iigrid = 1, igridstail
       igrid = igrids(iigrid)

       ! Determine the volume of the grid cells: on slab (uniform Cartesian)
       ! grids this is the product of the cell widths; otherwise use the
       ! precomputed per-cell volumes
       if (slab) then
          dvolume(ixm^t) = {rnode(rpdx^d_,igrid)|*}
       else
          dvolume(ixm^t) = ps(igrid)%dvolume(ixm^t)
       end if

       ! Store total volume in last element
       wsum(nw+1) = wsum(nw+1) + sum(dvolume(ixm^t))

       ! Compute the modes of the cell-centered variables, weighted by volume
       do iw = 1, nw
          wsum(iw) = wsum(iw) + &
               sum(dvolume(ixm^t)*ps(igrid)%w(ixm^t,iw)**power)
       end do
    end do

    ! Make the information available on all tasks
    call mpi_allreduce(wsum, dsum_recv, nw+1, mpi_double_precision, &
         mpi_sum, icomm, ierrmpi)

    ! Set the volume and the average
    volume = dsum_recv(nw+1)
    mode = dsum_recv(1:nw) / volume

  end subroutine get_volume_average
2140 
2141  !> Compute how much of the domain is covered by each grid level. This routine
2142  !> does not take a non-Cartesian geometry into account.
2143  subroutine get_volume_coverage(vol_cov)
2145 
2146  double precision, intent(out) :: vol_cov(1:refine_max_level)
2147  double precision :: dsum_recv(1:refine_max_level)
2148  integer :: iigrid, igrid, iw, level
2149 
2150  ! First determine the total 'flat' volume in each level
2151  vol_cov(1:refine_max_level)=zero
2152 
2153  do iigrid = 1, igridstail
2154  igrid = igrids(iigrid);
2155  level = node(plevel_,igrid)
2156  vol_cov(level) = vol_cov(level)+ &
2157  {(rnode(rpxmax^d_,igrid)-rnode(rpxmin^d_,igrid))|*}
2158  end do
2159 
2160  ! Make the information available on all tasks
2161  call mpi_allreduce(vol_cov, dsum_recv, refine_max_level, mpi_double_precision, &
2162  mpi_sum, icomm, ierrmpi)
2163 
2164  ! Normalize
2165  vol_cov = dsum_recv / sum(dsum_recv)
2166  end subroutine get_volume_coverage
2167 
  !> Compute the volume average of func(w) over the leaves of the grid.
  subroutine get_volume_average_func(func, f_avg, volume)

    interface
       !> User-supplied pure function of the state vector of a single cell
       pure function func(w_vec, w_size) result(val)
         integer, intent(in) :: w_size
         double precision, intent(in) :: w_vec(w_size)
         double precision :: val
       end function func
    end interface
    double precision, intent(out) :: f_avg  !< The volume average of func
    double precision, intent(out) :: volume !< The total grid volume
    integer :: iigrid, igrid, i^D
    ! wsum(1) accumulates sum(dV * func(w)); wsum(2) accumulates the volume,
    ! so a single allreduce covers both
    double precision :: wsum(2)
    double precision :: dvolume(ixg^t)
    double precision :: dsum_recv(2)

    wsum(:) = 0

    ! Loop over all the grids (leaf blocks owned by this MPI rank)
    do iigrid = 1, igridstail
       igrid = igrids(iigrid)

       ! Determine the volume of the grid cells: on slab (uniform Cartesian)
       ! grids this is the product of the cell widths; otherwise use the
       ! precomputed per-cell volumes
       if (slab) then
          dvolume(ixm^t) = {rnode(rpdx^d_,igrid)|*}
       else
          dvolume(ixm^t) = ps(igrid)%dvolume(ixm^t)
       end if

       ! Store total volume in last element
       wsum(2) = wsum(2) + sum(dvolume(ixm^t))

       ! Accumulate func evaluated cell by cell, weighted by the cell volume
       {do i^d = ixmlo^d, ixmhi^d\}
       wsum(1) = wsum(1) + dvolume(i^d) * &
            func(ps(igrid)%w(i^d, :), nw)
       {end do\}
    end do

    ! Make the information available on all tasks
    call mpi_allreduce(wsum, dsum_recv, 2, mpi_double_precision, &
         mpi_sum, icomm, ierrmpi)

    ! Set the volume and the average
    volume = dsum_recv(2)
    f_avg = dsum_recv(1) / volume

  end subroutine get_volume_average_func
2218 
2219  !> Compute global maxima of iw variables over the leaves of the grid.
2220  subroutine get_global_maxima(wmax)
2222 
2223  double precision, intent(out) :: wmax(nw) !< The global maxima
2224 
2225  integer :: iigrid, igrid, iw
2226  double precision :: wmax_mype(nw),wmax_recv(nw)
2227 
2228  wmax_mype(1:nw) = -bigdouble
2229 
2230  ! Loop over all the grids
2231  do iigrid = 1, igridstail
2232  igrid = igrids(iigrid)
2233  do iw = 1, nw
2234  wmax_mype(iw)=max(wmax_mype(iw),maxval(ps(igrid)%w(ixm^t,iw)))
2235  end do
2236  end do
2237 
2238  ! Make the information available on all tasks
2239  call mpi_allreduce(wmax_mype, wmax_recv, nw, mpi_double_precision, &
2240  mpi_max, icomm, ierrmpi)
2241 
2242  wmax(1:nw)=wmax_recv(1:nw)
2243 
2244  end subroutine get_global_maxima
2245 
  !> Compute global minima of iw variables over the leaves of the grid.
  subroutine get_global_minima(wmin)

    double precision, intent(out) :: wmin(nw) !< The global minima

    integer :: iigrid, igrid, iw
    ! Per-rank minima and the result of the global reduction
    double precision :: wmin_mype(nw),wmin_recv(nw)

    ! Start from the highest possible value so any cell can lower it
    wmin_mype(1:nw) = bigdouble

    ! Loop over all the grids (leaf blocks owned by this MPI rank)
    do iigrid = 1, igridstail
       igrid = igrids(iigrid)
       do iw = 1, nw
          wmin_mype(iw)=min(wmin_mype(iw),minval(ps(igrid)%w(ixm^t,iw)))
       end do
    end do

    ! Make the information available on all tasks
    call mpi_allreduce(wmin_mype, wmin_recv, nw, mpi_double_precision, &
         mpi_min, icomm, ierrmpi)

    wmin(1:nw)=wmin_recv(1:nw)

  end subroutine get_global_minima
2272 
2273 
2274 
2275 end module mod_input_output
This module contains definitions of global parameters and variables and some generic functions/subrou...
character(len=std_len) prolongation_method
character(len=std_len) time_integrator
Which time integrator to use.
Module for reading input and writing output.
double precision, dimension(:), allocatable refine_threshold
Error tolerance for refinement decision.
character(len=std_len), dimension(:), allocatable typepred1
The spatial dicretization for the predictor step when using a two step method.
character(len= *), parameter fmt_r
logical, dimension(:), allocatable w_write
logical small_values_use_primitive
Use primitive variables when correcting small values.
integer domain_nx
number of cells for each dimension in level-one mesh
double precision time_max
End time for the simulation.
double precision, dimension(:), allocatable w_refine_weight
Weights of variables used to calculate error for mesh refinement.
character(len=std_len) typetvd
Which type of TVD method to use.
logical saveprim
If true, convert from conservative to primitive variables in output.
double precision, dimension(ndim) qstretch_baselevel
stretch factor between cells at AMR level 1, per dimension
integer max_blocks
The maximum number of grid blocks in a processor.
The data structure that contains information about a tree node/grid block.
Definition: mod_forest.t:11
type(tree_node_ptr), dimension(:,:), allocatable, save igrid_to_node
Array to go from an [igrid, ipe] index to a node pointer.
Definition: mod_forest.t:32
character(len=name_len) physics_type
String describing the physics type of the simulation.
Definition: mod_physics.t:14
integer, parameter stretch_uni
Unidirectional stretching from a side.
logical, dimension(ndim) collapse
If collapse(DIM) is true, generate output integrated over DIM.
double precision, dimension(:,:), allocatable dxmid
integer, parameter filecollapse_
Constant indicating collapsed output.
subroutine get_global_maxima(wmax)
Compute global maxima of iw variables over the leaves of the grid.
integer, dimension(:), allocatable type_gradient_limiter
Type of slope limiter used for computing gradients or divergences, when typegrad or typediv are set t...
double precision xprob
minimum and maximum domain boundaries for each dimension
double precision, dimension(:), allocatable dg
extent of grid blocks in domain per dimension, in array over levels
double precision, dimension(ndim, 2) writespshift
double precision time_convert_factor
Conversion factor for time unit.
logical need_global_cmax
need global maximal wave speed
character(len=std_len) convert_type
Which format to use when converting.
integer collapselevel
The level at which to produce line-integrated / collapsed output.
integer, parameter plevel_
subroutine, public kracken(verb, string)
Definition: mod_kracken.t:198
integer ndir
Number of spatial dimensions (components) for vector variables.
character(len=std_len) collapse_type
Writes D-1 slice, can do so in various formats, depending on slice_type.
Definition: mod_slice.t:2
character(len=std_len) typedimsplit
integer nstep
How many sub-steps the time integrator takes.
integer, dimension(:), allocatable type_limiter
Type of slope limiter used for reconstructing variables on cell edges.
double precision, dimension(:), allocatable amr_wavefilter
refinement: lohner estimate wavefilter setting
logical resume_previous_run
If true, restart a previous run from the latest snapshot.
subroutine read_arguments()
Read the command line arguments passed to amrvac.
double precision, dimension(:), allocatable entropycoef
double precision wall_time_max
Ending wall time (in hours) for the simulation.
integer it_init
initial iteration count
integer it_max
Stop the simulation after this many time steps have been taken.
double precision dtmin
Stop the simulation when the time step becomes smaller than this value.
character(len=std_len) base_filename
Base file name for simulation output, which will be followed by a number.
Module with basic grid data structures.
Definition: mod_forest.t:2
character(len= *), parameter undefined
integer, parameter nsavehi
Maximum number of saves that can be defined by tsave or itsave.
character(len=std_len) typeboundspeed
Which type of TVDLF method to use.
character(len=std_len) typeprolonglimit
Limiter used for prolongation to refined grids and ghost cells.
character(len=std_len) typeaxial
subroutine snapshot_read_header(fh, offset_tree, offset_block)
subroutine, public retrev(name, val, len, ier)
Definition: mod_kracken.t:56
subroutine block_shape_io(igrid, n_ghost, ixOL, n_values)
Determine the shape of a block for output (whether to include ghost cells, and on which sides) ...
Collapses D-dimensional output to D-1 view by line-of-sight integration.
Definition: mod_collapse.t:4
double precision timelast
Definition: mod_timing.t:16
integer npe
The number of MPI tasks.
logical, dimension(:), allocatable writelevel
double precision courantpar
The Courant (CFL) number used for the simulation.
integer, dimension(:,:), allocatable, save sfc
Array to go from a Morton number to an igrid and processor index. Sfc(1:3, MN) contains [igrid...
Definition: mod_forest.t:43
character(len=std_len), dimension(:), allocatable flux_scheme
Which spatial discretization to use (per grid level)
integer, parameter fileanalysis_
Constant indicating analysis output (see Writing a custom analysis subroutine)
integer, dimension(3, 3, 3) lvc
Levi-Civita tensor.
double precision small_density
logical, dimension(ndim) stretched_dim
True if a dimension is stretched.
logical, dimension(ndim) aperiodb
True for dimensions with aperiodic boundaries.
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
integer, public small_values_daverage
Average over this many cells in each direction.
subroutine read_snapshot
Routine to read in snapshots (.dat files). When it cannot recognize the file version, it will automatically try the 'old' reader.
integer, dimension(:), allocatable ng
number of grid blocks in domain per dimension, in array over levels
integer, dimension(ndim) stretch_type
What kind of stretching is used per dimension.
integer, dimension(nfile) isavet
integer nw_found
number of w found in dat files
Module with slope/flux limiters.
Definition: mod_limiter.t:2
subroutine get_volume_average(power, mode, volume)
Compute mean(w**power) over the leaves of the grid. The first mode (power=1) corresponds to the mean...
integer, dimension(2), parameter compatible_versions
List of compatible versions.
subroutine write_forest(file_handle)
Definition: forest.t:350
logical pass_wall_time
If true, wall time is up, modify snapshotnext for later overwrite.
integer nghostcells
Number of ghost cells surrounding a grid.
integer nslices
Number of slices to output.
Definition: mod_slice.t:16
double precision dtdiffpar
For resistive MHD, the time step is also limited by the diffusion time: .
procedure(p_no_args), pointer usr_print_log
Module with all the methods that users can customize in AMRVAC.
integer, dimension(:), allocatable, save sfc_to_igrid
Go from a Morton number to an igrid index (for a single processor)
Definition: mod_forest.t:53
logical, dimension(:), allocatable loglimit
integer ixghi
Upper index of grid block arrays.
integer type_block_io
MPI type for IO: block excluding ghost cells.
integer function limiter_type(namelim)
Definition: mod_limiter.t:28
character(len=std_len) typeghostfill
double precision xload
Stores the memory and load imbalance, used in printlog.
integer ditregrid
Reconstruct the AMR grid once every ditregrid iteration(s)
double precision cada3_radius
radius of the asymptotic region [0.001, 10], larger means more accurate in smooth region but more ove...
Definition: mod_limiter.t:11
logical angmomfix
Enable to strictly conserve the angular momentum (works both in cylindrical and spherical coordinates...
logical, dimension(:), allocatable small_values_fix_iw
Whether to apply small value fixes to certain variables.
double precision small_pressure
character(len=std_len) typesourcesplit
How to apply dimensional splitting to the source terms, see disretization.md.
integer, parameter fileslice_
Constant indicating slice output.
subroutine printlog_default
Write volume-averaged values and other information to the log file.
logical stagger_grid
True for using stagger grid.
subroutine write_slice
Definition: mod_slice.t:27
logical save_physical_boundary
True for save physical boundary cells in dat files.
logical nocartesian
IO switches for conversion.
integer ierrmpi
A global MPI error return code.
integer ixm
the mesh range (within a block with ghost cells)
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
character(len=std_len), dimension(:, :), allocatable typeboundary
Array indicating the type of boundary condition per variable and per physical boundary.
subroutine get_global_minima(wmin)
Compute global minima of iw variables over the leaves of the grid.
logical solve_internal_e
Solve polytropic process instead of solving total energy.
character(len=std_len), dimension(:), allocatable typeentropy
Which type of entropy fix to use with Riemann-type solvers.
double precision dtpar
If dtpar is positive, it sets the timestep dt, otherwise courantpar is used to limit the time step ba...
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
double precision, dimension(:), allocatable w_convert_factor
Conversion factors the primitive variables.
character(len=std_len) typefilelog
Which type of log to write: 'normal', 'special', 'regression_test'.
integer, dimension(:), allocatable, save morton_start
First Morton number per processor.
Definition: mod_forest.t:62
procedure(p_no_args), pointer usr_write_analysis
subroutine alloc_node(igrid)
integer, parameter unitterm
Unit for standard output.
integer, parameter filelog_
Constant indicating log output.
integer, parameter unitpar
integer nbufferx
Number of cells as buffer zone.
integer, dimension(3, 3) kr
Kronecker delta tensor.
double precision, dimension(:,:), allocatable dx
integer nleafs_active
Definition: mod_forest.t:78
character(len=std_len) typegrad
logical check_small_values
check and optionally fix unphysical small values (density, gas pressure)
double precision time_between_print
to monitor timeintegration loop at given wall-clock time intervals
integer, dimension(:), allocatable, parameter d
integer, dimension(:), allocatable, save morton_stop
Last Morton number per processor.
Definition: mod_forest.t:65
logical reset_it
If true, reset iteration count to 0.
subroutine write_collapsed
Definition: mod_collapse.t:11
character(len=40), dimension(nfile), parameter output_names
Names of the output methods.
integer mype
The rank of the current MPI task.
subroutine read_forest(file_handle)
Definition: forest.t:389
double precision global_time
The global simulation time.
integer, dimension(:,:), allocatable nstretchedblocks
(even) number of (symmetrically) stretched blocks per level and dimension
integer block_nx
number of cells for each dimension in grid block excluding ghostcells
subroutine saveamrfile(ifile)
subroutine read_par_files()
Read in the user-supplied parameter-file.
integer, parameter stretch_symm
Symmetric stretching around the center.
logical reset_time
If true, reset iteration count and global_time to original values, and start writing snapshots at ind...
integer, parameter unitsnapshot
logical function, public lget(keyword)
Definition: mod_kracken.t:161
character(len=std_len), dimension(:), allocatable par_files
Which par files are used as input.
double precision, dimension(nfile) dtsave
Repeatedly save output of type N when dtsave(N) simulation time has passed.
character(len=std_len) slice_type
choose data type of slice: vtu, vtuCC, dat, or csv
Definition: mod_slice.t:22
Module for handling problematic values in simulations, such as negative pressures.
logical use_particles
Use particles module or not.
integer, dimension(:), allocatable, save nleafs_level
How many leaves are present per refinement level.
Definition: mod_forest.t:81
double precision, dimension(:,:), allocatable qstretch
Stretching factors and first cell size for each AMR level and dimension.
integer, parameter uniterr
Unit for error messages.
subroutine get_fields_string(line, delims, n_max, fields, n_found, fully_read)
Routine to find entries in a string.
Module containing all the particle routines.
Definition: mod_particles.t:2
subroutine mpistop(message)
Exit MPI-AMRVAC with an error message.
Definition: comm_lib.t:160
double precision, dimension(:,:), allocatable dxfirst_1mq
logical firstprocess
If true, call initonegrid_usr upon restarting.
double precision tfixgrid
Fix the AMR grid after this time.
character(len= *), parameter fmt_i
character(len= *), parameter fmt_r2
double precision length_convert_factor
integer iprob
problem switch allowing different setups in same usr_mod.t
double precision, dimension(ndim) dxlevel
double precision, dimension(nsavehi, nfile) tsave
Save output of type N on times tsave(:, N)
subroutine get_volume_average_func(func, f_avg, volume)
Compute the volume average of func(w) over the leaves of the grid.
integer, dimension(nfile) isaveit
integer snapshotini
Resume from the snapshot with this index.
integer function, public iget(keyword)
Definition: mod_kracken.t:141
integer, dimension(nslicemax) slicedir
The slice direction for each slice.
Definition: mod_slice.t:19
integer, parameter ndim
Number of spatial dimensions for grid variables.
subroutine read_snapshot_old()
integer ittimelast
Definition: mod_timing.t:15
integer phys_wider_stencil
To use wider stencils in flux calculations. A value of 1 will extend it by one cell in both direction...
Definition: mod_physics.t:18
logical stretch_uncentered
If true, adjust mod_geometry routines to account for grid stretching (but the flux computation will n...
subroutine generate_plotfile
Definition: convert.t:3
logical slab
Cartesian geometry or not.
double precision, dimension(nslicemax) slicecoord
Slice coordinates, see Slice output.
Definition: mod_slice.t:10
integer function get_snapshot_index(filename)
integer slowsteps
If > 1, then in the first slowsteps-1 time steps dt is reduced by a factor .
integer, parameter stretch_none
No stretching.
procedure(transform_w), pointer usr_transform_w
integer nwauxio
Number of auxiliary variables that are only included in output.
logical, dimension(:), allocatable logflag
integer icomm
The MPI communicator.
double precision small_temperature
error handling
character(len=std_len) typelimited
What should be used as a basis for the limiting in TVD methods. Options are 'original', 'previous' and 'predictor'.
character(len=std_len) typediv
integer, parameter nlevelshi
The maximum number of levels in the grid refinement.
integer, save nparents
Number of parent blocks.
Definition: mod_forest.t:73
integer refine_max_level
Maximal number of AMR levels.
This module defines the procedures of a physics module. It contains function pointers for the various...
Definition: mod_physics.t:4
logical, dimension(ndim) periodb
True for dimensions with periodic boundaries.
subroutine snapshot_write_header(fh, offset_tree, offset_block)
Write header for a snapshot.
integer, parameter nfile
Number of output methods.
integer log_fh
MPI file handle for logfile.
subroutine write_snapshot
logical reset_grid
If true, rebuild the AMR grid upon restarting.
logical source_split_usr
Use split or unsplit way to add user&#39;s source terms, default: unsplit.
integer, dimension(:,:), allocatable node
logical slab_stretched
Stretched Cartesian geometry or not.
procedure(sub_get_aux), pointer phys_get_aux
Definition: mod_physics.t:47
integer itfixgrid
Fix the AMR grid after this many time steps.
character(len=std_len) typecourant
How to compute the CFL-limited time step.
double precision, dimension(:,:), allocatable dxfirst
logical function snapshot_exists(ix)
integer, save nleafs
Number of leaf block.
Definition: mod_forest.t:76
logical phys_energy
Solve energy equation or not.
subroutine get_volume_coverage(vol_cov)
Compute how much of the domain is covered by each grid level. This routine does not take a non-Cartes...
integer it
Number of time steps taken.
integer, dimension(nfile) ditsave
Repeatedly save output of type N when ditsave(N) time steps have passed.
subroutine printlog_regression_test()
Print a log that can be used to check whether the code still produces the same output (regression tes...
subroutine create_output_file(fh, ix, extension, suffix)
Standard method for creating a new output file.
character(len=20), public small_values_method
How to handle small values.
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
double precision, dimension(:), allocatable derefine_ratio
integer, dimension(ndim) nstretchedblocks_baselevel
(even) number of (symmetrically) stretched blocks at AMR level 1, per dimension
double precision time_init
Start time for the simulation.
character(len=std_len) typeaverage
integer ixgshi
Upper index of stagger grid block arrays.
Module for reading command line arguments.
Definition: mod_kracken.t:7
integer, parameter fileout_
Constant indicating regular output.
integer, dimension(nsavehi, nfile) itsave
Save output of type N on iterations itsave(:, N)
pure integer function count_ix(ixOL)
Compute number of elements in index range.
integer slicenext
the file number of slices
Definition: mod_slice.t:13
logical autoconvert
If true, already convert to output format during the run.
character(len=std_len) typecurl
procedure(sub_write_info), pointer phys_write_info
Definition: mod_physics.t:51
integer, parameter version_number
Version number of the .dat file output.