MPI-AMRVAC  2.2
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_input_output.t
1 !> Module for reading input and writing output
2  module mod_input_output
3 
4  implicit none
5  public
6 
7  !> Version number of the .dat file output
8  integer, parameter :: version_number = 5
9 
10  !> List of compatible versions
11  integer, parameter :: compatible_versions(3) = [3, 4, 5]
12 
13  !> Number of w variables found in .dat files
14  integer :: nw_found
15 
16  ! Formats used in output
17  character(len=*), parameter :: fmt_r = 'es16.8' ! Default precision
18  character(len=*), parameter :: fmt_r2 = 'es10.2' ! Two digits
19  character(len=*), parameter :: fmt_i = 'i8' ! Integer format
20 
21 contains
22 
23  !> Read the command line arguments passed to amrvac
24  subroutine read_arguments()
25  use mod_kracken
26  use mod_global_parameters
27  use mod_slice, only: slicenext
28 
29  integer :: len, ier, n
30  integer, parameter :: max_files = 20 ! Maximum number of par files
31  integer :: n_par_files
32  integer :: ibegin(max_files)
33  integer :: iterm(max_files)
34  character(len=max_files*std_len) :: all_par_files
35  character(len=std_len) :: tmp_files(max_files)
36  logical :: resume
37 
38  if (mype == 0) then
39  print *, '-----------------------------------------------------------------------------'
40  print *, '-----------------------------------------------------------------------------'
41  print *, '| __ __ ____ ___ _ __ __ ______ ___ ____ |'
42  print *, '| | \/ | _ \_ _| / \ | \/ | _ \ \ / / \ / ___| |'
43  print *, '| | |\/| | |_) | |_____ / _ \ | |\/| | |_) \ \ / / _ \| | |'
44  print *, '| | | | | __/| |_____/ ___ \| | | | _ < \ V / ___ \ |___ |'
45  print *, '| |_| |_|_| |___| /_/ \_\_| |_|_| \_\ \_/_/ \_\____| |'
46  print *, '-----------------------------------------------------------------------------'
47  print *, '-----------------------------------------------------------------------------'
48  end if
49 
50  ! Specify the options and their default values
51  call kracken('cmd','-i amrvac.par -if ' // undefined // &
52  ' -slicenext -1 -collapsenext -1 -snapshotnext -1' // &
53  ' --help .false. -convert .false. -resume .false.')
54 
55  ! Get the par file(s)
56  call retrev('cmd_i', all_par_files, len, ier)
57 
58  ! Show the usage if the help flag was given, or no par file was specified
59  if (lget('cmd_-help') .or. len == 0) then
60  if (mype == 0) then
61  print *, 'Usage example:'
62  print *, 'mpirun -np 4 ./amrvac -i file.par [file2.par ...]'
63  print *, ' (later .par files override earlier ones)'
64  print *, ''
65  print *, 'Optional arguments:'
67  print *, '-convert Convert snapshot files'
68  print *, '-if file0001.dat Use this snapshot to restart from'
69  print *, ' (you can modify e.g. output names)'
70  print *, '-resume Automatically resume previous run'
71  print *, ' (useful for long runs on HPC systems)'
72  print *, '-snapshotnext N Manual index for next snapshot'
73  print *, '-slicenext N Manual index for next slice output'
74  print *, '-collapsenext N Manual index for next collapsed output'
75  print *, ''
76  end if
77  call mpi_finalize(ierrmpi)
78  stop
79  end if
80 
81  ! Split the input files, in case multiple were given
82  call get_fields_string(all_par_files, " ,'"""//char(9), max_files, &
83  tmp_files, n_par_files)
84 
85  allocate(par_files(n_par_files))
86  par_files = tmp_files(1:n_par_files)
87 
88  ! Read in the other command line arguments
89  call retrev('cmd_if', restart_from_file, len, ier)
90 
91  !> \todo Document these command line options
92  slicenext = iget('cmd_slicenext')
93  collapsenext = iget('cmd_collapsenext')
94  snapshotnext = iget('cmd_snapshotnext')
95  convert = lget('cmd_convert') ! -convert present?
96  resume_previous_run = lget('cmd_resume') ! -resume present?
97 
98  end subroutine read_arguments
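! Example (file names below are illustrative, not defaults): an invocation like
!
!   mpirun -np 4 ./amrvac -i sim.par extra.par -if output0010.dat -snapshotnext 11
!
! is parsed by read_arguments() above into par_files = ['sim.par','extra.par'],
! restart_from_file = 'output0010.dat' and snapshotnext = 11, while slicenext,
! collapsenext, convert and resume_previous_run keep the kracken defaults
! (-1, -1, .false., .false.).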
99 
100  !> Read in the user-supplied parameter-file
101  subroutine read_par_files()
104  use mod_small_values
105  use mod_limiter
106  use mod_slice
107  use mod_geometry
108 
109  logical :: fileopen, file_exists
110  integer :: i, j, k, ifile, io_state
111  integer :: iB, isave, iw, level, idim, islice
112  integer :: nx_vec(^nd), block_nx_vec(^nd)
113  integer :: my_unit, iostate
114  integer :: ilev
115  double precision :: dx_vec(^nd)
116 
117  character :: c_ndim
118  character(len=80) :: fmt_string
119  character(len=std_len) :: err_msg, restart_from_file_arg
120  character(len=std_len) :: basename_full, basename_prev, dummy_file
121  character(len=std_len), dimension(:), allocatable :: &
122  typeboundary_min^D, typeboundary_max^D
123  character(len=std_len), allocatable :: limiter(:)
124  character(len=std_len), allocatable :: gradient_limiter(:)
125  character(len=name_len) :: stretch_dim(ndim)
126 
127  double precision, dimension(nsavehi) :: tsave_log, tsave_dat, tsave_slice, &
128  tsave_collapsed, tsave_custom
129  double precision :: dtsave_log, dtsave_dat, dtsave_slice, &
130  dtsave_collapsed, dtsave_custom
131  integer :: ditsave_log, ditsave_dat, ditsave_slice, &
132  ditsave_collapsed, ditsave_custom
133  double precision :: tsavestart_log, tsavestart_dat, tsavestart_slice, &
134  tsavestart_collapsed, tsavestart_custom
135  integer :: windex, ipower
136  double precision :: sizeuniformpart^D
137 
138  namelist /filelist/ base_filename,restart_from_file, &
145 
146  namelist /savelist/ tsave,itsave,dtsave,ditsave,nslices,slicedir, &
148  tsave_log, tsave_dat, tsave_slice, tsave_collapsed, tsave_custom, &
149  dtsave_log, dtsave_dat, dtsave_slice, dtsave_collapsed, dtsave_custom, &
150  ditsave_log, ditsave_dat, ditsave_slice, ditsave_collapsed, ditsave_custom,&
151  tsavestart_log, tsavestart_dat, tsavestart_slice, tsavestart_collapsed,&
152  tsavestart_custom, tsavestart
153 
156 
157  namelist /methodlist/ time_integrator, &
161  limiter,gradient_limiter,cada3_radius,&
166  flatcd,flatsh,&
171 
174 
175  namelist /meshlist/ refine_max_level,nbufferx^d,refine_threshold,&
177  stretch_dim, stretch_uncentered, &
183  namelist /paramlist/ courantpar, dtpar, dtdiffpar, &
185 
186  namelist /euvlist/ filename_euv,image,spectrum,wavelength,direction_los,&
188 
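! For illustration only: a minimal parameter file read through the namelists
! above could look as follows. Values are placeholders, and only variables that
! are visible in the (truncated) namelist declarations are used.
!
!   &filelist
!     base_filename = 'data'
!   /
!   &savelist
!     dtsave_log = 1.0d-2
!     dtsave_dat = 1.0d-1
!   /
!   &methodlist
!     time_integrator = 'threestep'
!     limiter(1) = 'minmod'
!   /
!   &meshlist
!     refine_max_level = 3
!     refine_threshold(1) = 0.2d0
!   /
!   &paramlist
!     courantpar = 0.8d0
!   /
!
! Later files passed with -i override values set by earlier ones, since each
! file is read in turn by the loop in read_par_files() below.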
189  ! default maximum number of grid blocks in a processor
190  max_blocks=4000
191 
192  ! allocate cell size of all levels
193  allocate(dx(ndim,nlevelshi))
194  {allocate(dg^d(nlevelshi))\}
195  {allocate(ng^d(nlevelshi))\}
196 
197  ! default block size excluding ghost cells
198  {block_nx^d = 16\}
199 
200  ! default resolution of level-1 mesh (full domain)
201  {domain_nx^d = 32\}
202 
203 
204 
205  ! defaults for boundary treatments
206  typeghostfill = 'linear'
207 
208  ! default number of ghost-cell layers at each boundary of a block
209  nghostcells = 2
210 
211  ! Allocate boundary conditions arrays in new and old style
212  {
213  allocate(typeboundary_min^d(nwfluxbc))
214  allocate(typeboundary_max^d(nwfluxbc))
215  typeboundary_min^d = undefined
216  typeboundary_max^d = undefined
217  }
218 
219  allocate(typeboundary(nwflux, 2 * ndim))
220  typeboundary(:, :) = undefined
221 
222  ! do not save the physical boundary in .dat files by default
223  save_physical_boundary = .false.
224 
225  internalboundary = .false.
226 
227  ! defaults for specific options
228  typegrad = 'central'
229  typediv = 'central'
230  typecurl = 'central'
231 
232  ! defaults for smallest physical values allowed
233  small_temperature = 0.d0
234  small_pressure = 0.d0
235  small_density = 0.d0
236 
237  allocate(small_values_fix_iw(nw))
238  small_values_fix_iw(:) = .true.
239 
240  ! defaults for convert behavior
241 
242  ! store the -if value from argument in command line
243  restart_from_file_arg = restart_from_file
244  nwauxio = 0
245  nocartesian = .false.
246  saveprim = .false.
247  autoconvert = .false.
248  convert_type = 'vtuBCCmpi'
249  slice_type = 'vtuCC'
250  collapse_type = 'vti'
251  allocate(w_write(nw))
252  w_write(1:nw) = .true.
253  allocate(writelevel(nlevelshi))
254  writelevel(1:nlevelshi) = .true.
255  writespshift(1:ndim,1:2) = zero
256  level_io = -1
257  level_io_min = 1
259 
260  ! normalization of primitive variables: only for output
261  ! note that length_convert_factor is for length
262  ! this scaling is optional, and must be set consistently if used
263  allocate(w_convert_factor(nw))
264  w_convert_factor(:) = 1.0d0
265  time_convert_factor = 1.0d0
266  length_convert_factor = 1.0d0
267 
268  ! AMR related defaults
269  refine_max_level = 1
270  {nbufferx^d = 0\}
271  allocate(refine_threshold(nlevelshi))
272  refine_threshold(1:nlevelshi) = 0.1d0
273  allocate(derefine_ratio(nlevelshi))
274  derefine_ratio(1:nlevelshi) = 1.0d0/8.0d0
275  prolongation_method = 'linear'
276  coarsenprimitive = .false.
277  prolongprimitive = .false.
278  typeprolonglimit = 'default'
279  refine_criterion = 3
280  allocate(w_refine_weight(nw+1))
281  w_refine_weight = 0.d0
282  allocate(logflag(nw+1))
283  logflag = .false.
284  allocate(amr_wavefilter(nlevelshi))
285  amr_wavefilter(1:nlevelshi) = 1.0d-2
286  tfixgrid = bigdouble
287  itfixgrid = biginteger
288  ditregrid = 1
289 
290  ! Grid stretching defaults
291  stretch_uncentered = .true.
292  stretch_dim(1:ndim) = undefined
293  qstretch_baselevel(1:ndim) = bigdouble
295 
296  ! IO defaults
297  it_init = 0
298  it_max = biginteger
299  time_init = 0.d0
300  time_max = bigdouble
301  wall_time_max = bigdouble
302  dtmin = 1.0d-10
303  nslices = 0
304  collapse = .false.
305  collapselevel = 1
306  time_between_print = 30.0d0 ! Print status every 30 seconds
307 
308  do ifile=1,nfile
309  do isave=1,nsavehi
310  tsave(isave,ifile) = bigdouble ! global_time of saves into the output files
311  itsave(isave,ifile) = biginteger ! it of saves into the output files
312  end do
313  dtsave(ifile) = bigdouble ! time between saves
314  ditsave(ifile) = biginteger ! timesteps between saves
315  isavet(ifile) = 1 ! index for saves by global_time
316  isaveit(ifile) = 1 ! index for saves by it
317  tsavestart(ifile) = 0.0d0
318  end do
319 
320  tsave_log = bigdouble
321  tsave_dat = bigdouble
322  tsave_slice = bigdouble
323  tsave_collapsed = bigdouble
324  tsave_custom = bigdouble
325 
326  dtsave_log = bigdouble
327  dtsave_dat = bigdouble
328  dtsave_slice = bigdouble
329  dtsave_collapsed = bigdouble
330  dtsave_custom = bigdouble
331 
332  ditsave_log = biginteger
333  ditsave_dat = biginteger
334  ditsave_slice = biginteger
335  ditsave_collapsed = biginteger
336  ditsave_custom = biginteger
337 
338  tsavestart_log = bigdouble
339  tsavestart_dat = bigdouble
340  tsavestart_slice = bigdouble
341  tsavestart_collapsed = bigdouble
342  tsavestart_custom = bigdouble
343 
344  typefilelog = 'default'
345 
346  ! defaults for input
347  reset_time = .false.
348  reset_it = .false.
349  firstprocess = .false.
350  reset_grid = .false.
351  base_filename = 'data'
352  usr_filename = ''
353 
354 
355  ! Defaults for discretization methods
356  typeaverage = 'default'
357  tvdlfeps = one
358  nxdiffusehllc = 0
359  flathllc = .false.
360  slowsteps = -1
361  courantpar = 0.8d0
362  typecourant = 'maxsum'
363  dimsplit = .false.
364  typedimsplit = 'default'
365  if(physics_type=='mhd') then
366  cada3_radius = 0.5d0
367  else
368  cada3_radius = 0.1d0
369  end if
370  typetvd = 'roe'
371  typeboundspeed = 'cmaxmean'
372  source_split_usr= .false.
373  time_integrator = 'twostep'
374  solve_internal_e= .false.
375  angmomfix = .false.
376 
378  allocate(limiter(nlevelshi),gradient_limiter(nlevelshi))
379  do level=1,nlevelshi
380  flux_scheme(level) = 'tvdlf'
381  typepred1(level) = 'default'
382  limiter(level) = 'minmod'
383  gradient_limiter(level) = 'minmod'
384  end do
385 
386  flatcd = .false.
387  flatsh = .false.
388  typesourcesplit = 'sfs'
389  allocate(loglimit(nw))
390  loglimit(1:nw) = .false.
391 
392  allocate(typeentropy(nw))
393 
394  do iw=1,nw
395  typeentropy(iw)='nul' ! Entropy fix type
396  end do
397 
398  dtdiffpar = 0.5d0
399  dtpar = -1.d0
400 
401  ! problem setup defaults
402  iprob = 1
403 
404  ! end defaults
405 
406  ! Initialize Kronecker delta, and Levi-Civita tensor
407  do i=1,3
408  do j=1,3
409  if(i==j)then
410  kr(i,j)=1
411  else
412  kr(i,j)=0
413  endif
414  do k=1,3
415  if(i==j.or.j==k.or.k==i)then
416  lvc(i,j,k)=0
417  else if(i+1==j.or.i-2==j)then
418  lvc(i,j,k)=1
419  else
420  lvc(i,j,k)=-1
421  endif
422  enddo
423  enddo
424  enddo
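! Quick check of the tensors initialized above: kr(2,2)=1 and kr(1,3)=0, while
! the cyclic index orders give lvc(1,2,3)=lvc(2,3,1)=lvc(3,1,2)=1 (they satisfy
! i+1==j or i-2==j with all indices distinct), the anti-cyclic orders give
! lvc(2,1,3)=lvc(3,2,1)=lvc(1,3,2)=-1, and any repeated index, e.g. lvc(1,1,2),
! yields 0.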
425 
426  ! These are used to construct file and log names from multiple par files
427  basename_full = ''
428  basename_prev = ''
429 
430  do i = 1, size(par_files)
431  if (mype == 0) print *, "Reading " // trim(par_files(i))
432 
433  ! Check whether the file exists
434  inquire(file=trim(par_files(i)), exist=file_exists)
435 
436  if (.not. file_exists) then
437  write(err_msg, *) "The parameter file " // trim(par_files(i)) // &
438  " does not exist"
439  call mpistop(trim(err_msg))
440  end if
441 
442  open(unitpar, file=trim(par_files(i)), status='old')
443 
444  ! Try to read in the namelists. They can be absent or in a different
445  ! order, since we rewind before each read.
446  rewind(unitpar)
447  read(unitpar, filelist, end=101)
448 
449 101 rewind(unitpar)
450  read(unitpar, savelist, end=102)
451 
452 102 rewind(unitpar)
453  read(unitpar, stoplist, end=103)
454 
455 103 rewind(unitpar)
456  read(unitpar, methodlist, end=104)
457 
458 104 rewind(unitpar)
459  read(unitpar, boundlist, end=105)
460 
461 105 rewind(unitpar)
462  read(unitpar, meshlist, end=106)
463 
464 106 rewind(unitpar)
465  read(unitpar, paramlist, end=107)
466 
467 107 rewind(unitpar)
468  read(unitpar, euvlist, end=108)
469 
470 108 close(unitpar)
471 
472  ! Append the log and file names given in the par files
473  if (base_filename /= basename_prev) &
474  basename_full = trim(basename_full) // trim(base_filename)
475  basename_prev = base_filename
476  end do
477 
478  base_filename = basename_full
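! Example of the concatenation above (names are illustrative): if the first par
! file sets base_filename='jet_' and a second one sets base_filename='hires_',
! the run continues with base_filename='jet_hires_'; repeating the same name in
! both files leaves it unchanged.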
479 
480  ! Check whether output directory is writable
481  if(mype==0) then
482  dummy_file = trim(base_filename)//"DUMMY"
483  open(newunit=my_unit, file=trim(dummy_file), iostat=iostate)
484  if (iostate /= 0) then
485  call mpistop("Can't write to output directory (" // &
486  trim(base_filename) // ")")
487  else
488  close(my_unit, status='delete')
489  end if
490  end if
491 
492  ! restart filename from command line overwrites the one in par file
493  if(restart_from_file_arg /= undefined) &
494  restart_from_file=restart_from_file_arg
495 
496  if (resume_previous_run) then
497  ! Root process will search snapshot
498  if (mype == 0) then
499  do i = -1, 9998
500  ! Check if the next snapshot is missing
501  if (.not. snapshot_exists(i+1)) exit
502  end do
503 
504  if (i == -1) then
505  ! If initial data is missing (e.g. moved due to lack of space),
506  ! search file with highest index
507  do i = 9999, 0, -1
508  if (snapshot_exists(i)) exit
509  end do
510  end if
511  end if
512  call mpi_bcast(i, 1, mpi_integer, 0, icomm, ierrmpi)
513 
514  if (i == -1) then
515  if(mype==0) write(*,*) "No snapshots found to resume from, start a new run..."
516  else
517  !call mpistop("No snapshots found to resume from")
518  ! Set file name to restart from
519  write(restart_from_file, "(a,i4.4,a)") trim(base_filename), i, ".dat"
520  end if
521  end if
522 
523  if (restart_from_file == undefined) then
524  snapshotnext = 0
525  slicenext = 0
526  collapsenext = 0
527  if (firstprocess) &
528  call mpistop("Please restart from a snapshot when firstprocess=T")
529  if (convert) &
530  call mpistop('Change convert to .false. for a new run!')
531  end if
532 
533  if (small_pressure < 0.d0) call mpistop("small_pressure should not be negative.")
534  if (small_density < 0.d0) call mpistop("small_density should not be negative.")
535  ! Give priority to non-zero small temperature
537 
538  if(convert) autoconvert=.false.
539 
540  where (tsave_log < bigdouble) tsave(:, 1) = tsave_log
541  where (tsave_dat < bigdouble) tsave(:, 2) = tsave_dat
542  where (tsave_slice < bigdouble) tsave(:, 3) = tsave_slice
543  where (tsave_collapsed < bigdouble) tsave(:, 4) = tsave_collapsed
544  where (tsave_custom < bigdouble) tsave(:, 5) = tsave_custom
545 
546  if (dtsave_log < bigdouble) dtsave(1) = dtsave_log
547  if (dtsave_dat < bigdouble) dtsave(2) = dtsave_dat
548  if (dtsave_slice < bigdouble) dtsave(3) = dtsave_slice
549  if (dtsave_collapsed < bigdouble) dtsave(4) = dtsave_collapsed
550  if (dtsave_custom < bigdouble) dtsave(5) = dtsave_custom
551 
552  if (tsavestart_log < bigdouble) tsavestart(1) = tsavestart_log
553  if (tsavestart_dat < bigdouble) tsavestart(2) = tsavestart_dat
554  if (tsavestart_slice < bigdouble) tsavestart(3) = tsavestart_slice
555  if (tsavestart_collapsed < bigdouble) tsavestart(4) = tsavestart_collapsed
556  if (tsavestart_custom < bigdouble) tsavestart(5) = tsavestart_custom
557 
558  if (ditsave_log < bigdouble) ditsave(1) = ditsave_log
559  if (ditsave_dat < bigdouble) ditsave(2) = ditsave_dat
560  if (ditsave_slice < bigdouble) ditsave(3) = ditsave_slice
561  if (ditsave_collapsed < bigdouble) ditsave(4) = ditsave_collapsed
562  if (ditsave_custom < bigdouble) ditsave(5) = ditsave_custom
563  ! convert hours to seconds for ending wall time
564  if (wall_time_max < bigdouble) wall_time_max=wall_time_max*3600.d0
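! For instance, wall_time_max=24.d0 given in the par file (in hours) becomes
! 86400 seconds after this conversion (value illustrative).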
565 
566  if (mype == 0) then
567  write(unitterm, *) ''
568  write(unitterm, *) 'Output type | tsavestart | dtsave | ditsave | itsave(1) | tsave(1)'
569  write(fmt_string, *) '(A12," | ",E9.3E2," | ",E9.3E2," | ",I6," | "'//&
570  ',I6, " | ",E9.3E2)'
571  end if
572 
573  do ifile = 1, nfile
574  if (mype == 0) write(unitterm, fmt_string) trim(output_names(ifile)), &
575  tsavestart(ifile), dtsave(ifile), ditsave(ifile), itsave(1, ifile), tsave(1, ifile)
576  end do
577 
578  if (mype == 0) write(unitterm, *) ''
579 
580  do islice=1,nslices
581  if(slicedir(islice) > ndim) &
582  write(uniterr,*)'Warning in read_par_files: ', &
583  'Slice ', islice,' direction',slicedir(islice),'larger than ndim=',ndim
584  if(slicedir(islice) < 1) &
585  write(uniterr,*)'Warning in read_par_files: ', &
586  'Slice ', islice,' direction',slicedir(islice),'too small, should be [',1,ndim,']'
587  end do
588 
589  if(it_max==biginteger .and. time_max==bigdouble.and.mype==0) write(uniterr,*) &
590  'Warning in read_par_files: it_max or time_max not given!'
591 
592  do level=1,nlevelshi
593  if(flux_scheme(level)=='tvd'.and.time_integrator/='onestep') &
594  call mpistop(" tvd is onestep method, reset time_integrator='onestep'")
595  if(flux_scheme(level)=='tvd')then
596  if(mype==0.and.(.not.dimsplit)) write(unitterm,*) &
597  'Warning: setting dimsplit=T for tvd, as used for level=',level
598  dimsplit=.true.
599  endif
600  if(flux_scheme(level)=='hlld'.and.physics_type/='mhd') &
601  call mpistop("Cannot use hlld flux if not using MHD physics!")
602 
603  if (typepred1(level)=='default') then
604  select case (flux_scheme(level))
605  case ('cd')
606  typepred1(level)='cd'
607  case ('cd4')
608  typepred1(level)='cd4'
609  case ('fd')
610  typepred1(level)='fd'
611  case ('tvdlf','tvdmu')
612  typepred1(level)='hancock'
613  case ('hll')
614  typepred1(level)='hll'
615  case ('hllc')
616  typepred1(level)='hllc'
617  case ('hllcd')
618  typepred1(level)='hllcd'
619  case ('hlld')
620  typepred1(level)='hlld'
621  case ('nul','source','tvd')
622  typepred1(level)='nul'
623  case default
624  call mpistop("No default predictor for this full step")
625  end select
626  end if
627  end do
628 
629  ! finite difference scheme fd need global maximal speed
630  if(any(flux_scheme=='fd')) need_global_cmax=.true.
631 
632  select case (time_integrator)
633  case ("onestep")
634  nstep=1
635  case ("twostep", "twostep_trapezoidal")
636  nstep=2
637  case ("threestep")
638  nstep=3
639  case ("fourstep","rk4","jameson","ssprk43")
640  nstep=4
641  case ("ssprk54")
642  nstep=5
643  case default
644  call mpistop("Unknown time_integrator")
645  end select
646 
647  do i = 1, ndim
648  select case (stretch_dim(i))
649  case (undefined, 'none')
651  stretched_dim(i) = .false.
652  case ('uni','uniform')
654  stretched_dim(i) = .true.
655  case ('symm', 'symmetric')
657  stretched_dim(i) = .true.
658  case default
660  stretched_dim(i) = .false.
661  if (mype == 0) print *, 'Got stretch_dim = ', trim(stretch_dim(i))
662  call mpistop('Unknown stretch type')
663  end select
664  end do
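! Illustrative stretching setup (values are placeholders; the namelist placement
! of these variables is assumed from their use elsewhere in this file): setting
! stretch_dim(1)='uni' with qstretch_baselevel(1)=1.02d0 selects unidirectional
! stretching in the first dimension, while stretch_dim(2)='symm' with
! nstretchedblocks_baselevel(2)=2 requests symmetric stretching; any other
! string falls into the error branch above.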
665 
666  ! Harmonize the parameters for dimensional splitting and source splitting
667  if(typedimsplit =='default'.and. dimsplit) typedimsplit='xyyx'
668  if(typedimsplit =='default'.and..not.dimsplit) typedimsplit='unsplit'
669  dimsplit = typedimsplit /='unsplit'
670 
671  if(coordinate==-1) then
673  if(mype==0) then
674  write(*,*) 'Warning: coordinate system is not specified!'
675  write(*,*) 'call set_coordinate_system in usr_init in mod_usr.t'
676  write(*,*) 'Now use Cartesian coordinate'
677  end if
678  end if
679 
680  if(coordinate==cartesian) then
681  slab=.true.
682  slab_uniform=.true.
683  if(any(stretched_dim)) then
685  slab_uniform=.false.
686  end if
687  else
688  slab=.false.
689  slab_uniform=.false.
690  end if
691 
692  if(coordinate==spherical) then
693  if(dimsplit) then
694  if(mype==0)print *,'Warning: spherical symmetry needs dimsplit=F, resetting'
695  dimsplit=.false.
696  end if
697  end if
698 
699  if (ndim==1) dimsplit=.false.
700 
701  ! Type limiter is of integer type for performance
702  allocate(type_limiter(nlevelshi))
703  allocate(type_gradient_limiter(nlevelshi))
704 
705  do level=1,nlevelshi
706  type_limiter(level) = limiter_type(limiter(level))
707  type_gradient_limiter(level) = limiter_type(gradient_limiter(level))
708  end do
709 
710  if (any(limiter(1:nlevelshi)== 'ppm')&
711  .and.(flatsh.and.physics_type=='rho')) then
712  call mpistop(" PPM with flatsh=.true. can not be used with physics_type='rho'!")
713  end if
714 
715  ! Copy boundary conditions to typeboundary, which is used internally
716  {
717  if (any(typeboundary_min^d /= undefined)) then
718  typeboundary(1:nwfluxbc, 2*^d-1) = typeboundary_min^d(1:nwfluxbc)
719  end if
720 
721  if (any(typeboundary_max^d /= undefined)) then
722  typeboundary(1:nwfluxbc, 2*^d) = typeboundary_max^d(1:nwfluxbc)
723  end if
724  }
725 
726  ! psi, tracers take the same boundary type as density
727  if (nwfluxbc<nwflux) then
728  do iw = nwfluxbc+1, nwflux
729  typeboundary(iw,:) = typeboundary(iw_rho, :)
730  end do
731  end if
732 
733  if (any(typeboundary == undefined)) then
734  call mpistop("Not all boundary conditions have been defined")
735  end if
736 
737  do idim=1,ndim
738  periodb(idim)=(any(typeboundary(:,2*idim-1:2*idim)=='periodic'))
739  aperiodb(idim)=(any(typeboundary(:,2*idim-1:2*idim)=='aperiodic'))
740  if (periodb(idim).or.aperiodb(idim)) then
741  do iw=1,nwflux
742  if (typeboundary(iw,2*idim-1) .ne. typeboundary(iw,2*idim)) &
743  call mpistop("Wrong counterpart in periodic boundary")
744 
745  if (typeboundary(iw,2*idim-1) /= 'periodic' .and. &
746  typeboundary(iw,2*idim-1) /= 'aperiodic') then
747  call mpistop("Each dimension should either have all "//&
748  "or no variables periodic, some can be aperiodic")
749  end if
750  end do
751  end if
752  end do
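! Example of the consistency rule enforced above (2D, four flux variables,
! values illustrative): a boundlist entry such as
!   typeboundary_min1 = 4*'periodic'
!   typeboundary_max1 = 4*'periodic'
!   typeboundary_min2 = 4*'symm'
!   typeboundary_max2 = 4*'symm'
! makes dimension 1 fully periodic (periodb(1)=.true.); mixing 'periodic' with
! non-periodic entries within one dimension triggers the mpistop calls above.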
753  {^nooned
754  do idim=1,ndim
755  if(any(typeboundary(:,2*idim-1)=='pole')) then
756  if(any(typeboundary(:,2*idim-1)/='pole')) typeboundary(:,2*idim-1)='pole'
757  if(phys_energy) then
758  windex=2
759  else
760  windex=1
761  end if
762  typeboundary(:,2*idim-1)='symm'
763  if(physics_type/='rho') then
764  select case(coordinate)
765  case(cylindrical)
766  typeboundary(phi_+1,2*idim-1)='asymm'
767  if(physics_type=='mhd') typeboundary(ndir+windex+phi_,2*idim-1)='asymm'
768  case(spherical)
769  typeboundary(3:ndir+1,2*idim-1)='asymm'
770  if(physics_type=='mhd') typeboundary(ndir+windex+2:ndir+windex+ndir,2*idim-1)='asymm'
771  case default
772  call mpistop('Pole boundary is only possible in cylindrical, polar, or spherical coordinates!')
773  end select
774  end if
775  end if
776  if(any(typeboundary(:,2*idim)=='pole')) then
777  if(any(typeboundary(:,2*idim)/='pole')) typeboundary(:,2*idim)='pole'
778  if(phys_energy) then
779  windex=2
780  else
781  windex=1
782  end if
783  typeboundary(:,2*idim)='symm'
784  if(physics_type/='rho') then
785  select case(coordinate)
786  case(cylindrical)
787  typeboundary(phi_+1,2*idim)='asymm'
788  if(physics_type=='mhd') typeboundary(ndir+windex+phi_,2*idim)='asymm'
789  case(spherical)
790  typeboundary(3:ndir+1,2*idim)='asymm'
791  if(physics_type=='mhd') typeboundary(ndir+windex+2:ndir+windex+ndir,2*idim)='asymm'
792  case default
793  call mpistop('Pole boundary is only possible in cylindrical, polar, or spherical coordinates!')
794  end select
795  end if
796  end if
797  end do
798  }
799 
800  if(any(limiter(1:nlevelshi)=='mp5')) then
801  nghostcells=3
802  end if
803 
804  if(any(limiter(1:nlevelshi)=='weno5')) then
805  nghostcells=3
806  end if
807 
808  if(any(limiter(1:nlevelshi)=='wenoz5')) then
809  nghostcells=3
810  end if
811 
812  if(any(limiter(1:nlevelshi)=='wenozp5')) then
813  nghostcells=3
814  end if
815 
816  if(any(limiter(1:nlevelshi)=='ppm')) then
817  nghostcells=4
818  end if
819 
820  if(any(limiter(1:nlevelshi)=='weno7')) then
821  nghostcells=4
822  end if
823 
824  if(any(limiter(1:nlevelshi)=='mpweno7')) then
825  nghostcells=4
826  end if
827 
828  if(any(limiter(1:nlevelshi)=='exeno7')) then
829  nghostcells=4
830  end if
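! Summary of the ghost-cell requirements set above: the default is
! nghostcells=2; 'mp5', 'weno5', 'wenoz5' and 'wenozp5' need 3 layers;
! 'ppm', 'weno7', 'mpweno7' and 'exeno7' need 4 layers.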
831 
832  ! prolongation in AMR for constrained transport MHD needs even number ghosts
833  if(stagger_grid .and. refine_max_level>1 .and. mod(nghostcells,2)/=0) then
834  nghostcells=nghostcells+1
835  end if
836 
837  ! If a wider stencil is used, extend the number of ghost cells
838  nghostcells = nghostcells + phys_wider_stencil
839 
840  select case (coordinate)
841  {^nooned
842  case (spherical)
843  xprob^lim^de=xprob^lim^de*two*dpi;
844  \}
845  case (cylindrical)
846  {
847  if (^d==phi_) then
848  xprob^lim^d=xprob^lim^d*two*dpi;
849  end if
850  \}
851  end select
852 
853  ! full block size including ghostcells
854  {ixghi^d = block_nx^d + 2*nghostcells\}
855  {ixgshi^d = ixghi^d\}
856 
857  nx_vec = [{domain_nx^d|, }]
858  block_nx_vec = [{block_nx^d|, }]
859 
860  if (any(nx_vec < 4) .or. any(mod(nx_vec, 2) == 1)) &
861  call mpistop('Grid size (domain_nx^D) has to be even and >= 4')
862 
863  if (any(block_nx_vec < 4) .or. any(mod(block_nx_vec, 2) == 1)) &
864  call mpistop('Block size (block_nx^D) has to be even and >= 4')
865 
866  if (any([ domain_nx^d/block_nx^d ] == 1) .and. mype == 0) then
867  print *, "TODO: possible bug when domain_nx^D/block_nx^D == 1"
868  end if
869 
870  { if(mod(domain_nx^d,block_nx^d)/=0) &
871  call mpistop('Grid (domain_nx^D) and block (block_nx^D) must be consistent') \}
872 
873  if (refine_max_level>nlevelshi) then
874  write(unitterm,*)'Error: refine_max_level',refine_max_level,'>nlevelshi ',nlevelshi
875  call mpistop("Reset nlevelshi and recompile!")
876  endif
877 
878  if (any(stretched_dim)) then
879  allocate(qstretch(0:nlevelshi,1:ndim),dxfirst(0:nlevelshi,1:ndim),&
880  dxfirst_1mq(0:nlevelshi,1:ndim),dxmid(0:nlevelshi,1:ndim))
881  allocate(nstretchedblocks(1:nlevelshi,1:ndim))
882  qstretch(0:nlevelshi,1:ndim)=0.0d0
883  dxfirst(0:nlevelshi,1:ndim)=0.0d0
885  {if (stretch_type(^d) == stretch_uni) then
886  ! first some sanity checks
887  if(qstretch_baselevel(^d)<1.0d0.or.qstretch_baselevel(^d)==bigdouble) then
888  if(mype==0) then
889  write(*,*) 'stretched grid needs finite qstretch_baselevel>1'
890  write(*,*) 'will try default value for qstretch_baselevel in dimension', ^d
891  endif
892  if(xprobmin^d>smalldouble)then
893  qstretch_baselevel(^d)=(xprobmax^d/xprobmin^d)**(1.d0/dble(domain_nx^d))
894  else
895  call mpistop("can not set qstretch_baselevel automatically")
896  endif
897  endif
898  if(mod(block_nx^d,2)==1) &
899  call mpistop("stretched grid needs even block size block_nxD")
900  if(mod(domain_nx^d/block_nx^d,2)/=0) &
901  call mpistop("number level 1 blocks in D must be even")
902  qstretch(1,^d)=qstretch_baselevel(^d)
903  dxfirst(1,^d)=(xprobmax^d-xprobmin^d) &
904  *(1.0d0-qstretch(1,^d))/(1.0d0-qstretch(1,^d)**domain_nx^d)
905  qstretch(0,^d)=qstretch(1,^d)**2
906  dxfirst(0,^d)=dxfirst(1,^d)*(1.0d0+qstretch(1,^d))
907  if(refine_max_level>1)then
908  do ilev=2,refine_max_level
909  qstretch(ilev,^d)=dsqrt(qstretch(ilev-1,^d))
910  dxfirst(ilev,^d)=dxfirst(ilev-1,^d) &
911  /(1.0d0+dsqrt(qstretch(ilev-1,^d)))
912  enddo
913  endif
914  endif \}
915  if(mype==0) then
916  {if(stretch_type(^d) == stretch_uni) then
917  write(*,*) 'Stretched dimension ', ^d
918  write(*,*) 'Using stretched grid with qs=',qstretch(0:refine_max_level,^d)
919  write(*,*) ' and first cell sizes=',dxfirst(0:refine_max_level,^d)
920  endif\}
921  end if
922  {if(stretch_type(^d) == stretch_symm) then
923  if(mype==0) then
924  write(*,*) 'will apply symmetric stretch in dimension', ^d
925  endif
926  if(mod(block_nx^d,2)==1) &
927  call mpistop("stretched grid needs even block size block_nxD")
928  ! checks on the input variable nstretchedblocks_baselevel
929  if(nstretchedblocks_baselevel(^d)==0) &
930  call mpistop("need finite even number of stretched blocks at baselevel")
931  if(mod(nstretchedblocks_baselevel(^d),2)==1) &
932  call mpistop("need even number of stretched blocks at baselevel")
933  if(qstretch_baselevel(^d)<1.0d0.or.qstretch_baselevel(^d)==bigdouble) &
934  call mpistop('stretched grid needs finite qstretch_baselevel>1')
935  ! compute stretched part to ensure uniform center
936  ipower=(nstretchedblocks_baselevel(^d)/2)*block_nx^d
937  if(nstretchedblocks_baselevel(^d)==domain_nx^d/block_nx^d)then
938  xstretch^d=0.5d0*(xprobmax^d-xprobmin^d)
939  else
940  xstretch^d=(xprobmax^d-xprobmin^d) &
941  /(2.0d0+dble(domain_nx^d-nstretchedblocks_baselevel(^d)*block_nx^d) &
942  *(1.0d0-qstretch_baselevel(^d))/(1.0d0-qstretch_baselevel(^d)**ipower))
943  endif
944  if(xstretch^d>(xprobmax^d-xprobmin^d)*0.5d0) &
945  call mpistop(" stretched grid part should not exceed full domain")
946  dxfirst(1,^d)=xstretch^d*(1.0d0-qstretch_baselevel(^d)) &
947  /(1.0d0-qstretch_baselevel(^d)**ipower)
948  nstretchedblocks(1,^d)=nstretchedblocks_baselevel(^d)
949  qstretch(1,^d)=qstretch_baselevel(^d)
950  qstretch(0,^d)=qstretch(1,^d)**2
951  dxfirst(0,^d)=dxfirst(1,^d)*(1.0d0+qstretch(1,^d))
952  dxmid(1,^d)=dxfirst(1,^d)
953  dxmid(0,^d)=dxfirst(1,^d)*2.0d0
954  if(refine_max_level>1)then
955  do ilev=2,refine_max_level
956  nstretchedblocks(ilev,^d)=2*nstretchedblocks(ilev-1,^d)
957  qstretch(ilev,^d)=dsqrt(qstretch(ilev-1,^d))
958  dxfirst(ilev,^d)=dxfirst(ilev-1,^d) &
959  /(1.0d0+dsqrt(qstretch(ilev-1,^d)))
960  dxmid(ilev,^d)=dxmid(ilev-1,^d)/2.0d0
961  enddo
962  endif
963  ! sanity check on total domain size:
964  sizeuniformpart^d=dxfirst(1,^d) &
965  *(domain_nx^d-nstretchedblocks_baselevel(^d)*block_nx^d)
966  if(mype==0) then
967  print *,'uniform part of size=',sizeuniformpart^d
968  print *,'setting of domain is then=',2*xstretch^d+sizeuniformpart^d
969  print *,'versus=',xprobmax^d-xprobmin^d
970  endif
971  if(dabs(xprobmax^d-xprobmin^d-2*xstretch^d-sizeuniformpart^d)>smalldouble) then
972  call mpistop('mismatch in domain size!')
973  endif
974  endif \}
975  dxfirst_1mq(0:refine_max_level,1:ndim)=dxfirst(0:refine_max_level,1:ndim) &
976  /(1.0d0-qstretch(0:refine_max_level,1:ndim))
977  end if
978 
979  dx_vec = [{xprobmax^d-xprobmin^d|, }] / nx_vec
980 
981  if (mype==0) then
982  write(c_ndim, '(I1)') ^nd
983  write(unitterm, '(A30,' // c_ndim // '(I0," "))') &
984  ' Domain size (cells): ', nx_vec
985  write(unitterm, '(A30,' // c_ndim // '(E9.3," "))') &
986  ' Level one dx: ', dx_vec
987  end if
988 
989  if (any(dx_vec < smalldouble)) &
990  call mpistop("Incorrect domain size (too small grid spacing)")
991 
992  dx(:, 1) = dx_vec
993 
994  if(sum(w_refine_weight(:))==0) w_refine_weight(1) = 1.d0
995  if(dabs(sum(w_refine_weight(:))-1.d0)>smalldouble) then
996  write(unitterm,*) "Sum of all elements in w_refine_weight must be 1.d0"
997  call mpistop("Reset w_refine_weight so the sum is 1.d0")
998  end if
999 
1000  if (mype==0) write(unitterm, '(A30)', advance='no') 'Refine estimation: '
1001 
1002  select case (refine_criterion)
1003  case (0)
1004  if (mype==0) write(unitterm, '(A)') "user defined"
1005  case (1)
1006  if (mype==0) write(unitterm, '(A)') "relative error"
1007  case (2)
1008  if (mype==0) write(unitterm, '(A)') "Lohner's original scheme"
1009  case (3)
1010  if (mype==0) write(unitterm, '(A)') "Lohner's scheme"
1011  case default
1012  call mpistop("Unknown error estimator, change refine_criterion")
1013  end select
1014 
1015  if (tfixgrid<bigdouble/2.0d0) then
1016  if(mype==0)print*,'Warning, at time=',tfixgrid,'the grid will be fixed'
1017  end if
1018  if (itfixgrid<biginteger/2) then
1019  if(mype==0)print*,'Warning, at iteration=',itfixgrid,'the grid will be fixed'
1020  end if
1021  if (ditregrid>1) then
1022  if(mype==0)print*,'Note, Grid is reconstructed once every',ditregrid,'iterations'
1023  end if
1024 
1025 
1026  do islice=1,nslices
1027  select case(slicedir(islice))
1028  {case(^d)
1029  if(slicecoord(islice)<xprobmin^d.or.slicecoord(islice)>xprobmax^d) &
1030  write(uniterr,*)'Warning in read_par_files: ', &
1031  'Slice ', islice, ' coordinate',slicecoord(islice),'out of bounds for dimension ',slicedir(islice)
1032  \}
1033  end select
1034  end do
1035 
1036  if (mype==0) then
1037  write(unitterm, '(A30,A,A)') 'restart_from_file: ', ' ', trim(restart_from_file)
1038  write(unitterm, '(A30,L1)') 'converting: ', convert
1039  write(unitterm, '(A)') ''
1040  endif
1041 
1042  end subroutine read_par_files
1043 
1044  !> Routine to find entries in a string
1045  subroutine get_fields_string(line, delims, n_max, fields, n_found, fully_read)
1046  !> The line from which we want to read
1047  character(len=*), intent(in) :: line
1048  !> A string with delimiters. For example delims = " ,'"""//char(9)
1049  character(len=*), intent(in) :: delims
1050  !> Maximum number of entries to read in
1051  integer, intent(in) :: n_max
1052  !> Number of entries found
1053  integer, intent(inout) :: n_found
1054  !> Fields in the strings
1055  character(len=*), intent(inout) :: fields(n_max)
1056  logical, intent(out), optional :: fully_read
1057 
1058  integer :: ixs_start(n_max)
1059  integer :: ixs_end(n_max)
1060  integer :: ix, ix_prev
1061 
1062  ix_prev = 0
1063  n_found = 0
1064 
1065  do while (n_found < n_max)
1066  ! Find the starting point of the next entry (a non-delimiter value)
1067  ix = verify(line(ix_prev+1:), delims)
1068  if (ix == 0) exit
1069 
1070  n_found = n_found + 1
1071  ixs_start(n_found) = ix_prev + ix ! This is the absolute position in 'line'
1072 
1073  ! Get the end point of the current entry (next delimiter index minus one)
1074  ix = scan(line(ixs_start(n_found)+1:), delims) - 1
1075 
1076  if (ix == -1) then ! If there is no last delimiter,
1077  ixs_end(n_found) = len(line) ! the end of the line is the endpoint
1078  else
1079  ixs_end(n_found) = ixs_start(n_found) + ix
1080  end if
1081 
1082  fields(n_found) = line(ixs_start(n_found):ixs_end(n_found))
1083  ix_prev = ixs_end(n_found) ! We continue to search from here
1084  end do
1085 
1086  if (present(fully_read)) then
1087  ix = verify(line(ix_prev+1:), delims)
1088  fully_read = (ix == 0) ! Are there only delimiters?
1089  end if
1090 
1091  end subroutine get_fields_string
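! Worked example (input is illustrative): calling
!   call get_fields_string("a.par  b.par", " ,'"""//char(9), 20, fields, n)
! yields n=2 with fields(1)='a.par' and fields(2)='b.par', and the optional
! fully_read argument would come back .true. because only delimiters (here:
! nothing) remain after the last entry.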
1092 
1093  subroutine saveamrfile(ifile)
1097  use mod_particles, only: write_particles_snapshot
1098  use mod_slice, only: write_slice
1099  use mod_collapse, only: write_collapsed
1100  integer:: ifile
1101 
1102  select case (ifile)
1103  case (fileout_)
1104  ! Write .dat snapshot
1105  call write_snapshot()
1106 
1107  ! Generate formatted output (e.g., VTK)
1108  if(autoconvert) call generate_plotfile
1109 
1110  if(use_particles) call write_particles_snapshot()
1111 
1113  case (fileslice_)
1114  call write_slice
1115  case (filecollapse_)
1116  call write_collapsed
1117  case (filelog_)
1118  select case (typefilelog)
1119  case ('default')
1120  call printlog_default
1121  case ('regression_test')
1122  call printlog_regression_test()
1123  case ('special')
1124  if (.not. associated(usr_print_log)) then
1125  call mpistop("usr_print_log not defined")
1126  else
1127  call usr_print_log()
1128  end if
1129  case default
1130  call mpistop("Error in SaveFile: Unknown typefilelog")
1131  end select
1132  case (fileanalysis_)
1133  if (associated(usr_write_analysis)) then
1134  call usr_write_analysis()
1135  end if
1136  case default
1137  write(*,*) 'No save method is defined for ifile=',ifile
1138  call mpistop("")
1139  end select
1140 
1141  ! opedit: Flush stdout and stderr from time to time.
1142  flush(unit=unitterm)
1143 
1144  end subroutine saveamrfile
1145 
1146  !> Standard method for creating a new output file
1147  subroutine create_output_file(fh, ix, extension, suffix)
1149  integer, intent(out) :: fh !< File handle
1150  integer, intent(in) :: ix !< Index of file
1151  character(len=*), intent(in) :: extension !< Extension of file
1152  character(len=*), intent(in), optional :: suffix !< Optional suffix
1153  character(len=std_len) :: filename
1154  integer :: amode
1155 
1156  if (ix >= 10000) then
1157  call mpistop("Number of output files is limited to 10000 (0...9999)")
1158  end if
1159 
1160  if (present(suffix)) then
1161  write(filename,"(a,a,i4.4,a)") trim(base_filename), &
1162  trim(suffix), ix, extension
1163  else
1164  write(filename,"(a,i4.4,a)") trim(base_filename), ix, extension
1165  end if
1166 
1167  ! MPI cannot easily replace existing files
1168  open(unit=unitsnapshot,file=filename,status='replace')
1169  close(unitsnapshot, status='delete')
1170 
1171  amode = ior(mpi_mode_create, mpi_mode_wronly)
1172  call mpi_file_open(mpi_comm_self,filename,amode, &
1173  mpi_info_null, fh, ierrmpi)
1174 
1175  if (ierrmpi /= 0) then
1176  print *, "Error, cannot create file ", trim(filename)
1177  call mpistop("Fatal error")
1178  end if
1179 
1180  end subroutine create_output_file
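! Example (assuming base_filename='data'; index, suffix and extension illustrative):
!   call create_output_file(fh, 12, ".dat")           -> creates 'data0012.dat'
!   call create_output_file(fh, 3, ".vtu", "_slice")  -> creates 'data_slice0003.vtu'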
1181 
1182  ! Check if a snapshot exists
1183  logical function snapshot_exists(ix)
1185  integer, intent(in) :: ix !< Index of snapshot
1186  character(len=std_len) :: filename
1187 
1188  write(filename, "(a,i4.4,a)") trim(base_filename), ix, ".dat"
1189  inquire(file=trim(filename), exist=snapshot_exists)
1190  end function snapshot_exists
1191 
1192  integer function get_snapshot_index(filename)
1193  character(len=*), intent(in) :: filename
1194  integer :: i
1195 
1196  ! Try to parse index in restart_from_file string (e.g. basename0000.dat)
1197  i = len_trim(filename) - 7
1198  read(filename(i:i+3), '(I4)') get_snapshot_index
1199  end function get_snapshot_index
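! Example: for filename='data0042.dat' the index read from the four digits in
! front of the '.dat' extension is 42; the routine relies on the fixed
! basenameNNNN.dat layout produced by create_output_file() above.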
1200 
1201  !> Write header for a snapshot
1202  !>
1203  !> If you edit the header, don't forget to update: snapshot_write_header(),
1204  !> snapshot_read_header(), doc/fileformat.md, tools/python/dat_reader.py
1205  subroutine snapshot_write_header(fh, offset_tree, offset_block)
1207  use mod_physics
1209  use mod_slice, only: slicenext
1210  integer, intent(in) :: fh !< File handle
1211  integer(kind=MPI_OFFSET_KIND), intent(in) :: offset_tree !< Offset of tree info
1212  integer(kind=MPI_OFFSET_KIND), intent(in) :: offset_block !< Offset of block data
1213  integer, dimension(MPI_STATUS_SIZE) :: st
1214  integer :: iw, er
1215 
1216  call mpi_file_write(fh, version_number, 1, mpi_integer, st, er)
1217  call mpi_file_write(fh, int(offset_tree), 1, mpi_integer, st, er)
1218  call mpi_file_write(fh, int(offset_block), 1, mpi_integer, st, er)
1219  call mpi_file_write(fh, nw, 1, mpi_integer, st, er)
1220  call mpi_file_write(fh, ndir, 1, mpi_integer, st, er)
1221  call mpi_file_write(fh, ndim, 1, mpi_integer, st, er)
1222  call mpi_file_write(fh, levmax, 1, mpi_integer, st, er)
1223  call mpi_file_write(fh, nleafs, 1, mpi_integer, st, er)
1224  call mpi_file_write(fh, nparents, 1, mpi_integer, st, er)
1225  call mpi_file_write(fh, it, 1, mpi_integer, st, er)
1226  ! Note: It is nice when this double has an even number of 4 byte
1227  ! integers before it (for alignment)
1228  call mpi_file_write(fh, global_time, 1, mpi_double_precision, st, er)
1229 
1230  call mpi_file_write(fh, [ xprobmin^d ], ndim, &
1231  mpi_double_precision, st, er)
1232  call mpi_file_write(fh, [ xprobmax^d ], ndim, &
1233  mpi_double_precision, st, er)
1234  call mpi_file_write(fh, [ domain_nx^d ], ndim, &
1235  mpi_integer, st, er)
1236  call mpi_file_write(fh, [ block_nx^d ], ndim, &
1237  mpi_integer, st, er)
1238 
1239  ! Periodicity (assume all variables are periodic if one is)
1240  call mpi_file_write(fh, periodb, ndim, mpi_logical, st, er)
1241 
1242  ! Geometry
1243  call mpi_file_write(fh, geometry_name(1:name_len), &
1244  name_len, mpi_character, st, er)
1245 
1246  ! Write stagger grid mark
1247  call mpi_file_write(fh, stagger_grid, 1, mpi_logical, st, er)
1248 
1249  do iw = 1, nw
1250  call mpi_file_write(fh, cons_wnames(iw), name_len, mpi_character, st, er)
1251  end do
1252 
1253  ! Physics related information
1254  call mpi_file_write(fh, physics_type, name_len, mpi_character, st, er)
1255 
1256  ! Format:
1257  ! integer :: n_par
1258  ! double precision :: values(n_par)
1259  ! character(n_par * name_len) :: names
1260  call phys_write_info(fh)
1261 
1262  ! Write snapshotnext etc., which is useful for restarting.
1263  ! Note we add one, since snapshotnext is updated *after* this procedure
1264  if(pass_wall_time) then
1265  call mpi_file_write(fh, snapshotnext, 1, mpi_integer, st, er)
1266  else
1267  call mpi_file_write(fh, snapshotnext+1, 1, mpi_integer, st, er)
1268  end if
1269  call mpi_file_write(fh, slicenext, 1, mpi_integer, st, er)
1270  call mpi_file_write(fh, collapsenext, 1, mpi_integer, st, er)
1271 
1272  end subroutine snapshot_write_header
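! Sketch of the resulting header layout (integers written as 4-byte MPI_INTEGER,
! reals as 8-byte MPI_DOUBLE_PRECISION; see doc/fileformat.md for the normative
! description): version_number, offset_tree, offset_block, nw, ndir, ndim,
! levmax, nleafs, nparents, it (ten integers), global_time, xprobmin^D and
! xprobmax^D, domain_nx^D and block_nx^D, periodb, geometry_name, stagger_grid,
! the nw conserved-variable names, physics_type, the block written by
! phys_write_info(), and finally snapshotnext, slicenext and collapsenext.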
1273 
1274  !> Read header for a snapshot
1275  !>
1276  !> If you edit the header, don't forget to update: snapshot_write_header(),
1277  !> snapshot_read_header(), doc/fileformat.md, tools/python/dat_reader.py
1278  subroutine snapshot_read_header(fh, offset_tree, offset_block)
1281  use mod_physics, only: physics_type
1282  use mod_slice, only: slicenext
1283  integer, intent(in) :: fh !< File handle
1284  integer(MPI_OFFSET_KIND), intent(out) :: offset_tree !< Offset of tree info
1285  integer(MPI_OFFSET_KIND), intent(out) :: offset_block !< Offset of block data
1286  integer :: i, version
1287  integer :: ibuf(ndim), iw
1288  double precision :: rbuf(ndim)
1289  integer, dimension(MPI_STATUS_SIZE) :: st
1290  character(len=name_len), allocatable :: var_names(:), param_names(:)
1291  double precision, allocatable :: params(:)
1292  character(len=name_len) :: phys_name, geom_name
1293  integer :: er, n_par, tmp_int
1294  logical :: stagger_mark_dat, periodic(ndim)
1295 
1296  ! Version number
1297  call mpi_file_read(fh, version, 1, mpi_integer, st, er)
1298  if (all(compatible_versions /= version)) then
1299  call mpistop("Incompatible file version (maybe old format?)")
1300  end if
1301 
1302  ! offset_tree
1303  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1304  offset_tree = ibuf(1)
1305 
1306  ! offset_block
1307  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1308  offset_block = ibuf(1)
1309 
1310  ! nw
1311  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1312  nw_found=ibuf(1)
1313  if (nw /= ibuf(1)) then
1314  write(*,*) "nw=",nw," and nw found in restart file=",ibuf(1)
1315  write(*,*) "Please be aware of changes in w at restart."
1316  !call mpistop("currently, changing nw at restart is not allowed")
1317  end if
1318 
1319  ! ndir
1320  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1321  if (ibuf(1) /= ndir) then
1322  write(*,*) "ndir in restart file = ",ibuf(1)
1323  write(*,*) "ndir = ",ndir
1324  call mpistop("reset ndir to ndir in restart file")
1325  end if
1326 
1327  ! ndim
1328  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1329  if (ibuf(1) /= ndim) then
1330  write(*,*) "ndim in restart file = ",ibuf(1)
1331  write(*,*) "ndim = ",ndim
1332  call mpistop("reset ndim to ndim in restart file")
1333  end if
1334 
1335  ! levmax
1336  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1337  if (ibuf(1) > refine_max_level) then
1338  write(*,*) "number of levels in restart file = ",ibuf(1)
1339  write(*,*) "refine_max_level = ",refine_max_level
1340  call mpistop("refine_max_level < num. levels in restart file")
1341  end if
1342 
1343  ! nleafs
1344  call mpi_file_read(fh, nleafs, 1, mpi_integer, st, er)
1345 
1346  ! nparents
1347  call mpi_file_read(fh, nparents, 1, mpi_integer, st, er)
1348 
1349  ! it
1350  call mpi_file_read(fh, it, 1, mpi_integer, st, er)
1351 
1352  ! global time
1353  call mpi_file_read(fh, global_time, 1, mpi_double_precision, st, er)
1354 
1355  ! xprobmin^D
1356  call mpi_file_read(fh,rbuf(1:ndim),ndim,mpi_double_precision,st,er)
1357  if (maxval(abs(rbuf(1:ndim) - [ xprobmin^d ])) > 0) then
1358  write(*,*) "Error: xprobmin differs from restart data: ", rbuf(1:ndim)
1359  call mpistop("change xprobmin^D in par file")
1360  end if
1361 
1362  ! xprobmax^D
1363  call mpi_file_read(fh,rbuf(1:ndim),ndim,mpi_double_precision,st,er)
1364  if (maxval(abs(rbuf(1:ndim) - [ xprobmax^d ])) > 0) then
1365  write(*,*) "Error: xprobmax differs from restart data: ", rbuf(1:ndim)
1366  call mpistop("change xprobmax^D in par file")
1367  end if
1368 
1369  ! domain_nx^D
1370  call mpi_file_read(fh,ibuf(1:ndim), ndim, mpi_integer,st,er)
1371  if (any(ibuf(1:ndim) /= [ domain_nx^d ])) then
1372  write(*,*) "Error: mesh size differs from restart data: ", ibuf(1:ndim)
1373  call mpistop("change domain_nx^D in par file")
1374  end if
1375 
1376  ! block_nx^D
1377  call mpi_file_read(fh,ibuf(1:ndim), ndim, mpi_integer,st,er)
1378  if (any(ibuf(1:ndim) /= [ block_nx^d ])) then
1379  write(*,*) "Error: block size differs from restart data:", ibuf(1:ndim)
1380  call mpistop("change block_nx^D in par file")
1381  end if
1382 
1383  ! From version 5, read more info about the grid
1384  if (version > 4) then
1385  call mpi_file_read(fh, periodic, ndim, mpi_logical, st, er)
1386  if (any(periodic .neqv. periodb)) &
1387  call mpistop("change in periodicity in par file")
1388 
1389  call mpi_file_read(fh, geom_name, name_len, mpi_character, st, er)
1390 
1391  if (geom_name /= geometry_name(1:name_len)) then
1392  write(*,*) "type of coordinates in data is: ", geom_name
1393  call mpistop("select the correct coordinates in mod_usr.t file")
1394  end if
1395 
1396  call mpi_file_read(fh, stagger_mark_dat, 1, mpi_logical, st, er)
1397  if (stagger_grid .neqv. stagger_mark_dat) then
1398  write(*,*) "Error: stagger grid flag differs from restart data:", stagger_mark_dat
1399  call mpistop("change parameter to use stagger grid")
1400  end if
1401  end if
1402 
1403  ! From version 4 onwards, the later parts of the header must be present
1404  if (version > 3) then
1405  ! w_names (not used here)
1406  allocate(var_names(nw_found))
1407  do iw = 1, nw_found
1408  call mpi_file_read(fh, var_names(iw), name_len, mpi_character, st, er)
1409  end do
1410 
1411  ! Physics related information
1412  call mpi_file_read(fh, phys_name, name_len, mpi_character, st, er)
1413 
1414  if (phys_name /= physics_type) then
1415  call mpistop("Cannot restart with a different physics type")
1416  end if
1417 
1418  call mpi_file_read(fh, n_par, 1, mpi_integer, st, er)
1419  allocate(params(n_par))
1420  allocate(param_names(n_par))
1421  call mpi_file_read(fh, params, n_par, mpi_double_precision, st, er)
1422  call mpi_file_read(fh, param_names, name_len * n_par, mpi_character, st, er)
1423 
1424  ! Read snapshotnext etc. for restarting
1425  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1426 
1427  ! Only set snapshotnext if the user hasn't specified it
1428  if (snapshotnext == -1) snapshotnext = tmp_int
1429 
1430  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1431  if (slicenext == -1) slicenext = tmp_int
1432 
1433  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1434  if (collapsenext == -1) collapsenext = tmp_int
1435  else
1436  ! Guess snapshotnext from file name if not set
1437  if (snapshotnext == -1) &
1438  snapshotnext = get_snapshot_index(trim(restart_from_file)) + 1
1439  ! Set slicenext and collapsenext if not set
1440  if (slicenext == -1) slicenext = 0
1441  if (collapsenext == -1) collapsenext = 0
1442  end if
1443 
1444  ! Still used in convert
1446 
1447  end subroutine snapshot_read_header
1448 
1449  !> Compute number of elements in index range
1450  pure integer function count_ix(ixO^L)
1451  integer, intent(in) :: ixO^L
1452 
1453  count_ix = product([ ixomax^d ] - [ ixomin^d ] + 1)
1454  end function count_ix
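! Example: in 2D, count_ix with ixOmin1=ixOmin2=1 and ixOmax1=ixOmax2=16
! returns product([16,16] - [1,1] + 1) = 256 cells.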
1455 
1456  !> Determine the shape of a block for output (whether to include ghost cells,
1457  !> and on which sides)
1458  subroutine block_shape_io(igrid, n_ghost, ixO^L, n_values)
1460 
1461  integer, intent(in) :: igrid
1462  !> nghost(1:ndim) contains the number of ghost cells on the block's minimum
1463  !> boundaries, and nghost(ndim+1:2*ndim) on the block's maximum boundaries
1464  integer, intent(out) :: n_ghost(2*ndim)
1465  !> Index range on output block
1466  integer, intent(out) :: ixO^L
1467  !> Number of cells/values in output
1468  integer, intent(out) :: n_values
1469 
1470  integer :: idim
1471 
1472  n_ghost(:) = 0
1473 
1474  if(save_physical_boundary) then
1475  do idim=1,ndim
1476  ! Include ghost cells on lower boundary
1477  if(ps(igrid)%is_physical_boundary(2*idim-1)) n_ghost(idim)=nghostcells
1478  ! Include ghost cells on upper boundary
1479  if(ps(igrid)%is_physical_boundary(2*idim)) n_ghost(ndim+idim)=nghostcells
1480  end do
1481  end if
1482 
1483  {ixomin^d = ixmlo^d - n_ghost(^d)\}
1484  {ixomax^d = ixmhi^d + n_ghost(ndim+^d)\}
1485 
1486  n_values = count_ix(ixo^l) * nw
1487 
1488  end subroutine block_shape_io
1489 
1490  subroutine write_snapshot
1493  use mod_physics
1494 
1495  integer :: file_handle, igrid, Morton_no, iwrite
1496  integer :: ipe, ix_buffer(2*ndim+1), n_values
1497  integer :: ixO^L, n_ghost(2*ndim)
1498  integer :: ixOs^L,n_values_stagger
1499  integer :: iorecvstatus(mpi_status_size)
1500  integer :: ioastatus(mpi_status_size)
1501  integer :: igrecvstatus(mpi_status_size)
1502  integer :: istatus(mpi_status_size)
1503  type(tree_node), pointer :: pnode
1504  integer(kind=MPI_OFFSET_KIND) :: offset_tree_info
1505  integer(kind=MPI_OFFSET_KIND) :: offset_block_data
1506  integer(kind=MPI_OFFSET_KIND) :: offset_offsets
1507  double precision, allocatable :: w_buffer(:)
1508 
1509  integer, allocatable :: block_ig(:, :)
1510  integer, allocatable :: block_lvl(:)
1511  integer(kind=MPI_OFFSET_KIND), allocatable :: block_offset(:)
1512 
1513  call mpi_barrier(icomm, ierrmpi)
1514 
1515  ! Allocate send/receive buffer
1516  n_values = count_ix(ixg^ll) * nw
1517  if(stagger_grid) then
1518  n_values = n_values + count_ix(ixgs^ll) * nws
1519  end if
1520  allocate(w_buffer(n_values))
1521 
1522  ! Allocate arrays with information about grid blocks
1523  allocate(block_ig(ndim, nleafs))
1524  allocate(block_lvl(nleafs))
1525  allocate(block_offset(nleafs+1))
1526 
1527  ! master processor
1528  if (mype==0) then
1529  call create_output_file(file_handle, snapshotnext, ".dat")
1530 
1531  ! Don't know offsets yet, we will write header again later
1532  offset_tree_info = -1
1533  offset_block_data = -1
1534  call snapshot_write_header(file_handle, offset_tree_info, &
1535  offset_block_data)
1536 
1537  call mpi_file_get_position(file_handle, offset_tree_info, ierrmpi)
1538 
1539  call write_forest(file_handle)
1540 
1541  ! Collect information about the spatial index (ig^D) and refinement level
1542  ! of leaves
1543  do morton_no = morton_start(0), morton_stop(npe-1)
1544  igrid = sfc(1, morton_no)
1545  ipe = sfc(2, morton_no)
1546  pnode => igrid_to_node(igrid, ipe)%node
1547 
1548  block_ig(:, morton_no) = [ pnode%ig^d ]
1549  block_lvl(morton_no) = pnode%level
1550  block_offset(morton_no) = 0 ! Will be determined later
1551  end do
1552 
1553  call mpi_file_write(file_handle, block_lvl, size(block_lvl), &
1554  mpi_integer, istatus, ierrmpi)
1555 
1556  call mpi_file_write(file_handle, block_ig, size(block_ig), &
1557  mpi_integer, istatus, ierrmpi)
1558 
1559  ! Block offsets are currently unknown, but will be overwritten later
1560  call mpi_file_get_position(file_handle, offset_offsets, ierrmpi)
1561  call mpi_file_write(file_handle, block_offset(1:nleafs), nleafs, &
1562  mpi_offset, istatus, ierrmpi)
1563 
1564  call mpi_file_get_position(file_handle, offset_block_data, ierrmpi)
1565 
1566  ! Check whether data was written as expected
1567  if (offset_block_data - offset_tree_info /= &
1568  (nleafs + nparents) * size_logical + &
1569  nleafs * ((1+ndim) * size_int + 2 * size_int)) then
1570  if (mype == 0) then
1571  print *, "Warning: MPI_OFFSET type /= 8 bytes"
1572  print *, "This *could* cause problems when reading .dat files"
1573  end if
1574  end if
1575 
1576  block_offset(1) = offset_block_data
1577  iwrite = 0
1578  end if
1579 
1580  do morton_no=morton_start(mype), morton_stop(mype)
1581  igrid = sfc_to_igrid(morton_no)
1582  itag = morton_no
1583 
1584  if (nwaux>0) then
1585  ! extra layer around mesh only for later averaging in convert
1586  ! set dxlevel value for use in gradient subroutine,
1587  ! which might be used in getaux
1588  saveigrid=igrid
1589  ^d&dxlevel(^d)=rnode(rpdx^d_, igrid);
1590  block=>ps(igrid)
1591  call phys_get_aux(.true., ps(igrid)%w, ps(igrid)%x, ixg^ll, &
1592  ixm^ll^ladd1, "write_snapshot")
1593  endif
1594 
1595  call block_shape_io(igrid, n_ghost, ixo^l, n_values)
1596  if(stagger_grid) then
1597  w_buffer(1:n_values) = pack(ps(igrid)%w(ixo^s, 1:nw), .true.)
1598  {ixosmin^d = ixomin^d -1\}
1599  {ixosmax^d = ixomax^d \}
1600  n_values_stagger= count_ix(ixos^l)*nws
1601  w_buffer(n_values+1:n_values+n_values_stagger) = pack(ps(igrid)%ws(ixos^s, 1:nws), .true.)
1602  n_values=n_values+n_values_stagger
1603  else
1604  w_buffer(1:n_values) = pack(ps(igrid)%w(ixo^s, 1:nw), .true.)
1605  end if
1606  ix_buffer(1) = n_values
1607  ix_buffer(2:) = n_ghost
1608 
1609  if (mype /= 0) then
1610  call mpi_send(ix_buffer, 2*ndim+1, &
1611  mpi_integer, 0, itag, icomm, ierrmpi)
1612  call mpi_send(w_buffer, n_values, &
1613  mpi_double_precision, 0, itag, icomm, ierrmpi)
1614  else
1615  iwrite = iwrite+1
1616  call mpi_file_write(file_handle, ix_buffer(2:), &
1617  2*ndim, mpi_integer, istatus, ierrmpi)
1618  call mpi_file_write(file_handle, w_buffer, &
1619  n_values, mpi_double_precision, istatus, ierrmpi)
1620 
1621  ! Set offset of next block
1622  block_offset(iwrite+1) = block_offset(iwrite) + &
1623  int(n_values, mpi_offset_kind) * size_double + &
1624  2 * ndim * size_int
1625  end if
1626  end do
1627 
1628  ! Write data communicated from other processors
1629  if (mype == 0) then
1630  do ipe = 1, npe-1
1631  do morton_no=morton_start(ipe), morton_stop(ipe)
1632  iwrite=iwrite+1
1633  itag=morton_no
1634 
1635  call mpi_recv(ix_buffer, 2*ndim+1, mpi_integer, ipe, itag, icomm,&
1636  igrecvstatus, ierrmpi)
1637  n_values = ix_buffer(1)
1638 
1639  call mpi_recv(w_buffer, n_values, mpi_double_precision,&
1640  ipe, itag, icomm, iorecvstatus, ierrmpi)
1641 
1642  call mpi_file_write(file_handle, ix_buffer(2:), &
1643  2*ndim, mpi_integer, istatus, ierrmpi)
1644  call mpi_file_write(file_handle, w_buffer, &
1645  n_values, mpi_double_precision, istatus, ierrmpi)
1646 
1647  ! Set offset of next block
1648  block_offset(iwrite+1) = block_offset(iwrite) + &
1649  int(n_values, mpi_offset_kind) * size_double + &
1650  2 * ndim * size_int
1651  end do
1652  end do
1653 
1654  ! Write block offsets (now we know them)
1655  call mpi_file_seek(file_handle, offset_offsets, mpi_seek_set, ierrmpi)
1656  call mpi_file_write(file_handle, block_offset(1:nleafs), nleafs, &
1657  mpi_offset, istatus, ierrmpi)
1658 
1659  ! Write header again, now with correct offsets
1660  call mpi_file_seek(file_handle, 0_mpi_offset_kind, mpi_seek_set, ierrmpi)
1661  call snapshot_write_header(file_handle, offset_tree_info, &
1662  offset_block_data)
1663 
1664  call mpi_file_close(file_handle, ierrmpi)
1665  end if
1666 
1667  call mpi_barrier(icomm, ierrmpi)
1668 
1669  end subroutine write_snapshot
1670 
1671  !> Routine to read in snapshots (.dat files). When it cannot recognize the
1672  !> file version, it will automatically try the 'old' reader.
1673  subroutine read_snapshot
1675  use mod_forest
1677  use mod_slice, only: slicenext
1678 
1679  integer :: ix_buffer(2*ndim+1), n_values, n_values_stagger
1680  integer :: ixO^L, ixOs^L
1681  integer :: file_handle, amode, igrid, Morton_no, iread
1682  integer :: istatus(mpi_status_size)
1683  integer :: iorecvstatus(mpi_status_size)
1684  integer :: ipe,inrecv,nrecv, file_version
1685  logical :: fexist
1686  integer(MPI_OFFSET_KIND) :: offset_tree_info
1687  integer(MPI_OFFSET_KIND) :: offset_block_data
1688  double precision, allocatable :: w_buffer(:)
1689  double precision, dimension(:^D&,:), allocatable :: w, ws
1690 
1691  if (mype==0) then
1692  inquire(file=trim(restart_from_file), exist=fexist)
1693  if (.not.fexist) call mpistop(trim(restart_from_file)//" not found!")
1694 
1695  call mpi_file_open(mpi_comm_self,restart_from_file,mpi_mode_rdonly, &
1696  mpi_info_null,file_handle,ierrmpi)
1697  call mpi_file_read(file_handle, file_version, 1, mpi_integer, &
1698  istatus, ierrmpi)
1699  end if
1700 
1701  call mpi_bcast(file_version,1,mpi_integer,0,icomm,ierrmpi)
1702 
1703  if (all(compatible_versions /= file_version)) then
1704  if (mype == 0) print *, "Unknown version, trying old snapshot reader..."
1705  call mpi_file_close(file_handle,ierrmpi)
1706  call read_snapshot_old()
1707 
1708  ! Guess snapshotnext from file name if not set
1709  if (snapshotnext == -1) &
1711  ! Set slicenext and collapsenext if not set
1712  if (slicenext == -1) slicenext = 0
1713  if (collapsenext == -1) collapsenext = 0
1714 
1715  ! Still used in convert
1717 
1718  return ! Leave this routine
1719  else if (mype == 0) then
1720  call mpi_file_seek(file_handle, 0_mpi_offset_kind, mpi_seek_set, ierrmpi)
1721  call snapshot_read_header(file_handle, offset_tree_info, &
1722  offset_block_data)
1723  end if
1724 
1725  ! Share information about restart file
1726  call mpi_bcast(nw_found,1,mpi_integer,0,icomm,ierrmpi)
1727  call mpi_bcast(nleafs,1,mpi_integer,0,icomm,ierrmpi)
1728  call mpi_bcast(nparents,1,mpi_integer,0,icomm,ierrmpi)
1729  call mpi_bcast(it,1,mpi_integer,0,icomm,ierrmpi)
1730  call mpi_bcast(global_time,1,mpi_double_precision,0,icomm,ierrmpi)
1731 
1732  call mpi_bcast(snapshotnext,1,mpi_integer,0,icomm,ierrmpi)
1733  call mpi_bcast(slicenext,1,mpi_integer,0,icomm,ierrmpi)
1734  call mpi_bcast(collapsenext,1,mpi_integer,0,icomm,ierrmpi)
1735 
1736  ! Allocate send/receive buffer
1737  n_values = count_ix(ixg^ll) * nw_found
1738  if(stagger_grid) then
1739  n_values = n_values + count_ix(ixgs^ll) * nws
1740  allocate(ws(ixgs^t,1:nws))
1741  end if
1742  allocate(w_buffer(n_values))
1743  allocate(w(ixg^t,1:nw_found))
1744 
1746 
1747  if (mype == 0) then
1748  call mpi_file_seek(file_handle, offset_tree_info, &
1749  mpi_seek_set, ierrmpi)
1750  end if
1751 
1752  call read_forest(file_handle)
1753 
1754  do morton_no=morton_start(mype),morton_stop(mype)
1755  igrid=sfc_to_igrid(morton_no)
1756  call alloc_node(igrid)
1757  end do
1758 
1759  if (mype==0) then
1760  call mpi_file_seek(file_handle, offset_block_data, mpi_seek_set, ierrmpi)
1761 
1762  iread = 0
1763  do ipe = 0, npe-1
1764  do morton_no=morton_start(ipe),morton_stop(ipe)
1765  iread=iread+1
1766  itag=morton_no
1767 
1768  call mpi_file_read(file_handle,ix_buffer(1:2*ndim), 2*ndim, &
1769  mpi_integer, istatus,ierrmpi)
1770 
1771  ! Construct ixO^L array from number of ghost cells
1772  {ixomin^d = ixmlo^d - ix_buffer(^d)\}
1773  {ixomax^d = ixmhi^d + ix_buffer(ndim+^d)\}
1774  n_values = count_ix(ixo^l) * nw_found
1775  if(stagger_grid) then
1776  {ixosmin^d = ixomin^d - 1\}
1777  {ixosmax^d = ixomax^d\}
1778  n_values_stagger = n_values
1779  n_values = n_values + count_ix(ixos^l) * nws
1780  end if
1781 
1782  call mpi_file_read(file_handle, w_buffer, n_values, &
1783  mpi_double_precision, istatus, ierrmpi)
1784 
 1785  if (mype == ipe) then ! Block belongs to the root task itself
1786  igrid=sfc_to_igrid(morton_no)
1787  if(stagger_grid) then
1788  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values_stagger), &
1789  shape(w(ixo^s, 1:nw_found)))
1790  ps(igrid)%ws(ixos^s,1:nws)=reshape(w_buffer(n_values_stagger+1:n_values), &
1791  shape(ws(ixos^s, 1:nws)))
1792  else
1793  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values), &
1794  shape(w(ixo^s, 1:nw_found)))
1795  end if
1796  if (nw_found<nw) then
1797  if (associated(usr_transform_w)) then
1798  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1799  else
1800  ps(igrid)%w(ixo^s,1:nw_found)=w(ixo^s,1:nw_found)
1801  end if
1802  else if (nw_found>nw) then
1803  if (associated(usr_transform_w)) then
1804  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1805  else
1806  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1807  end if
1808  else
1809  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1810  end if
1811  else
1812  call mpi_send([ ixo^l, n_values ], 2*ndim+1, &
1813  mpi_integer, ipe, itag, icomm, ierrmpi)
1814  call mpi_send(w_buffer, n_values, &
1815  mpi_double_precision, ipe, itag, icomm, ierrmpi)
1816  end if
1817  end do
1818  end do
1819 
1820  call mpi_file_close(file_handle,ierrmpi)
1821 
1822  else ! mype > 0
1823 
1824  do morton_no=morton_start(mype),morton_stop(mype)
1825  igrid=sfc_to_igrid(morton_no)
1826  itag=morton_no
1827 
1828  call mpi_recv(ix_buffer, 2*ndim+1, mpi_integer, 0, itag, icomm,&
1829  iorecvstatus, ierrmpi)
1830  {ixomin^d = ix_buffer(^d)\}
1831  {ixomax^d = ix_buffer(ndim+^d)\}
1832  n_values = ix_buffer(2*ndim+1)
1833 
1834  call mpi_recv(w_buffer, n_values, mpi_double_precision,&
1835  0, itag, icomm, iorecvstatus, ierrmpi)
1836 
1837  if(stagger_grid) then
1838  n_values_stagger = count_ix(ixo^l) * nw_found
1839  {ixosmin^d = ixomin^d - 1\}
1840  {ixosmax^d = ixomax^d\}
1841  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values_stagger), &
1842  shape(w(ixo^s, 1:nw_found)))
1843  ps(igrid)%ws(ixos^s,1:nws)=reshape(w_buffer(n_values_stagger+1:n_values), &
1844  shape(ws(ixos^s, 1:nws)))
1845  else
1846  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values), &
1847  shape(w(ixo^s, 1:nw_found)))
1848  end if
1849  if (nw_found<nw) then
1850  if (associated(usr_transform_w)) then
1851  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1852  else
1853  ps(igrid)%w(ixo^s,1:nw_found)=w(ixo^s,1:nw_found)
1854  end if
1855  else if (nw_found>nw) then
1856  if (associated(usr_transform_w)) then
1857  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1858  else
1859  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1860  end if
1861  else
1862  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1863  end if
1864  end do
1865  end if
1866 
1867  call mpi_barrier(icomm,ierrmpi)
1868 
1869  end subroutine read_snapshot
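
  ! Note on variable-count mismatches: when nw_found differs from nw, the
  ! reader above either copies the overlapping variables or, if the user has
  ! associated usr_transform_w, calls it to map the old state onto the new
  ! one. A minimal, purely illustrative user-side sketch (the routine name,
  ! body and dummy declarations are hypothetical; the argument list follows
  ! the call made above):
  !
  !   subroutine my_transform_w(ixI^L, ixO^L, nw_in, w_in, x, w_out)
  !     integer, intent(in)           :: ixI^L, ixO^L, nw_in
  !     double precision, intent(in)  :: w_in(ixI^S, 1:nw_in)
  !     double precision, intent(in)  :: x(ixI^S, 1:ndim)
  !     double precision, intent(out) :: w_out(ixI^S, 1:nw)
  !     ! Copy the variables common to both runs ...
  !     w_out(ixO^S, 1:min(nw_in, nw)) = w_in(ixO^S, 1:min(nw_in, nw))
  !     ! ... and initialise any newly added variables here.
  !   end subroutine my_transform_w
  !
  ! which would be hooked in with: usr_transform_w => my_transform_w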
1870 
1871  subroutine read_snapshot_old()
1874 
1875  double precision :: wio(ixg^t,1:nw)
1876  integer :: fh, igrid, Morton_no, iread
1877  integer :: levmaxini, ndimini, ndirini
1878  integer :: nwini, neqparini, nxini^D
1879  integer(kind=MPI_OFFSET_KIND) :: offset
1880  integer :: istatus(mpi_status_size)
1881  integer, allocatable :: iorecvstatus(:,:)
1882  integer :: ipe,inrecv,nrecv
1883  integer :: sendini(7+^nd)
1884  character(len=80) :: filename
1885  logical :: fexist
1886  double precision :: eqpar_dummy(100)
1887 
1888  if (mype==0) then
1889  call mpi_file_open(mpi_comm_self,trim(restart_from_file), &
1890  mpi_mode_rdonly,mpi_info_null,fh,ierrmpi)
1891 
1892  offset=-int(7*size_int+size_double,kind=mpi_offset_kind)
1893  call mpi_file_seek(fh,offset,mpi_seek_end,ierrmpi)
1894 
1895  call mpi_file_read(fh,nleafs,1,mpi_integer,istatus,ierrmpi)
1897  call mpi_file_read(fh,levmaxini,1,mpi_integer,istatus,ierrmpi)
1898  call mpi_file_read(fh,ndimini,1,mpi_integer,istatus,ierrmpi)
1899  call mpi_file_read(fh,ndirini,1,mpi_integer,istatus,ierrmpi)
1900  call mpi_file_read(fh,nwini,1,mpi_integer,istatus,ierrmpi)
1901  call mpi_file_read(fh,neqparini,1,mpi_integer,istatus,ierrmpi)
1902  call mpi_file_read(fh,it,1,mpi_integer,istatus,ierrmpi)
1903  call mpi_file_read(fh,global_time,1,mpi_double_precision,istatus,ierrmpi)
1904 
1905  ! check if settings are suitable for restart
1906  if (levmaxini>refine_max_level) then
1907  write(*,*) "number of levels in restart file = ",levmaxini
1908  write(*,*) "refine_max_level = ",refine_max_level
1909  call mpistop("refine_max_level < number of levels in restart file")
1910  end if
1911  if (ndimini/=ndim) then
1912  write(*,*) "ndim in restart file = ",ndimini
1913  write(*,*) "ndim = ",ndim
1914  call mpistop("reset ndim to ndim in restart file")
1915  end if
1916  if (ndirini/=ndir) then
1917  write(*,*) "ndir in restart file = ",ndirini
1918  write(*,*) "ndir = ",ndir
1919  call mpistop("reset ndir to ndir in restart file")
1920  end if
1921  if (nw/=nwini) then
1922  write(*,*) "nw=",nw," and nw in restart file=",nwini
1923  call mpistop("currently, changing nw at restart is not allowed")
1924  end if
1925 
1926  offset=offset-int(ndimini*size_int+neqparini*size_double,kind=mpi_offset_kind)
1927  call mpi_file_seek(fh,offset,mpi_seek_end,ierrmpi)
1928 
1929  {call mpi_file_read(fh,nxini^d,1,mpi_integer,istatus,ierrmpi)\}
1930  if (ixghi^d/=nxini^d+2*nghostcells|.or.) then
1931  write(*,*) "Error: reset resolution to ",nxini^d+2*nghostcells
1932  call mpistop("change with setamrvac")
1933  end if
1934 
1935  call mpi_file_read(fh,eqpar_dummy,neqparini, &
1936  mpi_double_precision,istatus,ierrmpi)
1937  end if
1938 
1939  ! broadcast the global parameters first
1940  if (npe>1) then
1941  if (mype==0) then
1942  sendini=(/nleafs,levmaxini,ndimini,ndirini,nwini,neqparini,it ,^d&nxini^d /)
1943  end if
1944  call mpi_bcast(sendini,7+^nd,mpi_integer,0,icomm,ierrmpi)
1945  nleafs=sendini(1);levmaxini=sendini(2);ndimini=sendini(3);
1946  ndirini=sendini(4);nwini=sendini(5);
1947  neqparini=sendini(6);it=sendini(7);
1948  nxini^d=sendini(7+^d);
1950  call mpi_bcast(global_time,1,mpi_double_precision,0,icomm,ierrmpi)
1951  end if
1952 
1953  if (mype == 0) then
1954  offset = int(size_block_io,kind=mpi_offset_kind) * &
1955  int(nleafs,kind=mpi_offset_kind)
1956  call mpi_file_seek(fh,offset,mpi_seek_set,ierrmpi)
1957  end if
1958 
1959  call read_forest(fh)
1960 
1961  do morton_no=morton_start(mype),morton_stop(mype)
1962  igrid=sfc_to_igrid(morton_no)
1963  call alloc_node(igrid)
1964  end do
1965 
1966  if (mype==0)then
1967  iread=0
1968 
1969  do morton_no=morton_start(0),morton_stop(0)
1970  igrid=sfc_to_igrid(morton_no)
1971  iread=iread+1
1972  offset=int(size_block_io,kind=mpi_offset_kind) &
1973  *int(morton_no-1,kind=mpi_offset_kind)
1974  call mpi_file_read_at(fh,offset,ps(igrid)%w,1,type_block_io, &
1975  istatus,ierrmpi)
1976  end do
1977  if (npe>1) then
1978  do ipe=1,npe-1
1979  do morton_no=morton_start(ipe),morton_stop(ipe)
1980  iread=iread+1
1981  itag=morton_no
1982  offset=int(size_block_io,kind=mpi_offset_kind)&
1983  *int(morton_no-1,kind=mpi_offset_kind)
1984  call mpi_file_read_at(fh,offset,wio,1,type_block_io,&
1985  istatus,ierrmpi)
1986  call mpi_send(wio,1,type_block_io,ipe,itag,icomm,ierrmpi)
1987  end do
1988  end do
1989  end if
1990  call mpi_file_close(fh,ierrmpi)
1991  else
1992  nrecv=(morton_stop(mype)-morton_start(mype)+1)
1993  allocate(iorecvstatus(mpi_status_size,nrecv))
1994  inrecv=0
1995  do morton_no=morton_start(mype),morton_stop(mype)
1996  igrid=sfc_to_igrid(morton_no)
1997  itag=morton_no
1998  inrecv=inrecv+1
1999  call mpi_recv(ps(igrid)%w,1,type_block_io,0,itag,icomm,&
2000  iorecvstatus(:,inrecv),ierrmpi)
2001  end do
2002  deallocate(iorecvstatus)
2003  end if
2004 
2005  call mpi_barrier(icomm,ierrmpi)
2006 
2007  end subroutine read_snapshot_old
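
  ! The old snapshot format keeps its metadata in a footer: the last
  ! 7*size_int + size_double bytes hold nleafs, levmax, ndim, ndir, nw,
  ! neqpar and it, followed by the global time, which is why the reader
  ! above seeks backwards from the end of the file (MPI_SEEK_END) before
  ! reading the block data.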
2008 
2009  !> Write volume-averaged values and other information to the log file
2010  subroutine printlog_default
2012  use mod_timing
2015 
2016  logical :: fileopen
2017  integer :: i, iw, level
2018  double precision :: wmean(1:nw), total_volume
2019  double precision :: volume_coverage(refine_max_level)
2020  integer :: nx^D, nc, ncells, dit
2021  double precision :: dtTimeLast, now, cellupdatesPerSecond
2022  double precision :: activeBlocksPerCore, wctPerCodeTime, timeToFinish
2023  character(len=40) :: fmt_string
2024  character(len=80) :: filename
2025  character(len=2048) :: line
2026  logical, save :: opened = .false.
2027  integer :: amode, istatus(mpi_status_size)
2028  integer, parameter :: my_unit = 20
2029 
2030  ! Compute the volume-average of w**1 = w
2031  call get_volume_average(1, wmean, total_volume)
2032 
2033  ! Compute the volume coverage
2034  call get_volume_coverage(volume_coverage)
2035 
2036  if (mype == 0) then
2037 
2038  ! To compute cell updates per second, we do the following:
2039  nx^d=ixmhi^d-ixmlo^d+1;
2040  nc={nx^d*}
2041  ncells = nc * nleafs_active
2042 
 2043  ! Assumes the number of active leaves hasn't changed since the last computation.
2044  now = mpi_wtime()
2045  dit = it - ittimelast
2046  dttimelast = now - timelast
2047  ittimelast = it
2048  timelast = now
2049  cellupdatespersecond = dble(ncells) * dble(nstep) * &
2050  dble(dit) / (dttimelast * dble(npe))
2051 
2052  ! blocks per core:
2053  activeblockspercore = dble(nleafs_active) / dble(npe)
2054 
2055  ! Wall clock time per code time unit in seconds:
2056  wctpercodetime = dttimelast / max(dit * dt, epsilon(1.0d0))
2057 
2058  ! Wall clock time to finish in hours:
2059  timetofinish = (time_max - global_time) * wctpercodetime / 3600.0d0
2060 
2061  ! On first entry, open the file and generate the header
2062  if (.not. opened) then
2063 
2064  filename = trim(base_filename) // ".log"
2065 
2066  ! Delete the log when not doing a restart run
2067  if (restart_from_file == undefined) then
2068  open(unit=my_unit,file=trim(filename),status='replace')
2069  close(my_unit, status='delete')
2070  end if
2071 
2072  amode = ior(mpi_mode_create,mpi_mode_wronly)
2073  amode = ior(amode,mpi_mode_append)
2074 
2075  call mpi_file_open(mpi_comm_self, filename, amode, &
2076  mpi_info_null, log_fh, ierrmpi)
2077 
2078  opened = .true.
2079 
 2080  ! Start of file header
2081  line = "it global_time dt"
2082  do level=1,nw
2083  i = len_trim(line) + 2
2084  write(line(i:),"(a,a)") trim(cons_wnames(level)), " "
2085  end do
2086 
2087  ! Volume coverage per level
2088  do level = 1, refine_max_level
2089  i = len_trim(line) + 2
2090  write(line(i:), "(a,i0)") "c", level
2091  end do
2092 
2093  ! Cell counts per level
2094  do level=1,refine_max_level
2095  i = len_trim(line) + 2
2096  write(line(i:), "(a,i0)") "n", level
2097  end do
2098 
2099  ! Rest of file header
2100  line = trim(line) // " | Xload Xmemory 'Cell_Updates /second/core'"
2101  line = trim(line) // " 'Active_Blocks/Core' 'Wct Per Code Time [s]'"
2102  line = trim(line) // " 'TimeToFinish [hrs]'"
2103 
2104  ! Only write header if not restarting
2105  if (restart_from_file == undefined) then
2106  call mpi_file_write(log_fh, trim(line) // new_line('a'), &
2107  len_trim(line)+1, mpi_character, istatus, ierrmpi)
2108  end if
2109  end if
2110 
2111  ! Construct the line to be added to the log
2112 
2113  fmt_string = '(' // fmt_i // ',2' // fmt_r // ')'
2114  write(line, fmt_string) it, global_time, dt
2115  i = len_trim(line) + 2
2116 
2117  write(fmt_string, '(a,i0,a)') '(', nw, fmt_r // ')'
2118  write(line(i:), fmt_string) wmean(1:nw)
2119  i = len_trim(line) + 2
2120 
2121  write(fmt_string, '(a,i0,a)') '(', refine_max_level, fmt_r // ')'
2122  write(line(i:), fmt_string) volume_coverage(1:refine_max_level)
2123  i = len_trim(line) + 2
2124 
2125  write(fmt_string, '(a,i0,a)') '(', refine_max_level, fmt_i // ')'
2126  write(line(i:), fmt_string) nleafs_level(1:refine_max_level)
2127  i = len_trim(line) + 2
2128 
2129  fmt_string = '(a,6' // fmt_r2 // ')'
2130  write(line(i:), fmt_string) '| ', xload, xmemory, cellupdatespersecond, &
2131  activeblockspercore, wctpercodetime, timetofinish
2132 
2133  call mpi_file_write(log_fh, trim(line) // new_line('a') , &
2134  len_trim(line)+1, mpi_character, istatus, ierrmpi)
2135  end if
2136 
2137  end subroutine printlog_default
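
  ! In formula form, the performance figures written above are:
  !   cellupdatespersecond = ncells * nstep * dit / (dttimelast * npe)
  !   wctpercodetime       = dttimelast / (dit * dt)   (guarded against zero)
  !   timetofinish [hours] = (time_max - global_time) * wctpercodetime / 3600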
2138 
2139  !> Print a log that can be used to check whether the code still produces the
2140  !> same output (regression test)
2141  subroutine printlog_regression_test()
2143 
2144  integer, parameter :: n_modes = 2
2145  integer, parameter :: my_unit = 123
2146  character(len=40) :: fmt_string
2147  logical, save :: file_open = .false.
2148  integer :: power
2149  double precision :: modes(nw, n_modes), volume
2150 
2151  do power = 1, n_modes
2152  call get_volume_average(power, modes(:, power), volume)
2153  end do
2154 
2155  if (mype == 0) then
2156  if (.not. file_open) then
2157  open(my_unit, file = trim(base_filename) // ".log")
2158  file_open = .true.
2159 
2160  write(my_unit, *) "# time mean(w) mean(w**2)"
2161  end if
2162 
2163  write(fmt_string, "(a,i0,a)") "(", nw * n_modes + 1, fmt_r // ")"
2164  write(my_unit, fmt_string) global_time, modes
2165  end if
2166  end subroutine printlog_regression_test
2167 
2168  !> Compute mean(w**power) over the leaves of the grid. The first mode
2169  !> (power=1) corresponds to the mean, the second to the mean squared values
2170  !> and so on.
2171  subroutine get_volume_average(power, mode, volume)
2173 
2174  integer, intent(in) :: power !< Which mode to compute
2175  double precision, intent(out) :: mode(nw) !< The computed mode
2176  double precision, intent(out) :: volume !< The total grid volume
2177  integer :: iigrid, igrid, iw
2178  double precision :: wsum(nw+1)
2179  double precision :: dsum_recv(1:nw+1)
2180 
2181  wsum(:) = 0
2182 
2183  ! Loop over all the grids
2184  do iigrid = 1, igridstail
2185  igrid = igrids(iigrid)
2186 
2187  ! Store total volume in last element
2188  wsum(nw+1) = wsum(nw+1) + sum(ps(igrid)%dvolume(ixm^t))
2189 
2190  ! Compute the modes of the cell-centered variables, weighted by volume
2191  do iw = 1, nw
2192  wsum(iw) = wsum(iw) + &
2193  sum(ps(igrid)%dvolume(ixm^t)*ps(igrid)%w(ixm^t,iw)**power)
2194  end do
2195  end do
2196 
2197  ! Make the information available on all tasks
2198  call mpi_allreduce(wsum, dsum_recv, nw+1, mpi_double_precision, &
2199  mpi_sum, icomm, ierrmpi)
2200 
2201  ! Set the volume and the average
2202  volume = dsum_recv(nw+1)
2203  mode = dsum_recv(1:nw) / volume
2204 
2205  end subroutine get_volume_average
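
  ! In formula form, for each variable iw this returns
  !   mode(iw) = sum_leaves( dvolume * w(iw)**power ) / sum_leaves( dvolume )
  ! so power = 1 gives the volume-weighted mean and power = 2 the mean square.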
2206 
2207  !> Compute how much of the domain is covered by each grid level. This routine
2208  !> does not take a non-Cartesian geometry into account.
2209  subroutine get_volume_coverage(vol_cov)
2211 
2212  double precision, intent(out) :: vol_cov(1:refine_max_level)
2213  double precision :: dsum_recv(1:refine_max_level)
2214  integer :: iigrid, igrid, iw, level
2215 
2216  ! First determine the total 'flat' volume in each level
2217  vol_cov(1:refine_max_level)=zero
2218 
2219  do iigrid = 1, igridstail
2220  igrid = igrids(iigrid);
2221  level = node(plevel_,igrid)
2222  vol_cov(level) = vol_cov(level)+ &
2223  {(rnode(rpxmax^d_,igrid)-rnode(rpxmin^d_,igrid))|*}
2224  end do
2225 
2226  ! Make the information available on all tasks
2227  call mpi_allreduce(vol_cov, dsum_recv, refine_max_level, mpi_double_precision, &
2228  mpi_sum, icomm, ierrmpi)
2229 
2230  ! Normalize
2231  vol_cov = dsum_recv / sum(dsum_recv)
2232  end subroutine get_volume_coverage
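
  ! Note that vol_cov is normalized: vol_cov(level) is the fraction of the
  ! total 'flat' (bounding-box) volume occupied by blocks at that level, so
  ! the entries sum to one.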
2233 
2234  !> Compute the volume average of func(w) over the leaves of the grid.
2235  subroutine get_volume_average_func(func, f_avg, volume)
2237 
2238  interface
2239  pure function func(w_vec, w_size) result(val)
2240  integer, intent(in) :: w_size
2241  double precision, intent(in) :: w_vec(w_size)
2242  double precision :: val
2243  end function func
2244  end interface
2245  double precision, intent(out) :: f_avg !< The volume average of func
2246  double precision, intent(out) :: volume !< The total grid volume
2247  integer :: iigrid, igrid, i^D
2248  double precision :: wsum(2)
2249  double precision :: dsum_recv(2)
2250 
2251  wsum(:) = 0
2252 
2253  ! Loop over all the grids
2254  do iigrid = 1, igridstail
2255  igrid = igrids(iigrid)
2256 
2257  ! Store total volume in last element
2258  wsum(2) = wsum(2) + sum(ps(igrid)%dvolume(ixm^t))
2259 
 2260  ! Evaluate func(w) in each cell, weighted by the cell volume
2261  {do i^d = ixmlo^d, ixmhi^d\}
2262  wsum(1) = wsum(1) + ps(igrid)%dvolume(i^d) * &
2263  func(ps(igrid)%w(i^d, :), nw)
2264  {end do\}
2265  end do
2266 
2267  ! Make the information available on all tasks
2268  call mpi_allreduce(wsum, dsum_recv, 2, mpi_double_precision, &
2269  mpi_sum, icomm, ierrmpi)
2270 
2271  ! Set the volume and the average
2272  volume = dsum_recv(2)
2273  f_avg = dsum_recv(1) / volume
2274 
2275  end subroutine get_volume_average_func
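
  ! A minimal usage sketch (the function name and the choice of w_vec(1) are
  ! illustrative only):
  !
  !   pure function first_w(w_vec, w_size) result(val)
  !     integer, intent(in)          :: w_size
  !     double precision, intent(in) :: w_vec(w_size)
  !     double precision             :: val
  !     val = w_vec(1)   ! e.g. average the first conserved variable
  !   end function first_w
  !
  !   call get_volume_average_func(first_w, f_avg, volume)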
2276 
 2277  !> Compute global maxima of the w variables over the leaves of the grid.
2278  subroutine get_global_maxima(wmax)
2280 
2281  double precision, intent(out) :: wmax(nw) !< The global maxima
2282 
2283  integer :: iigrid, igrid, iw
2284  double precision :: wmax_mype(nw),wmax_recv(nw)
2285 
2286  wmax_mype(1:nw) = -bigdouble
2287 
2288  ! Loop over all the grids
2289  do iigrid = 1, igridstail
2290  igrid = igrids(iigrid)
2291  do iw = 1, nw
2292  wmax_mype(iw)=max(wmax_mype(iw),maxval(ps(igrid)%w(ixm^t,iw)))
2293  end do
2294  end do
2295 
2296  ! Make the information available on all tasks
2297  call mpi_allreduce(wmax_mype, wmax_recv, nw, mpi_double_precision, &
2298  mpi_max, icomm, ierrmpi)
2299 
2300  wmax(1:nw)=wmax_recv(1:nw)
2301 
2302  end subroutine get_global_maxima
2303 
 2304  !> Compute global minima of the w variables over the leaves of the grid.
2305  subroutine get_global_minima(wmin)
2307 
 2308  double precision, intent(out) :: wmin(nw) !< The global minima
2309 
2310  integer :: iigrid, igrid, iw
2311  double precision :: wmin_mype(nw),wmin_recv(nw)
2312 
2313  wmin_mype(1:nw) = bigdouble
2314 
2315  ! Loop over all the grids
2316  do iigrid = 1, igridstail
2317  igrid = igrids(iigrid)
2318  do iw = 1, nw
2319  wmin_mype(iw)=min(wmin_mype(iw),minval(ps(igrid)%w(ixm^t,iw)))
2320  end do
2321  end do
2322 
2323  ! Make the information available on all tasks
2324  call mpi_allreduce(wmin_mype, wmin_recv, nw, mpi_double_precision, &
2325  mpi_min, icomm, ierrmpi)
2326 
2327  wmin(1:nw)=wmin_recv(1:nw)
2328 
2329  end subroutine get_global_minima
2330 
2331 
2332 
2333 end module mod_input_output