MPI-AMRVAC  2.2
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_input_output.t
Go to the documentation of this file.
1 !> Module for reading input and writing output
! NOTE(review): this is a documentation-site text dump; the leading integers on
! each line are residual listing numbers, not part of the Fortran source.
3 
4  implicit none
5  public
6 
7  !> Version number of the .dat file output
!  Written into every new snapshot; readers check it against compatible_versions.
8  integer, parameter :: version_number = 5
9 
10  !> List of compatible versions
!  Snapshots written with any of these .dat format versions can still be read;
!  version_number (5) is the current write format.
11  integer, parameter :: compatible_versions(3) = [3, 4, 5]
12 
13  !> number of w found in dat files
!  Set when reading a snapshot; may differ from the current nw if the setup changed.
14  integer :: nw_found
15 
16  ! Formats used in output
!  Edit descriptors spliced into format strings for log/terminal output.
17  character(len=*), parameter :: fmt_r = 'es16.8' ! Default precision
18  character(len=*), parameter :: fmt_r2 = 'es10.2' ! Two digits
19  character(len=*), parameter :: fmt_i = 'i8' ! Integer format
20 
21 contains
22 
23  !> Read the command line arguments passed to amrvac
! Parses the command line via the kracken-style routines (kracken/retrev/lget/iget),
! fills the global par_files array with the .par file names given after -i,
! and stores the -if/-slicenext/-collapsenext/-snapshotnext/-convert/-resume
! options into the corresponding module/global variables.
! Prints a usage message and terminates cleanly (mpi_finalize + stop) when
! --help is given or no par file is specified.
24  subroutine read_arguments()
! NOTE(review): inner listing lines 25-26 are missing from this dump —
! presumably the use statement(s) importing mype, std_len, par_files, etc.
27  use mod_slice, only: slicenext
28 
! NOTE(review): n, ibegin, iterm and resume appear unused in the visible body;
! likely remnants of an older implementation — confirm before removing.
29  integer :: len, ier, n
30  integer, parameter :: max_files = 20 ! Maximum number of par files
31  integer :: n_par_files
32  integer :: ibegin(max_files)
33  integer :: iterm(max_files)
34  character(len=max_files*std_len) :: all_par_files
35  character(len=std_len) :: tmp_files(max_files)
36  logical :: resume
37 
! Print the ASCII-art banner once, on the root rank only.
38  if (mype == 0) then
39  print *, '-----------------------------------------------------------------------------'
40  print *, '-----------------------------------------------------------------------------'
41  print *, '| __ __ ____ ___ _ __ __ ______ ___ ____ |'
42  print *, '| | \/ | _ \_ _| / \ | \/ | _ \ \ / / \ / ___| |'
43  print *, '| | |\/| | |_) | |_____ / _ \ | |\/| | |_) \ \ / / _ \| | |'
44  print *, '| | | | | __/| |_____/ ___ \| | | | _ < \ V / ___ \ |___ |'
45  print *, '| |_| |_|_| |___| /_/ \_\_| |_|_| \_\ \_/_/ \_\____| |'
46  print *, '-----------------------------------------------------------------------------'
47  print *, '-----------------------------------------------------------------------------'
48  end if
49 
50  ! Specify the options and their default values
! kracken registers the recognized options and their defaults for verb 'cmd';
! unset string options default to the sentinel 'undefined'.
51  call kracken('cmd','-i amrvac.par -if ' // undefined // &
52  ' -slicenext -1 -collapsenext -1 -snapshotnext -1' // &
53  ' --help .false. -convert .false. -resume .false.')
54 
55  ! Get the par file(s)
! all_par_files receives the raw -i value; len is its significant length.
56  call retrev('cmd_i', all_par_files, len, ier)
57 
58  ! Show the usage if the help flag was given, or no par file was specified
59  if (lget('cmd_-help') .or. len == 0) then
60  if (mype == 0) then
61  print *, 'Usage example:'
62  print *, 'mpirun -np 4 ./amrvac -i file.par [file2.par ...]'
63  print *, ' (later .par files override earlier ones)'
64  print *, ''
65  print *, 'Optional arguments:'
! NOTE(review): '-resume' is listed twice in this usage text (here and below);
! one of the two lines is redundant — cannot be changed in a doc-only edit.
66  print *, '-resume Resume previous run'
67  print *, '-convert Convert snapshot files'
68  print *, '-if file0001.dat Use this snapshot to restart from'
69  print *, ' (you can modify e.g. output names)'
70  print *, '-resume Automatically resume previous run'
71  print *, ' (useful for long runs on HPC systems)'
72  print *, '-snapshotnext N Manual index for next snapshot'
73  print *, '-slicenext N Manual index for next slice output'
74  print *, '-collapsenext N Manual index for next collapsed output'
75  print *, ''
76  end if
! All ranks reach this branch with the same arguments, so the collective
! mpi_finalize is safe before stopping.
77  call mpi_finalize(ierrmpi)
78  stop
79  end if
80 
81  ! Split the input files, in case multiple were given
! Delimiters: space, comma, single/double quote and TAB (char(9)).
82  call get_fields_string(all_par_files, " ,'"""//char(9), max_files, &
83  tmp_files, n_par_files)
84 
! par_files is a global deferred-size array; later routines iterate over it
! so that later .par files override earlier ones.
85  allocate(par_files(n_par_files))
86  par_files = tmp_files(1:n_par_files)
87 
88  ! Read in the other command line arguments
! restart_from_file keeps the sentinel 'undefined' when -if was not given;
! read_par_files uses that to decide between a fresh start and a restart.
89  call retrev('cmd_if', restart_from_file, len, ier)
90 
91  !> \todo Document these command line options
! Manual output indices: -1 (the kracken default above) means "not set".
92  slicenext = iget('cmd_slicenext')
93  collapsenext = iget('cmd_collapsenext')
94  snapshotnext = iget('cmd_snapshotnext')
95  convert = lget('cmd_convert') ! -convert present?
96  resume_previous_run = lget('cmd_resume') ! -resume present?
97 
98  end subroutine read_arguments
99 
100  !> Read in the user-supplied parameter-file
101  subroutine read_par_files()
104  use mod_small_values
105  use mod_limiter
106  use mod_slice
107  use mod_geometry
108 
109  logical :: fileopen, file_exists
110  integer :: i, j, k, ifile, io_state
111  integer :: iB, isave, iw, level, idim, islice
112  integer :: nx_vec(^nd), block_nx_vec(^nd)
113  integer :: my_unit, iostate
114  integer :: ilev
115  double precision :: dx_vec(^nd)
116 
117  character :: c_ndim
118  character(len=80) :: fmt_string
119  character(len=std_len) :: err_msg, restart_from_file_arg
120  character(len=std_len) :: basename_full, basename_prev, dummy_file
121  character(len=std_len), dimension(:), allocatable :: &
122  typeboundary_min^D, typeboundary_max^D
123  character(len=std_len), allocatable :: limiter(:)
124  character(len=std_len), allocatable :: gradient_limiter(:)
125  character(len=name_len) :: stretch_dim(ndim)
126 
127  double precision, dimension(nsavehi) :: tsave_log, tsave_dat, tsave_slice, &
128  tsave_collapsed, tsave_custom
129  double precision :: dtsave_log, dtsave_dat, dtsave_slice, &
130  dtsave_collapsed, dtsave_custom
131  integer :: ditsave_log, ditsave_dat, ditsave_slice, &
132  ditsave_collapsed, ditsave_custom
133  double precision :: tsavestart_log, tsavestart_dat, tsavestart_slice, &
134  tsavestart_collapsed, tsavestart_custom
135  integer :: windex, ipower
136  double precision :: sizeuniformpart^D
137 
138  namelist /filelist/ base_filename,restart_from_file, &
145 
146  namelist /savelist/ tsave,itsave,dtsave,ditsave,nslices,slicedir, &
148  tsave_log, tsave_dat, tsave_slice, tsave_collapsed, tsave_custom, &
149  dtsave_log, dtsave_dat, dtsave_slice, dtsave_collapsed, dtsave_custom, &
150  ditsave_log, ditsave_dat, ditsave_slice, ditsave_collapsed, ditsave_custom,&
151  tsavestart_log, tsavestart_dat, tsavestart_slice, tsavestart_collapsed,&
152  tsavestart_custom, tsavestart
153 
156 
157  namelist /methodlist/ time_integrator, &
161  limiter,gradient_limiter,cada3_radius,&
166  flatcd,flatsh,&
171 
174 
175  namelist /meshlist/ refine_max_level,nbufferx^d,refine_threshold,&
177  stretch_dim, stretch_uncentered, &
183  namelist /paramlist/ courantpar, dtpar, dtdiffpar, &
185 
186  namelist /euvlist/ filename_euv,image,spectrum,wavelength,direction_los,&
188 
189  ! default maximum number of grid blocks in a processor
190  max_blocks=4000
191 
192  ! allocate cell size of all levels
193  allocate(dx(ndim,nlevelshi))
194  {allocate(dg^d(nlevelshi))\}
195  {allocate(ng^d(nlevelshi))\}
196 
197  ! default block size excluding ghost cells
198  {block_nx^d = 16\}
199 
200  ! default resolution of level-1 mesh (full domain)
201  {domain_nx^d = 32\}
202 
203 
204 
205  ! defaults for boundary treatments
206  typeghostfill = 'linear'
207 
208  ! default number of ghost-cell layers at each boundary of a block
209  nghostcells = 2
210 
211  ! Allocate boundary conditions arrays in new and old style
212  {
213  allocate(typeboundary_min^d(nwfluxbc))
214  allocate(typeboundary_max^d(nwfluxbc))
215  typeboundary_min^d = undefined
216  typeboundary_max^d = undefined
217  }
218 
219  allocate(typeboundary(nwflux, 2 * ndim))
220  typeboundary(:, :) = undefined
221 
222  ! not save physical boundary in dat files by default
223  save_physical_boundary = .false.
224 
225  internalboundary = .false.
226 
227  ! defaults for specific options
228  typegrad = 'central'
229  typediv = 'central'
230  typecurl = 'central'
231 
232  ! defaults for smallest physical values allowed
233  small_temperature = 0.d0
234  small_pressure = 0.d0
235  small_density = 0.d0
236 
237  allocate(small_values_fix_iw(nw))
238  small_values_fix_iw(:) = .true.
239 
240  ! defaults for convert behavior
241 
242  ! store the -if value from argument in command line
243  restart_from_file_arg = restart_from_file
244  nwauxio = 0
245  nocartesian = .false.
246  saveprim = .false.
247  autoconvert = .false.
248  convert_type = 'vtuBCCmpi'
249  slice_type = 'vtuCC'
250  collapse_type = 'vti'
251  allocate(w_write(nw))
252  w_write(1:nw) = .true.
253  allocate(writelevel(nlevelshi))
254  writelevel(1:nlevelshi) = .true.
255  writespshift(1:ndim,1:2) = zero
256  level_io = -1
257  level_io_min = 1
259 
260  ! normalization of primitive variables: only for output
261  ! note that length_convert_factor is for length
262  ! this scaling is optional, and must be set consistently if used
263  allocate(w_convert_factor(nw))
264  w_convert_factor(:) = 1.0d0
265  time_convert_factor = 1.0d0
266  length_convert_factor = 1.0d0
267 
268  ! AMR related defaults
269  refine_max_level = 1
270  {nbufferx^d = 0\}
271  allocate(refine_threshold(nlevelshi))
272  refine_threshold(1:nlevelshi) = 0.1d0
273  allocate(derefine_ratio(nlevelshi))
274  derefine_ratio(1:nlevelshi) = 1.0d0/8.0d0
275  prolongation_method = 'linear'
276  coarsenprimitive = .false.
277  prolongprimitive = .false.
278  typeprolonglimit = 'default'
279  refine_criterion = 3
280  allocate(w_refine_weight(nw+1))
281  w_refine_weight = 0.d0
282  allocate(logflag(nw+1))
283  logflag = .false.
284  allocate(amr_wavefilter(nlevelshi))
285  amr_wavefilter(1:nlevelshi) = 1.0d-2
286  tfixgrid = bigdouble
287  itfixgrid = biginteger
288  ditregrid = 1
289 
290  ! Grid stretching defaults
291  stretch_uncentered = .true.
292  stretch_dim(1:ndim) = undefined
293  qstretch_baselevel(1:ndim) = bigdouble
295 
296  ! IO defaults
297  it_init = 0
298  it_max = biginteger
299  time_init = 0.d0
300  time_max = bigdouble
301  wall_time_max = bigdouble
302  dtmin = 1.0d-10
303  nslices = 0
304  collapse = .false.
305  collapselevel = 1
306  time_between_print = 30.0d0 ! Print status every 30 seconds
307 
308  do ifile=1,nfile
309  do isave=1,nsavehi
310  tsave(isave,ifile) = bigdouble ! global_time of saves into the output files
311  itsave(isave,ifile) = biginteger ! it of saves into the output files
312  end do
313  dtsave(ifile) = bigdouble ! time between saves
314  ditsave(ifile) = biginteger ! timesteps between saves
315  isavet(ifile) = 1 ! index for saves by global_time
316  isaveit(ifile) = 1 ! index for saves by it
317  tsavestart(ifile) = 0.0d0
318  end do
319 
320  tsave_log = bigdouble
321  tsave_dat = bigdouble
322  tsave_slice = bigdouble
323  tsave_collapsed = bigdouble
324  tsave_custom = bigdouble
325 
326  dtsave_log = bigdouble
327  dtsave_dat = bigdouble
328  dtsave_slice = bigdouble
329  dtsave_collapsed = bigdouble
330  dtsave_custom = bigdouble
331 
332  ditsave_log = biginteger
333  ditsave_dat = biginteger
334  ditsave_slice = biginteger
335  ditsave_collapsed = biginteger
336  ditsave_custom = biginteger
337 
338  tsavestart_log = bigdouble
339  tsavestart_dat = bigdouble
340  tsavestart_slice = bigdouble
341  tsavestart_collapsed = bigdouble
342  tsavestart_custom = bigdouble
343 
344  typefilelog = 'default'
345 
346  ! defaults for input
347  reset_time = .false.
348  reset_it = .false.
349  firstprocess = .false.
350  reset_grid = .false.
351  base_filename = 'data'
352  usr_filename = ''
353 
354 
355  ! Defaults for discretization methods
356  typeaverage = 'default'
357  tvdlfeps = one
358  nxdiffusehllc = 0
359  flathllc = .false.
360  slowsteps = -1
361  courantpar = 0.8d0
362  typecourant = 'maxsum'
363  dimsplit = .false.
364  typedimsplit = 'default'
365  if(physics_type=='mhd') then
366  cada3_radius = 0.5d0
367  else
368  cada3_radius = 0.1d0
369  end if
370  typetvd = 'roe'
371  typeboundspeed = 'cmaxmean'
372  source_split_usr= .false.
373  time_integrator = 'twostep'
374  solve_internal_e= .false.
375  angmomfix = .false.
376 
378  allocate(limiter(nlevelshi),gradient_limiter(nlevelshi))
379  do level=1,nlevelshi
380  flux_scheme(level) = 'tvdlf'
381  typepred1(level) = 'default'
382  limiter(level) = 'minmod'
383  gradient_limiter(level) = 'minmod'
384  end do
385 
386  flatcd = .false.
387  flatsh = .false.
388  typesourcesplit = 'sfs'
389  allocate(loglimit(nw))
390  loglimit(1:nw) = .false.
391 
392  allocate(typeentropy(nw))
393 
394  do iw=1,nw
395  typeentropy(iw)='nul' ! Entropy fix type
396  end do
397 
398  dtdiffpar = 0.5d0
399  dtpar = -1.d0
400 
401  ! problem setup defaults
402  iprob = 1
403 
404  ! end defaults
405 
406  ! Initialize Kronecker delta, and Levi-Civita tensor
407  do i=1,3
408  do j=1,3
409  if(i==j)then
410  kr(i,j)=1
411  else
412  kr(i,j)=0
413  endif
414  do k=1,3
415  if(i==j.or.j==k.or.k==i)then
416  lvc(i,j,k)=0
417  else if(i+1==j.or.i-2==j)then
418  lvc(i,j,k)=1
419  else
420  lvc(i,j,k)=-1
421  endif
422  enddo
423  enddo
424  enddo
425 
426  ! These are used to construct file and log names from multiple par files
427  basename_full = ''
428  basename_prev = ''
429 
430  do i = 1, size(par_files)
431  if (mype == 0) print *, "Reading " // trim(par_files(i))
432 
433  ! Check whether the file exists
434  inquire(file=trim(par_files(i)), exist=file_exists)
435 
436  if (.not. file_exists) then
437  write(err_msg, *) "The parameter file " // trim(par_files(i)) // &
438  " does not exist"
439  call mpistop(trim(err_msg))
440  end if
441 
442  open(unitpar, file=trim(par_files(i)), status='old')
443 
444  ! Try to read in the namelists. They can be absent or in a different
445  ! order, since we rewind before each read.
446  rewind(unitpar)
447  read(unitpar, filelist, end=101)
448 
449 101 rewind(unitpar)
450  read(unitpar, savelist, end=102)
451 
452 102 rewind(unitpar)
453  read(unitpar, stoplist, end=103)
454 
455 103 rewind(unitpar)
456  read(unitpar, methodlist, end=104)
457 
458 104 rewind(unitpar)
459  read(unitpar, boundlist, end=105)
460 
461 105 rewind(unitpar)
462  read(unitpar, meshlist, end=106)
463 
464 106 rewind(unitpar)
465  read(unitpar, paramlist, end=107)
466 
467 107 rewind(unitpar)
468  read(unitpar, euvlist, end=108)
469 
470 108 close(unitpar)
471 
472  ! Append the log and file names given in the par files
473  if (base_filename /= basename_prev) &
474  basename_full = trim(basename_full) // trim(base_filename)
475  basename_prev = base_filename
476  end do
477 
478  base_filename = basename_full
479 
480  ! Check whether output directory is writable
481  if(mype==0) then
482  dummy_file = trim(base_filename)//"DUMMY"
483  open(newunit=my_unit, file=trim(dummy_file), iostat=iostate)
484  if (iostate /= 0) then
485  call mpistop("Can't write to output directory (" // &
486  trim(base_filename) // ")")
487  else
488  close(my_unit, status='delete')
489  end if
490  end if
491 
492  ! restart filename from command line overwrites the one in par file
493  if(restart_from_file_arg /= undefined) &
494  restart_from_file=restart_from_file_arg
495 
496  if (resume_previous_run) then
497  ! Root process will search snapshot
498  if (mype == 0) then
499  do i = -1, 9998
500  ! Check if the next snapshot is missing
501  if (.not. snapshot_exists(i+1)) exit
502  end do
503 
504  if (i == -1) then
505  ! If initial data is missing (e.g. moved due to lack of space),
506  ! search file with highest index
507  do i = 9999, 0, -1
508  if (snapshot_exists(i)) exit
509  end do
510  end if
511  end if
512  call mpi_bcast(i, 1, mpi_integer, 0, icomm, ierrmpi)
513 
514  if (i == -1) then
515  if(mype==0) write(*,*) "No snapshots found to resume from, start a new run..."
516  else
517  !call mpistop("No snapshots found to resume from")
518  ! Set file name to restart from
519  write(restart_from_file, "(a,i4.4,a)") trim(base_filename), i, ".dat"
520  end if
521  end if
522 
523  if (restart_from_file == undefined) then
524  snapshotnext = 0
525  slicenext = 0
526  collapsenext = 0
527  if (firstprocess) &
528  call mpistop("Please restart from a snapshot when firstprocess=T")
529  if (convert) &
530  call mpistop('Change convert to .false. for a new run!')
531  end if
532 
533  if (small_pressure < 0.d0) call mpistop("small_pressure should be positive.")
534  if (small_density < 0.d0) call mpistop("small_density should be positive.")
535  ! Give priority to non-zero small temperature
537 
538  if(convert) autoconvert=.false.
539 
540  where (tsave_log < bigdouble) tsave(:, 1) = tsave_log
541  where (tsave_dat < bigdouble) tsave(:, 2) = tsave_dat
542  where (tsave_slice < bigdouble) tsave(:, 3) = tsave_slice
543  where (tsave_collapsed < bigdouble) tsave(:, 4) = tsave_collapsed
544  where (tsave_custom < bigdouble) tsave(:, 5) = tsave_custom
545 
546  if (dtsave_log < bigdouble) dtsave(1) = dtsave_log
547  if (dtsave_dat < bigdouble) dtsave(2) = dtsave_dat
548  if (dtsave_slice < bigdouble) dtsave(3) = dtsave_slice
549  if (dtsave_collapsed < bigdouble) dtsave(4) = dtsave_collapsed
550  if (dtsave_custom < bigdouble) dtsave(5) = dtsave_custom
551 
552  if (tsavestart_log < bigdouble) tsavestart(1) = tsavestart_log
553  if (tsavestart_dat < bigdouble) tsavestart(2) = tsavestart_dat
554  if (tsavestart_slice < bigdouble) tsavestart(3) = tsavestart_slice
555  if (tsavestart_collapsed < bigdouble) tsavestart(4) = tsavestart_collapsed
556  if (tsavestart_custom < bigdouble) tsavestart(5) = tsavestart_custom
557 
558  if (ditsave_log < bigdouble) ditsave(1) = ditsave_log
559  if (ditsave_dat < bigdouble) ditsave(2) = ditsave_dat
560  if (ditsave_slice < bigdouble) ditsave(3) = ditsave_slice
561  if (ditsave_collapsed < bigdouble) ditsave(4) = ditsave_collapsed
562  if (ditsave_custom < bigdouble) ditsave(5) = ditsave_custom
563  ! convert hours to seconds for ending wall time
564  if (wall_time_max < bigdouble) wall_time_max=wall_time_max*3600.d0
565 
566  if (mype == 0) then
567  write(unitterm, *) ''
568  write(unitterm, *) 'Output type | tsavestart | dtsave | ditsave | itsave(1) | tsave(1)'
569  write(fmt_string, *) '(A12," | ",E9.3E2," | ",E9.3E2," | ",I6," | "'//&
570  ',I6, " | ",E9.3E2)'
571  end if
572 
573  do ifile = 1, nfile
574  if (mype == 0) write(unitterm, fmt_string) trim(output_names(ifile)), &
575  tsavestart(ifile), dtsave(ifile), ditsave(ifile), itsave(1, ifile), tsave(1, ifile)
576  end do
577 
578  if (mype == 0) write(unitterm, *) ''
579 
580  do islice=1,nslices
581  if(slicedir(islice) > ndim) &
582  write(uniterr,*)'Warning in read_par_files: ', &
583  'Slice ', islice,' direction',slicedir(islice),'larger than ndim=',ndim
584  if(slicedir(islice) < 1) &
585  write(uniterr,*)'Warning in read_par_files: ', &
586  'Slice ', islice,' direction',slicedir(islice),'too small, should be [',1,ndim,']'
587  end do
588 
589  if(it_max==biginteger .and. time_max==bigdouble.and.mype==0) write(uniterr,*) &
590  'Warning in read_par_files: it_max or time_max not given!'
591 
592  do level=1,nlevelshi
593  if(flux_scheme(level)=='tvd'.and.time_integrator/='onestep') &
594  call mpistop(" tvd is onestep method, reset time_integrator='onestep'")
595  if(flux_scheme(level)=='tvd')then
596  if(mype==0.and.(.not.dimsplit)) write(unitterm,*) &
597  'Warning: setting dimsplit=T for tvd, as used for level=',level
598  dimsplit=.true.
599  endif
600  if(flux_scheme(level)=='hlld'.and.physics_type/='mhd') &
601  call mpistop("Cannot use hlld flux if not using MHD physics!")
602 
603  if(flux_scheme(level)=='hllc'.and.physics_type=='mf') &
604  call mpistop("Cannot use hllc flux if using magnetofriction physics!")
605 
606  if(flux_scheme(level)=='tvd'.and.physics_type=='mf') &
607  call mpistop("Cannot use tvd flux if using magnetofriction physics!")
608 
609  if(flux_scheme(level)=='tvdmu'.and.physics_type=='mf') &
610  call mpistop("Cannot use tvdmu flux if using magnetofriction physics!")
611 
612  if (typepred1(level)=='default') then
613  select case (flux_scheme(level))
614  case ('cd')
615  typepred1(level)='cd'
616  case ('cd4')
617  typepred1(level)='cd4'
618  case ('fd')
619  typepred1(level)='fd'
620  case ('tvdlf','tvdmu')
621  typepred1(level)='hancock'
622  case ('hll')
623  typepred1(level)='hll'
624  case ('hllc')
625  typepred1(level)='hllc'
626  case ('hllcd')
627  typepred1(level)='hllcd'
628  case ('hlld')
629  typepred1(level)='hlld'
630  case ('nul','source','tvd')
631  typepred1(level)='nul'
632  case default
633  call mpistop("No default predictor for this full step")
634  end select
635  end if
636  end do
637 
638  ! finite difference scheme fd need global maximal speed
639  if(any(flux_scheme=='fd')) need_global_cmax=.true.
640 
641  select case (time_integrator)
642  case ("onestep")
643  nstep=1
644  case ("twostep", "twostep_trapezoidal")
645  nstep=2
646  case ("threestep")
647  nstep=3
648  case ("fourstep","rk4","jameson","ssprk43")
649  nstep=4
650  case ("ssprk54")
651  nstep=5
652  case default
653  call mpistop("Unknown time_integrator")
654  end select
655 
656  do i = 1, ndim
657  select case (stretch_dim(i))
658  case (undefined, 'none')
660  stretched_dim(i) = .false.
661  case ('uni','uniform')
663  stretched_dim(i) = .true.
664  case ('symm', 'symmetric')
666  stretched_dim(i) = .true.
667  case default
669  stretched_dim(i) = .false.
670  if (mype == 0) print *, 'Got stretch_type = ', stretch_type(i)
671  call mpistop('Unknown stretch type')
672  end select
673  end do
674 
675  ! Harmonize the parameters for dimensional splitting and source splitting
676  if(typedimsplit =='default'.and. dimsplit) typedimsplit='xyyx'
677  if(typedimsplit =='default'.and..not.dimsplit) typedimsplit='unsplit'
678  dimsplit = typedimsplit /='unsplit'
679 
680  if(coordinate==-1) then
682  if(mype==0) then
683  write(*,*) 'Warning: coordinate system is not specified!'
684  write(*,*) 'call set_coordinate_system in usr_init in mod_usr.t'
685  write(*,*) 'Now use Cartesian coordinate'
686  end if
687  end if
688 
689  if(coordinate==cartesian) then
690  slab=.true.
691  slab_uniform=.true.
692  if(any(stretched_dim)) then
694  slab_uniform=.false.
695  end if
696  else
697  slab=.false.
698  slab_uniform=.false.
699  end if
700 
701  if(coordinate==spherical) then
702  if(dimsplit) then
703  if(mype==0)print *,'Warning: spherical symmetry needs dimsplit=F, resetting'
704  dimsplit=.false.
705  end if
706  end if
707 
708  if (ndim==1) dimsplit=.false.
709 
710  ! Type limiter is of integer type for performance
711  allocate(type_limiter(nlevelshi))
713 
714  do level=1,nlevelshi
715  type_limiter(level) = limiter_type(limiter(level))
716  type_gradient_limiter(level) = limiter_type(gradient_limiter(level))
717  end do
718 
719  if (any(limiter(1:nlevelshi)== 'ppm')&
720  .and.(flatsh.and.physics_type=='rho')) then
721  call mpistop(" PPM with flatsh=.true. can not be used with physics_type='rho'!")
722  end if
723 
724  ! Copy boundary conditions to typeboundary, which is used internally
725  {
726  if (any(typeboundary_min^d /= undefined)) then
727  typeboundary(1:nwfluxbc, 2*^d-1) = typeboundary_min^d(1:nwfluxbc)
728  end if
729 
730  if (any(typeboundary_max^d /= undefined)) then
731  typeboundary(1:nwfluxbc, 2*^d) = typeboundary_max^d(1:nwfluxbc)
732  end if
733  }
734 
735  ! psi, tracers take the same boundary type as the first variable
736  if (nwfluxbc<nwflux) then
737  do iw = nwfluxbc+1, nwflux
738  typeboundary(iw,:) = typeboundary(1, :)
739  end do
740  end if
741 
742  if (any(typeboundary == undefined)) then
743  call mpistop("Not all boundary conditions have been defined")
744  end if
745 
746  do idim=1,ndim
747  periodb(idim)=(any(typeboundary(:,2*idim-1:2*idim)=='periodic'))
748  aperiodb(idim)=(any(typeboundary(:,2*idim-1:2*idim)=='aperiodic'))
749  if (periodb(idim).or.aperiodb(idim)) then
750  do iw=1,nwflux
751  if (typeboundary(iw,2*idim-1) .ne. typeboundary(iw,2*idim)) &
752  call mpistop("Wrong counterpart in periodic boundary")
753 
754  if (typeboundary(iw,2*idim-1) /= 'periodic' .and. &
755  typeboundary(iw,2*idim-1) /= 'aperiodic') then
756  call mpistop("Each dimension should either have all "//&
757  "or no variables periodic, some can be aperiodic")
758  end if
759  end do
760  end if
761  end do
762  {^nooned
763  do idim=1,ndim
764  if(any(typeboundary(:,2*idim-1)=='pole')) then
765  if(any(typeboundary(:,2*idim-1)/='pole')) typeboundary(:,2*idim-1)='pole'
766  if(phys_energy) then
767  windex=2
768  else
769  windex=1
770  end if
771  typeboundary(:,2*idim-1)='symm'
772  if(physics_type/='rho') then
773  select case(coordinate)
774  case(cylindrical)
775  typeboundary(phi_+1,2*idim-1)='asymm'
776  if(physics_type=='mhd') typeboundary(ndir+windex+phi_,2*idim-1)='asymm'
777  case(spherical)
778  typeboundary(3:ndir+1,2*idim-1)='asymm'
779  if(physics_type=='mhd') typeboundary(ndir+windex+2:ndir+windex+ndir,2*idim-1)='asymm'
780  case default
781  call mpistop('Pole is in cylindrical, polar, spherical coordinates!')
782  end select
783  end if
784  end if
785  if(any(typeboundary(:,2*idim)=='pole')) then
786  if(any(typeboundary(:,2*idim)/='pole')) typeboundary(:,2*idim)='pole'
787  if(phys_energy) then
788  windex=2
789  else
790  windex=1
791  end if
792  typeboundary(:,2*idim)='symm'
793  if(physics_type/='rho') then
794  select case(coordinate)
795  case(cylindrical)
796  typeboundary(phi_+1,2*idim)='asymm'
797  if(physics_type=='mhd') typeboundary(ndir+windex+phi_,2*idim)='asymm'
798  case(spherical)
799  typeboundary(3:ndir+1,2*idim)='asymm'
800  if(physics_type=='mhd') typeboundary(ndir+windex+2:ndir+windex+ndir,2*idim)='asymm'
801  case default
802  call mpistop('Pole is in cylindrical, polar, spherical coordinates!')
803  end select
804  end if
805  end if
806  end do
807  }
808 
809  if(any(limiter(1:nlevelshi)=='mp5')) then
810  nghostcells=3
811  end if
812 
813  if(any(limiter(1:nlevelshi)=='weno5')) then
814  nghostcells=3
815  end if
816 
817  if(any(limiter(1:nlevelshi)=='weno5nm')) then
818  nghostcells=3
819  end if
820 
821  if(any(limiter(1:nlevelshi)=='wenoz5')) then
822  nghostcells=3
823  end if
824 
825  if(any(limiter(1:nlevelshi)=='wenoz5nm')) then
826  nghostcells=3
827  end if
828 
829  if(any(limiter(1:nlevelshi)=='wenozp5')) then
830  nghostcells=3
831  end if
832 
833  if(any(limiter(1:nlevelshi)=='wenozp5nm')) then
834  nghostcells=3
835  end if
836 
837  if(any(limiter(1:nlevelshi)=='ppm')) then
838  nghostcells=4
839  end if
840 
841  if(any(limiter(1:nlevelshi)=='weno7')) then
842  nghostcells=4
843  end if
844 
845  if(any(limiter(1:nlevelshi)=='mpweno7')) then
846  nghostcells=4
847  end if
848 
849  if(any(limiter(1:nlevelshi)=='exeno7')) then
850  nghostcells=4
851  end if
852 
853  ! prolongation in AMR for constrained transport MHD needs even number ghosts
854  if(stagger_grid .and. refine_max_level>1 .and. mod(nghostcells,2)/=0) then
856  end if
857 
858  ! If a wider stencil is used, extend the number of ghost cells
860 
861  select case (coordinate)
862  {^nooned
863  case (spherical)
864  xprob^lim^de=xprob^lim^de*two*dpi;
865  \}
866  case (cylindrical)
867  {
868  if (^d==phi_) then
869  xprob^lim^d=xprob^lim^d*two*dpi;
870  end if
871  \}
872  end select
873 
874  ! full block size including ghostcells
875  {ixghi^d = block_nx^d + 2*nghostcells\}
876  {ixgshi^d = ixghi^d\}
877 
878  nx_vec = [{domain_nx^d|, }]
879  block_nx_vec = [{block_nx^d|, }]
880 
881  if (any(nx_vec < 4) .or. any(mod(nx_vec, 2) == 1)) &
882  call mpistop('Grid size (domain_nx^D) has to be even and >= 4')
883 
884  if (any(block_nx_vec < 4) .or. any(mod(block_nx_vec, 2) == 1)) &
885  call mpistop('Block size (block_nx^D) has to be even and >= 4')
886 
887  if (any([ domain_nx^d/block_nx^d ] == 1) .and. mype == 0) then
888  print *, "TODO: possible bug when domain_nx^D/block_nx^D == 1"
889  end if
890 
891  { if(mod(domain_nx^d,block_nx^d)/=0) &
892  call mpistop('Grid (domain_nx^D) and block (block_nx^D) must be consistent') \}
893 
895  write(unitterm,*)'Error: refine_max_level',refine_max_level,'>nlevelshi ',nlevelshi
896  call mpistop("Reset nlevelshi and recompile!")
897  endif
898 
899  if (any(stretched_dim)) then
900  allocate(qstretch(0:nlevelshi,1:ndim),dxfirst(0:nlevelshi,1:ndim),&
902  allocate(nstretchedblocks(1:nlevelshi,1:ndim))
903  qstretch(0:nlevelshi,1:ndim)=0.0d0
904  dxfirst(0:nlevelshi,1:ndim)=0.0d0
906  {if (stretch_type(^d) == stretch_uni) then
907  ! first some sanity checks
908  if(qstretch_baselevel(^d)<1.0d0.or.qstretch_baselevel(^d)==bigdouble) then
909  if(mype==0) then
910  write(*,*) 'stretched grid needs finite qstretch_baselevel>1'
911  write(*,*) 'will try default value for qstretch_baselevel in dimension', ^d
912  endif
913  if(xprobmin^d>smalldouble)then
914  qstretch_baselevel(^d)=(xprobmax^d/xprobmin^d)**(1.d0/dble(domain_nx^d))
915  else
916  call mpistop("can not set qstretch_baselevel automatically")
917  endif
918  endif
919  if(mod(block_nx^d,2)==1) &
920  call mpistop("stretched grid needs even block size block_nxD")
921  if(mod(domain_nx^d/block_nx^d,2)/=0) &
922  call mpistop("number level 1 blocks in D must be even")
923  qstretch(1,^d)=qstretch_baselevel(^d)
924  dxfirst(1,^d)=(xprobmax^d-xprobmin^d) &
925  *(1.0d0-qstretch(1,^d))/(1.0d0-qstretch(1,^d)**domain_nx^d)
926  qstretch(0,^d)=qstretch(1,^d)**2
927  dxfirst(0,^d)=dxfirst(1,^d)*(1.0d0+qstretch(1,^d))
928  if(refine_max_level>1)then
929  do ilev=2,refine_max_level
930  qstretch(ilev,^d)=dsqrt(qstretch(ilev-1,^d))
931  dxfirst(ilev,^d)=dxfirst(ilev-1,^d) &
932  /(1.0d0+dsqrt(qstretch(ilev-1,^d)))
933  enddo
934  endif
935  endif \}
936  if(mype==0) then
937  {if(stretch_type(^d) == stretch_uni) then
938  write(*,*) 'Stretched dimension ', ^d
939  write(*,*) 'Using stretched grid with qs=',qstretch(0:refine_max_level,^d)
940  write(*,*) ' and first cell sizes=',dxfirst(0:refine_max_level,^d)
941  endif\}
942  end if
943  {if(stretch_type(^d) == stretch_symm) then
944  if(mype==0) then
945  write(*,*) 'will apply symmetric stretch in dimension', ^d
946  endif
947  if(mod(block_nx^d,2)==1) &
948  call mpistop("stretched grid needs even block size block_nxD")
949  ! checks on the input variable nstretchedblocks_baselevel
950  if(nstretchedblocks_baselevel(^d)==0) &
951  call mpistop("need finite even number of stretched blocks at baselevel")
952  if(mod(nstretchedblocks_baselevel(^d),2)==1) &
953  call mpistop("need even number of stretched blocks at baselevel")
954  if(qstretch_baselevel(^d)<1.0d0.or.qstretch_baselevel(^d)==bigdouble) &
955  call mpistop('stretched grid needs finite qstretch_baselevel>1')
956  ! compute stretched part to ensure uniform center
957  ipower=(nstretchedblocks_baselevel(^d)/2)*block_nx^d
958  if(nstretchedblocks_baselevel(^d)==domain_nx^d/block_nx^d)then
959  xstretch^d=0.5d0*(xprobmax^d-xprobmin^d)
960  else
961  xstretch^d=(xprobmax^d-xprobmin^d) &
962  /(2.0d0+dble(domain_nx^d-nstretchedblocks_baselevel(^d)*block_nx^d) &
963  *(1.0d0-qstretch_baselevel(^d))/(1.0d0-qstretch_baselevel(^d)**ipower))
964  endif
965  if(xstretch^d>(xprobmax^d-xprobmin^d)*0.5d0) &
966  call mpistop(" stretched grid part should not exceed full domain")
967  dxfirst(1,^d)=xstretch^d*(1.0d0-qstretch_baselevel(^d)) &
968  /(1.0d0-qstretch_baselevel(^d)**ipower)
969  nstretchedblocks(1,^d)=nstretchedblocks_baselevel(^d)
970  qstretch(1,^d)=qstretch_baselevel(^d)
971  qstretch(0,^d)=qstretch(1,^d)**2
972  dxfirst(0,^d)=dxfirst(1,^d)*(1.0d0+qstretch(1,^d))
973  dxmid(1,^d)=dxfirst(1,^d)
974  dxmid(0,^d)=dxfirst(1,^d)*2.0d0
975  if(refine_max_level>1)then
976  do ilev=2,refine_max_level
977  nstretchedblocks(ilev,^d)=2*nstretchedblocks(ilev-1,^d)
978  qstretch(ilev,^d)=dsqrt(qstretch(ilev-1,^d))
979  dxfirst(ilev,^d)=dxfirst(ilev-1,^d) &
980  /(1.0d0+dsqrt(qstretch(ilev-1,^d)))
981  dxmid(ilev,^d)=dxmid(ilev-1,^d)/2.0d0
982  enddo
983  endif
984  ! sanity check on total domain size:
985  sizeuniformpart^d=dxfirst(1,^d) &
986  *(domain_nx^d-nstretchedblocks_baselevel(^d)*block_nx^d)
987  if(mype==0) then
988  print *,'uniform part of size=',sizeuniformpart^d
989  print *,'setting of domain is then=',2*xstretch^d+sizeuniformpart^d
990  print *,'versus=',xprobmax^d-xprobmin^d
991  endif
992  if(dabs(xprobmax^d-xprobmin^d-2*xstretch^d-sizeuniformpart^d)>smalldouble) then
993  call mpistop('mismatch in domain size!')
994  endif
995  endif \}
996  dxfirst_1mq(0:refine_max_level,1:ndim)=dxfirst(0:refine_max_level,1:ndim) &
997  /(1.0d0-qstretch(0:refine_max_level,1:ndim))
998  end if
999 
1000  dx_vec = [{xprobmax^d-xprobmin^d|, }] / nx_vec
1001 
1002  if (mype==0) then
1003  write(c_ndim, '(I1)') ^nd
1004  write(unitterm, '(A30,' // c_ndim // '(I0," "))') &
1005  ' Domain size (cells): ', nx_vec
1006  write(unitterm, '(A30,' // c_ndim // '(E9.3," "))') &
1007  ' Level one dx: ', dx_vec
1008  end if
1009 
1010  if (any(dx_vec < smalldouble)) &
1011  call mpistop("Incorrect domain size (too small grid spacing)")
1012 
1013  dx(:, 1) = dx_vec
1014 
1015  if(sum(w_refine_weight(:))==0) w_refine_weight(1) = 1.d0
1016  if(dabs(sum(w_refine_weight(:))-1.d0)>smalldouble) then
1017  write(unitterm,*) "Sum of all elements in w_refine_weight be 1.d0"
1018  call mpistop("Reset w_refine_weight so the sum is 1.d0")
1019  end if
1020 
1021  if (mype==0) write(unitterm, '(A30)', advance='no') 'Refine estimation: '
1022 
1023  select case (refine_criterion)
1024  case (0)
1025  if (mype==0) write(unitterm, '(A)') "user defined"
1026  case (1)
1027  if (mype==0) write(unitterm, '(A)') "relative error"
1028  case (2)
1029  if (mype==0) write(unitterm, '(A)') "Lohner's original scheme"
1030  case (3)
1031  if (mype==0) write(unitterm, '(A)') "Lohner's scheme"
1032  case default
1033  call mpistop("Unknown error estimator, change refine_criterion")
1034  end select
1035 
1036  if (tfixgrid<bigdouble/2.0d0) then
1037  if(mype==0)print*,'Warning, at time=',tfixgrid,'the grid will be fixed'
1038  end if
1039  if (itfixgrid<biginteger/2) then
1040  if(mype==0)print*,'Warning, at iteration=',itfixgrid,'the grid will be fixed'
1041  end if
1042  if (ditregrid>1) then
1043  if(mype==0)print*,'Note, Grid is reconstructed once every',ditregrid,'iterations'
1044  end if
1045 
1046 
1047  do islice=1,nslices
1048  select case(slicedir(islice))
1049  {case(^d)
1050  if(slicecoord(islice)<xprobmin^d.or.slicecoord(islice)>xprobmax^d) &
1051  write(uniterr,*)'Warning in read_par_files: ', &
1052  'Slice ', islice, ' coordinate',slicecoord(islice),'out of bounds for dimension ',slicedir(islice)
1053  \}
1054  end select
1055  end do
1056 
1057  if (mype==0) then
1058  write(unitterm, '(A30,A,A)') 'restart_from_file: ', ' ', trim(restart_from_file)
1059  write(unitterm, '(A30,L1)') 'converting: ', convert
1060  write(unitterm, '(A)') ''
1061  endif
1062 
1063  end subroutine read_par_files
1064 
1065  !> Routine to find entries in a string
1066  subroutine get_fields_string(line, delims, n_max, fields, n_found, fully_read)
1067  !> The line from which we want to read
1068  character(len=*), intent(in) :: line
1069  !> A string with delimiters. For example delims = " ,'"""//char(9)
1070  character(len=*), intent(in) :: delims
1071  !> Maximum number of entries to read in
1072  integer, intent(in) :: n_max
1073  !> Number of entries found
1074  integer, intent(inout) :: n_found
1075  !> Fields in the strings
1076  character(len=*), intent(inout) :: fields(n_max)
1077  logical, intent(out), optional :: fully_read
1078 
1079  integer :: ixs_start(n_max)
1080  integer :: ixs_end(n_max)
1081  integer :: ix, ix_prev
1082 
1083  ix_prev = 0
1084  n_found = 0
1085 
1086  do while (n_found < n_max)
1087  ! Find the starting point of the next entry (a non-delimiter value)
1088  ix = verify(line(ix_prev+1:), delims)
1089  if (ix == 0) exit
1090 
1091  n_found = n_found + 1
1092  ixs_start(n_found) = ix_prev + ix ! This is the absolute position in 'line'
1093 
1094  ! Get the end point of the current entry (next delimiter index minus one)
1095  ix = scan(line(ixs_start(n_found)+1:), delims) - 1
1096 
1097  if (ix == -1) then ! If there is no last delimiter,
1098  ixs_end(n_found) = len(line) ! the end of the line is the endpoint
1099  else
1100  ixs_end(n_found) = ixs_start(n_found) + ix
1101  end if
1102 
1103  fields(n_found) = line(ixs_start(n_found):ixs_end(n_found))
1104  ix_prev = ixs_end(n_found) ! We continue to search from here
1105  end do
1106 
1107  if (present(fully_read)) then
1108  ix = verify(line(ix_prev+1:), delims)
1109  fully_read = (ix == 0) ! Are there only delimiters?
1110  end if
1111 
1112  end subroutine get_fields_string
1113 
1114  subroutine saveamrfile(ifile)
      !> Dispatch a single output action selected by ifile: full .dat
      !> snapshot, slice, collapsed view, log entry, or user analysis.
      !> NOTE(review): the Doxygen extraction this listing comes from has
      !> dropped original lines 1129/1133/1143 (presumably the convert /
      !> regression-test log calls) — confirm against the repository source.
1118  use mod_particles, only: write_particles_snapshot
1119  use mod_slice, only: write_slice
1120  use mod_collapse, only: write_collapsed
1121  integer:: ifile
1122 
1123  select case (ifile)
1124  case (fileout_)
1125  ! Write .dat snapshot
1126  call write_snapshot()
1127 
1128  ! Generate formatted output (e.g., VTK)
1130 
1131  if(use_particles) call write_particles_snapshot()
1132 
1134  case (fileslice_)
1135  call write_slice
1136  case (filecollapse_)
1137  call write_collapsed
1138  case (filelog_)
      ! Log output style is runtime-selected via typefilelog
1139  select case (typefilelog)
1140  case ('default')
1141  call printlog_default
1142  case ('regression_test')
1144  case ('special')
      ! 'special' delegates entirely to the user-supplied routine
1145  if (.not. associated(usr_print_log)) then
1146  call mpistop("usr_print_log not defined")
1147  else
1148  call usr_print_log()
1149  end if
1150  case default
1151  call mpistop("Error in SaveFile: Unknown typefilelog")
1152  end select
1153  case (fileanalysis_)
      ! Analysis output is optional: silently skipped if not provided
1154  if (associated(usr_write_analysis)) then
1155  call usr_write_analysis()
1156  end if
1157  case default
1158  write(*,*) 'No save method is defined for ifile=',ifile
1159  call mpistop("")
1160  end select
1161 
1162  ! opedit: Flush stdout and stderr from time to time.
1163  flush(unit=unitterm)
1164 
1165  end subroutine saveamrfile
1166 
1167  !> Standard method for creating a new output file
1168  subroutine create_output_file(fh, ix, extension, suffix)
1170  integer, intent(out) :: fh !< File handle
1171  integer, intent(in) :: ix !< Index of file
1172  character(len=*), intent(in) :: extension !< Extension of file
1173  character(len=*), intent(in), optional :: suffix !< Optional suffix
1174  character(len=std_len) :: filename
1175  integer :: amode
1176 
1177  if (ix >= 10000) then
1178  call mpistop("Number of output files is limited to 10000 (0...9999)")
1179  end if
1180 
1181  if (present(suffix)) then
1182  write(filename,"(a,a,i4.4,a)") trim(base_filename), &
1183  trim(suffix), ix, extension
1184  else
1185  write(filename,"(a,i4.4,a)") trim(base_filename), ix, extension
1186  end if
1187 
1188  ! MPI cannot easily replace existing files
1189  open(unit=unitsnapshot,file=filename,status='replace')
1190  close(unitsnapshot, status='delete')
1191 
1192  amode = ior(mpi_mode_create, mpi_mode_wronly)
1193  call mpi_file_open(mpi_comm_self,filename,amode, &
1194  mpi_info_null, fh, ierrmpi)
1195 
1196  if (ierrmpi /= 0) then
1197  print *, "Error, cannot create file ", trim(filename)
1198  call mpistop("Fatal error")
1199  end if
1200 
1201  end subroutine create_output_file
1202 
1203  ! Check if a snapshot exists
1204  logical function snapshot_exists(ix)
1206  integer, intent(in) :: ix !< Index of snapshot
1207  character(len=std_len) :: filename
1208 
1209  write(filename, "(a,i4.4,a)") trim(base_filename), ix, ".dat"
1210  inquire(file=trim(filename), exist=snapshot_exists)
1211  end function snapshot_exists
1212 
1213  integer function get_snapshot_index(filename)
1214  character(len=*), intent(in) :: filename
1215  integer :: i
1216 
1217  ! Try to parse index in restart_from_file string (e.g. basename0000.dat)
1218  i = len_trim(filename) - 7
1219  read(filename(i:i+3), '(I4)') get_snapshot_index
1220  end function get_snapshot_index
1221 
1222  !> Write header for a snapshot
1223  !>
1224  !> If you edit the header, don't forget to update: snapshot_write_header(),
1225  !> snapshot_read_header(), doc/fileformat.md, tools/python/dat_reader.py
1226  subroutine snapshot_write_header(fh, offset_tree, offset_block)
1228  use mod_physics
1230  use mod_slice, only: slicenext
1231  integer, intent(in) :: fh !< File handle
1232  integer(kind=MPI_OFFSET_KIND), intent(in) :: offset_tree !< Offset of tree info
1233  integer(kind=MPI_OFFSET_KIND), intent(in) :: offset_block !< Offset of block data
1234  integer, dimension(MPI_STATUS_SIZE) :: st
1235  integer :: iw, er
1236 
      ! Fixed header: version, the two file offsets, then the grid metadata
1237  call mpi_file_write(fh, version_number, 1, mpi_integer, st, er)
      ! NOTE(review): offsets are truncated to default integer here, which
      ! would overflow for files larger than ~2 GiB — confirm this matches
      ! the reader in tools/python/dat_reader.py
1238  call mpi_file_write(fh, int(offset_tree), 1, mpi_integer, st, er)
1239  call mpi_file_write(fh, int(offset_block), 1, mpi_integer, st, er)
1240  call mpi_file_write(fh, nw, 1, mpi_integer, st, er)
1241  call mpi_file_write(fh, ndir, 1, mpi_integer, st, er)
1242  call mpi_file_write(fh, ndim, 1, mpi_integer, st, er)
1243  call mpi_file_write(fh, levmax, 1, mpi_integer, st, er)
1244  call mpi_file_write(fh, nleafs, 1, mpi_integer, st, er)
1245  call mpi_file_write(fh, nparents, 1, mpi_integer, st, er)
1246  call mpi_file_write(fh, it, 1, mpi_integer, st, er)
1247  ! Note: It is nice when this double has an even number of 4 byte
1248  ! integers before it (for alignment)
1249  call mpi_file_write(fh, global_time, 1, mpi_double_precision, st, er)
1250 
      ! Domain extent and resolution per dimension (LASY ^d expands to
      ! one entry per dimension)
1251  call mpi_file_write(fh, [ xprobmin^d ], ndim, &
1252  mpi_double_precision, st, er)
1253  call mpi_file_write(fh, [ xprobmax^d ], ndim, &
1254  mpi_double_precision, st, er)
1255  call mpi_file_write(fh, [ domain_nx^d ], ndim, &
1256  mpi_integer, st, er)
1257  call mpi_file_write(fh, [ block_nx^d ], ndim, &
1258  mpi_integer, st, er)
1259 
1260  ! Periodicity (assume all variables are periodic if one is)
1261  call mpi_file_write(fh, periodb, ndim, mpi_logical, st, er)
1262 
1263  ! Geometry
1264  call mpi_file_write(fh, geometry_name(1:name_len), &
1265  name_len, mpi_character, st, er)
1266 
1267  ! Write stagger grid mark
1268  call mpi_file_write(fh, stagger_grid, 1, mpi_logical, st, er)
1269 
      ! Conservative variable names, one fixed-width record per variable
1270  do iw = 1, nw
1271  call mpi_file_write(fh, cons_wnames(iw), name_len, mpi_character, st, er)
1272  end do
1273 
1274  ! Physics related information
1275  call mpi_file_write(fh, physics_type, name_len, mpi_character, st, er)
1276 
1277  ! Format:
1278  ! integer :: n_par
1279  ! double precision :: values(n_par)
1280  ! character(n_par * name_len) :: names
1281  call phys_write_info(fh)
1282 
1283  ! Write snapshotnext etc., which is useful for restarting.
1284  ! Note we add one, since snapshotnext is updated *after* this procedure
      ! When a wall-time limit triggered this write, the snapshot replaces
      ! the slot instead of advancing it, so no +1 in that case
1285  if(pass_wall_time) then
1286  call mpi_file_write(fh, snapshotnext, 1, mpi_integer, st, er)
1287  else
1288  call mpi_file_write(fh, snapshotnext+1, 1, mpi_integer, st, er)
1289  end if
1290  call mpi_file_write(fh, slicenext, 1, mpi_integer, st, er)
1291  call mpi_file_write(fh, collapsenext, 1, mpi_integer, st, er)
1292 
1293  end subroutine snapshot_write_header
1294 
1295  !> Read header for a snapshot
1296  !>
1297  !> If you edit the header, don't forget to update: snapshot_write_header(),
1298  !> snapshot_read_header(), doc/fileformat.md, tools/python/dat_reader.py
1299  subroutine snapshot_read_header(fh, offset_tree, offset_block)
1302  use mod_physics, only: physics_type
1303  use mod_slice, only: slicenext
1304  integer, intent(in) :: fh !< File handle
1305  integer(MPI_OFFSET_KIND), intent(out) :: offset_tree !< Offset of tree info
1306  integer(MPI_OFFSET_KIND), intent(out) :: offset_block !< Offset of block data
1307  integer :: i, version
1308  integer :: ibuf(ndim), iw
1309  double precision :: rbuf(ndim)
1310  integer, dimension(MPI_STATUS_SIZE) :: st
1311  character(len=name_len), allocatable :: var_names(:), param_names(:)
1312  double precision, allocatable :: params(:)
1313  character(len=name_len) :: phys_name, geom_name
1314  integer :: er, n_par, tmp_int
1315  logical :: stagger_mark_dat, periodic(ndim)
1316 
1317  ! Version number
1318  call mpi_file_read(fh, version, 1, mpi_integer, st, er)
1319  if (all(compatible_versions /= version)) then
1320  call mpistop("Incompatible file version (maybe old format?)")
1321  end if
1322 
1323  ! offset_tree
1324  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1325  offset_tree = ibuf(1)
1326 
1327  ! offset_block
1328  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1329  offset_block = ibuf(1)
1330 
1331  ! nw
      ! A differing nw is tolerated (warning only): read_snapshot later
      ! maps nw_found variables onto the current nw, possibly through
      ! usr_transform_w
1332  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1333  nw_found=ibuf(1)
1334  if (nw /= ibuf(1)) then
1335  write(*,*) "nw=",nw," and nw found in restart file=",ibuf(1)
1336  write(*,*) "Please be aware of changes in w at restart."
1337  !call mpistop("currently, changing nw at restart is not allowed")
1338  end if
1339 
1340  ! ndir
1341  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1342  if (ibuf(1) /= ndir) then
1343  write(*,*) "ndir in restart file = ",ibuf(1)
1344  write(*,*) "ndir = ",ndir
1345  call mpistop("reset ndir to ndir in restart file")
1346  end if
1347 
1348  ! ndim
1349  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1350  if (ibuf(1) /= ndim) then
1351  write(*,*) "ndim in restart file = ",ibuf(1)
1352  write(*,*) "ndim = ",ndim
1353  call mpistop("reset ndim to ndim in restart file")
1354  end if
1355 
1356  ! levmax
1357  call mpi_file_read(fh, ibuf(1), 1, mpi_integer, st, er)
1358  if (ibuf(1) > refine_max_level) then
1359  write(*,*) "number of levels in restart file = ",ibuf(1)
1360  write(*,*) "refine_max_level = ",refine_max_level
1361  call mpistop("refine_max_level < num. levels in restart file")
1362  end if
1363 
1364  ! nleafs
1365  call mpi_file_read(fh, nleafs, 1, mpi_integer, st, er)
1366 
1367  ! nparents
1368  call mpi_file_read(fh, nparents, 1, mpi_integer, st, er)
1369 
1370  ! it
1371  call mpi_file_read(fh, it, 1, mpi_integer, st, er)
1372 
1373  ! global time
1374  call mpi_file_read(fh, global_time, 1, mpi_double_precision, st, er)
1375 
1376  ! xprobmin^D
      ! Domain bounds must match the par file exactly (zero tolerance)
1377  call mpi_file_read(fh,rbuf(1:ndim),ndim,mpi_double_precision,st,er)
1378  if (maxval(abs(rbuf(1:ndim) - [ xprobmin^d ])) > 0) then
1379  write(*,*) "Error: xprobmin differs from restart data: ", rbuf(1:ndim)
1380  call mpistop("change xprobmin^D in par file")
1381  end if
1382 
1383  ! xprobmax^D
1384  call mpi_file_read(fh,rbuf(1:ndim),ndim,mpi_double_precision,st,er)
1385  if (maxval(abs(rbuf(1:ndim) - [ xprobmax^d ])) > 0) then
1386  write(*,*) "Error: xprobmax differs from restart data: ", rbuf(1:ndim)
1387  call mpistop("change xprobmax^D in par file")
1388  end if
1389 
1390  ! domain_nx^D
1391  call mpi_file_read(fh,ibuf(1:ndim), ndim, mpi_integer,st,er)
1392  if (any(ibuf(1:ndim) /= [ domain_nx^d ])) then
1393  write(*,*) "Error: mesh size differs from restart data: ", ibuf(1:ndim)
1394  call mpistop("change domain_nx^D in par file")
1395  end if
1396 
1397  ! block_nx^D
1398  call mpi_file_read(fh,ibuf(1:ndim), ndim, mpi_integer,st,er)
1399  if (any(ibuf(1:ndim) /= [ block_nx^d ])) then
1400  write(*,*) "Error: block size differs from restart data:", ibuf(1:ndim)
1401  call mpistop("change block_nx^D in par file")
1402  end if
1403 
1404  ! From version 5, read more info about the grid
1405  if (version > 4) then
1406  call mpi_file_read(fh, periodic, ndim, mpi_logical, st, er)
1407  if (any(periodic .neqv. periodb)) &
1408  call mpistop("change in periodicity in par file")
1409 
1410  call mpi_file_read(fh, geom_name, name_len, mpi_character, st, er)
1411 
1412  if (geom_name /= geometry_name(1:name_len)) then
1413  write(*,*) "type of coordinates in data is: ", geom_name
1414  call mpistop("select the correct coordinates in mod_usr.t file")
1415  end if
1416 
1417  call mpi_file_read(fh, stagger_mark_dat, 1, mpi_logical, st, er)
1418  if (stagger_grid .neqv. stagger_mark_dat) then
1419  write(*,*) "Error: stagger grid flag differs from restart data:", stagger_mark_dat
1420  call mpistop("change parameter to use stagger grid")
1421  end if
1422  end if
1423 
1424  ! From version 4 onwards, the later parts of the header must be present
1425  if (version > 3) then
1426  ! w_names (not used here)
1427  allocate(var_names(nw_found))
1428  do iw = 1, nw_found
1429  call mpi_file_read(fh, var_names(iw), name_len, mpi_character, st, er)
1430  end do
1431 
1432  ! Physics related information
1433  call mpi_file_read(fh, phys_name, name_len, mpi_character, st, er)
1434 
1435  if (phys_name /= physics_type) then
1436  call mpistop("Cannot restart with a different physics type")
1437  end if
1438 
      ! Physics parameters: count, values, then fixed-width names
1439  call mpi_file_read(fh, n_par, 1, mpi_integer, st, er)
1440  allocate(params(n_par))
1441  allocate(param_names(n_par))
1442  call mpi_file_read(fh, params, n_par, mpi_double_precision, st, er)
1443  call mpi_file_read(fh, param_names, name_len * n_par, mpi_character, st, er)
1444 
1445  ! Read snapshotnext etc. for restarting
1446  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1447 
1448  ! Only set snapshotnext if the user hasn't specified it
1449  if (snapshotnext == -1) snapshotnext = tmp_int
1450 
1451  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1452  if (slicenext == -1) slicenext = tmp_int
1453 
1454  call mpi_file_read(fh, tmp_int, 1, mpi_integer, st, er)
1455  if (collapsenext == -1) collapsenext = tmp_int
      ! NOTE(review): in the else-branch below, original line 1459 (the
      ! continuation assigning snapshotnext, presumably from
      ! get_snapshot_index) is missing from this listing — confirm against
      ! the repository source
1456  else
1457  ! Guess snapshotnext from file name if not set
1458  if (snapshotnext == -1) &
1460  ! Set slicenext and collapsenext if not set
1461  if (slicenext == -1) slicenext = 0
1462  if (collapsenext == -1) collapsenext = 0
1463  end if
1464 
1465  ! Still used in convert
1467 
1468  end subroutine snapshot_read_header
1469 
1470  !> Compute number of elements in index range
      !> The LASY macro ixO^L expands to the per-dimension lower/upper
      !> bounds (ixOmin^D, ixOmax^D); the product of the per-dimension
      !> extents gives the total cell count of the hyper-rectangle.
1471  pure integer function count_ix(ixO^L)
1472  integer, intent(in) :: ixO^L
1473 
1474  count_ix = product([ ixomax^d ] - [ ixomin^d ] + 1)
1475  end function count_ix
1476 
1477  !> Determine the shape of a block for output (whether to include ghost cells,
1478  !> and on which sides)
1479  subroutine block_shape_io(igrid, n_ghost, ixO^L, n_values)
1481 
1482  integer, intent(in) :: igrid
1483  !> nghost(1:ndim) contains the number of ghost cells on the block's minimum
1484  !> boundaries, and nghost(ndim+1:2*ndim) on the block's maximum boundaries
1485  integer, intent(out) :: n_ghost(2*ndim)
1486  !> Index range on output block
1487  integer, intent(out) :: ixO^L
1488  !> Number of cells/values in output
1489  integer, intent(out) :: n_values
1490 
1491  integer :: idim
1492 
      ! By default no ghost cells are written
1493  n_ghost(:) = 0
1494 
      ! Ghost cells are only stored on faces that coincide with a
      ! physical (domain) boundary, and only when the user asked for it
1495  if(save_physical_boundary) then
1496  do idim=1,ndim
1497  ! Include ghost cells on lower boundary
1498  if(ps(igrid)%is_physical_boundary(2*idim-1)) n_ghost(idim)=nghostcells
1499  ! Include ghost cells on upper boundary
1500  if(ps(igrid)%is_physical_boundary(2*idim)) n_ghost(ndim+idim)=nghostcells
1501  end do
1502  end if
1503 
      ! Extend the mesh index range by the selected ghost layers
      ! (LASY: one assignment per dimension)
1504  {ixomin^d = ixmlo^d - n_ghost(^d)\}
1505  {ixomax^d = ixmhi^d + n_ghost(ndim+^d)\}
1506 
      ! Total number of doubles written for this block: cells times nw
1507  n_values = count_ix(ixo^l) * nw
1508 
1509  end subroutine block_shape_io
1510 
1511  subroutine write_snapshot
      !> Write a full .dat snapshot. Rank 0 owns the file: it writes the
      !> header, forest, and block metadata, writes its own blocks, then
      !> receives and writes every other rank's blocks in Morton order.
      !> The header and the block-offset table are written twice: once
      !> with placeholders, and again at the end once offsets are known.
1514  use mod_physics
1515 
1516  integer :: file_handle, igrid, Morton_no, iwrite
1517  integer :: ipe, ix_buffer(2*ndim+1), n_values
1518  integer :: ixO^L, n_ghost(2*ndim)
1519  integer :: ixOs^L,n_values_stagger
1520  integer :: iorecvstatus(mpi_status_size)
1521  integer :: ioastatus(mpi_status_size)
1522  integer :: igrecvstatus(mpi_status_size)
1523  integer :: istatus(mpi_status_size)
1524  type(tree_node), pointer :: pnode
1525  integer(kind=MPI_OFFSET_KIND) :: offset_tree_info
1526  integer(kind=MPI_OFFSET_KIND) :: offset_block_data
1527  integer(kind=MPI_OFFSET_KIND) :: offset_offsets
1528  double precision, allocatable :: w_buffer(:)
1529 
1530  integer, allocatable :: block_ig(:, :)
1531  integer, allocatable :: block_lvl(:)
1532  integer(kind=MPI_OFFSET_KIND), allocatable :: block_offset(:)
1533 
1534  call mpi_barrier(icomm, ierrmpi)
1535 
1536  ! Allocate send/receive buffer
      ! Sized for the worst case: a full block including all ghost cells
1537  n_values = count_ix(ixg^ll) * nw
1538  if(stagger_grid) then
1539  n_values = n_values + count_ix(ixgs^ll) * nws
1540  end if
1541  allocate(w_buffer(n_values))
1542 
1543  ! Allocate arrays with information about grid blocks
1544  allocate(block_ig(ndim, nleafs))
1545  allocate(block_lvl(nleafs))
1546  allocate(block_offset(nleafs+1))
1547 
1548  ! master processor
1549  if (mype==0) then
1550  call create_output_file(file_handle, snapshotnext, ".dat")
1551 
1552  ! Don't know offsets yet, we will write header again later
1553  offset_tree_info = -1
1554  offset_block_data = -1
1555  call snapshot_write_header(file_handle, offset_tree_info, &
1556  offset_block_data)
1557 
1558  call mpi_file_get_position(file_handle, offset_tree_info, ierrmpi)
1559 
1560  call write_forest(file_handle)
1561 
1562  ! Collect information about the spatial index (ig^D) and refinement level
1563  ! of leaves
1564  do morton_no = morton_start(0), morton_stop(npe-1)
1565  igrid = sfc(1, morton_no)
1566  ipe = sfc(2, morton_no)
1567  pnode => igrid_to_node(igrid, ipe)%node
1568 
1569  block_ig(:, morton_no) = [ pnode%ig^d ]
1570  block_lvl(morton_no) = pnode%level
1571  block_offset(morton_no) = 0 ! Will be determined later
1572  end do
1573 
1574  call mpi_file_write(file_handle, block_lvl, size(block_lvl), &
1575  mpi_integer, istatus, ierrmpi)
1576 
1577  call mpi_file_write(file_handle, block_ig, size(block_ig), &
1578  mpi_integer, istatus, ierrmpi)
1579 
1580  ! Block offsets are currently unknown, but will be overwritten later
1581  call mpi_file_get_position(file_handle, offset_offsets, ierrmpi)
1582  call mpi_file_write(file_handle, block_offset(1:nleafs), nleafs, &
1583  mpi_offset, istatus, ierrmpi)
1584 
1585  call mpi_file_get_position(file_handle, offset_block_data, ierrmpi)
1586 
1587  ! Check whether data was written as expected
      ! The expression assumes MPI_OFFSET occupies 2 * size_int bytes
1588  if (offset_block_data - offset_tree_info /= &
1589  (nleafs + nparents) * size_logical + &
1590  nleafs * ((1+ndim) * size_int + 2 * size_int)) then
1591  if (mype == 0) then
1592  print *, "Warning: MPI_OFFSET type /= 8 bytes"
1593  print *, "This *could* cause problems when reading .dat files"
1594  end if
1595  end if
1596 
1597  block_offset(1) = offset_block_data
1598  iwrite = 0
1599  end if
1600 
      ! Every rank packs its own blocks; rank 0 writes directly, other
      ! ranks send size info plus data to rank 0
1601  do morton_no=morton_start(mype), morton_stop(mype)
1602  igrid = sfc_to_igrid(morton_no)
1603  itag = morton_no
1604 
1605  if (nwaux>0) then
1606  ! extra layer around mesh only for later averaging in convert
1607  ! set dxlevel value for use in gradient subroutine,
1608  ! which might be used in getaux
1609  saveigrid=igrid
1610  ^d&dxlevel(^d)=rnode(rpdx^d_, igrid);
1611  block=>ps(igrid)
1612  call phys_get_aux(.true., ps(igrid)%w, ps(igrid)%x, ixg^ll, &
1613  ixm^ll^ladd1, "write_snapshot")
1614  endif
1615 
1616  call block_shape_io(igrid, n_ghost, ixo^l, n_values)
1617  if(stagger_grid) then
      ! Staggered (face-centered) data is appended after the
      ! cell-centered data in the same buffer
1618  w_buffer(1:n_values) = pack(ps(igrid)%w(ixo^s, 1:nw), .true.)
1619  {ixosmin^d = ixomin^d -1\}
1620  {ixosmax^d = ixomax^d \}
1621  n_values_stagger= count_ix(ixos^l)*nws
1622  w_buffer(n_values+1:n_values+n_values_stagger) = pack(ps(igrid)%ws(ixos^s, 1:nws), .true.)
1623  n_values=n_values+n_values_stagger
1624  else
1625  w_buffer(1:n_values) = pack(ps(igrid)%w(ixo^s, 1:nw), .true.)
1626  end if
1627  ix_buffer(1) = n_values
1628  ix_buffer(2:) = n_ghost
1629 
1630  if (mype /= 0) then
1631  call mpi_send(ix_buffer, 2*ndim+1, &
1632  mpi_integer, 0, itag, icomm, ierrmpi)
1633  call mpi_send(w_buffer, n_values, &
1634  mpi_double_precision, 0, itag, icomm, ierrmpi)
1635  else
1636  iwrite = iwrite+1
      ! Per-block record: 2*ndim ghost-cell counts, then the data
1637  call mpi_file_write(file_handle, ix_buffer(2:), &
1638  2*ndim, mpi_integer, istatus, ierrmpi)
1639  call mpi_file_write(file_handle, w_buffer, &
1640  n_values, mpi_double_precision, istatus, ierrmpi)
1641 
1642  ! Set offset of next block
1643  block_offset(iwrite+1) = block_offset(iwrite) + &
1644  int(n_values, mpi_offset_kind) * size_double + &
1645  2 * ndim * size_int
1646  end if
1647  end do
1648 
1649  ! Write data communicated from other processors
1650  if (mype == 0) then
1651  do ipe = 1, npe-1
1652  do morton_no=morton_start(ipe), morton_stop(ipe)
1653  iwrite=iwrite+1
1654  itag=morton_no
1655 
1656  call mpi_recv(ix_buffer, 2*ndim+1, mpi_integer, ipe, itag, icomm,&
1657  igrecvstatus, ierrmpi)
1658  n_values = ix_buffer(1)
1659 
1660  call mpi_recv(w_buffer, n_values, mpi_double_precision,&
1661  ipe, itag, icomm, iorecvstatus, ierrmpi)
1662 
1663  call mpi_file_write(file_handle, ix_buffer(2:), &
1664  2*ndim, mpi_integer, istatus, ierrmpi)
1665  call mpi_file_write(file_handle, w_buffer, &
1666  n_values, mpi_double_precision, istatus, ierrmpi)
1667 
1668  ! Set offset of next block
1669  block_offset(iwrite+1) = block_offset(iwrite) + &
1670  int(n_values, mpi_offset_kind) * size_double + &
1671  2 * ndim * size_int
1672  end do
1673  end do
1674 
1675  ! Write block offsets (now we know them)
1676  call mpi_file_seek(file_handle, offset_offsets, mpi_seek_set, ierrmpi)
1677  call mpi_file_write(file_handle, block_offset(1:nleafs), nleafs, &
1678  mpi_offset, istatus, ierrmpi)
1679 
1680  ! Write header again, now with correct offsets
1681  call mpi_file_seek(file_handle, 0_mpi_offset_kind, mpi_seek_set, ierrmpi)
1682  call snapshot_write_header(file_handle, offset_tree_info, &
1683  offset_block_data)
1684 
1685  call mpi_file_close(file_handle, ierrmpi)
1686  end if
1687 
1688  call mpi_barrier(icomm, ierrmpi)
1689 
1690  end subroutine write_snapshot
1691 
1692  !> Routine to read in snapshots (.dat files). When it cannot recognize the
1693  !> file version, it will automatically try the 'old' reader.
      !> Rank 0 reads the file and forwards each block to its owning
      !> rank; all ranks rebuild the forest and allocate their nodes
      !> before data distribution starts.
1694  subroutine read_snapshot
1696  use mod_forest
1698  use mod_slice, only: slicenext
1699 
1700  integer :: ix_buffer(2*ndim+1), n_values, n_values_stagger
1701  integer :: ixO^L, ixOs^L
1702  integer :: file_handle, amode, igrid, Morton_no, iread
1703  integer :: istatus(mpi_status_size)
1704  integer :: iorecvstatus(mpi_status_size)
1705  integer :: ipe,inrecv,nrecv, file_version
1706  logical :: fexist
1707  integer(MPI_OFFSET_KIND) :: offset_tree_info
1708  integer(MPI_OFFSET_KIND) :: offset_block_data
1709  double precision, allocatable :: w_buffer(:)
1710  double precision, dimension(:^D&,:), allocatable :: w, ws
1711 
1712  if (mype==0) then
1713  inquire(file=trim(restart_from_file), exist=fexist)
1714  if (.not.fexist) call mpistop(trim(restart_from_file)//" not found!")
1715 
1716  call mpi_file_open(mpi_comm_self,restart_from_file,mpi_mode_rdonly, &
1717  mpi_info_null,file_handle,ierrmpi)
1718  call mpi_file_read(file_handle, file_version, 1, mpi_integer, &
1719  istatus, ierrmpi)
1720  end if
1721 
1722  call mpi_bcast(file_version,1,mpi_integer,0,icomm,ierrmpi)
1723 
      ! Unknown version: fall back to the legacy reader and return.
      ! NOTE(review): original lines 1731 and 1737 are missing from this
      ! listing (presumably the snapshotnext guess via get_snapshot_index
      ! and a convert-related assignment) — confirm against the repository
      ! source
1724  if (all(compatible_versions /= file_version)) then
1725  if (mype == 0) print *, "Unknown version, trying old snapshot reader..."
1726  call mpi_file_close(file_handle,ierrmpi)
1727  call read_snapshot_old()
1728 
1729  ! Guess snapshotnext from file name if not set
1730  if (snapshotnext == -1) &
1732  ! Set slicenext and collapsenext if not set
1733  if (slicenext == -1) slicenext = 0
1734  if (collapsenext == -1) collapsenext = 0
1735 
1736  ! Still used in convert
1738 
1739  return ! Leave this routine
1740  else if (mype == 0) then
1741  call mpi_file_seek(file_handle, 0_mpi_offset_kind, mpi_seek_set, ierrmpi)
1742  call snapshot_read_header(file_handle, offset_tree_info, &
1743  offset_block_data)
1744  end if
1745 
1746  ! Share information about restart file
1747  call mpi_bcast(nw_found,1,mpi_integer,0,icomm,ierrmpi)
1748  call mpi_bcast(nleafs,1,mpi_integer,0,icomm,ierrmpi)
1749  call mpi_bcast(nparents,1,mpi_integer,0,icomm,ierrmpi)
1750  call mpi_bcast(it,1,mpi_integer,0,icomm,ierrmpi)
1751  call mpi_bcast(global_time,1,mpi_double_precision,0,icomm,ierrmpi)
1752 
1753  call mpi_bcast(snapshotnext,1,mpi_integer,0,icomm,ierrmpi)
1754  call mpi_bcast(slicenext,1,mpi_integer,0,icomm,ierrmpi)
1755  call mpi_bcast(collapsenext,1,mpi_integer,0,icomm,ierrmpi)
1756 
1757  ! Allocate send/receive buffer
      ! Sized by nw_found (from the file), which may differ from nw
1758  n_values = count_ix(ixg^ll) * nw_found
1759  if(stagger_grid) then
1760  n_values = n_values + count_ix(ixgs^ll) * nws
1761  allocate(ws(ixgs^t,1:nws))
1762  end if
1763  allocate(w_buffer(n_values))
1764  allocate(w(ixg^t,1:nw_found))
1765 
1767 
1768  if (mype == 0) then
1769  call mpi_file_seek(file_handle, offset_tree_info, &
1770  mpi_seek_set, ierrmpi)
1771  end if
1772 
1773  call read_forest(file_handle)
1774 
      ! Allocate the local grid nodes mapped to this rank
1775  do morton_no=morton_start(mype),morton_stop(mype)
1776  igrid=sfc_to_igrid(morton_no)
1777  call alloc_node(igrid)
1778  end do
1779 
1780  if (mype==0) then
1781  call mpi_file_seek(file_handle, offset_block_data, mpi_seek_set, ierrmpi)
1782 
      ! Rank 0 reads every block in Morton order, keeps its own and
      ! forwards the rest to the owning ranks
1783  iread = 0
1784  do ipe = 0, npe-1
1785  do morton_no=morton_start(ipe),morton_stop(ipe)
1786  iread=iread+1
1787  itag=morton_no
1788 
1789  call mpi_file_read(file_handle,ix_buffer(1:2*ndim), 2*ndim, &
1790  mpi_integer, istatus,ierrmpi)
1791 
1792  ! Construct ixO^L array from number of ghost cells
1793  {ixomin^d = ixmlo^d - ix_buffer(^d)\}
1794  {ixomax^d = ixmhi^d + ix_buffer(ndim+^d)\}
1795  n_values = count_ix(ixo^l) * nw_found
1796  if(stagger_grid) then
1797  {ixosmin^d = ixomin^d - 1\}
1798  {ixosmax^d = ixomax^d\}
1799  n_values_stagger = n_values
1800  n_values = n_values + count_ix(ixos^l) * nws
1801  end if
1802 
1803  call mpi_file_read(file_handle, w_buffer, n_values, &
1804  mpi_double_precision, istatus, ierrmpi)
1805 
1806  if (mype == ipe) then ! Root task
1807  igrid=sfc_to_igrid(morton_no)
1808  if(stagger_grid) then
1809  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values_stagger), &
1810  shape(w(ixo^s, 1:nw_found)))
1811  ps(igrid)%ws(ixos^s,1:nws)=reshape(w_buffer(n_values_stagger+1:n_values), &
1812  shape(ws(ixos^s, 1:nws)))
1813  else
1814  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values), &
1815  shape(w(ixo^s, 1:nw_found)))
1816  end if
      ! Map nw_found file variables onto the current nw, delegating
      ! to usr_transform_w when the counts differ and it is provided
1817  if (nw_found<nw) then
1818  if (associated(usr_transform_w)) then
1819  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1820  else
1821  ps(igrid)%w(ixo^s,1:nw_found)=w(ixo^s,1:nw_found)
1822  end if
1823  else if (nw_found>nw) then
1824  if (associated(usr_transform_w)) then
1825  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1826  else
1827  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1828  end if
1829  else
1830  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1831  end if
1832  else
1833  call mpi_send([ ixo^l, n_values ], 2*ndim+1, &
1834  mpi_integer, ipe, itag, icomm, ierrmpi)
1835  call mpi_send(w_buffer, n_values, &
1836  mpi_double_precision, ipe, itag, icomm, ierrmpi)
1837  end if
1838  end do
1839  end do
1840 
1841  call mpi_file_close(file_handle,ierrmpi)
1842 
1843  else ! mype > 0
1844 
      ! Non-root ranks receive their blocks' index bounds and data,
      ! then unpack exactly as the root does for its own blocks
1845  do morton_no=morton_start(mype),morton_stop(mype)
1846  igrid=sfc_to_igrid(morton_no)
1847  itag=morton_no
1848 
1849  call mpi_recv(ix_buffer, 2*ndim+1, mpi_integer, 0, itag, icomm,&
1850  iorecvstatus, ierrmpi)
1851  {ixomin^d = ix_buffer(^d)\}
1852  {ixomax^d = ix_buffer(ndim+^d)\}
1853  n_values = ix_buffer(2*ndim+1)
1854 
1855  call mpi_recv(w_buffer, n_values, mpi_double_precision,&
1856  0, itag, icomm, iorecvstatus, ierrmpi)
1857 
1858  if(stagger_grid) then
1859  n_values_stagger = count_ix(ixo^l) * nw_found
1860  {ixosmin^d = ixomin^d - 1\}
1861  {ixosmax^d = ixomax^d\}
1862  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values_stagger), &
1863  shape(w(ixo^s, 1:nw_found)))
1864  ps(igrid)%ws(ixos^s,1:nws)=reshape(w_buffer(n_values_stagger+1:n_values), &
1865  shape(ws(ixos^s, 1:nws)))
1866  else
1867  w(ixo^s, 1:nw_found) = reshape(w_buffer(1:n_values), &
1868  shape(w(ixo^s, 1:nw_found)))
1869  end if
1870  if (nw_found<nw) then
1871  if (associated(usr_transform_w)) then
1872  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1873  else
1874  ps(igrid)%w(ixo^s,1:nw_found)=w(ixo^s,1:nw_found)
1875  end if
1876  else if (nw_found>nw) then
1877  if (associated(usr_transform_w)) then
1878  call usr_transform_w(ixg^ll,ixm^ll,nw_found,w,ps(igrid)%x,ps(igrid)%w)
1879  else
1880  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1881  end if
1882  else
1883  ps(igrid)%w(ixo^s,1:nw)=w(ixo^s,1:nw)
1884  end if
1885  end do
1886  end if
1887 
1888  call mpi_barrier(icomm,ierrmpi)
1889 
1890  end subroutine read_snapshot
1891 
 !> Fallback reader for old-format snapshot (.dat) files. Rank 0 reads the
 !> trailer metadata at the end of the file, validates it against the current
 !> run settings, broadcasts the parameters, and then reads and distributes
 !> the per-block data to the other ranks via point-to-point messages.
 !> NOTE(review): collective — every rank must call this routine (it contains
 !> mpi_bcast and mpi_barrier).
1892  subroutine read_snapshot_old()
1895 
1896  double precision :: wio(ixg^t,1:nw)
1897  integer :: fh, igrid, Morton_no, iread
1898  integer :: levmaxini, ndimini, ndirini
1899  integer :: nwini, neqparini, nxini^D
1900  integer(kind=MPI_OFFSET_KIND) :: offset
1901  integer :: istatus(mpi_status_size)
1902  integer, allocatable :: iorecvstatus(:,:)
1903  integer :: ipe,inrecv,nrecv
1904  integer :: sendini(7+^nd)
1905  character(len=80) :: filename
1906  logical :: fexist
1907  double precision :: eqpar_dummy(100)
1908 
1909  if (mype==0) then
1910  call mpi_file_open(mpi_comm_self,trim(restart_from_file), &
1911  mpi_mode_rdonly,mpi_info_null,fh,ierrmpi)
1912 
 ! The old format stores metadata in a trailer: seek backwards from the
 ! end of the file past 7 integers and 1 double, then read them in order.
1913  offset=-int(7*size_int+size_double,kind=mpi_offset_kind)
1914  call mpi_file_seek(fh,offset,mpi_seek_end,ierrmpi)
1915 
1916  call mpi_file_read(fh,nleafs,1,mpi_integer,istatus,ierrmpi)
1918  call mpi_file_read(fh,levmaxini,1,mpi_integer,istatus,ierrmpi)
1919  call mpi_file_read(fh,ndimini,1,mpi_integer,istatus,ierrmpi)
1920  call mpi_file_read(fh,ndirini,1,mpi_integer,istatus,ierrmpi)
1921  call mpi_file_read(fh,nwini,1,mpi_integer,istatus,ierrmpi)
1922  call mpi_file_read(fh,neqparini,1,mpi_integer,istatus,ierrmpi)
1923  call mpi_file_read(fh,it,1,mpi_integer,istatus,ierrmpi)
1924  call mpi_file_read(fh,global_time,1,mpi_double_precision,istatus,ierrmpi)
1925 
1926  ! check if settings are suitable for restart
1927  if (levmaxini>refine_max_level) then
1928  write(*,*) "number of levels in restart file = ",levmaxini
1929  write(*,*) "refine_max_level = ",refine_max_level
1930  call mpistop("refine_max_level < number of levels in restart file")
1931  end if
1932  if (ndimini/=ndim) then
1933  write(*,*) "ndim in restart file = ",ndimini
1934  write(*,*) "ndim = ",ndim
1935  call mpistop("reset ndim to ndim in restart file")
1936  end if
1937  if (ndirini/=ndir) then
1938  write(*,*) "ndir in restart file = ",ndirini
1939  write(*,*) "ndir = ",ndir
1940  call mpistop("reset ndir to ndir in restart file")
1941  end if
1942  if (nw/=nwini) then
1943  write(*,*) "nw=",nw," and nw in restart file=",nwini
1944  call mpistop("currently, changing nw at restart is not allowed")
1945  end if
1946 
 ! Seek further back to read the per-dimension resolution (the equation
 ! parameters preceding it are read into a dummy buffer and discarded).
1947  offset=offset-int(ndimini*size_int+neqparini*size_double,kind=mpi_offset_kind)
1948  call mpi_file_seek(fh,offset,mpi_seek_end,ierrmpi)
1949 
1950  {call mpi_file_read(fh,nxini^d,1,mpi_integer,istatus,ierrmpi)\}
1951  if (ixghi^d/=nxini^d+2*nghostcells|.or.) then
1952  write(*,*) "Error: reset resolution to ",nxini^d+2*nghostcells
1953  call mpistop("change with setamrvac")
1954  end if
1955 
1956  call mpi_file_read(fh,eqpar_dummy,neqparini, &
1957  mpi_double_precision,istatus,ierrmpi)
1958  end if
1959 
1960  ! broadcast the global parameters first
1961  if (npe>1) then
1962  if (mype==0) then
1963  sendini=(/nleafs,levmaxini,ndimini,ndirini,nwini,neqparini,it ,^d&nxini^d /)
1964  end if
1965  call mpi_bcast(sendini,7+^nd,mpi_integer,0,icomm,ierrmpi)
1966  nleafs=sendini(1);levmaxini=sendini(2);ndimini=sendini(3);
1967  ndirini=sendini(4);nwini=sendini(5);
1968  neqparini=sendini(6);it=sendini(7);
1969  nxini^d=sendini(7+^d);
1971  call mpi_bcast(global_time,1,mpi_double_precision,0,icomm,ierrmpi)
1972  end if
1973 
 ! The forest (tree) description follows the block data; position rank 0's
 ! file pointer just past the nleafs blocks before reading it.
1974  if (mype == 0) then
1975  offset = int(size_block_io,kind=mpi_offset_kind) * &
1976  int(nleafs,kind=mpi_offset_kind)
1977  call mpi_file_seek(fh,offset,mpi_seek_set,ierrmpi)
1978  end if
1979 
1980  call read_forest(fh)
1981 
 ! Allocate the local grid blocks assigned to this rank by the space-filling curve
1982  do morton_no=morton_start(mype),morton_stop(mype)
1983  igrid=sfc_to_igrid(morton_no)
1984  call alloc_node(igrid)
1985  end do
1986 
1987  if (mype==0)then
1988  iread=0
1989 
 ! Rank 0 reads its own blocks directly into place ...
1990  do morton_no=morton_start(0),morton_stop(0)
1991  igrid=sfc_to_igrid(morton_no)
1992  iread=iread+1
1993  offset=int(size_block_io,kind=mpi_offset_kind) &
1994  *int(morton_no-1,kind=mpi_offset_kind)
1995  call mpi_file_read_at(fh,offset,ps(igrid)%w,1,type_block_io, &
1996  istatus,ierrmpi)
1997  end do
 ! ... and forwards every other rank's blocks through a staging buffer,
 ! tagged with the Morton number so receives match unambiguously.
1998  if (npe>1) then
1999  do ipe=1,npe-1
2000  do morton_no=morton_start(ipe),morton_stop(ipe)
2001  iread=iread+1
2002  itag=morton_no
2003  offset=int(size_block_io,kind=mpi_offset_kind)&
2004  *int(morton_no-1,kind=mpi_offset_kind)
2005  call mpi_file_read_at(fh,offset,wio,1,type_block_io,&
2006  istatus,ierrmpi)
2007  call mpi_send(wio,1,type_block_io,ipe,itag,icomm,ierrmpi)
2008  end do
2009  end do
2010  end if
2011  call mpi_file_close(fh,ierrmpi)
2012  else
 ! Non-root ranks receive their blocks from rank 0, one message per block
2013  nrecv=(morton_stop(mype)-morton_start(mype)+1)
2014  allocate(iorecvstatus(mpi_status_size,nrecv))
2015  inrecv=0
2016  do morton_no=morton_start(mype),morton_stop(mype)
2017  igrid=sfc_to_igrid(morton_no)
2018  itag=morton_no
2019  inrecv=inrecv+1
2020  call mpi_recv(ps(igrid)%w,1,type_block_io,0,itag,icomm,&
2021  iorecvstatus(:,inrecv),ierrmpi)
2022  end do
2023  deallocate(iorecvstatus)
2024  end if
2025 
2026  call mpi_barrier(icomm,ierrmpi)
2027 
2028  end subroutine read_snapshot_old
2029 
2030  !> Write volume-averaged values and other information to the log file
 !> NOTE(review): the volume averages/coverage below use mpi_allreduce, so
 !> all ranks must reach those calls; only rank 0 performs the file I/O.
2031  subroutine printlog_default
2033  use mod_timing
2036 
2037  logical :: fileopen
2038  integer :: i, iw, level
2039  double precision :: wmean(1:nw), total_volume
2040  double precision :: volume_coverage(refine_max_level)
2041  integer :: nx^D, nc, ncells, dit
2042  double precision :: dtTimeLast, now, cellupdatesPerSecond
2043  double precision :: activeBlocksPerCore, wctPerCodeTime, timeToFinish
2044  character(len=40) :: fmt_string
2045  character(len=80) :: filename
2046  character(len=2048) :: line
 ! opened persists across calls: the header is only written on first entry
2047  logical, save :: opened = .false.
2048  integer :: amode, istatus(mpi_status_size)
2049  integer, parameter :: my_unit = 20
2050 
2051  ! Compute the volume-average of w**1 = w
2052  call get_volume_average(1, wmean, total_volume)
2053 
2054  ! Compute the volume coverage
2055  call get_volume_coverage(volume_coverage)
2056 
2057  if (mype == 0) then
2058 
2059  ! To compute cell updates per second, we do the following:
 ! number of interior cells per block times the number of active leaves
2060  nx^d=ixmhi^d-ixmlo^d+1;
2061  nc={nx^d*}
2062  ncells = nc * nleafs_active
2063 
2064  ! assumes the number of active leafs haven't changed since last compute.
2065  now = mpi_wtime()
2066  dit = it - ittimelast
2067  dttimelast = now - timelast
2068  ittimelast = it
2069  timelast = now
2070  cellupdatespersecond = dble(ncells) * dble(nstep) * &
2071  dble(dit) / (dttimelast * dble(npe))
2072 
2073  ! blocks per core:
2074  activeblockspercore = dble(nleafs_active) / dble(npe)
2075 
2076  ! Wall clock time per code time unit in seconds:
 ! (epsilon guards against division by zero when dit*dt is zero)
2077  wctpercodetime = dttimelast / max(dit * dt, epsilon(1.0d0))
2078 
2079  ! Wall clock time to finish in hours:
2080  timetofinish = (time_max - global_time) * wctpercodetime / 3600.0d0
2081 
2082  ! On first entry, open the file and generate the header
2083  if (.not. opened) then
2084 
2085  filename = trim(base_filename) // ".log"
2086 
2087  ! Delete the log when not doing a restart run
2088  if (restart_from_file == undefined) then
2089  open(unit=my_unit,file=trim(filename),status='replace')
2090  close(my_unit, status='delete')
2091  end if
2092 
2093  amode = ior(mpi_mode_create,mpi_mode_wronly)
2094  amode = ior(amode,mpi_mode_append)
2095 
2096  call mpi_file_open(mpi_comm_self, filename, amode, &
2097  mpi_info_null, log_fh, ierrmpi)
2098 
2099  opened = .true.
2100 
2101  ! Start of file header: fixed columns, then one column per variable
2102  line = "it global_time dt"
2103  do level=1,nw
2104  i = len_trim(line) + 2
2105  write(line(i:),"(a,a)") trim(cons_wnames(level)), " "
2106  end do
2107 
2108  ! Volume coverage per level
2109  do level = 1, refine_max_level
2110  i = len_trim(line) + 2
2111  write(line(i:), "(a,i0)") "c", level
2112  end do
2113 
2114  ! Cell counts per level
2115  do level=1,refine_max_level
2116  i = len_trim(line) + 2
2117  write(line(i:), "(a,i0)") "n", level
2118  end do
2119 
2120  ! Rest of file header
2121  line = trim(line) // " | Xload Xmemory 'Cell_Updates /second/core'"
2122  line = trim(line) // " 'Active_Blocks/Core' 'Wct Per Code Time [s]'"
2123  line = trim(line) // " 'TimeToFinish [hrs]'"
2124 
2125  ! Only write header if not restarting
2126  if (restart_from_file == undefined) then
2127  call mpi_file_write(log_fh, trim(line) // new_line('a'), &
2128  len_trim(line)+1, mpi_character, istatus, ierrmpi)
2129  end if
2130  end if
2131 
2132  ! Construct the line to be added to the log
2133 
2134  fmt_string = '(' // fmt_i // ',2' // fmt_r // ')'
2135  write(line, fmt_string) it, global_time, dt
2136  i = len_trim(line) + 2
2137 
2138  write(fmt_string, '(a,i0,a)') '(', nw, fmt_r // ')'
2139  write(line(i:), fmt_string) wmean(1:nw)
2140  i = len_trim(line) + 2
2141 
2142  write(fmt_string, '(a,i0,a)') '(', refine_max_level, fmt_r // ')'
2143  write(line(i:), fmt_string) volume_coverage(1:refine_max_level)
2144  i = len_trim(line) + 2
2145 
2146  write(fmt_string, '(a,i0,a)') '(', refine_max_level, fmt_i // ')'
2147  write(line(i:), fmt_string) nleafs_level(1:refine_max_level)
2148  i = len_trim(line) + 2
2149 
2150  fmt_string = '(a,6' // fmt_r2 // ')'
2151  write(line(i:), fmt_string) '| ', xload, xmemory, cellupdatespersecond, &
2152  activeblockspercore, wctpercodetime, timetofinish
2153 
2154  call mpi_file_write(log_fh, trim(line) // new_line('a') , &
2155  len_trim(line)+1, mpi_character, istatus, ierrmpi)
2156  end if
2157 
2158  end subroutine printlog_default
2159 
2160  !> Print a log that can be used to check whether the code still produces the
2161  !> same output (regression test)
2162  subroutine printlog_regression_test()
2164 
2165  integer, parameter :: n_modes = 2
2166  integer, parameter :: my_unit = 123
2167  character(len=40) :: fmt_string
2168  logical, save :: file_open = .false.
2169  integer :: power
2170  double precision :: modes(nw, n_modes), volume
2171 
2172  do power = 1, n_modes
2173  call get_volume_average(power, modes(:, power), volume)
2174  end do
2175 
2176  if (mype == 0) then
2177  if (.not. file_open) then
2178  open(my_unit, file = trim(base_filename) // ".log")
2179  file_open = .true.
2180 
2181  write(my_unit, *) "# time mean(w) mean(w**2)"
2182  end if
2183 
2184  write(fmt_string, "(a,i0,a)") "(", nw * n_modes + 1, fmt_r // ")"
2185  write(my_unit, fmt_string) global_time, modes
2186  end if
2187  end subroutine printlog_regression_test
2188 
2189  !> Compute mean(w**power) over the leaves of the grid. The first mode
2190  !> (power=1) corresponds to the mean, the second to the mean squared values
2191  !> and so on.
2192  subroutine get_volume_average(power, mode, volume)
2194 
2195  integer, intent(in) :: power !< Which mode to compute
2196  double precision, intent(out) :: mode(nw) !< The computed mode
2197  double precision, intent(out) :: volume !< The total grid volume
2198  integer :: iigrid, igrid, iw
2199  double precision :: wsum(nw+1)
2200  double precision :: dsum_recv(1:nw+1)
2201 
2202  wsum(:) = 0
2203 
2204  ! Loop over all the grids
2205  do iigrid = 1, igridstail
2206  igrid = igrids(iigrid)
2207 
2208  ! Store total volume in last element
2209  wsum(nw+1) = wsum(nw+1) + sum(ps(igrid)%dvolume(ixm^t))
2210 
2211  ! Compute the modes of the cell-centered variables, weighted by volume
2212  do iw = 1, nw
2213  wsum(iw) = wsum(iw) + &
2214  sum(ps(igrid)%dvolume(ixm^t)*ps(igrid)%w(ixm^t,iw)**power)
2215  end do
2216  end do
2217 
2218  ! Make the information available on all tasks
2219  call mpi_allreduce(wsum, dsum_recv, nw+1, mpi_double_precision, &
2220  mpi_sum, icomm, ierrmpi)
2221 
2222  ! Set the volume and the average
2223  volume = dsum_recv(nw+1)
2224  mode = dsum_recv(1:nw) / volume
2225 
2226  end subroutine get_volume_average
2227 
2228  !> Compute how much of the domain is covered by each grid level. This routine
2229  !> does not take a non-Cartesian geometry into account.
2230  subroutine get_volume_coverage(vol_cov)
2232 
2233  double precision, intent(out) :: vol_cov(1:refine_max_level)
2234  double precision :: dsum_recv(1:refine_max_level)
2235  integer :: iigrid, igrid, iw, level
2236 
2237  ! First determine the total 'flat' volume in each level
2238  vol_cov(1:refine_max_level)=zero
2239 
2240  do iigrid = 1, igridstail
2241  igrid = igrids(iigrid);
2242  level = node(plevel_,igrid)
2243  vol_cov(level) = vol_cov(level)+ &
2244  {(rnode(rpxmax^d_,igrid)-rnode(rpxmin^d_,igrid))|*}
2245  end do
2246 
2247  ! Make the information available on all tasks
2248  call mpi_allreduce(vol_cov, dsum_recv, refine_max_level, mpi_double_precision, &
2249  mpi_sum, icomm, ierrmpi)
2250 
2251  ! Normalize
2252  vol_cov = dsum_recv / sum(dsum_recv)
2253  end subroutine get_volume_coverage
2254 
2255  !> Compute the volume average of func(w) over the leaves of the grid.
 !> func must be pure and maps a cell's state vector w(1:nw) to a scalar.
 !> NOTE(review): collective — all ranks must call this (mpi_allreduce below).
2256  subroutine get_volume_average_func(func, f_avg, volume)
2258 
2259  interface
2260  pure function func(w_vec, w_size) result(val)
2261  integer, intent(in) :: w_size
2262  double precision, intent(in) :: w_vec(w_size)
2263  double precision :: val
2264  end function func
2265  end interface
2266  double precision, intent(out) :: f_avg !< The volume average of func
2267  double precision, intent(out) :: volume !< The total grid volume
2268  integer :: iigrid, igrid, i^D
2269  double precision :: wsum(2)
2270  double precision :: dsum_recv(2)
2271 
 ! wsum(1) accumulates the volume-weighted sum of func, wsum(2) the volume
2272  wsum(:) = 0
2273 
2274  ! Loop over all the grids
2275  do iigrid = 1, igridstail
2276  igrid = igrids(iigrid)
2277 
2278  ! Store total volume in last element
2279  wsum(2) = wsum(2) + sum(ps(igrid)%dvolume(ixm^t))
2280 
2281  ! Evaluate func on each mesh cell's state vector, weighted by cell volume
2282  {do i^d = ixmlo^d, ixmhi^d\}
2283  wsum(1) = wsum(1) + ps(igrid)%dvolume(i^d) * &
2284  func(ps(igrid)%w(i^d, :), nw)
2285  {end do\}
2286  end do
2287 
2288  ! Make the information available on all tasks
2289  call mpi_allreduce(wsum, dsum_recv, 2, mpi_double_precision, &
2290  mpi_sum, icomm, ierrmpi)
2291 
2292  ! Set the volume and the average
2293  volume = dsum_recv(2)
2294  f_avg = dsum_recv(1) / volume
2295 
2296  end subroutine get_volume_average_func
2297 
2298  !> Compute global maxima of iw variables over the leaves of the grid.
2299  subroutine get_global_maxima(wmax)
2301 
2302  double precision, intent(out) :: wmax(nw) !< The global maxima
2303 
2304  integer :: iigrid, igrid, iw
2305  double precision :: wmax_mype(nw),wmax_recv(nw)
2306 
2307  wmax_mype(1:nw) = -bigdouble
2308 
2309  ! Loop over all the grids
2310  do iigrid = 1, igridstail
2311  igrid = igrids(iigrid)
2312  do iw = 1, nw
2313  wmax_mype(iw)=max(wmax_mype(iw),maxval(ps(igrid)%w(ixm^t,iw)))
2314  end do
2315  end do
2316 
2317  ! Make the information available on all tasks
2318  call mpi_allreduce(wmax_mype, wmax_recv, nw, mpi_double_precision, &
2319  mpi_max, icomm, ierrmpi)
2320 
2321  wmax(1:nw)=wmax_recv(1:nw)
2322 
2323  end subroutine get_global_maxima
2324 
2325  !> Compute global minima of iw variables over the leaves of the grid.
 !> NOTE(review): collective — all ranks must call this (mpi_allreduce below).
2326  subroutine get_global_minima(wmin)
2328 
2329  double precision, intent(out) :: wmin(nw) !< The global minima
2330 
2331  integer :: iigrid, igrid, iw
2332  double precision :: wmin_mype(nw),wmin_recv(nw)
2333 
 ! Start from +huge so any cell value lowers the running minimum
2334  wmin_mype(1:nw) = bigdouble
2335 
2336  ! Loop over all the grids
2337  do iigrid = 1, igridstail
2338  igrid = igrids(iigrid)
2339  do iw = 1, nw
2340  wmin_mype(iw)=min(wmin_mype(iw),minval(ps(igrid)%w(ixm^t,iw)))
2341  end do
2342  end do
2343 
2344  ! Make the information available on all tasks
2345  call mpi_allreduce(wmin_mype, wmin_recv, nw, mpi_double_precision, &
2346  mpi_min, icomm, ierrmpi)
2347 
2348  wmin(1:nw)=wmin_recv(1:nw)
2349 
2350  end subroutine get_global_minima
2351 
2352 
2353 
2354 end module mod_input_output
integer, parameter cylindrical
Definition: mod_geometry.t:9
This module contains definitions of global parameters and variables and some generic functions/subrou...
character(len=std_len) prolongation_method
integer, parameter cartesian_stretched
Definition: mod_geometry.t:8
integer direction_los
direction of light of sight
character(len=std_len) time_integrator
Which time integrator to use.
Module for reading input and writing output.
double precision, dimension(:), allocatable refine_threshold
Error tolerance for refinement decision.
character(len=std_len), dimension(:), allocatable typepred1
The spatial dicretization for the predictor step when using a two step method.
character(len= *), parameter fmt_r
logical, dimension(:), allocatable w_write
logical small_values_use_primitive
Use primitive variables when correcting small values.
integer domain_nx
number of cells for each dimension in level-one mesh
double precision time_max
End time for the simulation.
double precision, dimension(:), allocatable w_refine_weight
Weights of variables used to calculate error for mesh refinement.
character(len=std_len) typetvd
Which type of TVD method to use.
logical saveprim
If true, convert from conservative to primitive variables in output.
double precision, dimension(ndim) qstretch_baselevel
stretch factor between cells at AMR level 1, per dimension
integer max_blocks
The maximum number of grid blocks in a processor.
The data structure that contains information about a tree node/grid block.
Definition: mod_forest.t:11
type(tree_node_ptr), dimension(:,:), allocatable, save igrid_to_node
Array to go from an [igrid, ipe] index to a node pointer.
Definition: mod_forest.t:32
character(len=name_len) physics_type
String describing the physics type of the simulation.
Definition: mod_physics.t:14
logical slab_uniform
uniform Cartesian geometry or not (stretched Cartesian)
integer, parameter stretch_uni
Unidirectional stretching from a side.
logical, dimension(ndim) collapse
If collapse(DIM) is true, generate output integrated over DIM.
double precision, dimension(:,:), allocatable dxmid
integer, parameter filecollapse_
Constant indicating collapsed output.
subroutine get_global_maxima(wmax)
Compute global maxima of iw variables over the leaves of the grid.
integer, dimension(:), allocatable type_gradient_limiter
Type of slope limiter used for computing gradients or divergences, when typegrad or typediv are set t...
double precision xprob
minimum and maximum domain boundaries for each dimension
double precision, dimension(:), allocatable dg
extent of grid blocks in domain per dimension, in array over levels
double precision, dimension(ndim, 2) writespshift
double precision time_convert_factor
Conversion factor for time unit.
logical need_global_cmax
need global maximal wave speed
character(len=std_len) convert_type
Which format to use when converting.
Module with geometry-related routines (e.g., divergence, curl)
Definition: mod_geometry.t:2
integer collapselevel
The level at which to produce line-integrated / collapsed output.
integer, parameter plevel_
subroutine, public kracken(verb, string)
Definition: mod_kracken.t:198
integer ndir
Number of spatial dimensions (components) for vector variables.
character(len=std_len) collapse_type
Writes D-1 slice, can do so in various formats, depending on slice_type.
Definition: mod_slice.t:2
character(len=std_len) typedimsplit
integer nstep
How many sub-steps the time integrator takes.
integer coordinate
Definition: mod_geometry.t:6
integer, dimension(:), allocatable type_limiter
Type of slope limiter used for reconstructing variables on cell edges.
double precision, dimension(:), allocatable amr_wavefilter
refinement: lohner estimate wavefilter setting
logical resume_previous_run
If true, restart a previous run from the latest snapshot.
subroutine read_arguments()
Read the command line arguments passed to amrvac.
double precision, dimension(:), allocatable entropycoef
double precision wall_time_max
Ending wall time (in hours) for the simulation.
integer it_init
initial iteration count
integer it_max
Stop the simulation after this many time steps have been taken.
double precision dtmin
Stop the simulation when the time step becomes smaller than this value.
character(len=std_len) base_filename
Base file name for simulation output, which will be followed by a number.
integer direction_slit
direction of slit (for spectrum)
Module with basic grid data structures.
Definition: mod_forest.t:2
character(len= *), parameter undefined
integer itag
MPI recv send variables for AMR.
character(len=std_len) usr_filename
User parameter file.
integer, parameter nsavehi
Maximum number of saves that can be defined by tsave or itsave.
character(len=std_len) typeboundspeed
Which type of TVDLF method to use.
character(len=std_len) typeprolonglimit
Limiter used for prolongation to refined grids and ghost cells.
subroutine snapshot_read_header(fh, offset_tree, offset_block)
Read header for a snapshot.
subroutine, public retrev(name, val, len, ier)
Definition: mod_kracken.t:56
subroutine block_shape_io(igrid, n_ghost, ixOL, n_values)
Determine the shape of a block for output (whether to include ghost cells, and on which sides) ...
Collapses D-dimensional output to D-1 view by line-of-sight integration.
Definition: mod_collapse.t:4
double precision timelast
Definition: mod_timing.t:16
integer npe
The number of MPI tasks.
logical, dimension(:), allocatable writelevel
double precision courantpar
The Courant (CFL) number used for the simulation.
integer, dimension(:,:), allocatable, save sfc
Array to go from a Morton number to an igrid and processor index. Sfc(1:3, MN) contains [igrid...
Definition: mod_forest.t:43
character(len=std_len), dimension(:), allocatable flux_scheme
Which spatial discretization to use (per grid level)
integer, parameter fileanalysis_
Constant indicating analysis output (see Writing a custom analysis subroutine)
integer, dimension(3, 3, 3) lvc
Levi-Civita tensor.
double precision small_density
logical, dimension(ndim) stretched_dim
True if a dimension is stretched.
logical, dimension(ndim) aperiodb
True for dimensions with aperiodic boundaries.
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
integer, public small_values_daverage
Average over this many cells in each direction.
subroutine read_snapshot
Routine to read in snapshots (.dat files). When it cannot recognize the file version, it will automatically try the 'old' reader.
integer, dimension(:), allocatable ng
number of grid blocks in domain per dimension, in array over levels
integer, dimension(ndim) stretch_type
What kind of stretching is used per dimension.
integer, dimension(nfile) isavet
integer nw_found
number of w found in dat files
Module with slope/flux limiters.
Definition: mod_limiter.t:2
subroutine get_volume_average(power, mode, volume)
Compute mean(w**power) over the leaves of the grid. The first mode (power=1) corresponds to the mean...
subroutine write_forest(file_handle)
Definition: forest.t:350
logical pass_wall_time
If true, wall time is up, modify snapshotnext for later overwrite.
integer nghostcells
Number of ghost cells surrounding a grid.
integer nslices
Number of slices to output.
Definition: mod_slice.t:16
double precision dtdiffpar
For resistive MHD, the time step is also limited by the diffusion time: .
procedure(p_no_args), pointer usr_print_log
Module with all the methods that users can customize in AMRVAC.
integer, dimension(:), allocatable, save sfc_to_igrid
Go from a Morton number to an igrid index (for a single processor)
Definition: mod_forest.t:53
logical, dimension(:), allocatable loglimit
integer ixghi
Upper index of grid block arrays.
integer type_block_io
MPI type for IO: block excluding ghost cells.
integer function limiter_type(namelim)
Definition: mod_limiter.t:42
character(len=std_len) typeghostfill
double precision xload
Stores the memory and load imbalance, used in printlog.
character(len=std_len) filename_euv
Base file name for synthetic EUV emission output.
integer ditregrid
Reconstruct the AMR grid once every ditregrid iteration(s)
character(len=std_len) geometry_name
double precision cada3_radius
radius of the asymptotic region [0.001, 10], larger means more accurate in smooth region but more ove...
Definition: mod_limiter.t:13
logical angmomfix
Enable to strictly conserve the angular momentum (works both in cylindrical and spherical coordinates...
logical, dimension(:), allocatable small_values_fix_iw
Whether to apply small value fixes to certain variables.
double precision small_pressure
character(len=std_len) typesourcesplit
How to apply dimensional splitting to the source terms, see disretization.md.
integer, parameter fileslice_
Constant indicating slice output.
subroutine printlog_default
Write volume-averaged values and other information to the log file.
logical stagger_grid
True for using stagger grid.
subroutine write_slice
Definition: mod_slice.t:27
logical save_physical_boundary
True for save physical boundary cells in dat files.
integer, parameter cartesian
Definition: mod_geometry.t:7
character(len=std_len) resolution_euv
resolution of the output
logical nocartesian
IO switches for conversion.
integer ierrmpi
A global MPI error return code.
integer ixm
the mesh range (within a block with ghost cells)
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
character(len=std_len), dimension(:, :), allocatable typeboundary
Array indicating the type of boundary condition per variable and per physical boundary.
subroutine get_global_minima(wmin)
Compute global minima of iw variables over the leaves of the grid.
logical solve_internal_e
Solve polytropic process instead of solving total energy.
character(len=std_len), dimension(:), allocatable typeentropy
Which type of entropy fix to use with Riemann-type solvers.
double precision dtpar
If dtpar is positive, it sets the timestep dt, otherwise courantpar is used to limit the time step ba...
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
logical image
output image
double precision, dimension(:), allocatable w_convert_factor
Conversion factors the primitive variables.
character(len=std_len) typefilelog
Which type of log to write: 'normal', 'special', 'regression_test'.
integer, dimension(:), allocatable, save morton_start
First Morton number per processor.
Definition: mod_forest.t:62
procedure(p_no_args), pointer usr_write_analysis
subroutine alloc_node(igrid)
allocate arrays on igrid node
integer, parameter unitterm
Unit for standard output.
integer, parameter filelog_
Constant indicating log output.
integer, parameter unitpar
integer nbufferx
Number of cells as buffer zone.
integer, dimension(3, 3) kr
Kronecker delta tensor.
double precision, dimension(:,:), allocatable dx
integer nleafs_active
Definition: mod_forest.t:78
character(len=std_len) typegrad
logical check_small_values
check and optionally fix unphysical small values (density, gas pressure)
double precision time_between_print
to monitor timeintegration loop at given wall-clock time intervals
integer, dimension(:), allocatable, parameter d
integer, dimension(:), allocatable, save morton_stop
Last Morton number per processor.
Definition: mod_forest.t:65
logical reset_it
If true, reset iteration count to 0.
subroutine write_collapsed
Definition: mod_collapse.t:11
character(len=40), dimension(nfile), parameter output_names
Names of the output methods.
integer mype
The rank of the current MPI task.
subroutine read_forest(file_handle)
Definition: forest.t:389
double precision global_time
The global simulation time.
integer, dimension(:,:), allocatable nstretchedblocks
(even) number of (symmetrically) stretched blocks per level and dimension
integer block_nx
number of cells for each dimension in grid block excluding ghostcells
subroutine saveamrfile(ifile)
subroutine read_par_files()
Read in the user-supplied parameter-file.
integer, parameter stretch_symm
Symmetric stretching around the center.
logical reset_time
If true, reset iteration count and global_time to original values, and start writing snapshots at ind...
integer, parameter unitsnapshot
logical function, public lget(keyword)
Definition: mod_kracken.t:161
character(len=std_len), dimension(:), allocatable par_files
Which par files are used as input.
double precision, dimension(nfile) dtsave
Repeatedly save output of type N when dtsave(N) simulation time has passed.
character(len=std_len) slice_type
choose data type of slice: vtu, vtuCC, dat, or csv
Definition: mod_slice.t:22
Module for handling problematic values in simulations, such as negative pressures.
logical use_particles
Use particles module or not.
integer, dimension(:), allocatable, save nleafs_level
How many leaves are present per refinement level.
Definition: mod_forest.t:81
double precision, dimension(nfile) tsavestart
Start of read out (not counting specified read outs)
double precision, dimension(:,:), allocatable qstretch
Stretching factors and first cell size for each AMR level and dimension.
integer, parameter uniterr
Unit for error messages.
subroutine get_fields_string(line, delims, n_max, fields, n_found, fully_read)
Routine to find entries in a string.
Module containing all the particle routines.
Definition: mod_particles.t:2
subroutine mpistop(message)
Exit MPI-AMRVAC with an error message.
Definition: comm_lib.t:198
double precision, dimension(:,:), allocatable dxfirst_1mq
logical firstprocess
If true, call initonegrid_usr upon restarting.
double precision tfixgrid
Fix the AMR grid after this time.
character(len= *), parameter fmt_i
character(len= *), parameter fmt_r2
double precision length_convert_factor
integer iprob
problem switch allowing different setups in same usr_mod.t
double precision, dimension(ndim) dxlevel
double precision, dimension(nsavehi, nfile) tsave
Save output of type N on times tsave(:, N)
subroutine get_volume_average_func(func, f_avg, volume)
Compute the volume average of func(w) over the leaves of the grid.
integer, dimension(nfile) isaveit
integer snapshotini
Resume from the snapshot with this index.
integer function, public iget(keyword)
Definition: mod_kracken.t:141
integer, dimension(nslicemax) slicedir
The slice direction for each slice.
Definition: mod_slice.t:19
integer, parameter ndim
Number of spatial dimensions for grid variables.
subroutine read_snapshot_old()
integer ittimelast
Definition: mod_timing.t:15
integer phys_wider_stencil
To use wider stencils in flux calculations. A value of 1 will extend it by one cell in both direction...
Definition: mod_physics.t:18
logical stretch_uncentered
If true, adjust mod_geometry routines to account for grid stretching (but the flux computation will n...
subroutine generate_plotfile
Definition: convert.t:3
logical slab
Cartesian geometry or not.
double precision, dimension(nslicemax) slicecoord
Slice coordinates, see Slice output.
Definition: mod_slice.t:10
integer function get_snapshot_index(filename)
integer slowsteps
If > 1, then in the first slowsteps-1 time steps dt is reduced by a factor .
integer, parameter stretch_none
No stretching.
procedure(transform_w), pointer usr_transform_w
integer nwauxio
Number of auxiliary variables that are only included in output.
logical, dimension(:), allocatable logflag
integer icomm
The MPI communicator.
double precision small_temperature
error handling
character(len=std_len) typediv
integer, parameter nlevelshi
The maximum number of levels in the grid refinement.
integer, save nparents
Number of parent blocks.
Definition: mod_forest.t:73
integer refine_max_level
Maximal number of AMR levels.
This module defines the procedures of a physics module. It contains function pointers for the various...
Definition: mod_physics.t:4
logical, dimension(ndim) periodb
True for dimensions with periodic boundaries.
subroutine snapshot_write_header(fh, offset_tree, offset_block)
Write header for a snapshot.
integer, parameter nfile
Number of output methods.
integer log_fh
MPI file handle for logfile.
double precision location_slit
location of the slit
subroutine write_snapshot
logical reset_grid
If true, rebuild the AMR grid upon restarting.
logical source_split_usr
Use split or unsplit way to add user's source terms, default: unsplit.
integer, dimension(:,:), allocatable node
procedure(sub_get_aux), pointer phys_get_aux
Definition: mod_physics.t:57
integer, parameter spherical
Definition: mod_geometry.t:10
integer itfixgrid
Fix the AMR grid after this many time steps.
character(len=std_len) typecourant
How to compute the CFL-limited time step.
double precision, dimension(:,:), allocatable dxfirst
logical function snapshot_exists(ix)
integer, save nleafs
Number of leaf block.
Definition: mod_forest.t:76
logical phys_energy
Solve energy equation or not.
subroutine get_volume_coverage(vol_cov)
Compute how much of the domain is covered by each grid level. This routine does not take a non-Cartes...
integer, dimension(3), parameter compatible_versions
List of compatible versions.
integer it
Number of time steps taken.
integer, dimension(nfile) ditsave
Repeatedly save output of type N when ditsave(N) time steps have passed.
subroutine printlog_regression_test()
Print a log that can be used to check whether the code still produces the same output (regression tes...
subroutine create_output_file(fh, ix, extension, suffix)
Standard method for creating a new output file.
character(len=20), public small_values_method
How to handle small values.
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
double precision, dimension(:), allocatable derefine_ratio
integer, dimension(ndim) nstretchedblocks_baselevel
(even) number of (symmetrically) stretched blocks at AMR level 1, per dimension
double precision time_init
Start time for the simulation.
character(len=std_len) typeaverage
integer ixgshi
Upper index of stagger grid block arrays.
Module for reading command line arguments.
Definition: mod_kracken.t:7
integer, parameter fileout_
Constant indicating regular output.
integer, dimension(nsavehi, nfile) itsave
Save output of type N on iterations itsave(:, N)
pure integer function count_ix(ixOL)
Compute number of elements in index range.
integer slicenext
the file number of slices
Definition: mod_slice.t:13
logical spectrum
output spectrum
logical autoconvert
If true, already convert to output format during the run.
character(len=std_len) typecurl
procedure(sub_write_info), pointer phys_write_info
Definition: mod_physics.t:61
integer, parameter version_number
Version number of the .dat file output.