MPI-AMRVAC  3.0
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_particle_base.t
1 !> Module with shared functionality for all the particle movers
2 module mod_particle_base
3  use mod_global_parameters, only: name_len, std_len
4  use mod_physics
5  use mod_random
6  use mod_constants
7 
8  !> String describing the particle physics type
9  character(len=name_len) :: physics_type_particles = ""
10  !> String describing the particle integrator type
11  character(len=name_len) :: integrator_type_particles = ""
12  !> Header string used in CSV files
13  character(len=400) :: csv_header
14  !> Format string used in CSV files
15  character(len=60) :: csv_format
16  !> Maximum number of particles
17  integer :: nparticleshi
18  !> Maximum number of particles in one processor
19  integer :: nparticles_per_cpu_hi
20  !> Number of default payload variables for a particle
21  integer :: ndefpayload
22  !> Number of user-defined payload variables for a particle
23  integer :: nusrpayload
24  !> Number of total payload variables for a particle
25  integer :: npayload
26  !> Number of variables for grid field
27  integer :: ngridvars
28  !> Number of particles
29  integer :: num_particles
30  !> Time limit of particles
31  double precision :: tmax_particles
32  !> Minimum time of all particles
33  double precision :: min_particle_time
34  !> Time interval to save particles
35  double precision :: dtsave_particles
36  !> If positive, a constant time step for the particles
37  double precision :: const_dt_particles
38  !> Particle CFL safety factor
39  double precision :: particles_cfl
40  !> Time to write next particle output
41  double precision :: t_next_output
42  !> Whether to write individual particle output (followed particles)
43  logical :: write_individual
44  !> Whether to write ensemble output
45  logical :: write_ensemble
46  !> Whether to write particle snapshots
47  logical :: write_snapshot
48  !> Particle downsampling factor in CSV output
49  integer :: downsample_particles
50  !> Use a relativistic particle mover?
51  logical :: relativistic
52  !> Resistivity
53  double precision :: particles_eta, particles_etah
54  double precision :: dtheta
55  logical :: losses
56  !> Identity number and total number of particles
57  integer :: nparticles
58  !> Iteration number of particles
59  integer :: it_particles
60  !> Output count for ensembles
61  integer :: n_output_ensemble
62  !> Output count for ensembles of destroyed particles
63  integer :: n_output_destroyed
64 
65  ! these two save the list of neighboring cpus:
66  integer, allocatable :: ipe_neighbor(:)
67  integer :: npe_neighbors
68 
69  integer :: type_particle
70  !> set the current igrid for the particle integrator:
71  integer :: igrid_working
72  !> set the current ipart for the particle integrator:
73  integer :: ipart_working
74 
75  integer, parameter :: unitparticles=15
76 
77  !> Array of identity numbers of particles in current processor
78  integer, dimension(:), allocatable :: particles_on_mype
79  !> Array of identity numbers of active particles in current processor
80  integer, dimension(:), allocatable :: particles_active_on_mype
81  !> Number of particles in current processor
82  integer :: nparticles_on_mype
83  !> Number of active particles in current processor
84  integer :: nparticles_active_on_mype
85  !> Normalization factor for velocity in the integrator
86  double precision :: integrator_velocity_factor(3)
87  !> Integrator to be used for particles
88  integer :: integrator
89 
90  !> Variable index for magnetic field
91  integer, allocatable :: bp(:)
92  !> Variable index for electric field
93  integer, allocatable :: ep(:)
94  !> Variable index for fluid velocity
95  integer, allocatable :: vp(:)
96  !> Variable index for current
97  integer, allocatable :: jp(:)
98 
99  type particle_ptr
100  type(particle_t), pointer :: self
101  !> extra information carried by the particle
102  double precision, allocatable :: payload(:)
103  integer :: igrid, ipe
104  end type particle_ptr
105 
106  type particle_t
107  !> follow the history of the particle
108  logical :: follow
109  !> identity number
110  integer :: index
111  !> charge
112  double precision :: q
113  !> mass
114  double precision :: m
115  !> time
116  double precision :: time
117  !> time step
118  double precision :: dt
119  !> coordinates
120  double precision :: x(3)
121  !> velocity, momentum, or special ones
122  double precision :: u(3)
123  end type particle_t
124 
125  ! Array containing all particles
126  type(particle_ptr), dimension(:), allocatable :: particle
127 
128  ! The pseudo-random number generator
129  type(rng_t) :: rng
130 
131  ! Pointers for user-defined stuff
132  procedure(sub_fill_gridvars), pointer :: particles_fill_gridvars => null()
133  procedure(sub_define_additional_gridvars), pointer :: particles_define_additional_gridvars => null()
134  procedure(sub_fill_additional_gridvars), pointer :: particles_fill_additional_gridvars => null()
135  procedure(sub_integrate), pointer :: particles_integrate => null()
136  procedure(fun_destroy), pointer :: usr_destroy_particle => null()
137 
138  abstract interface
139 
140  subroutine sub_fill_gridvars
141  end subroutine sub_fill_gridvars
142 
143  subroutine sub_define_additional_gridvars(ngridvars)
144  integer, intent(inout) :: ngridvars
145  end subroutine sub_define_additional_gridvars
146 
147  subroutine sub_fill_additional_gridvars
148  end subroutine sub_fill_additional_gridvars
149 
150  subroutine sub_integrate(end_time)
151  double precision, intent(in) :: end_time
152  end subroutine sub_integrate
153 
154  function fun_destroy(myparticle)
155  import particle_ptr
156  logical :: fun_destroy
157  type(particle_ptr), intent(in) :: myparticle
158  end function fun_destroy
159 
160  end interface
161 
162 contains
163 
164  !> Finalize the particles module
165  subroutine particles_finish()
167 
168  call destroy_particles(nparticles_on_mype,particles_on_mype(1:nparticles_on_mype))
169 
170  ! Clean up particle type
171  call mpi_type_free(type_particle,ierrmpi)
172 
173  ! Clean up variables
174  deallocate(particle)
175  deallocate(ipe_neighbor)
176  deallocate(particles_on_mype)
177  deallocate(particles_active_on_mype)
178  deallocate(gridvars)
179 
180  end subroutine particles_finish
181 
182  !> Read this module's parameters from a file
183  subroutine particles_params_read(files)
184  use mod_global_parameters, only: unitpar
185  character(len=*), intent(in) :: files(:)
186  integer :: n
187 
188  namelist /particles_list/ physics_type_particles, nparticleshi, nparticles_per_cpu_hi, &
189   num_particles, ndefpayload, nusrpayload, tmax_particles, dtsave_particles, &
190   const_dt_particles, particles_cfl, write_individual, write_ensemble, &
191   write_snapshot, downsample_particles, relativistic, particles_eta, &
192   particles_etah, t_next_output, dtheta, losses, integrator_type_particles
194 
195  do n = 1, size(files)
196  open(unitpar, file=trim(files(n)), status="old")
197  read(unitpar, particles_list, end=111)
198 111 close(unitpar)
199  end do
200 
201  end subroutine particles_params_read
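 ! Illustration (not part of the module): the particles_list namelist above is
 ! read from the .par file(s) passed to amrvac. A minimal sketch of such a
 ! block, with made-up values rather than any particular setup, could be:
 !
 !   &particles_list
 !     physics_type_particles = 'Lorentz'
 !     num_particles          = 1000
 !     dtsave_particles       = 1.0d-2
 !     write_ensemble         = .true.
 !   /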
202 
203  !> Give initial values to parameters
204  subroutine particle_base_init()
206  integer :: n, idir
207  integer, parameter :: i8 = selected_int_kind(18)
208  integer(i8) :: seed(2)
209  character(len=20) :: strdata
210 
211  physics_type_particles = 'advect'
212  nparticleshi = 10000000
213  nparticles_per_cpu_hi = 1000000
214  num_particles = 1000
215  ndefpayload = 1
216  nusrpayload = 0
217  tmax_particles = bigdouble
218  dtsave_particles = bigdouble
219  const_dt_particles = -bigdouble
220  particles_cfl = 0.5
221  write_individual = .true.
222  write_ensemble = .true.
223  write_snapshot = .true.
224  downsample_particles = 1
225  relativistic = .true.
226  particles_eta = -1.d0
227  particles_etah = -1.d0
228  t_next_output = 0.0d0
229  dtheta = 2.0d0*dpi / 60.0d0
230  losses = .false.
231  nparticles = 0
232  it_particles = 0
233  n_output_ensemble = 0
234  n_output_destroyed = 0
235  nparticles_on_mype = 0
236  nparticles_active_on_mype = 0
237  integrator_velocity_factor(:) = 1.0d0
238  integrator_type_particles = 'Boris'
239 
240  call particles_params_read(par_files)
241 
242  ! If sampling, ndefpayload = nw:
243  if (physics_type_particles == 'sample') ndefpayload = nw
244  ! Total number of payloads
245  npayload = ndefpayload + nusrpayload
246 
247  ! initialise the random number generator
248  seed = [310952_i8, 8948923749821_i8]
249  call rng%set_seed(seed)
250 
251  allocate(particle(1:nparticleshi))
252  allocate(ipe_neighbor(0:npe-1))
253  allocate(particles_on_mype(nparticles_per_cpu_hi))
254  allocate(particles_active_on_mype(nparticles_per_cpu_hi))
255 
256  particles_on_mype(:) = 0
257  particles_active_on_mype(:) = 0
258 
259  ! Generate header for CSV files
260  csv_header = ' time, dt, x1, x2, x3, u1, u2, u3,'
261  ! If sampling, name the payloads like the fluid quantities
262  if (physics_type_particles == 'sample') then
263  do n = 1, nw
264  write(strdata,"(a,a,a)") ' ', trim(prim_wnames(n)), ','
265  csv_header = trim(csv_header) // trim(strdata)
266  end do
267  else ! Otherwise, payloads are called pl01, pl02, ...
268  do n = 1, ndefpayload
269  write(strdata,"(a,i2.2,a)") ' pl', n, ','
270  csv_header = trim(csv_header) // trim(strdata)
271  end do
272  end if
273  ! User-defined payloads are called usrpl01, usrpl02, ...
274  if (nusrpayload > 0) then
275  do n = 1, nusrpayload
276  write(strdata,"(a,i2.2,a)") ' usrpl', n, ','
277  csv_header = trim(csv_header) // trim(strdata)
278  end do
279  end if
280  csv_header = trim(csv_header) // ' ipe, iteration, index'
281 
282  ! Generate format string for CSV files
283  write(csv_format, '(A,I0,A,A)') '(', 8 + npayload, '(es14.6,", ")', &
284  'i8.7,", ",i11.10,", ",i8.7)'
285 
286  end subroutine particle_base_init
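 ! Illustration (not part of the module): with the defaults above
 ! (ndefpayload = 1, nusrpayload = 0, hence npayload = 1) the generated CSV
 ! header reads, as a sketch,
 !   ' time, dt, x1, x2, x3, u1, u2, u3, pl01, ipe, iteration, index'
 ! and csv_format describes 8 + npayload real columns (es14.6) followed by
 ! the three integer columns ipe, iteration and index.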
287 
288  !> Initialise communicators for particles
289  subroutine init_particles_com()
291 
292  integer :: oldtypes(0:7), offsets(0:7), blocklengths(0:7)
293 
294  oldtypes(0) = mpi_logical
295  oldtypes(1) = mpi_integer
296  oldtypes(2:7) = mpi_double_precision
297 
298  blocklengths(0:5)=1
299  blocklengths(6)=3
300  blocklengths(7)=3
301 
302  offsets(0) = 0
303  offsets(1) = size_logical * blocklengths(0)
304  offsets(2) = offsets(1) + size_int * blocklengths(1)
305  offsets(3) = offsets(2) + size_double * blocklengths(2)
306  offsets(4) = offsets(3) + size_double * blocklengths(3)
307  offsets(5) = offsets(4) + size_double * blocklengths(4)
308  offsets(6) = offsets(5) + size_double * blocklengths(5)
309  offsets(7) = offsets(6) + size_double * blocklengths(6)
310 
311  call mpi_type_struct(8,blocklengths,offsets,oldtypes,type_particle,ierrmpi)
312  call mpi_type_commit(type_particle,ierrmpi)
313 
314  end subroutine init_particles_com
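 ! Note: the derived MPI type built above mirrors the particle_t layout, i.e.
 ! 8 blocks: one logical (follow), one integer (index), four doubles
 ! (q, m, time, dt) and two double-precision triplets (x, u). mpi_type_struct
 ! is deprecated in recent MPI standards; an equivalent construction (a sketch,
 ! not the code used here) would use mpi_get_address and mpi_type_create_struct
 ! with integer(kind=mpi_address_kind) displacements.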
315 
316  subroutine get_maxwellian_velocity(v, velocity)
317  double precision, intent(out) :: v(3)
318  double precision, intent(in) :: velocity
319  double precision :: vtmp(3), vnorm
320 
321  vtmp(1) = velocity * rng%normal()
322  vtmp(2) = velocity * rng%normal()
323  vtmp(3) = velocity * rng%normal()
324  vnorm = norm2(vtmp)
325  v = rng%sphere(vnorm)
326  end subroutine get_maxwellian_velocity
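 ! Note on the sampling above: each component of vtmp is drawn from a normal
 ! distribution with standard deviation 'velocity', so the speed vnorm follows
 ! a Maxwellian speed distribution, and rng%sphere then redistributes that
 ! speed over a direction drawn uniformly on the sphere.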
327 
328  subroutine get_uniform_pos(x)
330  double precision, intent(out) :: x(3)
331 
332  call rng%unif_01_vec(x)
333 
334  {^d&x(^d) = xprobmin^d + x(^d) * (xprobmax^d - xprobmin^d)\}
335  x(ndim+1:) = 0.0d0
336  end subroutine get_uniform_pos
337 
338  !> Initialize grid variables for particles
339  subroutine init_gridvars()
341 
342  integer :: igrid, iigrid
343 
344  do iigrid=1,igridstail; igrid=igrids(iigrid);
345  allocate(gridvars(igrid)%w(ixg^t,1:ngridvars))
346  gridvars(igrid)%w = 0.0d0
347 
348  if (time_advance) then
349  allocate(gridvars(igrid)%wold(ixg^t,1:ngridvars))
350  gridvars(igrid)%wold = 0.0d0
351  end if
352  end do
353 
355  if (associated(particles_define_additional_gridvars)) then
356  call particles_define_additional_gridvars(ngridvars)
357  end if
358 
359  end subroutine init_gridvars
360 
361  !> Deallocate grid variables for particles
362  subroutine finish_gridvars()
364 
365  integer :: iigrid, igrid
366 
367  do iigrid=1,igridstail; igrid=igrids(iigrid);
368  deallocate(gridvars(igrid)%w)
369  if(time_advance) deallocate(gridvars(igrid)%wold)
370  end do
371 
372  end subroutine finish_gridvars
373 
374  !> This routine fills the particle grid variables with the default for mhd, i.e. only E and B
375  subroutine fill_gridvars_default()
378 
379  integer :: igrid, iigrid
380  double precision :: E(ixG^T, ndir)
381  double precision :: B(ixG^T, ndir)
382 
383  do iigrid=1,igridstail; igrid=igrids(iigrid);
384  if (associated(usr_particle_fields)) then
385  call usr_particle_fields(ps(igrid)%w, ps(igrid)%x, e, b)
386  gridvars(igrid)%w(ixg^t,ep(:)) = e
387  gridvars(igrid)%w(ixg^t,bp(:)) = b
388  else
389  call fields_from_mhd(igrid, ps(igrid)%w, gridvars(igrid)%w)
390  end if
391 
392  if (time_advance) then
393  if (associated(usr_particle_fields)) then
394  call usr_particle_fields(pso(igrid)%w, ps(igrid)%x, e, b)
395  gridvars(igrid)%wold(ixg^t,ep(:)) = e
396  gridvars(igrid)%wold(ixg^t,bp(:)) = b
397  else
398  call fields_from_mhd(igrid, pso(igrid)%w, gridvars(igrid)%wold)
399  end if
400  end if
401  end do
402  end subroutine fill_gridvars_default
403 
404  !> Determine fields from MHD variables
405  subroutine fields_from_mhd(igrid, w_mhd, w_part)
407  integer, intent(in) :: igrid
408  double precision, intent(in) :: w_mhd(ixG^T,nw)
409  double precision, intent(inout) :: w_part(ixG^T,ngridvars)
410  integer :: idirmin, idir
411  double precision :: current(ixG^T,7-2*ndir:3)
412  double precision :: w(ixG^T,1:nw)
413 
414  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
415 
416  w(ixg^t,1:nw) = w_mhd(ixg^t,1:nw)
417  w_part(ixg^t,bp(1):bp(3)) = 0.0d0
418  w_part(ixg^t,ep(1):ep(3)) = 0.0d0
419 
420  call phys_to_primitive(ixg^ll,ixg^ll,w,ps(igrid)%x)
421 
422  ! fill with magnetic field:
423  if (b0field) then
424  do idir = 1, ndir
425  w_part(ixg^t,bp(idir))=w(ixg^t,iw_mag(idir))+ps(igrid)%B0(ixg^t,idir,0)
426  end do
427  else
428  w_part(ixg^t,bp(:)) = w(ixg^t,iw_mag(:))
429  end if
430 ! w_part(ixG^T,bp(:)) = w(ixG^T,iw_mag(:))
431 
432  ! fill with current
433  current = zero
434  call particle_get_current(w,ixg^ll,ixg^llim^d^lsub1,idirmin,current)
435  w_part(ixg^t,jp(:)) = current(ixg^t,:)
436 
437  ! fill with electric field
438  w_part(ixg^t,ep(1)) = w_part(ixg^t,bp(2)) * &
439  w(ixg^t,iw_mom(3)) - w_part(ixg^t,bp(3)) * &
440  w(ixg^t,iw_mom(2)) + particles_eta * current(ixg^t,1)
441  w_part(ixg^t,ep(2)) = w_part(ixg^t,bp(3)) * &
442  w(ixg^t,iw_mom(1)) - w_part(ixg^t,bp(1)) * &
443  w(ixg^t,iw_mom(3)) + particles_eta * current(ixg^t,2)
444  w_part(ixg^t,ep(3)) = w_part(ixg^t,bp(1)) * &
445  w(ixg^t,iw_mom(2)) - w_part(ixg^t,bp(2)) * &
446  w(ixg^t,iw_mom(1)) + particles_eta * current(ixg^t,3)
447 
448  ! Hall term
449  if (particles_etah > zero) then
450  w_part(ixg^t,ep(1)) = w_part(ixg^t,ep(1)) + particles_etah/w(ixg^t,iw_rho) * &
451  (current(ixg^t,2) * w_part(ixg^t,bp(3)) - current(ixg^t,3) * w_part(ixg^t,bp(2)))
452  w_part(ixg^t,ep(2)) = w_part(ixg^t,ep(2)) + particles_etah/w(ixg^t,iw_rho) * &
453  (-current(ixg^t,1) * w_part(ixg^t,bp(3)) + current(ixg^t,3) * w_part(ixg^t,bp(1)))
454  w_part(ixg^t,ep(3)) = w_part(ixg^t,ep(3)) + particles_etah/w(ixg^t,iw_rho) * &
455  (current(ixg^t,1) * w_part(ixg^t,bp(2)) - current(ixg^t,2) * w_part(ixg^t,bp(1)))
456  end if
457 
458  end subroutine fields_from_mhd
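 ! For reference, the electric field assembled above corresponds to a
 ! resistive Ohm's law with an optional Hall term (in code units):
 !   E = -v x B + particles_eta * J + (particles_etah/rho) * (J x B)
 ! with J the current density returned by particle_get_current below.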
459 
460  !> Calculate idirmin and the idirmin:3 components of the common current array
461  !> make sure that dxlevel(^D) is set correctly.
462  subroutine particle_get_current(w,ixI^L,ixO^L,idirmin,current)
464  use mod_geometry
465 
466  integer :: idirmin0
467  integer :: ixO^L, idirmin, ixI^L
468  double precision :: w(ixI^S,1:nw)
469  integer :: idir
470  ! For ndir=2 only 3rd component of J can exist, ndir=1 is impossible for MHD
471  double precision :: current(ixI^S,7-2*ndir:3),bvec(ixI^S,1:ndir)
472 
473  idirmin0 = 7-2*ndir
474 
475  if (b0field) then
476  do idir = 1, ndir
477  bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))+block%B0(ixi^s,idir,0)
478  end do
479  else
480  do idir = 1, ndir
481  bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))
482  end do
483  end if
484 
485  call curlvector(bvec,ixi^l,ixo^l,current,idirmin,idirmin0,ndir)
486 
487  end subroutine particle_get_current
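 ! For reference: in code units the returned current is J = curl(B), evaluated
 ! by curlvector on the (optionally B0-split) magnetic field. For ndir = 2 only
 ! the out-of-plane component J_3 exists, which is why idirmin0 = 7-2*ndir.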
488 
489  !> Let particles evolve in time. The routine also handles grid variables and
490  !> output.
491  subroutine handle_particles()
492  use mod_timing
494 
495  integer :: steps_taken, nparticles_left
496 
497  tpartc0 = mpi_wtime()
498 
499  call set_neighbor_ipe
500 
501  tpartc_grid_0=mpi_wtime()
502  call init_gridvars()
503  tpartc_grid = tpartc_grid + (mpi_wtime()-tpartc_grid_0)
504 
505  tpartc_com0=mpi_wtime()
506  call comm_particles_global()
507  tpartc_com=tpartc_com + (mpi_wtime()-tpartc_com0)
508 
509  if(time_advance) then
510  tmax_particles = global_time + dt
511  else
512  tmax_particles = global_time + (time_max-global_time)
513  end if
514 
515  ! main integration loop
516  do
517  if (tmax_particles >= t_next_output) then
518  call advance_particles(t_next_output, steps_taken)
519  tpartc_io_0 = mpi_wtime()
520  call write_particle_output()
521  timeio_tot = timeio_tot+(mpi_wtime()-tpartc_io_0)
522  tpartc_io = tpartc_io+(mpi_wtime()-tpartc_io_0)
523 
524  ! If we are using a constant time step, this prevents multiple outputs
525  ! at the same time
526  t_next_output = max(t_next_output, min_particle_time) + dtsave_particles
528  else
529  call advance_particles(tmax_particles, steps_taken)
530 ! call write_particle_output()
531  exit
532  end if
533 
534  call mpi_allreduce(nparticles_on_mype, nparticles_left, 1, mpi_integer, &
535  mpi_sum, icomm, ierrmpi)
536  if (nparticles_left == 0 .and. convert) call mpistop('No particles left')
537 
538  end do
539 
540  call finish_gridvars()
541 
542  tpartc = tpartc + (mpi_wtime() - tpartc0)
543 
544  end subroutine handle_particles
545 
546  !> Routine handles the particle evolution
547  subroutine advance_particles(end_time, steps_taken)
548  use mod_timing
550  double precision, intent(in) :: end_time !< Advance at most up to this time
551  integer, intent(out) :: steps_taken
552  integer :: n_active
553 
554  steps_taken = 0
555 
556  do
557  call select_active_particles(end_time)
558 
559  ! Determine total number of active particles
560  call mpi_allreduce(nparticles_active_on_mype, n_active, 1, mpi_integer, &
561  mpi_sum, icomm, ierrmpi)
562 
563  if (n_active == 0) exit
564 
565  tpartc_int_0=mpi_wtime()
566  call particles_integrate(end_time)
567  steps_taken = steps_taken + 1
568  tpartc_int=tpartc_int+(mpi_wtime()-tpartc_int_0)
569 
570  tpartc_com0=mpi_wtime()
571  call comm_particles()
572  tpartc_com=tpartc_com + (mpi_wtime()-tpartc_com0)
573 
575  end do
576 
577  end subroutine advance_particles
578 
579  pure subroutine limit_dt_endtime(t_left, dt_p)
580  double precision, intent(in) :: t_left
581  double precision, intent(inout) :: dt_p
582  double precision :: n_steps
583  double precision, parameter :: eps = 1d-10
584 
585  n_steps = t_left / dt_p
586 
587  if (n_steps < 1+eps) then
588  ! Last step
589  dt_p = t_left
590  else if (n_steps < 2-eps) then
591  ! Divide time left over two steps, to prevent one tiny time step
592  dt_p = 0.5d0 * t_left
593  end if
594 
595  end subroutine limit_dt_endtime
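 ! Worked example (illustrative): if t_left = 1.5*dt_p then n_steps = 1.5 and
 ! the second branch halves the remaining time, giving two equal steps of
 ! 0.75*dt_p instead of one full step followed by a very small one; if
 ! t_left = 0.9*dt_p the first branch takes a single final step of exactly
 ! t_left.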
596 
597  ! Get the electric field in the grid at position x.
598  ! For ideal SRMHD, we first interpolate b and u=lfac*v/c
599  ! The electric field then follows from e = b x beta, where beta=u/lfac.
600  ! This ensures for the resulting e that e<b and e.b=0. Interpolating on u
601  ! avoids interpolation-errors leading to v>c.
602  ! For (non-ideal) MHD, we directly interpolate the electric field as
603  ! there is no such constraint.
604 
605  subroutine get_vec(ix,igrid,x,tloc,vec)
608 
609  integer,intent(in) :: ix(ndir) !< Indices in gridvars
610  integer,intent(in) :: igrid
611  double precision,dimension(3), intent(in) :: x
612  double precision, intent(in) :: tloc
613  double precision,dimension(ndir), intent(out) :: vec
614  double precision,dimension(ndir) :: vec1, vec2
615  double precision :: td
616  integer :: ic^D,idir
617 
618  if (associated(usr_particle_analytic)) then
619  call usr_particle_analytic(ix, x, tloc, vec)
620  else if (.not.time_advance) then
621  do idir=1,ndir
622  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,ix(idir)), &
623  ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
624  end do
625  else
626  do idir=1,ndir
627  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,ix(idir)), &
628  ps(igrid)%x(ixg^t,1:ndim),x,vec1(idir))
629  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,ix(idir)), &
630  ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
631  end do
632  td = (tloc - global_time) / dt
633  vec(:) = vec1(:) * (1.0d0 - td) + vec2(:) * td
634  end if
635 
636  end subroutine get_vec
637 
638  !> Get Lorentz factor from relativistic momentum
639  pure subroutine get_lfac(u,lfac)
640  use mod_global_parameters, only: ndir, c_norm
641  double precision,dimension(3), intent(in) :: u
642  double precision, intent(out) :: lfac
643 
644  if (relativistic) then
645  lfac = sqrt(1.0d0 + sum(u(:)**2)/c_norm**2)
646  else
647  lfac = 1.0d0
648  end if
649  end subroutine get_lfac
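 ! For reference: u here is the normalized momentum u = lfac*v, so in the
 ! relativistic case lfac = sqrt(1 + |u|^2/c_norm^2) and v = u/lfac, which
 ! keeps |v| < c_norm; otherwise lfac = 1 and u is simply the velocity.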
650 
651  !> Get Lorentz factor from velocity
652  pure subroutine get_lfac_from_velocity(v,lfac)
653  use mod_global_parameters, only: ndir, c_norm
654  double precision,dimension(3), intent(in) :: v
655  double precision, intent(out) :: lfac
656 
657  if (relativistic) then
658  lfac = 1.0d0 / sqrt(1.0d0 - sum(v(:)**2)/c_norm**2)
659  else
660  lfac = 1.0d0
661  end if
662 
663  end subroutine get_lfac_from_velocity
664 
665  subroutine cross(a,b,c)
667  use mod_geometry
668  double precision, dimension(ndir), intent(in) :: a,b
669  double precision, dimension(ndir), intent(out) :: c
670 
671  ! ndir needs to be three for this to work!!!
672  if(ndir==3) then
673  select case(coordinate)
674  case (Cartesian,Cartesian_stretched)
675  c(1) = a(2)*b(3) - a(3)*b(2)
676  c(2) = a(3)*b(1) - a(1)*b(3)
677  c(3) = a(1)*b(2) - a(2)*b(1)
678  case (cylindrical)
679  c(r_) = a(phi_)*b(z_) - a(z_)*b(phi_)
680  c(phi_) = a(z_)*b(r_) - a(r_)*b(z_)
681  c(z_) = a(r_)*b(phi_) - a(phi_)*b(r_)
682  case default
683  call mpistop('geometry not implemented in cross(a,b,c)')
684  end select
685  else
686  call mpistop("cross in particles needs to be run with three components!")
687  end if
688 
689  end subroutine cross
690 
691  subroutine interpolate_var(igrid,ixI^L,ixO^L,gf,x,xloc,gfloc)
693 
694  integer, intent(in) :: igrid,ixI^L, ixO^L
695  double precision, intent(in) :: gf(ixI^S)
696  double precision, intent(in) :: x(ixI^S,1:ndim)
697  double precision, intent(in) :: xloc(1:3)
698  double precision, intent(out) :: gfloc
699  double precision :: xd^D
700  {^iftwod
701  double precision :: c00, c10
702  }
703  {^ifthreed
704  double precision :: c0, c1, c00, c10, c01, c11
705  }
706  integer :: ic^D, ic1^D, ic2^D, idir
707 
708  ! flat interpolation:
709  {ic^d = int((xloc(^d)-rnode(rpxmin^d_,igrid))/rnode(rpdx^d_,igrid)) + 1 + nghostcells \}
710  !gfloc = gf(ic^D)
711 
712  ! linear interpolation:
713  {
714  if (x({ic^dd},^d) .lt. xloc(^d)) then
715  ic1^d = ic^d
716  else
717  ic1^d = ic^d -1
718  end if
719  ic2^d = ic1^d + 1
720  \}
721 
722  {^d&
723  if(ic1^d.lt.ixglo^d+1 .or. ic2^d.gt.ixghi^d-1) then
724  print *, 'direction: ',^d
725  print *, 'position: ',xloc(1:ndim)
726  print *, 'indices:', ic1^d,ic2^d
727  call mpistop('Trying to interpolate from out of grid!')
728  end if
729  \}
730 
731  {^ifoned
732  xd1 = (xloc(1)-x(ic11,1)) / (x(ic21,1) - x(ic11,1))
733  gfloc = gf(ic11) * (1.0d0 - xd1) + gf(ic21) * xd1
734  }
735  {^iftwod
736  xd1 = (xloc(1)-x(ic11,ic12,1)) / (x(ic21,ic12,1) - x(ic11,ic12,1))
737  xd2 = (xloc(2)-x(ic11,ic12,2)) / (x(ic11,ic22,2) - x(ic11,ic12,2))
738  c00 = gf(ic11,ic12) * (1.0d0 - xd1) + gf(ic21,ic12) * xd1
739  c10 = gf(ic11,ic22) * (1.0d0 - xd1) + gf(ic21,ic22) * xd1
740  gfloc = c00 * (1.0d0 - xd2) + c10 * xd2
741  }
742  {^ifthreed
743  xd1 = (xloc(1)-x(ic11,ic12,ic13,1)) / (x(ic21,ic12,ic13,1) - x(ic11,ic12,ic13,1))
744  xd2 = (xloc(2)-x(ic11,ic12,ic13,2)) / (x(ic11,ic22,ic13,2) - x(ic11,ic12,ic13,2))
745  xd3 = (xloc(3)-x(ic11,ic12,ic13,3)) / (x(ic11,ic12,ic23,3) - x(ic11,ic12,ic13,3))
746 
747  c00 = gf(ic11,ic12,ic13) * (1.0d0 - xd1) + gf(ic21,ic12,ic13) * xd1
748  c10 = gf(ic11,ic22,ic13) * (1.0d0 - xd1) + gf(ic21,ic22,ic13) * xd1
749  c01 = gf(ic11,ic12,ic23) * (1.0d0 - xd1) + gf(ic21,ic12,ic23) * xd1
750  c11 = gf(ic11,ic22,ic23) * (1.0d0 - xd1) + gf(ic21,ic22,ic23) * xd1
751 
752  c0 = c00 * (1.0d0 - xd2) + c10 * xd2
753  c1 = c01 * (1.0d0 - xd2) + c11 * xd2
754 
755  gfloc = c0 * (1.0d0 - xd3) + c1 * xd3
756  }
757 
758  end subroutine interpolate_var
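 ! For reference: in 3D the routine above is standard trilinear interpolation.
 ! With xd1, xd2, xd3 the fractional offsets of xloc between the two enclosing
 ! cell centres in each direction, the eight corner values are blended as
 !   gfloc = sum_corners gf(corner) * prod_d (xd_d or 1-xd_d),
 ! which reduces to bilinear interpolation in 2D and linear interpolation in 1D.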
759 
760  subroutine time_spent_on_particles()
761  use mod_timing
763 
764  double precision :: tpartc_avg, tpartc_int_avg, tpartc_io_avg, tpartc_com_avg, tpartc_grid_avg
765 
766  call mpi_reduce(tpartc,tpartc_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
767  call mpi_reduce(tpartc_int,tpartc_int_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
768  call mpi_reduce(tpartc_io,tpartc_io_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
769  call mpi_reduce(tpartc_com,tpartc_com_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
770  call mpi_reduce(tpartc_grid,tpartc_grid_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
771 
772  if( mype ==0 ) then
773  tpartc_avg = tpartc_avg/dble(npe)
774  tpartc_int_avg = tpartc_int_avg/dble(npe)
775  tpartc_io_avg = tpartc_io_avg/dble(npe)
776  tpartc_com_avg = tpartc_com_avg/dble(npe)
777  tpartc_grid_avg = tpartc_grid_avg/dble(npe)
778  write(*,'(a,f12.3,a)')' Particle handling took : ',tpartc,' sec'
779  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc/timeloop,' %'
780  write(*,'(a,f12.3,a)')' Particle IO took : ',tpartc_io_avg,' sec'
781  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_io_avg/timeloop,' %'
782  write(*,'(a,f12.3,a)')' Particle COM took : ',tpartc_com_avg,' sec'
783  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_com_avg/timeloop,' %'
784  write(*,'(a,f12.3,a)')' Particle integration took : ',tpartc_int_avg,' sec'
785  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_int_avg/timeloop,' %'
786  write(*,'(a,f12.3,a)')' Particle init grid took : ',tpartc_grid_avg,' sec'
787  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_grid_avg/timeloop,' %'
788  end if
789 
790  end subroutine time_spent_on_particles
791 
792  subroutine read_particles_snapshot(file_exists)
794 
795  logical,intent(out) :: file_exists
796  character(len=std_len) :: filename
797  integer :: mynpayload, mynparticles, pos
798 
799  ! some initialisations:
800  nparticles_on_mype = 0
801  mynparticles = 0
802  nparticles = 0
803  it_particles = 0
804 
805  ! open the snapshot file on the headnode
806  file_exists=.false.
807  if (mype == 0) then
808 ! write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotini,'.dat'
809  ! Strip restart_from_file of the ending
810  pos = scan(restart_from_file, '.dat', back=.true.)
811  write(filename,"(a,a,i4.4,a)") trim(restart_from_file(1:pos-8)),'_particles',snapshotini,'.dat'
812  INQUIRE(file=filename, exist=file_exists)
813  if (.not. file_exists) then
814  write(*,*) 'WARNING: File '//trim(filename)//' with particle data does not exist.'
815  write(*,*) 'Initialising particles from user or default routines'
816  else
817  open(unit=unitparticles,file=filename,form='unformatted',action='read',status='unknown',access='stream')
818  read(unitparticles) nparticles,it_particles,mynpayload
819  if (mynpayload .ne. npayload) &
820  call mpistop('npayload in restart file does not match npayload in mod_particles')
821  end if
822  end if
823 
824  call mpi_bcast(file_exists,1,mpi_logical,0,icomm,ierrmpi)
825  if (.not. file_exists) return
826 
827  call mpi_bcast(nparticles,1,mpi_integer,0,icomm,ierrmpi)
828  call mpi_bcast(it_particles,1,mpi_integer,0,icomm,ierrmpi)
829 
830  do while (mynparticles .lt. nparticles)
831  if (mype == 0) then
832  do while (nparticles_on_mype .lt. nparticles_per_cpu_hi &
833  .and. mynparticles .lt. nparticles)
834  call read_from_snapshot
835  mynparticles = mynparticles + 1
836  end do
837  end if ! mype==0
838  call mpi_bcast(mynparticles,1,mpi_integer,0,icomm,ierrmpi)
839  call comm_particles_global()
840  end do
841 
842  if (mype == 0) close(unit=unitparticles)
843 
844  end subroutine read_particles_snapshot
845 
846  subroutine read_from_snapshot()
847 
848  integer :: index,igrid_particle,ipe_particle
849 
850  read(unitparticles) index
851  allocate(particle(index)%self)
852  allocate(particle(index)%payload(1:npayload))
853  particle(index)%self%index = index
854 
855  read(unitparticles) particle(index)%self%follow
856  read(unitparticles) particle(index)%self%q
857  read(unitparticles) particle(index)%self%m
858  read(unitparticles) particle(index)%self%time
859  read(unitparticles) particle(index)%self%dt
860  read(unitparticles) particle(index)%self%x
861  read(unitparticles) particle(index)%self%u
862  read(unitparticles) particle(index)%payload(1:npayload)
863 
864 ! if (particle(index)%self%follow) print*, 'follow index:', index
865 
866  call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
867  particle(index)%igrid = igrid_particle
868  particle(index)%ipe = ipe_particle
869 
870  call push_particle_into_particles_on_mype(index)
871 
872  end subroutine read_from_snapshot
873 
874  subroutine write_particles_snapshot()
876 
877  character(len=std_len) :: filename
878  type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
879  type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
880  double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
881  double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
882  integer :: status(MPI_STATUS_SIZE)
883  integer,dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
884  integer :: ipe, ipart, iipart, send_n_particles_for_output
885  logical,save :: file_exists=.false.
886 
887  if (.not. write_snapshot) return
888 
889  receive_n_particles_for_output_from_ipe(:) = 0
890 
891  ! open the snapshot file on the headnode
892  if (mype .eq. 0) then
893  write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotnext,'.dat'
894  INQUIRE(file=filename, exist=file_exists)
895  if (.not. file_exists) then
896  open(unit=unitparticles,file=filename,form='unformatted',status='new',access='stream')
897  else
898  open(unit=unitparticles,file=filename,form='unformatted',status='replace',access='stream')
899  end if
900  write(unitparticles) nparticles,it_particles,npayload
901  end if
902 
903  if (npe==1) then
904  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
905  call append_to_snapshot(particle(ipart)%self,particle(ipart)%payload)
906  end do
907  return
908  end if
909 
910  if (mype .ne. 0) then
911  call mpi_send(nparticles_on_mype,1,mpi_integer,0,mype,icomm,ierrmpi)
912  ! fill the send_buffer
913  send_n_particles_for_output = nparticles_on_mype
914  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
915  send_particles(iipart) = particle(ipart)%self
916  send_payload(1:npayload,iipart) = particle(ipart)%payload(1:npayload)
917  end do
918  else
919  ! get number of particles on other ipes
920  do ipe=1,npe-1
921  call mpi_recv(receive_n_particles_for_output_from_ipe(ipe),1,mpi_integer,ipe,ipe,icomm,status,ierrmpi)
922  end do
923  end if
924 
925  if (mype .ne. 0) then
926  call mpi_send(send_particles,send_n_particles_for_output,type_particle,0,mype,icomm,ierrmpi)
927  call mpi_send(send_payload,npayload*send_n_particles_for_output,mpi_double_precision,0,mype,icomm,ierrmpi)
928  end if
929 
930  if (mype==0) then
931  ! first output own particles (already in receive buffer)
932  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
933  call append_to_snapshot(particle(ipart)%self,particle(ipart)%payload)
934  end do
935  ! now output the particles sent from the other ipes
936  do ipe=1,npe-1
937  call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
938  call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
939  do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
940  call append_to_snapshot(receive_particles(ipart),receive_payload(1:npayload,ipart))
941  end do ! ipart
942  end do ! ipe
943  close(unit=unitparticles)
944  end if ! mype == 0
945 
946  end subroutine write_particles_snapshot
947 
948  subroutine append_to_snapshot(myparticle,mypayload)
949 
950  type(particle_t),intent(in) :: myparticle
951  double precision :: mypayload(1:npayload)
952 
953  write(unitparticles) myparticle%index
954  write(unitparticles) myparticle%follow
955  write(unitparticles) myparticle%q
956  write(unitparticles) myparticle%m
957  write(unitparticles) myparticle%time
958  write(unitparticles) myparticle%dt
959  write(unitparticles) myparticle%x
960  write(unitparticles) myparticle%u
961  write(unitparticles) mypayload(1:npayload)
962 
963  end subroutine append_to_snapshot
964 
965  subroutine init_particles_output()
967 
968  character(len=std_len) :: filename
969  character(len=1024) :: line, strdata
970  integer :: iipart, ipart, icomp
971 
972  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
973  if (particle(ipart)%self%follow) then
974  filename = make_particle_filename(particle(ipart)%self%time,particle(ipart)%self%index,'individual')
975  open(unit=unitparticles,file=filename,status='replace')
976  line=''
977  do icomp=1, npayload
978  write(strdata,"(a,i2.2,a)") 'payload',icomp,','
979  line = trim(line)//trim(strdata)
980  end do
981  write(unitparticles,"(a,a,a)") 'time,dt,x1,x2,x3,u1,u2,u3,',trim(line),'ipe,iteration,index'
982  close(unit=unitparticles)
983  end if
984  end do
985 
986  end subroutine init_particles_output
987 
988  subroutine select_active_particles(end_time)
990  double precision, intent(in) :: end_time
991 
992  integer :: ipart, iipart
993  logical :: activate
994  double precision :: t_min_mype
995 
996  t_min_mype = bigdouble
997  nparticles_active_on_mype = 0
998 
999  do iipart = 1, nparticles_on_mype
1000  ipart = particles_on_mype(iipart);
1001  activate = (particle(ipart)%self%time < end_time)
1002  t_min_mype = min(t_min_mype, particle(ipart)%self%time)
1003 
1004  if (activate) then
1005  nparticles_active_on_mype = nparticles_active_on_mype + 1
1006  particles_active_on_mype(nparticles_active_on_mype) = ipart
1007  end if
1008  end do
1009 
1010  call mpi_allreduce(t_min_mype, min_particle_time, 1, mpi_double_precision, &
1011  mpi_min, icomm, ierrmpi)
1012 
1013  end subroutine select_active_particles
1014 
1015  subroutine locate_particle(index,igrid_particle,ipe_particle)
1016  ! given the particle's unique index, tell me on which cpu and igrid it is
1017  ! returns -1,-1 if particle was not found
1019 
1020  integer, intent(in) :: index
1021  integer, intent(out) :: igrid_particle, ipe_particle
1022  integer :: iipart,ipart,ipe_has_particle,ipe
1023  logical :: has_particle(0:npe-1)
1024  integer,dimension(0:1) :: buff
1025 
1026  has_particle(:) = .false.
1027  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1028  if (particle(ipart)%self%index == index) then
1029  has_particle(mype) = .true.
1030  exit
1031  end if
1032  end do
1033 
1034  if (has_particle(mype)) then
1035  buff(0) = particle(ipart)%igrid
1036  buff(1) = mype
1037  end if
1038 
1039  if (npe>0) call mpi_allgather(has_particle(mype),1,mpi_logical,has_particle,1,mpi_logical,icomm,ierrmpi)
1040 
1041  ipe_has_particle = -1
1042  do ipe=0,npe-1
1043  if (has_particle(ipe) .eqv. .true.) then
1044  ipe_has_particle = ipe
1045  exit
1046  end if
1047  end do
1048 
1049  if (ipe_has_particle .ne. -1) then
1050  if (npe>0) call mpi_bcast(buff,2,mpi_integer,ipe_has_particle,icomm,ierrmpi)
1051  igrid_particle=buff(0)
1052  ipe_particle = buff(1)
1053  else
1054  igrid_particle=-1
1055  ipe_particle = -1
1056  end if
1057 
1058  end subroutine locate_particle
1059 
1060  subroutine find_particle_ipe(x,igrid_particle,ipe_particle)
1061  use mod_forest, only: tree_node_ptr, tree_root
1062  use mod_slice, only: get_igslice
1064 
1065  double precision, intent(in) :: x(3)
1066  integer, intent(out) :: igrid_particle, ipe_particle
1067  integer :: ig(ndir,nlevelshi), ig_lvl(nlevelshi)
1068  integer :: idim, ic(ndim)
1069  type(tree_node_ptr) :: branch
1070 
1071  ! first check if the particle is in the domain
1072  if (.not. particle_in_domain(x)) then
1073  igrid_particle = -1
1074  ipe_particle = -1
1075  return
1076  end if
1077 
1078  ! get the index on each level
1079  do idim = 1, ndim
1080  call get_igslice(idim,x(idim), ig_lvl)
1081  ig(idim,:) = ig_lvl
1082  end do
1083 
1084  ! traverse the tree until leaf is found
1085  branch=tree_root({ig(^d,1)})
1086  do while (.not.branch%node%leaf)
1087  {ic(^d)=ig(^d,branch%node%level+1) - 2 * branch%node%ig^d +2\}
1088  branch%node => branch%node%child({ic(^d)})%node
1089  end do
1090 
1091  igrid_particle = branch%node%igrid
1092  ipe_particle = branch%node%ipe
1093 
1094  end subroutine find_particle_ipe
1095 
1096  !> Check if particle is inside computational domain
1097  logical function particle_in_domain(x)
1099  double precision, dimension(ndim), intent(in) :: x
1100 
1101  particle_in_domain = all(x >= [ xprobmin^d ]) .and. &
1102  all(x < [ xprobmax^d ]) ! Jannis: as before < instead of <= here, necessary?
1103  end function particle_in_domain
1104 
1105  !> Quick check if particle is still in igrid
1106  logical function particle_in_igrid(ipart, igrid)
1108  integer, intent(in) :: igrid, ipart
1109  double precision :: x(ndim), grid_rmin(ndim), grid_rmax(ndim)
1110 
1111  ! First check if the igrid is still there
1112  if (.not. allocated(ps(igrid)%w)) then
1113  particle_in_igrid = .false.
1114  else
1115  grid_rmin = [ {rnode(rpxmin^d_,igrid)} ]
1116  grid_rmax = [ {rnode(rpxmax^d_,igrid)} ]
1117  x = particle(ipart)%self%x(1:ndim)
1118  particle_in_igrid = all(x >= grid_rmin) .and. all(x < grid_rmax)
1119  end if
1120  end function particle_in_igrid
1121 
1122  subroutine set_neighbor_ipe()
1124 
1125  integer :: igrid, iigrid,ipe
1126  integer :: my_neighbor_type, i^D
1127  logical :: ipe_is_neighbor(0:npe-1)
1128 
1129  ipe_is_neighbor(:) = .false.
1130  do iigrid=1,igridstail; igrid=igrids(iigrid);
1131 
1132  {do i^db=-1,1\}
1133  if (i^d==0|.and.) then
1134  cycle
1135  end if
1136  my_neighbor_type=neighbor_type(i^d,igrid)
1137 
1138  select case (my_neighbor_type)
1139  case (neighbor_boundary) ! boundary
1140  ! do nothing
1141  case (neighbor_coarse) ! fine-coarse
1142  call ipe_fc(i^d,igrid,ipe_is_neighbor)
1143  case (neighbor_sibling) ! same level
1144  call ipe_srl(i^d,igrid,ipe_is_neighbor)
1145  case (neighbor_fine) ! coarse-fine
1146  call ipe_cf(i^d,igrid,ipe_is_neighbor)
1147  end select
1148 
1149  {end do\}
1150 
1151  end do
1152 
1153  ! remove self as neighbor
1154  ipe_is_neighbor(mype) = .false.
1155 
1156  ! Now make the list of neighbors
1157  npe_neighbors = 0
1158  do ipe=0,npe-1
1159  if (ipe_is_neighbor(ipe)) then
1160  npe_neighbors = npe_neighbors + 1
1161  ipe_neighbor(npe_neighbors) = ipe
1162  end if
1163  end do ! ipe
1164 
1165  end subroutine set_neighbor_ipe
1166 
1167  subroutine ipe_fc(i^D,igrid,ipe_is_neighbor)
1169  integer, intent(in) :: i^D, igrid
1170  logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1171 
1172  ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1173 
1174  end subroutine ipe_fc
1175 
1176  subroutine ipe_srl(i^D,igrid,ipe_is_neighbor)
1178  integer, intent(in) :: i^D, igrid
1179  logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1180 
1181  ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1182 
1183  end subroutine ipe_srl
1184 
1185  subroutine ipe_cf(i^D,igrid,ipe_is_neighbor)
1187  integer, intent(in) :: i^D, igrid
1188  logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1189  integer :: ic^D, inc^D
1190 
1191  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1192  inc^db=2*i^db+ic^db
1193  \}
1194  ipe_is_neighbor( neighbor_child(2,inc^d,igrid) ) = .true.
1195 
1196  {end do\}
1197 
1198  end subroutine ipe_cf
1199 
1200  subroutine write_particle_output()
1202 
1203  character(len=std_len) :: filename
1204  integer :: ipart,iipart
1205  type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1206  double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1207  integer :: send_n_particles_for_output
1208  integer :: nout
1209  double precision :: tout
1210 
1211  if (write_individual) then
1212  call output_individual()
1213  end if
1214 
1215  if (write_ensemble) then
1216  send_n_particles_for_output = 0
1217 
1218  do iipart=1,nparticles_on_mype
1219  ipart=particles_on_mype(iipart);
1220 
1221  ! have to send particle to rank zero for output
1222  if (mod(particle(ipart)%self%index,downsample_particles) .eq. 0) then
1223  send_n_particles_for_output = send_n_particles_for_output + 1
1224  send_particles(send_n_particles_for_output) = particle(ipart)%self
1225  send_payload(1:npayload,send_n_particles_for_output) = particle(ipart)%payload(1:npayload)
1226  end if
1227  end do
1228 
1229  call output_ensemble(send_n_particles_for_output,send_particles, &
1230  send_payload,'ensemble')
1231  end if
1232 
1233  end subroutine write_particle_output
1234 
1235  character(len=128) function make_particle_filename(tout,index,type)
1237 
1238  character(len=*), intent(in) :: type
1239  double precision, intent(in) :: tout
1240  integer, intent(in) :: index
1241  integer :: nout, mysnapshot
1242 
1243  select case(type)
1244  case ('ensemble')
1245  nout = nint(tout / dtsave_particles)
1246  write(make_particle_filename,"(a,a,i6.6,a)") trim(base_filename),'_ensemble',nout,'.csv'
1247  case ('destroy')
1248  write(make_particle_filename,"(a,a)") trim(base_filename),'_destroyed.csv'
1249  case ('individual')
1250  write(make_particle_filename,"(a,a,i6.6,a)") trim(base_filename),'_particle',index,'.csv'
1251  end select
1252 
1253  end function make_particle_filename
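 ! Illustration (assuming, hypothetically, base_filename = 'output/test'):
 !   output/test_ensemble000010.csv    for type 'ensemble' with nout = 10
 !   output/test_destroyed.csv         for type 'destroy'
 !   output/test_particle000004.csv    for type 'individual' with index = 4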
1254 
1255  subroutine output_ensemble(send_n_particles_for_output,send_particles,send_payload,typefile)
1257 
1258  integer, intent(in) :: send_n_particles_for_output
1259  type(particle_t), dimension(send_n_particles_for_output), intent(in) :: send_particles
1260  double precision, dimension(npayload,send_n_particles_for_output), intent(in) :: send_payload
1261  character(len=*), intent(in) :: typefile
1262  character(len=std_len) :: filename
1263  type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
1264  double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1265  integer :: status(MPI_STATUS_SIZE)
1266  integer,dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1267  integer :: ipe, ipart, nout
1268  logical :: file_exists
1269 
1270  receive_n_particles_for_output_from_ipe(:) = 0
1271 
1272  call mpi_allgather(send_n_particles_for_output, 1, mpi_integer, &
1273  receive_n_particles_for_output_from_ipe, 1, mpi_integer, icomm, ierrmpi)
1274 
1275  ! If there are no particles to be written, skip the output
1276  if (sum(receive_n_particles_for_output_from_ipe(:)) == 0) return
1277 
1278  if (mype > 0) then
1279  call mpi_send(send_particles,send_n_particles_for_output,type_particle,0,mype,icomm,ierrmpi)
1280  call mpi_send(send_payload,npayload*send_n_particles_for_output,mpi_double_precision,0,mype,icomm,ierrmpi)
1281  else
1282  ! Create file and write header
1283  if(typefile=='destroy') then ! Destroyed file
1284  write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1285  trim(typefile) // '.csv'
1287  inquire(file=filename, exist=file_exists)
1288 
1289  if (.not. file_exists) then
1290  open(unit=unitparticles, file=filename)
1291  write(unitparticles,"(a)") trim(csv_header)
1292  else
1293  open(unit=unitparticles, file=filename, access='append')
1294  end if
1295 
1296  else ! Ensemble file
1297  write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1298  trim(typefile) // '_', n_output_ensemble,'.csv'
1299  n_output_ensemble = n_output_ensemble + 1
1300  open(unit=unitparticles,file=filename)
1301  write(unitparticles,"(a)") trim(csv_header)
1302  end if
1303 
1304  ! Write own particles
1305  do ipart=1,send_n_particles_for_output
1306  call output_particle(send_particles(ipart),send_payload(1:npayload,ipart),0,unitparticles)
1307  end do
1308 
1309  ! Write particles from other tasks
1310  do ipe=1,npe-1
1311  call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1312  call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1313  do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1314  call output_particle(receive_particles(ipart),receive_payload(1:npayload,ipart),ipe,unitparticles)
1315  end do ! ipart
1316  end do ! ipe
1317 
1318  close(unitparticles)
1319  end if
1320 
1321  end subroutine output_ensemble
1322 
1323  subroutine output_individual()
1325  character(len=std_len) :: filename
1326  integer :: ipart,iipart,ipe
1327  logical :: file_exists
1328 
1329  do iipart=1,nparticles_on_mype
1330  ipart=particles_on_mype(iipart)
1331 
1332  ! should the particle be dumped?
1333  if (particle(ipart)%self%follow) then
1334  write(filename,"(a,a,i6.6,a)") trim(base_filename), '_particle_', &
1335  particle(ipart)%self%index, '.csv'
1336  inquire(file=filename, exist=file_exists)
1337 
1338  ! Create empty file and write header on first iteration, or when the
1339  ! file does not exist yet
1340  if (it_particles == 0 .or. .not. file_exists) then
1341  open(unit=unitparticles, file=filename)
1342  write(unitparticles,"(a)") trim(csv_header)
1343  else
1344  open(unit=unitparticles, file=filename, access='append')
1345  end if
1346 
1347  call output_particle(particle(ipart)%self,&
1348  particle(ipart)%payload(1:npayload),mype,unitparticles)
1349 
1350  close(unitparticles)
1351  end if
1352  end do
1353 
1354  end subroutine output_individual
1355 
1356  subroutine output_particle(myparticle,payload,ipe,file_unit)
1358  use mod_geometry
1359 
1360  type(particle_t),intent(in) :: myparticle
1361  double precision, intent(in) :: payload(npayload)
1362  integer, intent(in) :: ipe
1363  integer, intent(in) :: file_unit
1364  double precision :: x(3), u(3)
1365  integer :: icomp
1366 
1367  ! convert and normalize the position
1368  if (nocartesian) then
1369  select case(coordinate)
1370  case (Cartesian,Cartesian_stretched)
1371  x = myparticle%x*length_convert_factor
1372  case(cylindrical)
1373  x(r_) = myparticle%x(r_)*length_convert_factor
1374  x(z_) = myparticle%x(z_)*length_convert_factor
1375  x(phi_) = myparticle%x(phi_)
1376  case(spherical)
1377  x(:) = myparticle%x(:)
1378  x(1) = x(1)*length_convert_factor
1379  end select
1380  else
1381  call partcoord_to_cartesian(myparticle%x,x)
1382  x(:) = x(:)*length_convert_factor
1383  end if
1384 
1385  u = myparticle%u(1:3) * integrator_velocity_factor
1386 
1387  write(file_unit, csv_format) myparticle%time, myparticle%dt, x(1:3), &
1388  u, payload(1:npayload), ipe, it_particles, &
1389  myparticle%index
1390 
1391  end subroutine output_particle
1392 
1393  !> convert to cartesian coordinates
1394  subroutine partcoord_to_cartesian(xp,xpcart)
1395  ! conversion of particle coordinate from cylindrical/spherical to cartesian coordinates done here
1396  ! NOT converting velocity components: TODO?
1397  ! Also: nullifying values lower than smalldouble
1399  use mod_geometry
1400 
1401  double precision, intent(in) :: xp(1:3)
1402  double precision, intent(out) :: xpcart(1:3)
1403 
1404  select case (coordinate)
1405  case (Cartesian,Cartesian_stretched)
1406  xpcart(1:3)=xp(1:3)
1407  case (cylindrical)
1408  xpcart(1)=xp(1)*cos(xp(phi_))
1409  xpcart(2)=xp(1)*sin(xp(phi_))
1410  xpcart(3)=xp(z_)
1411  case (spherical)
1412  xpcart(1)=xp(1)*sin(xp(2))*cos(xp(3))
1413  {^iftwod
1414  xpcart(2)=xp(1)*cos(xp(2))
1415  xpcart(3)=xp(1)*sin(xp(2))*sin(xp(3))}
1416  {^ifthreed
1417  xpcart(2)=xp(1)*sin(xp(2))*sin(xp(3))
1418  xpcart(3)=xp(1)*cos(xp(2))}
1419  case default
1420  write(*,*) "No converter for coordinate=",coordinate
1421  end select
1422 
1423  end subroutine partcoord_to_cartesian
1424 
1425  subroutine comm_particles()
1427 
1428  integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
1429  integer :: index
1430  integer :: tag_send, tag_receive, tag_send_p, tag_receive_p, send_buff, rcv_buff
1431  integer :: status(MPI_STATUS_SIZE)
1432  integer, dimension(0:npe-1) :: send_n_particles_to_ipe
1433  integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
1434  type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1435  type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
1436  double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1437  double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1438  integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
1439  integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_received_from_ipe
1440  integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
1441  integer :: destroy_n_particles_mype
1442  logical :: BC_applied
1443  integer, allocatable, dimension(:) :: sndrqst, rcvrqst
1444  integer :: isnd, ircv
1445  integer,dimension(npe_neighbors,nparticles_per_cpu_hi) :: payload_index
1446  send_n_particles_to_ipe(:) = 0
1447  receive_n_particles_from_ipe(:) = 0
1448  destroy_n_particles_mype = 0
1449 
1450  ! check if and where to send each particle, destroy if necessary
1451  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1452 
1453  ! first check if the particle should be destroyed because t>tmax in a fixed snapshot
1454  if ( .not.time_advance .and. particle(ipart)%self%time .gt. tmax_particles ) then
1455  destroy_n_particles_mype = destroy_n_particles_mype + 1
1456  particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1457  cycle
1458  end if
1459 
1460  ! Then check whether to destroy due to a user-defined condition
1461  if (associated(usr_destroy_particle)) then
1462  if (usr_destroy_particle(particle(ipart))) then
1463  destroy_n_particles_mype = destroy_n_particles_mype + 1
1464  particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1465  cycle
1466  end if
1467  end if
1468 
1469  ! is my particle still in the same igrid?
1470  if (.not.particle_in_igrid(ipart,particle(ipart)%igrid)) then
1471  call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
1472 
1473  ! destroy particle if out of domain (signalled by return value of -1)
1474  if (igrid_particle == -1 )then
1475  call apply_periodb(particle(ipart)%self,igrid_particle,ipe_particle,bc_applied)
1476  if (.not. bc_applied .or. igrid_particle == -1) then
1477  destroy_n_particles_mype = destroy_n_particles_mype + 1
1478  particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1479  cycle
1480  end if
1481  end if
1482 
1483  ! particle still present
1484  particle(ipart)%igrid = igrid_particle
1485  particle(ipart)%ipe = ipe_particle
1486 
1487  ! if we have more than one core, is it on another cpu?
1488  if (npe .gt. 1 .and. particle(ipart)%ipe .ne. mype) then
1489  send_n_particles_to_ipe(ipe_particle) = &
1490  send_n_particles_to_ipe(ipe_particle) + 1
1491  particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
1492  end if ! ipe_particle
1493 
1494  end if ! particle_in_grid
1495 
1496  end do ! ipart
1497 
1498  call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
1499 
1500  ! get out when only one core:
1501  if (npe == 1) return
1502 
1503  ! communicate amount of particles to be sent/received
1504  allocate( sndrqst(1:npe_neighbors), rcvrqst(1:npe_neighbors) )
1505  sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1506  isnd = 0; ircv = 0;
1507  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1508  tag_send = mype * npe + ipe
1509  tag_receive = ipe * npe + mype
1510  isnd = isnd + 1; ircv = ircv + 1;
1511  call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer, &
1512  ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1513  call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer, &
1514  ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1515  end do
1516  call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1517  call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1518  deallocate( sndrqst, rcvrqst )
1519 
1520  ! Communicate index of the particles to be sent/received
1521  allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
1522  sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1523  isnd = 0; ircv = 0;
1524  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1525  if (send_n_particles_to_ipe(ipe) > 0) then
1526  tag_send = mype * npe + ipe
1527  isnd = isnd + 1
1528  call mpi_isend(particle_index_to_be_sent_to_ipe(:,ipe),send_n_particles_to_ipe(ipe),mpi_integer, &
1529  ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1530  end if
1531  if (receive_n_particles_from_ipe(ipe) > 0) then
1532  tag_receive = ipe * npe + mype
1533  ircv = ircv + 1
1534  call mpi_irecv(particle_index_to_be_received_from_ipe(:,ipe),receive_n_particles_from_ipe(ipe),mpi_integer, &
1535  ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1536  end if
1537  end do
1538  call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1539  call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1540  deallocate( sndrqst, rcvrqst )
1541 
1542  ! Send and receive the data of the particles
1543  allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
1544  sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1545  isnd = 0; ircv = 0;
1546  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1547 
1548  ! should i send some particles to ipe?
1549  if (send_n_particles_to_ipe(ipe) .gt. 0) then
1550 
1551  do ipart = 1, send_n_particles_to_ipe(ipe)
1552  tag_send = mype * npe + ipe + ipart
1553  isnd = isnd+1
1554  call mpi_isend(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self, &
1555  1,type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1556  end do ! ipart
1557  end if ! send .gt. 0
1558 
1559  ! should i receive some particles from ipe?
1560  if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1561 
1562  do ipart = 1, receive_n_particles_from_ipe(ipe)
1563  tag_receive = ipe * npe + mype + ipart
1564  ircv = ircv+1
1565  index = particle_index_to_be_received_from_ipe(ipart,ipe)
1566  allocate(particle(index)%self)
1567  call mpi_irecv(particle(index)%self,1,type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1568  end do ! ipart
1569 
1570  end if ! receive .gt. 0
1571  end do ! ipe
1572  call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1573  call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1574  deallocate( sndrqst, rcvrqst )
1575 
1576  ! Send and receive the payloads
1577  ! NON-BLOCKING: one communication for each particle. Needed for whatever reason
1578  allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
1579  sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1580  isnd = 0; ircv = 0;
1581  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1582 
1583  ! should i send some particles to ipe?
1584  if (send_n_particles_to_ipe(ipe) .gt. 0) then
1585 
1586  do ipart = 1, send_n_particles_to_ipe(ipe)
1587  tag_send = mype * npe + ipe + ipart
1588  isnd = isnd+1
1589  call mpi_isend(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload), &
1590  npayload,mpi_double_precision,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1591  end do ! ipart
1592  end if ! send .gt. 0
1593 
1594  ! should i receive some particles from ipe?
1595  if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1596  do ipart = 1, receive_n_particles_from_ipe(ipe)
1597  tag_receive = ipe * npe + mype + ipart
1598  ircv = ircv+1
1599  index = particle_index_to_be_received_from_ipe(ipart,ipe)
1600  allocate(particle(index)%payload(npayload))
1601  call mpi_irecv(particle(index)%payload(1:npayload),npayload, &
1602  mpi_double_precision,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1603  end do ! ipart
1604  end if ! receive .gt. 0
1605  end do ! ipe
1606  call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1607  call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1608  deallocate( sndrqst, rcvrqst )
1609 
1610  ! Deallocate sent particles
1611  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1612  if (send_n_particles_to_ipe(ipe) .gt. 0) then
1613  do ipart = 1, send_n_particles_to_ipe(ipe)
1614  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
1615  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
1616  call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
1617  end do ! ipart
1618  end if ! send .gt. 0
1619  end do
1620 
1621  ! Relocate received particles
1622  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1623  if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1624  do ipart = 1, receive_n_particles_from_ipe(ipe)
1625  index = particle_index_to_be_received_from_ipe(ipart,ipe)
1626  call push_particle_into_particles_on_mype(index)
1627  ! since we don't send the igrid, need to re-locate it
1628  call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
1629  particle(index)%igrid = igrid_particle
1630  particle(index)%ipe = ipe_particle
1631  end do ! ipart
1632  end if ! receive .gt. 0
1633  end do ! ipe
1634 
1635  end subroutine comm_particles
1636 
1637  subroutine comm_particles_global()
1639 
1640  integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
1641  integer :: index
1642  integer :: tag_send, tag_receive, send_buff, rcv_buff
1643  integer :: status(MPI_STATUS_SIZE)
1644  integer, dimension(0:npe-1) :: send_n_particles_to_ipe
1645  integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
1646  type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1647  type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
1648  double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1649  double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1650  integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
1651  integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
1652  integer :: destroy_n_particles_mype
1653  logical :: BC_applied
1654 
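      ! global redistribution: every rank may exchange particles with every other rank, using blocking sends and receives (comm_particles above only talks to neighbouring ranks, non-blockingly)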
1655  send_n_particles_to_ipe(:) = 0
1656  receive_n_particles_from_ipe(:) = 0
1657  destroy_n_particles_mype = 0
1658 
1659  ! check if and where to send each particle, relocate all of them (in case grid has changed)
1660  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1661 
1662  call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
1663 
1664  particle(ipart)%igrid = igrid_particle
1665  particle(ipart)%ipe = ipe_particle
1666 
1667  ! if we have more than one core, is it on another cpu?
1668  if (particle(ipart)%ipe .ne. mype) then
1669  send_n_particles_to_ipe(ipe_particle) = &
1670  send_n_particles_to_ipe(ipe_particle) + 1
1671  particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
1672  end if ! ipe_particle
1673 
1674  end do ! ipart
1675 
1676  ! get out when only one core:
1677  if (npe == 1) return
1678 
1679  ! communicate amount of particles to be sent/received
1680  do ipe=0,npe-1;if (ipe .eq. mype) cycle;
1681  tag_send = mype * npe + ipe
1682  tag_receive = ipe * npe + mype
1683  call mpi_send(send_n_particles_to_ipe(ipe),1,mpi_integer,ipe,tag_send,icomm,ierrmpi)
1684  call mpi_recv(receive_n_particles_from_ipe(ipe),1,mpi_integer,ipe,tag_receive,icomm,status,ierrmpi)
1685  end do
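      ! every pair of ranks now knows how many particles to exchange in the data phase below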
1686 
1687  ! send and receive the data of the particles
1688  do ipe=0,npe-1;if (ipe .eq. mype) cycle;
1689  tag_send = mype * npe + ipe
1690  tag_receive = ipe * npe + mype
1691 
1692  ! should i send some particles to ipe?
1693  if (send_n_particles_to_ipe(ipe) .gt. 0) then
1694 
1695  ! create the send buffer
1696  do ipart = 1, send_n_particles_to_ipe(ipe)
1697  send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
1698  send_payload(1:npayload,ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
1699  end do ! ipart
1700  call mpi_send(send_particles,send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,ierrmpi)
1701  call mpi_send(send_payload,npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,ierrmpi)
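      ! the blocking sends have returned, so the local copies can be freed and the particles dropped from this rank's list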
1702  do ipart = 1, send_n_particles_to_ipe(ipe)
1703  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
1704  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
1705  call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
1706  end do ! ipart
1707 
1708  end if ! send .gt. 0
1709 
1710  ! should i receive some particles from ipe?
1711  if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1712 
1713  call mpi_recv(receive_particles,receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,status,ierrmpi)
1714  call mpi_recv(receive_payload,npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,status,ierrmpi)
1715 
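      ! unpack the received particles: local storage is allocated under each particle's global identity number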
1716  do ipart = 1, receive_n_particles_from_ipe(ipe)
1717 
1718  index = receive_particles(ipart)%index
1719  allocate(particle(index)%self)
1720  particle(index)%self = receive_particles(ipart)
1721  allocate(particle(index)%payload(npayload))
1722  particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart)
1723  call push_particle_into_particles_on_mype(index)
1724 
1725  ! since we don't send the igrid, need to re-locate it
1726  call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
1727  particle(index)%igrid = igrid_particle
1728  particle(index)%ipe = ipe_particle
1729 
1730  end do ! ipart
1731 
1732  end if ! receive .gt. 0
1733  end do ! ipe
1734 
1735  end subroutine comm_particles_global
1736 
1737  subroutine apply_periodb(particle,igrid_particle,ipe_particle,BC_applied)
1738  use mod_global_parameters
1739 
1740  type(particle_t), intent(inout) :: particle
1741  integer, intent(inout) :: igrid_particle, ipe_particle
1742  logical,intent(out) :: BC_applied
1743  integer :: idim
1744 
1745  bc_applied = .false.
1746  ! get out if we don't have any periodic BC:
1747  if (.not. any(periodb(1:ndim))) return
1748 
1749  ! go through dimensions and try re-inject the particle at the other side
1750  do idim=1,ndim
1751 
1752  if (.not. periodb(idim)) cycle
1753 
1754  select case(idim)
1755  {case (^d)
1756  if (particle%x(^d) .lt. xprobmin^d) then
1757  particle%x(^d) = particle%x(^d) + (xprobmax^d - xprobmin^d)
1758  bc_applied = .true.
1759  end if
1760  if (particle%x(^d) .ge. xprobmax^d) then
1761  particle%x(^d) = particle%x(^d) - (xprobmax^d - xprobmin^d)
1762  bc_applied = .true.
1763  end if
1764  \}
1765  end select
1766 
1767  end do
1768 
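      ! the wrapped position may belong to a different grid block, possibly on another rank: locate it again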
1769  call find_particle_ipe(particle%x,igrid_particle,ipe_particle)
1770 
1771  end subroutine apply_periodb
1772 
1773  !> clean up destroyed particles on all cores
1774  subroutine destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed)
1775  use mod_global_parameters
1776 
1777  integer, intent(in) :: destroy_n_particles_mype
1778  integer, dimension(1:destroy_n_particles_mype), intent(in) :: particle_index_to_be_destroyed
1779  type(particle_t), dimension(destroy_n_particles_mype):: destroy_particles_mype
1780  double precision, dimension(npayload,destroy_n_particles_mype):: destroy_payload_mype
1781  integer :: iipart,ipart,destroy_n_particles
1782 
1783  destroy_n_particles = 0
1784 
1785  ! append the particle to list of destroyed particles
1786  do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
1787  destroy_particles_mype(iipart) = particle(ipart)%self
1788  destroy_payload_mype(1:npayload,iipart) = particle(ipart)%payload(1:npayload)
1789  end do
1790 
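      ! record the destroyed particles in the ensemble output before their storage is freed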
1791  call output_ensemble(destroy_n_particles_mype,destroy_particles_mype,destroy_payload_mype,'destroy')
1792 
1793  if (npe > 1) then
1794  call mpi_allreduce(destroy_n_particles_mype,destroy_n_particles,1,mpi_integer,mpi_sum,icomm,ierrmpi)
1795  else
1796  destroy_n_particles = destroy_n_particles_mype
1797  end if
1798 
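      ! reduce the global particle counter by the total number destroyed across all ranks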
1799  nparticles = nparticles - destroy_n_particles
1800 
1801  do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
1802  particle(ipart)%igrid = -1
1803  particle(ipart)%ipe = -1
1804  if(time_advance) then
1805  write(*,*) particle(ipart)%self%time, ': particle',ipart,' has left at it ',it_particles,' on pe', mype
1806  write(*,*) particle(ipart)%self%time, ': particles remaining:',nparticles, '(total)', nparticles_on_mype-1, 'on pe', mype
1807  endif
1808  deallocate(particle(ipart)%self)
1809  deallocate(particle(ipart)%payload)
1810  call pull_particle_from_particles_on_mype(ipart)
1811  end do
1812 
1813  end subroutine destroy_particles
1814 
1815  subroutine push_particle_into_particles_on_mype(ipart)
1816  implicit none
1817 
1818  integer, intent(in) :: ipart
1819 
1820  nparticles_on_mype = nparticles_on_mype + 1
1821  particles_on_mype(nparticles_on_mype) = ipart
1822 
1823  end subroutine push_particle_into_particles_on_mype
1824 
1825  subroutine pull_particle_from_particles_on_mype(ipart)
1826  implicit none
1827 
1828  integer, intent(in) :: ipart
1829  integer :: i
1830 
1831  do i=1,nparticles_on_mype
1832  if (particles_on_mype(i) == ipart) then
1833  particles_on_mype(i) = particles_on_mype(nparticles_on_mype)
1834  exit
1835  end if
1836  end do
1837 
1838  nparticles_on_mype = nparticles_on_mype - 1
1839 
1840  end subroutine pull_particle_from_particles_on_mype
1841 
1842 end module mod_particle_base
subroutine mpistop(message)
Exit MPI-AMRVAC with an error message.
Definition: comm_lib.t:194
Module for physical and numeric constants.
Definition: mod_constants.t:2
Module with basic grid data structures.
Definition: mod_forest.t:2
type(tree_node_ptr), dimension(:^d &), allocatable, save tree_root
Pointers to the coarse grid.
Definition: mod_forest.t:29
Module with geometry-related routines (e.g., divergence, curl)
Definition: mod_geometry.t:2
integer coordinate
Definition: mod_geometry.t:6
integer, parameter spherical
Definition: mod_geometry.t:10
integer, parameter cartesian
Definition: mod_geometry.t:7
integer, parameter cylindrical
Definition: mod_geometry.t:9
integer, parameter cartesian_expansion
Definition: mod_geometry.t:11
integer, parameter cartesian_stretched
Definition: mod_geometry.t:8
subroutine curlvector(qvec, ixIL, ixOL, curlvec, idirmin, idirmin0, ndir0, fourthorder)
Calculate curl of a vector qvec within ixL Options to employ standard second order CD evaluations use...
Definition: mod_geometry.t:626
This module contains definitions of global parameters and variables and some generic functions/subrou...
type(state), pointer block
Block pointer for using one block and its previous state.
logical nocartesian
IO switches for conversion.
integer ixghi
Upper index of grid block arrays.
integer, parameter unitpar
file handle for IO
double precision global_time
The global simulation time.
double precision time_max
End time for the simulation.
integer snapshotini
Resume from the snapshot with this index.
integer, parameter ndim
Number of spatial dimensions for grid variables.
character(len=std_len), dimension(:), allocatable par_files
Which par files are used as input.
integer icomm
The MPI communicator.
integer mype
The rank of the current MPI task.
integer, dimension(:), allocatable, parameter d
double precision length_convert_factor
integer ndir
Number of spatial dimensions (components) for vector variables.
integer ixm
the mesh range (within a block with ghost cells)
integer ierrmpi
A global MPI error return code.
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
integer npe
The number of MPI tasks.
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
logical, dimension(ndim) periodb
True for dimensions with periodic boundaries.
double precision c_norm
Normalised speed of light.
logical b0field
split magnetic field as background B0 field
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
integer nghostcells
Number of ghost cells surrounding a grid.
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
character(len=std_len) base_filename
Base file name for simulation output, which will be followed by a number.
integer r_
Indices for cylindrical coordinates FOR TESTS, negative value when not used:
double precision, dimension(ndim) dxlevel
integer, parameter ixglo
Lower index of grid block arrays (always 1)
Module with shared functionality for all the particle movers.
subroutine get_maxwellian_velocity(v, velocity)
double precision, dimension(3) integrator_velocity_factor
Normalization factor for velocity in the integrator.
integer nparticles_per_cpu_hi
Maximum number of particles in one processor.
integer nparticleshi
Maximum number of particles.
pure subroutine limit_dt_endtime(t_left, dt_p)
integer num_particles
Number of particles.
double precision const_dt_particles
If positive, a constant time step for the particles.
subroutine output_individual()
integer n_output_destroyed
Output count for ensembles of destroyed particles.
subroutine ipe_cf(iD, igrid, ipe_is_neighbor)
integer it_particles
Iteration number of particles.
subroutine fields_from_mhd(igrid, w_mhd, w_part)
Determine fields from MHD variables.
subroutine particles_params_read(files)
Read this module's parameters from a file.
double precision min_particle_time
Minimum time of all particles.
subroutine output_ensemble(send_n_particles_for_output, send_particles, send_payload, typefile)
subroutine get_uniform_pos(x)
logical write_individual
Whether to write individual particle output (followed particles)
subroutine write_particle_output()
subroutine init_particles_com()
Initialise communicators for particles.
logical function particle_in_igrid(ipart, igrid)
Quick check if particle is still in igrid.
integer, dimension(:), allocatable particles_on_mype
Array of identity numbers of particles in current processor.
double precision dtsave_particles
Time interval to save particles.
subroutine output_particle(myparticle, payload, ipe, file_unit)
integer ngridvars
Number of variables for grid field.
double precision particles_etah
subroutine destroy_particles(destroy_n_particles_mype, particle_index_to_be_destroyed)
clean up destroyed particles on all cores
integer nusrpayload
Number of user-defined payload variables for a particle.
subroutine select_active_particles(end_time)
subroutine write_particles_snapshot()
double precision particles_eta
Resistivity.
subroutine particles_finish()
Finalize the particles module.
logical write_snapshot
Whether to write particle snapshots.
pure subroutine get_lfac(u, lfac)
Get Lorentz factor from relativistic momentum.
subroutine find_particle_ipe(x, igrid_particle, ipe_particle)
subroutine pull_particle_from_particles_on_mype(ipart)
subroutine init_particles_output()
double precision dtheta
pure subroutine get_lfac_from_velocity(v, lfac)
Get Lorentz factor from velocity.
integer integrator
Integrator to be used for particles.
integer, dimension(:), allocatable ep
Variable index for electric field.
subroutine handle_particles()
Let particles evolve in time. The routine also handles grid variables and output.
subroutine fill_gridvars_default
This routine fills the particle grid variables with the default for mhd, i.e. only E and B.
character(len=400) csv_header
Header string used in CSV files.
subroutine ipe_fc(iD, igrid, ipe_is_neighbor)
subroutine finish_gridvars()
Deallocate grid variables for particles.
subroutine particle_get_current(w, ixIL, ixOL, idirmin, current)
Calculate idirmin and the idirmin:3 components of the common current array make sure that dxlevel(^D)...
integer npayload
Number of total payload variables for a particle.
integer, parameter unitparticles
integer, dimension(:), allocatable particles_active_on_mype
Array of identity numbers of active particles in current processor.
logical relativistic
Use a relativistic particle mover?
character(len=name_len) integrator_type_particles
String describing the particle integrator type.
subroutine set_neighbor_ipe()
subroutine time_spent_on_particles()
subroutine get_vec(ix, igrid, x, tloc, vec)
logical write_ensemble
Whether to write ensemble output.
integer downsample_particles
Particle downsampling factor in CSV output.
subroutine locate_particle(index, igrid_particle, ipe_particle)
subroutine push_particle_into_particles_on_mype(ipart)
subroutine read_from_snapshot()
integer nparticles_active_on_mype
Number of active particles in current processor.
procedure(sub_define_additional_gridvars), pointer particles_define_additional_gridvars
subroutine partcoord_to_cartesian(xp, xpcart)
convert to cartesian coordinates
integer nparticles
Identity number and total number of particles.
integer nparticles_on_mype
Number of particles in current processor.
integer, dimension(:), allocatable bp
Variable index for magnetic field.
subroutine comm_particles()
double precision tmax_particles
Time limit of particles.
type(particle_ptr), dimension(:), allocatable particle
procedure(fun_destroy), pointer usr_destroy_particle
integer, dimension(:), allocatable vp
Variable index for fluid velocity.
procedure(sub_integrate), pointer particles_integrate
subroutine cross(a, b, c)
subroutine apply_periodb(particle, igrid_particle, ipe_particle, BC_applied)
procedure(sub_fill_gridvars), pointer particles_fill_gridvars
double precision t_next_output
Time to write next particle output.
subroutine advance_particles(end_time, steps_taken)
Routine handles the particle evolution.
integer ndefpayload
Number of default payload variables for a particle.
subroutine particle_base_init()
Give initial values to parameters.
integer, dimension(:), allocatable ipe_neighbor
integer igrid_working
set the current igrid for the particle integrator:
character(len=60) csv_format
Format string used in CSV files.
integer n_output_ensemble
Output count for ensembles.
integer, dimension(:), allocatable jp
Variable index for current.
subroutine append_to_snapshot(myparticle, mypayload)
integer ipart_working
set the current ipart for the particle integrator:
double precision particles_cfl
Particle CFL safety factor.
subroutine ipe_srl(iD, igrid, ipe_is_neighbor)
procedure(sub_fill_additional_gridvars), pointer particles_fill_additional_gridvars
logical function particle_in_domain(x)
Check if particle is inside computational domain.
character(len=name_len) physics_type_particles
String describing the particle physics type.
subroutine init_gridvars()
Initialize grid variables for particles.
character(len=128) function make_particle_filename(tout, index, type)
subroutine comm_particles_global()
subroutine read_particles_snapshot(file_exists)
subroutine interpolate_var(igrid, ixIL, ixOL, gf, x, xloc, gfloc)
This module defines the procedures of a physics module. It contains function pointers for the various...
Definition: mod_physics.t:4
Module for pseudo random number generation. The internal pseudo random generator is the xoroshiro128p...
Definition: mod_random.t:3
Writes D-1 slice, can do so in various formats, depending on slice_type.
Definition: mod_slice.t:2
subroutine get_igslice(dir, x, igslice)
Definition: mod_slice.t:1108
double precision tpartc_int_0
Definition: mod_timing.t:13
double precision timeio_tot
Definition: mod_timing.t:10
double precision tpartc_com
Definition: mod_timing.t:12
double precision tpartc
Definition: mod_timing.t:12
double precision tpartc0
Definition: mod_timing.t:13
double precision tpartc_grid_0
Definition: mod_timing.t:13
double precision tpartc_io
Definition: mod_timing.t:12
double precision timeloop
Definition: mod_timing.t:11
double precision tpartc_com0
Definition: mod_timing.t:13
double precision tpartc_grid
Definition: mod_timing.t:12
double precision tpartc_io_0
Definition: mod_timing.t:13
double precision tpartc_int
Definition: mod_timing.t:12
Module with all the methods that users can customize in AMRVAC.
procedure(particle_analytic), pointer usr_particle_analytic
procedure(particle_fields), pointer usr_particle_fields
Pointer to a tree_node.
Definition: mod_forest.t:6