MPI-AMRVAC  3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_particle_base.t
1 !> Module with shared functionality for all the particle movers
2  module mod_particle_base
3  use mod_global_parameters, only: name_len, std_len
4  use mod_physics
5  use mod_random
6  use mod_constants
7  use mod_comm_lib, only: mpistop
8 
9  !> String describing the particle physics type
10  character(len=name_len) :: physics_type_particles = ""
11  !> String describing the particle integrator type
12  character(len=name_len) :: integrator_type_particles = ""
13  !> Header string used in CSV files
14  character(len=400) :: csv_header
15  !> Format string used in CSV files
16  character(len=60) :: csv_format
17  !> Maximum number of particles
18  integer :: nparticleshi
19  !> Maximum number of particles in one processor
20  integer :: nparticles_per_cpu_hi
21  !> Number of default payload variables for a particle
22  integer :: ndefpayload
23  !> Number of user-defined payload variables for a particle
24  integer :: nusrpayload
25  !> Number of total payload variables for a particle
26  integer :: npayload
27  !> Number of variables for grid field
28  integer :: ngridvars
29  !> Number of particles
30  integer :: num_particles
31  !> Time limit of particles
32  double precision :: tmax_particles
33  !> Minimum time of all particles
34  double precision :: min_particle_time
35  !> Time interval to save particles
36  double precision :: dtsave_particles
37  !> If positive, a constant time step for the particles
38  double precision :: const_dt_particles
39  !> Particle CFL safety factor
40  double precision :: particles_cfl
41  !> Time to write next particle output
42  double precision :: t_next_output
43  !> Whether to write individual particle output (followed particles)
44  logical :: write_individual
45  !> Whether to write ensemble output
46  logical :: write_ensemble
47  !> Whether to write particle snapshots
48  logical :: write_snapshot
49  !> Particle downsampling factor in CSV output
50  integer :: downsample_particles
51  !> Use a relativistic particle mover?
52  logical :: relativistic
53  !> Resistivity
54  double precision :: particles_eta, particles_etah
55  double precision :: dtheta
56  logical :: losses
57  !> Identity number and total number of particles
58  integer :: nparticles
59  !> Iteration number of particles
60  integer :: it_particles
61  !> Output count for ensembles of destroyed particles
62  integer :: n_output_destroyed
63 
64  ! these two save the list of neighboring cpus:
65  integer, allocatable :: ipe_neighbor(:)
66  integer :: npe_neighbors
67 
68  integer :: type_particle
69  !> set the current igrid for the particle integrator:
70  integer :: igrid_working
71  !> set the current ipart for the particle integrator:
72  integer :: ipart_working
73 
74  integer, parameter :: unitparticles=15
75 
76  !> Array of identity numbers of particles in current processor
77  integer, dimension(:), allocatable :: particles_on_mype
78  !> Array of identity numbers of active particles in current processor
79  integer, dimension(:), allocatable :: particles_active_on_mype
80  !> Number of particles in current processor
81  integer :: nparticles_on_mype
82  !> Number of active particles in current processor
83  integer :: nparticles_active_on_mype
84  !> Normalization factor for velocity in the integrator
85  double precision :: integrator_velocity_factor(3)
86  !> Integrator to be used for particles
87  integer :: integrator
88 
89  !> Variable index for magnetic field
90  integer, allocatable :: bp(:)
91  !> Variable index for electric field
92  integer, allocatable :: ep(:)
93  !> Variable index for fluid velocity
94  integer, allocatable :: vp(:)
95  !> Variable index for current
96  integer, allocatable :: jp(:)
97  !> Variable index for density
98  integer :: rhop
99 
100  type particle_ptr
101  type(particle_t), pointer :: self
102  !> extra information carried by the particle
103  double precision, allocatable :: payload(:)
104  integer :: igrid, ipe
105  end type particle_ptr
106 
107  type particle_t
108  !> follow the history of the particle
109  logical :: follow
110  !> identity number
111  integer :: index
112  !> charge
113  double precision :: q
114  !> mass
115  double precision :: m
116  !> time
117  double precision :: time
118  !> time step
119  double precision :: dt
120  !> coordinates
121  double precision :: x(3)
122  !> velocity, momentum, or special ones
123  double precision :: u(3)
124  end type particle_t
125 
126  ! Array containing all particles
127  type(particle_ptr), dimension(:), allocatable :: particle
128 
129  ! The pseudo-random number generator
130  type(rng_t) :: rng
131 
132  ! Pointers for user-defined stuff
133  procedure(sub_fill_gridvars), pointer :: particles_fill_gridvars => null()
134  procedure(sub_define_additional_gridvars), pointer :: particles_define_additional_gridvars => null()
135  procedure(sub_fill_additional_gridvars), pointer :: particles_fill_additional_gridvars => null()
136  procedure(sub_integrate), pointer :: particles_integrate => null()
137  procedure(fun_destroy), pointer :: usr_destroy_particle => null()
138 
139  abstract interface
140 
141  subroutine sub_fill_gridvars
142  end subroutine sub_fill_gridvars
143 
144  subroutine sub_define_additional_gridvars(ngridvars)
145  integer, intent(inout) :: ngridvars
146  end subroutine sub_define_additional_gridvars
147 
148  subroutine sub_fill_additional_gridvars
149  end subroutine sub_fill_additional_gridvars
150 
151  subroutine sub_integrate(end_time)
152  double precision, intent(in) :: end_time
153  end subroutine sub_integrate
154 
155  function fun_destroy(myparticle)
156  import particle_ptr
157  logical :: fun_destroy
158  type(particle_ptr), intent(in) :: myparticle
159  end function fun_destroy
160 
161  end interface
162 
163 contains
164 
165  !> Finalize the particles module
166  subroutine particles_finish()
168 
170 
171  ! Clean up particle type
172  call mpi_type_free(type_particle,ierrmpi)
173 
174  ! Clean up variables
175  deallocate(particle)
176  deallocate(ipe_neighbor)
177  deallocate(particles_on_mype)
178  deallocate(particles_active_on_mype)
179  deallocate(gridvars)
180 
181  end subroutine particles_finish
182 
183  !> Read this module's parameters from a file
184  subroutine particles_params_read(files)
185  use mod_global_parameters, only: unitpar
186  character(len=*), intent(in) :: files(:)
187  integer :: n
188 
189  namelist /particles_list/ physics_type_particles, nparticleshi, nparticles_per_cpu_hi, &
190  num_particles, ndefpayload, nusrpayload, tmax_particles, dtsave_particles, &
191  const_dt_particles, particles_cfl, write_individual, write_ensemble, &
192  write_snapshot, downsample_particles, relativistic, particles_eta, &
193  particles_etah, t_next_output, dtheta, losses, &
194  integrator_type_particles
195 
196  do n = 1, size(files)
197  open(unitpar, file=trim(files(n)), status="old")
198  read(unitpar, particles_list, end=111)
199 111 close(unitpar)
200  end do
201 
202  end subroutine particles_params_read
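 ! Illustrative only (not part of this file): the corresponding namelist block
 ! in a .par file would look roughly like
 !   &particles_list
 !     physics_type_particles = 'advect'
 !     num_particles          = 1000
 !     dtsave_particles       = 1.0d-2
 !     write_ensemble         = .true.
 !   /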
203 
204  !> Give initial values to parameters
205  subroutine particle_base_init()
207  integer :: n, idir
208  integer, parameter :: i8 = selected_int_kind(18)
209  integer(i8) :: seed(2)
210  character(len=20) :: strdata
211 
212  physics_type_particles = 'advect'
213  nparticleshi = 10000000
214  nparticles_per_cpu_hi = 1000000
215  num_particles = 1000
216  ndefpayload = 1
217  nusrpayload = 0
218  tmax_particles = bigdouble
219  dtsave_particles = bigdouble
220  const_dt_particles = -bigdouble
221  particles_cfl = 0.5
222  write_individual = .true.
223  write_ensemble = .true.
224  write_snapshot = .true.
225  downsample_particles = 1
226  relativistic = .false.
227  particles_eta = -1.d0
228  particles_etah = -1.d0
229  t_next_output = 0.0d0
230  dtheta = 2.0d0*dpi / 60.0d0
231  losses = .false.
232  nparticles = 0
233  it_particles = 0
234  n_output_destroyed = 0
235  nparticles_on_mype = 0
236  nparticles_active_on_mype = 0
237  integrator_velocity_factor(:) = 1.0d0
238  integrator_type_particles = 'Boris'
239 
240  call particles_params_read(par_files)
241 
242  ! If sampling, ndefpayload = nw:
243  if (physics_type_particles == 'sample') ndefpayload = nw
244  ! Total number of payloads
245  npayload = ndefpayload + nusrpayload
246 
247  ! initialise the random number generator
248  seed = [310952_i8, 8948923749821_i8]
249  call rng%set_seed(seed)
250 
251  allocate(particle(1:nparticleshi))
252  allocate(ipe_neighbor(0:npe-1))
253  allocate(particles_on_mype(nparticles_per_cpu_hi))
254  allocate(particles_active_on_mype(nparticles_per_cpu_hi))
255 
256  particles_on_mype(:) = 0
257  particles_active_on_mype(:) = 0
258 
259  ! Generate header for CSV files
260  csv_header = ' time, dt, x1, x2, x3, u1, u2, u3,'
261  ! If sampling, name the payloads like the fluid quantities
262  if (physics_type_particles == 'sample') then
263  do n = 1, nw
264  write(strdata,"(a,a,a)") ' ', trim(prim_wnames(n)), ','
265  csv_header = trim(csv_header) // trim(strdata)
266  end do
267  else ! Otherwise, payloads are called pl01, pl02, ...
268  do n = 1, ndefpayload
269  write(strdata,"(a,i2.2,a)") ' pl', n, ','
270  csv_header = trim(csv_header) // trim(strdata)
271  end do
272  end if
273  ! User-defined payloads are called usrpl01, usrpl02, ...
274  if (nusrpayload > 0) then
275  do n = 1, nusrpayload
276  write(strdata,"(a,i2.2,a)") ' usrpl', n, ','
277  csv_header = trim(csv_header) // trim(strdata)
278  end do
279  end if
280  csv_header = trim(csv_header) // ' ipe, iteration, index'
281 
282  ! Generate format string for CSV files
283  write(csv_format, '(A,I0,A,A)') '(', 8 + npayload, '(es14.6,", ")', &
284  'i8.7,", ",i11.10,", ",i8.7)'
285 
286  end subroutine particle_base_init
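 ! Worked example (illustrative): with ndefpayload=1, nusrpayload=0 and a
 ! non-'sample' physics type, the strings built above become
 !   csv_header = ' time, dt, x1, x2, x3, u1, u2, u3, pl01, ipe, iteration, index'
 !   csv_format = '(9(es14.6,", ")i8.7,", ",i11.10,", ",i8.7)'
 ! i.e. 8 + npayload floating-point columns (time, dt, x(3), u(3), payloads)
 ! followed by the three integer columns ipe, iteration and index.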
287 
288  !> Initialize communicators for particles
289  subroutine init_particles_com()
291 
292  integer :: oldtypes(0:7), offsets(0:7), blocklengths(0:7)
293 
294  oldtypes(0) = mpi_logical
295  oldtypes(1) = mpi_integer
296  oldtypes(2:7) = mpi_double_precision
297 
298  blocklengths(0:5)=1
299  blocklengths(6)=3
300  blocklengths(7)=3
301 
302  offsets(0) = 0
303  offsets(1) = size_logical * blocklengths(0)
304  offsets(2) = offsets(1) + size_int * blocklengths(1)
305  offsets(3) = offsets(2) + size_double * blocklengths(2)
306  offsets(4) = offsets(3) + size_double * blocklengths(3)
307  offsets(5) = offsets(4) + size_double * blocklengths(4)
308  offsets(6) = offsets(5) + size_double * blocklengths(5)
309  offsets(7) = offsets(6) + size_double * blocklengths(6)
310 
311  call mpi_type_struct(8,blocklengths,offsets,oldtypes,type_particle,ierrmpi)
312  call mpi_type_commit(type_particle,ierrmpi)
313 
314  end subroutine init_particles_com
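 ! Layout note (illustrative): the eight blocks above mirror the component
 ! order of type particle_t:
 !   block 0: follow (logical), block 1: index (integer),
 !   blocks 2-5: q, m, time, dt (one double each),
 !   blocks 6-7: x(1:3) and u(1:3) (three doubles each).
 ! Offsets are accumulated by hand from size_logical/size_int/size_double; on
 ! compilers that pad the derived type differently, building the type with the
 ! MPI-3 routine mpi_type_create_struct and mpi_get_address-based
 ! displacements would be the more portable, non-deprecated alternative.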
315 
316  subroutine get_maxwellian_velocity(v, velocity)
317  double precision, intent(out) :: v(3)
318  double precision, intent(in) :: velocity
319  double precision :: vtmp(3), vnorm
320 
321  vtmp(1) = velocity * rng%normal()
322  vtmp(2) = velocity * rng%normal()
323  vtmp(3) = velocity * rng%normal()
324  vnorm = norm2(vtmp)
325  v = rng%sphere(vnorm)
326  end subroutine get_maxwellian_velocity
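 ! Note (illustrative): three independent normal deviates with standard
 ! deviation 'velocity' set the speed, which is then given a random
 ! orientation through rng%sphere, e.g.
 !   call get_maxwellian_velocity(v, vth)   ! |v| follows a Maxwellian speed
 !                                          ! distribution with thermal speed vth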
327 
328  subroutine get_uniform_pos(x)
330  double precision, intent(out) :: x(3)
331 
332  call rng%unif_01_vec(x)
333 
334  {^d&x(^d) = xprobmin^d + x(^d) * (xprobmax^d - xprobmin^d)\}
335  x(ndim+1:) = 0.0d0
336  end subroutine get_uniform_pos
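 ! Usage sketch (illustrative): seed a tracer at a uniformly random position
 ! inside the domain (components beyond ndim are zeroed by the routine):
 !   double precision :: xrand(3)
 !   call get_uniform_pos(xrand)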
337 
338  !> Initialize grid variables for particles
339  subroutine init_gridvars()
341  use mod_timing
342 
343  integer :: igrid, iigrid
344 
345  tpartc_grid_0=mpi_wtime()
346 
347  do iigrid=1,igridstail; igrid=igrids(iigrid);
348  allocate(gridvars(igrid)%w(ixg^t,1:ngridvars),gridvars(igrid)%wold(ixg^t,1:ngridvars))
349  gridvars(igrid)%w = 0.0d0
350  gridvars(igrid)%wold = 0.0d0
351  end do
352 
353  call particles_fill_gridvars()
354  if (associated(particles_define_additional_gridvars)) then
355  call particles_fill_additional_gridvars()
356  end if
357 
358  tpartc_grid = tpartc_grid + (mpi_wtime()-tpartc_grid_0)
359 
360  end subroutine init_gridvars
361 
362  !> Deallocate grid variables for particles
363  subroutine finish_gridvars()
365 
366  integer :: iigrid, igrid
367 
368  do iigrid=1,igridstail; igrid=igrids(iigrid);
369  deallocate(gridvars(igrid)%w,gridvars(igrid)%wold)
370  end do
371 
372  end subroutine finish_gridvars
373 
374  !> This routine fills the particle grid variables with the default for mhd, i.e. only E and B
375  subroutine fill_gridvars_default()
378 
379  integer :: igrid, iigrid
380  double precision :: E(ixG^T, ndir)
381  double precision :: B(ixG^T, ndir)
382 
383  do iigrid=1,igridstail; igrid=igrids(iigrid);
384  if (associated(usr_particle_fields)) then ! FILL ONLY E AND B
385  call usr_particle_fields(ps(igrid)%w, ps(igrid)%x, e, b)
386  gridvars(igrid)%w(ixg^t,ep(:)) = e
387  gridvars(igrid)%w(ixg^t,bp(:)) = b
388  else
389  call fields_from_mhd(igrid, ps(igrid)%w, gridvars(igrid)%w)
390  end if
391  end do
392 
393  end subroutine fill_gridvars_default
394 
395  !> Determine fields from MHD variables
396  subroutine fields_from_mhd(igrid, w_mhd, w_part)
398  use mod_geometry
399  integer, intent(in) :: igrid
400  double precision, intent(in) :: w_mhd(ixG^T,nw)
401  double precision, intent(inout) :: w_part(ixG^T,ngridvars)
402  integer :: idirmin, idir
403  double precision :: current(ixG^T,7-2*ndir:3)
404  double precision :: w(ixG^T,1:nw)
405 
406  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
407 
408  w(ixg^t,1:nw) = w_mhd(ixg^t,1:nw)
409  w_part(ixg^t,bp(1):bp(3)) = 0.0d0
410  w_part(ixg^t,ep(1):ep(3)) = 0.0d0
411 
412  call phys_to_primitive(ixg^ll,ixg^ll,w,ps(igrid)%x)
413 
414  ! fill with density:
415  w_part(ixg^t,rhop) = w(ixg^t,iw_rho)
416 
417  ! fill with velocity:
418  w_part(ixg^t,vp(:)) = w(ixg^t,iw_mom(:))
419 
420  ! fill with magnetic field:
421  if (b0field) then
422  do idir = 1, ndir
423  w_part(ixg^t,bp(idir))=w(ixg^t,iw_mag(idir))+ps(igrid)%B0(ixg^t,idir,0)
424  end do
425  else
426  w_part(ixg^t,bp(:)) = w(ixg^t,iw_mag(:))
427  end if
428 
429  ! fill with current
430  current = zero
431  call particle_get_current(w,ixg^ll,ixg^llim^d^lsub1,idirmin,current)
432  w_part(ixg^t,jp(:)) = current(ixg^t,:)
433 
434  ! fill with electric field
435  select case (coordinate)
436  case (Cartesian,Cartesian_stretched)
437  w_part(ixg^t,ep(1)) = w_part(ixg^t,bp(2)) * &
438  w(ixg^t,iw_mom(3)) - w_part(ixg^t,bp(3)) * &
439  w(ixg^t,iw_mom(2)) + particles_eta * current(ixg^t,1)
440  w_part(ixg^t,ep(2)) = w_part(ixg^t,bp(3)) * &
441  w(ixg^t,iw_mom(1)) - w_part(ixg^t,bp(1)) * &
442  w(ixg^t,iw_mom(3)) + particles_eta * current(ixg^t,2)
443  w_part(ixg^t,ep(3)) = w_part(ixg^t,bp(1)) * &
444  w(ixg^t,iw_mom(2)) - w_part(ixg^t,bp(2)) * &
445  w(ixg^t,iw_mom(1)) + particles_eta * current(ixg^t,3)
446  case (cylindrical)
447  w_part(ixg^t,ep(r_)) = w_part(ixg^t,bp(phi_)) * &
448  w(ixg^t,iw_mom(z_)) - w_part(ixg^t,bp(z_)) * &
449  w(ixg^t,iw_mom(phi_)) + particles_eta * current(ixg^t,r_)
450  w_part(ixg^t,ep(phi_)) = w_part(ixg^t,bp(z_)) * &
451  w(ixg^t,iw_mom(r_)) - w_part(ixg^t,bp(r_)) * &
452  w(ixg^t,iw_mom(z_)) + particles_eta * current(ixg^t,phi_)
453  w_part(ixg^t,ep(z_)) = w_part(ixg^t,bp(r_)) * &
454  w(ixg^t,iw_mom(phi_)) - w_part(ixg^t,bp(phi_)) * &
455  w(ixg^t,iw_mom(r_)) + particles_eta * current(ixg^t,z_)
456  end select
457 
458  ! Hall term
459  if (particles_etah > zero) then
460  select case (coordinate)
461  case (Cartesian,Cartesian_stretched)
462  w_part(ixg^t,ep(1)) = w_part(ixg^t,ep(1)) + particles_etah/w(ixg^t,iw_rho) * &
463  (current(ixg^t,2) * w_part(ixg^t,bp(3)) - current(ixg^t,3) * w_part(ixg^t,bp(2)))
464  w_part(ixg^t,ep(2)) = w_part(ixg^t,ep(2)) + particles_etah/w(ixg^t,iw_rho) * &
465  (-current(ixg^t,1) * w_part(ixg^t,bp(3)) + current(ixg^t,3) * w_part(ixg^t,bp(1)))
466  w_part(ixg^t,ep(3)) = w_part(ixg^t,ep(3)) + particles_etah/w(ixg^t,iw_rho) * &
467  (current(ixg^t,1) * w_part(ixg^t,bp(2)) - current(ixg^t,2) * w_part(ixg^t,bp(1)))
468  case (cylindrical)
469  w_part(ixg^t,ep(r_)) = w_part(ixg^t,ep(r_)) + particles_etah/w(ixg^t,iw_rho) * &
470  (current(ixg^t,phi_) * w_part(ixg^t,bp(z_)) - current(ixg^t,z_) * w_part(ixg^t,bp(phi_)))
471  w_part(ixg^t,ep(phi_)) = w_part(ixg^t,ep(phi_)) + particles_etah/w(ixg^t,iw_rho) * &
472  (-current(ixg^t,r_) * w_part(ixg^t,bp(z_)) + current(ixg^t,z_) * w_part(ixg^t,bp(r_)))
473  w_part(ixg^t,ep(z_)) = w_part(ixg^t,ep(z_)) + particles_etah/w(ixg^t,iw_rho) * &
474  (current(ixg^t,r_) * w_part(ixg^t,bp(phi_)) - current(ixg^t,phi_) * w_part(ixg^t,bp(r_)))
475  end select
476  end if
477 
478  end subroutine fields_from_mhd
479 
480  !> Calculate idirmin and the idirmin:3 components of the common current array
481  !> make sure that dxlevel(^D) is set correctly.
482  subroutine particle_get_current(w,ixI^L,ixO^L,idirmin,current)
484  use mod_geometry
485 
486  integer :: idirmin0
487  integer :: ixO^L, idirmin, ixI^L
488  double precision :: w(ixI^S,1:nw)
489  integer :: idir
490  ! For ndir=2 only 3rd component of J can exist, ndir=1 is impossible for MHD
491  double precision :: current(ixI^S,7-2*ndir:3),bvec(ixI^S,1:ndir)
492 
493  idirmin0 = 7-2*ndir
494 
495  if (b0field) then
496  do idir = 1, ndir
497  bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))+block%B0(ixi^s,idir,0)
498  end do
499  else
500  do idir = 1, ndir
501  bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))
502  end do
503  end if
504 
505  call curlvector(bvec,ixi^l,ixo^l,current,idirmin,idirmin0,ndir)
506 
507  end subroutine particle_get_current
508 
509  !> update grid variables for particles
510  subroutine update_gridvars()
512  use mod_timing
513 
514  integer :: igrid, iigrid
515 
516  tpartc_grid_0=mpi_wtime()
517 
518  do iigrid=1,igridstail; igrid=igrids(iigrid);
519  gridvars(igrid)%wold=gridvars(igrid)%w
520  end do
521 
522  call particles_fill_gridvars()
523  if (associated(particles_define_additional_gridvars)) then
524  call particles_fill_additional_gridvars()
525  end if
526 
527  tpartc_grid = tpartc_grid + (mpi_wtime()-tpartc_grid_0)
528 
529  end subroutine update_gridvars
530 
531  !> Let particles evolve in time. The routine also handles grid variables and
532  !> output.
533  subroutine handle_particles()
535  use mod_timing
536 
537  integer :: steps_taken, nparticles_left
538 
539  tpartc0 = mpi_wtime()
540 
541  call set_neighbor_ipe
542 
543  if (time_advance) then
545  call update_gridvars()
546  else
548  end if
549  tpartc_com0=mpi_wtime()
550  call comm_particles_global()
551  tpartc_com=tpartc_com + (mpi_wtime()-tpartc_com0)
552 
553 
554  ! main integration loop
555  do
556  if (tmax_particles >= t_next_output) then
557  call advance_particles(t_next_output, steps_taken)
558  tpartc_io_0 = mpi_wtime()
559  if (mype .eq. 0 .and. (.not. time_advance)) print*, "Writing particle output at time",t_next_output
560  call write_particle_output()
561  timeio_tot = timeio_tot+(mpi_wtime()-tpartc_io_0)
562  tpartc_io = tpartc_io+(mpi_wtime()-tpartc_io_0)
563 
564  ! If we are using a constant time step, this prevents multiple outputs
565  ! at the same time
568  else
569  call advance_particles(tmax_particles, steps_taken)
570 ! call write_particle_output()
571  exit
572  end if
573 
574  call mpi_allreduce(nparticles_on_mype, nparticles_left, 1, mpi_integer, &
575  mpi_sum, icomm, ierrmpi)
576  if (nparticles_left == 0 .and. convert) call mpistop('No particles left')
577 
578  end do
579 
580  tpartc = tpartc + (mpi_wtime() - tpartc0)
581 
582  end subroutine handle_particles
583 
584  !> Routine handles the particle evolution
585  subroutine advance_particles(end_time, steps_taken)
586  use mod_timing
588  double precision, intent(in) :: end_time !< Advance at most up to this time
589  integer, intent(out) :: steps_taken
590  integer :: n_active
591 
592  steps_taken = 0
593 
594  do
595  call select_active_particles(end_time)
596 
597  ! Determine total number of active particles
598  call mpi_allreduce(nparticles_active_on_mype, n_active, 1, mpi_integer, &
599  mpi_sum, icomm, ierrmpi)
600 
601  if (n_active == 0) exit
602 
603  tpartc_int_0=mpi_wtime()
604  call particles_integrate(end_time)
605  steps_taken = steps_taken + 1
606  tpartc_int=tpartc_int+(mpi_wtime()-tpartc_int_0)
607 
608  tpartc_com0=mpi_wtime()
609  call comm_particles()
610  tpartc_com=tpartc_com + (mpi_wtime()-tpartc_com0)
611 
613  end do
614 
615  end subroutine advance_particles
616 
617  pure subroutine limit_dt_endtime(t_left, dt_p)
618  double precision, intent(in) :: t_left
619  double precision, intent(inout) :: dt_p
620  double precision :: n_steps
621  double precision, parameter :: eps = 1d-10
622 
623  n_steps = t_left / dt_p
624 
625  if (n_steps < 1+eps) then
626  ! Last step
627  dt_p = t_left
628  else if (n_steps < 2-eps) then
629  ! Divide time left over two steps, to prevent one tiny time step
630  dt_p = 0.5d0 * t_left
631  end if
632 
633  end subroutine limit_dt_endtime
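 ! Worked example (illustrative): with dt_p = 1.0 and t_left = 1.05, n_steps =
 ! 1.05 lies between 1+eps and 2-eps, so dt_p becomes 0.525 and the remaining
 ! time is covered in two equal steps instead of 1.0 followed by a tiny 0.05;
 ! with t_left = 0.7, n_steps < 1+eps and dt_p is simply clipped to 0.7.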
634 
635  ! Get a generic vector quantity stored on the grid at the particle position
636  ! NOTE: DON'T GET E OR B IN THIS WAY, USE FUNCTIONS BELOW!
637  subroutine get_vec(ix,igrid,x,tloc,vec)
640 
641  integer,intent(in) :: ix(ndir) !< Indices in gridvars
642  integer,intent(in) :: igrid
643  double precision,dimension(3), intent(in) :: x
644  double precision, intent(in) :: tloc
645  double precision,dimension(ndir), intent(out) :: vec
646  double precision,dimension(ndir) :: vec1, vec2
647  double precision :: td
648  integer :: ic^D,idir
649 
650  if (associated(usr_particle_analytic)) then
651  call usr_particle_analytic(ix, x, tloc, vec)
652  else
653  do idir=1,ndir
654  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,ix(idir)), &
655  ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
656  end do
657  if (time_advance) then
658  do idir=1,ndir
659  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,ix(idir)), &
660  ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
661  end do
662  td = (tloc - global_time) / dt
663  vec(:) = vec(:) * (1.0d0 - td) + vec2(:) * td
664  end if
665  end if
666 
667  end subroutine get_vec
668 
669  ! Shorthand for getting specifically the B field at the particle position
670  subroutine get_bfield(igrid,x,tloc,b)
673 
674  integer,intent(in) :: igrid
675  double precision,dimension(3), intent(in) :: x
676  double precision, intent(in) :: tloc
677  double precision,dimension(ndir), intent(out) :: b
678  double precision,dimension(ndir) :: vec1, vec2, vec
679  double precision :: td
680  integer :: ic^D,idir
681 
682  if (associated(usr_particle_analytic)) then
683  call usr_particle_analytic(bp, x, tloc, vec)
684  else if (associated(usr_particle_fields)) then
685  call get_vec(bp, igrid, x, tloc, vec)
686  else
687  do idir=1,ndir
688  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,bp(idir)), &
689  ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
690  end do
691  if (time_advance) then
692  do idir=1,ndir
693  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,bp(idir)), &
694  ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
695  end do
696  td = (tloc - global_time) / dt
697  vec(:) = vec(:) * (1.0d0 - td) + vec2(:) * td
698  end if
699  end if
700 
701  b(:) = vec(:)
702 
703  end subroutine get_bfield
704 
705  ! Shorthand for getting specifically the E field at the particle position
706  subroutine get_efield(igrid,x,tloc,e)
709  use mod_geometry
710 
711  integer,intent(in) :: igrid
712  double precision,dimension(3), intent(in) :: x
713  double precision, intent(in) :: tloc
714  double precision,dimension(ndir), intent(out) :: e
715  double precision,dimension(ndir) :: vec1, vec2, vec
716  double precision,dimension(ndir) :: vfluid, b
717  double precision,dimension(ndir) :: current
718  double precision :: rho = 1.d0, rho2 = 1.d0
719  double precision :: td
720  integer :: ic^D,idir
721 
722  current(:) = 0.d0
723 
724  if (associated(usr_particle_analytic)) then
725  call usr_particle_analytic(ep, x, tloc, vec)
726  else if (associated(usr_particle_fields)) then
727  call get_vec(ep, igrid, x, tloc, vec)
728  else
729  call get_bfield(igrid, x, tloc, b)
730  call get_vec(vp, igrid, x, tloc, vfluid)
731  if (particles_eta > zero) then
732  call get_vec(jp, igrid, x, tloc, current)
733  end if
734  if (particles_etah > zero) then
735  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,rhop),ps(igrid)%x(ixg^t,1:ndim),x,rho)
736  if (time_advance) then
737  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,rhop),ps(igrid)%x(ixg^t,1:ndim),x,rho2)
738  td = (tloc - global_time) / dt
739  rho = rho * (1.0d0 - td) + rho2 * td
740  end if
741  end if
742 
743  ! Build E
744  select case (coordinate)
745  case (Cartesian,Cartesian_stretched)
746  vec(1) = -vfluid(2)*b(3)+vfluid(3)*b(2) + particles_eta*current(1) + particles_etah/rho*(current(2)*b(3) - current(3)*b(2))
747  vec(2) = vfluid(1)*b(3)-vfluid(3)*b(1) + particles_eta*current(2) + particles_etah/rho*(-current(1)*b(3) + current(3)*b(1))
748  vec(3) = -vfluid(1)*b(2)+vfluid(2)*b(1) + particles_eta*current(3) + particles_etah/rho*(current(1)*b(2) - current(2)*b(1))
749  case (cylindrical)
750  vec(r_) = -vfluid(phi_)*b(z_)+vfluid(z_)*b(phi_) + particles_eta*current(r_) + particles_etah/rho*(current(phi_)*b(z_) - current(z_)*b(phi_))
751  vec(phi_) = vfluid(r_)*b(z_)-vfluid(z_)*b(r_) + particles_eta*current(phi_) + particles_etah/rho*(-current(r_)*b(z_) + current(z_)*b(r_))
752  vec(z_) = -vfluid(r_)*b(phi_)+vfluid(phi_)*b(r_) + particles_eta*current(z_) + particles_etah/rho*(current(r_)*b(phi_) - current(phi_)*b(r_))
753  end select
754  end if
755 
756  e(:) = vec(:)
757 
758  end subroutine get_efield
759 
760  !> Get Lorentz factor from relativistic momentum
761  pure subroutine get_lfac(u,lfac)
762  use mod_global_parameters, only: ndir, c_norm
763  double precision,dimension(3), intent(in) :: u
764  double precision, intent(out) :: lfac
765 
766  if (relativistic) then
767  lfac = sqrt(1.0d0 + sum(u(:)**2)/c_norm**2)
768  else
769  lfac = 1.0d0
770  end if
771 
772  end subroutine get_lfac
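 ! Worked example (illustrative): with u interpreted as momentum per unit mass,
 ! u = (3*c_norm, 0, 0) gives lfac = sqrt(1 + 9) = sqrt(10) ~ 3.16 in the
 ! relativistic case, while the non-relativistic branch always returns 1.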
773 
774  !> Get Lorentz factor from velocity
775  pure subroutine get_lfac_from_velocity(v,lfac)
776  use mod_global_parameters, only: ndir, c_norm
777  double precision,dimension(3), intent(in) :: v
778  double precision, intent(out) :: lfac
779 
780  if (relativistic) then
781  lfac = 1.0d0 / sqrt(1.0d0 - sum(v(:)**2)/c_norm**2)
782  else
783  lfac = 1.0d0
784  end if
785 
786  end subroutine get_lfac_from_velocity
787 
788  subroutine cross(a,b,c)
790  use mod_geometry
791  double precision, dimension(ndir), intent(in) :: a,b
792  double precision, dimension(ndir), intent(out) :: c
793 
794  ! ndir needs to be three for this to work!!!
795  if(ndir==3) then
796  select case(coordinate)
797  case (Cartesian,Cartesian_stretched)
798  c(1) = a(2)*b(3) - a(3)*b(2)
799  c(2) = a(3)*b(1) - a(1)*b(3)
800  c(3) = a(1)*b(2) - a(2)*b(1)
801  case (cylindrical)
802  c(r_) = a(phi_)*b(z_) - a(z_)*b(phi_)
803  c(phi_) = a(z_)*b(r_) - a(r_)*b(z_)
804  c(z_) = a(r_)*b(phi_) - a(phi_)*b(r_)
805  case default
806  call mpistop('geometry not implemented in cross(a,b,c)')
807  end select
808  else
809  call mpistop("cross in particles needs to be run with three components!")
810  end if
811 
812  end subroutine cross
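 ! Usage sketch (illustrative, Cartesian): the cross product of the x and y
 ! unit vectors returns the z unit vector,
 !   call cross([1.d0,0.d0,0.d0], [0.d0,1.d0,0.d0], c)   ! c = (0,0,1)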
813 
814  subroutine interpolate_var(igrid,ixI^L,ixO^L,gf,x,xloc,gfloc)
816 
817  integer, intent(in) :: igrid,ixI^L, ixO^L
818  double precision, intent(in) :: gf(ixI^S)
819  double precision, intent(in) :: x(ixI^S,1:ndim)
820  double precision, intent(in) :: xloc(1:3)
821  double precision, intent(out) :: gfloc
822  double precision :: xd^D, myq, dx1
823  {^iftwod
824  double precision :: c00, c10
825  }
826  {^ifthreed
827  double precision :: c0, c1, c00, c10, c01, c11
828  }
829  integer :: ic^D, ic1^D, ic2^D, idir
830 
831  {
832  if(stretch_type(^d)==stretch_uni) then
833  ! uniform stretch from xprobmin
834  ic^d = ceiling(dlog((xloc(^d)-xprobmin^d)*(qstretch(ps(igrid)%level,^d)-one)/&
835  dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
836  (node(pig^d_,igrid)-1)*block_nx^d+nghostcells
837  else if(stretch_type(^d)==stretch_symm) then
838  ! symmetric stretch about 0.5*(xprobmin+xprobmax)
839  if(xloc(^d)<xprobmin^d+xstretch^d) then
840  ! stretch to left from xprobmin+xstretch
841  ic^d = block_nx^d-(int(dlog((xprobmin^d+xstretch^d-xloc(^d))*(qstretch(ps(igrid)%level,^d)-one)/&
842  dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
843  (nstretchedblocks(ps(igrid)%level,^d)/2-node(pig^d_,igrid))*block_nx^d)+nghostcells
844  else if(xloc(^d)>xprobmax^d-xstretch^d) then
845  ! stretch to right from xprobmax-xstretch
846  ic^d = ceiling(dlog((xloc(^d)-xprobmax^d+xstretch^d)*(qstretch(ps(igrid)%level,^d)-one)/&
847  dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
848  ((node(pig^d_,igrid)-1+nstretchedblocks(ps(igrid)%level,^d)/2)*block_nx^d-&
849  domain_nx^d*2**(ps(igrid)%level-1))+nghostcells
850  else
851  ! possible non-stretched central part
852  ic^d = int((xloc(^d)-rnode(rpxmin^d_,igrid))/rnode(rpdx^d_,igrid)) + 1 + nghostcells
853  end if
854  else
855  ! no stretch
856  ic^d = int((xloc(^d)-rnode(rpxmin^d_,igrid))/rnode(rpdx^d_,igrid)) + 1 + nghostcells
857  end if
858  \}
859 
860  ! linear interpolation:
861  {
862  if (x({ic^dd},^d) .lt. xloc(^d)) then
863  ic1^d = ic^d
864  else
865  ic1^d = ic^d -1
866  end if
867  ic2^d = ic1^d + 1
868  \}
869 
870  {^d&
871  if(ic1^d.lt.ixglo^d+1 .or. ic2^d.gt.ixghi^d-1) then
872  print *, 'direction: ',^d
873  print *, 'position: ',xloc(1:ndim)
874  print *, 'indices:', ic1^d,ic2^d
875  call mpistop('Trying to interpolate from out of grid!')
876  end if
877  \}
878 
879  {^ifoned
880  xd1 = (xloc(1)-x(ic11,1)) / (x(ic21,1) - x(ic11,1))
881  gfloc = gf(ic11) * (1.0d0 - xd1) + gf(ic21) * xd1
882  }
883  {^iftwod
884  xd1 = (xloc(1)-x(ic11,ic12,1)) / (x(ic21,ic12,1) - x(ic11,ic12,1))
885  xd2 = (xloc(2)-x(ic11,ic12,2)) / (x(ic11,ic22,2) - x(ic11,ic12,2))
886  c00 = gf(ic11,ic12) * (1.0d0 - xd1) + gf(ic21,ic12) * xd1
887  c10 = gf(ic11,ic22) * (1.0d0 - xd1) + gf(ic21,ic22) * xd1
888  gfloc = c00 * (1.0d0 - xd2) + c10 * xd2
889  }
890  {^ifthreed
891  xd1 = (xloc(1)-x(ic11,ic12,ic13,1)) / (x(ic21,ic12,ic13,1) - x(ic11,ic12,ic13,1))
892  xd2 = (xloc(2)-x(ic11,ic12,ic13,2)) / (x(ic11,ic22,ic13,2) - x(ic11,ic12,ic13,2))
893  xd3 = (xloc(3)-x(ic11,ic12,ic13,3)) / (x(ic11,ic12,ic23,3) - x(ic11,ic12,ic13,3))
894 
895  c00 = gf(ic11,ic12,ic13) * (1.0d0 - xd1) + gf(ic21,ic12,ic13) * xd1
896  c10 = gf(ic11,ic22,ic13) * (1.0d0 - xd1) + gf(ic21,ic22,ic13) * xd1
897  c01 = gf(ic11,ic12,ic23) * (1.0d0 - xd1) + gf(ic21,ic12,ic23) * xd1
898  c11 = gf(ic11,ic22,ic23) * (1.0d0 - xd1) + gf(ic21,ic22,ic23) * xd1
899 
900  c0 = c00 * (1.0d0 - xd2) + c10 * xd2
901  c1 = c01 * (1.0d0 - xd2) + c11 * xd2
902 
903  gfloc = c0 * (1.0d0 - xd3) + c1 * xd3
904  }
905 
906  end subroutine interpolate_var
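 ! Worked example (illustrative, 1D): with cell centres x(ic11,1)=0.0 and
 ! x(ic21,1)=1.0 holding gf=2.0 and gf=4.0, a particle at xloc(1)=0.25 gives
 !   xd1 = 0.25,  gfloc = 2.0*(1-0.25) + 4.0*0.25 = 2.5,
 ! i.e. standard linear interpolation; the 2D/3D branches apply the same
 ! weights direction by direction (bi-/trilinear interpolation).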
907 
908  subroutine time_spent_on_particles()
909  use mod_timing
911 
912  double precision :: tpartc_avg, tpartc_int_avg, tpartc_io_avg, tpartc_com_avg, tpartc_grid_avg
913 
914  call mpi_reduce(tpartc,tpartc_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
915  call mpi_reduce(tpartc_int,tpartc_int_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
916  call mpi_reduce(tpartc_io,tpartc_io_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
917  call mpi_reduce(tpartc_com,tpartc_com_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
918  call mpi_reduce(tpartc_grid,tpartc_grid_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
919 
920  if (mype ==0) then
921  tpartc_avg = tpartc_avg/dble(npe)
922  tpartc_int_avg = tpartc_int_avg/dble(npe)
923  tpartc_io_avg = tpartc_io_avg/dble(npe)
924  tpartc_com_avg = tpartc_com_avg/dble(npe)
925  tpartc_grid_avg = tpartc_grid_avg/dble(npe)
926  write(*,'(a,f12.3,a)')' Particle handling took : ',tpartc,' sec'
927  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc/(timeloop+smalldouble),' %'
928  write(*,'(a,f12.3,a)')' Particle IO took : ',tpartc_io_avg,' sec'
929  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_io_avg/(timeloop+smalldouble),' %'
930  write(*,'(a,f12.3,a)')' Particle COM took : ',tpartc_com_avg,' sec'
931  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_com_avg/(timeloop+smalldouble),' %'
932  write(*,'(a,f12.3,a)')' Particle integration took : ',tpartc_int_avg,' sec'
933  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_int_avg/(timeloop+smalldouble),' %'
934  write(*,'(a,f12.3,a)')' Particle init grid took : ',tpartc_grid_avg,' sec'
935  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_grid_avg/(timeloop+smalldouble),' %'
936  end if
937 
938  end subroutine time_spent_on_particles
939 
940  subroutine read_particles_snapshot(file_exists)
942 
943  logical,intent(out) :: file_exists
944  character(len=std_len) :: filename
945  integer :: mynpayload, mynparticles, pos
946 
947  ! some initialisations:
949  mynparticles = 0
950  nparticles = 0
951  it_particles = 0
952 
953  ! open the snapshot file on the headnode
954  file_exists=.false.
955  if (mype == 0) then
956 ! write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotini,'.dat'
957  ! Strip restart_from_filename of the ending
958  pos = scan(restart_from_file, '.dat', back=.true.)
959  write(filename,"(a,a,i4.4,a)") trim(restart_from_file(1:pos-8)),'_particles',snapshotini,'.dat'
960  INQUIRE(file=filename, exist=file_exists)
961  if (.not. file_exists) then
962  write(*,*) 'WARNING: File '//trim(filename)//' with particle data does not exist.'
963  write(*,*) 'Initialising particles from user or default routines'
964  else
965  open(unit=unitparticles,file=filename,form='unformatted',action='read',status='unknown',access='stream')
966  read(unitparticles) nparticles,it_particles,mynpayload
967  if (mynpayload .ne. npayload) &
968  call mpistop('npayload in restart file does not match npayload in mod_particles')
969  end if
970  end if
971 
972  call mpi_bcast(file_exists,1,mpi_logical,0,icomm,ierrmpi)
973  if (.not. file_exists) return
974 
975  call mpi_bcast(nparticles,1,mpi_integer,0,icomm,ierrmpi)
976  call mpi_bcast(it_particles,1,mpi_integer,0,icomm,ierrmpi)
977 
978  do while (mynparticles .lt. nparticles)
979  if (mype == 0) then
980  do while (nparticles_on_mype .lt. nparticles_per_cpu_hi &
981  .and. mynparticles .lt. nparticles)
982  call read_from_snapshot
983  mynparticles = mynparticles + 1
984  end do
985  end if ! mype==0
986  call mpi_bcast(mynparticles,1,mpi_integer,0,icomm,ierrmpi)
987  call comm_particles_global()
988  end do
989 
990  if (mype == 0) close(unit=unitparticles)
991 
992  end subroutine read_particles_snapshot
993 
994  subroutine read_from_snapshot()
995 
996  integer :: index,igrid_particle,ipe_particle
997 
998  read(unitparticles) index
999  allocate(particle(index)%self)
1000  allocate(particle(index)%payload(1:npayload))
1001  particle(index)%self%index = index
1002 
1003  read(unitparticles) particle(index)%self%follow
1004  read(unitparticles) particle(index)%self%q
1005  read(unitparticles) particle(index)%self%m
1006  read(unitparticles) particle(index)%self%time
1007  read(unitparticles) particle(index)%self%dt
1008  read(unitparticles) particle(index)%self%x
1009  read(unitparticles) particle(index)%self%u
1010  read(unitparticles) particle(index)%payload(1:npayload)
1011 
1012 ! if (particle(index)%self%follow) print*, 'follow index:', index
1013 
1014  call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
1015  particle(index)%igrid = igrid_particle
1016  particle(index)%ipe = ipe_particle
1017 
1018  call push_particle_into_particles_on_mype(index)
1019 
1020  end subroutine read_from_snapshot
1021 
1022  subroutine write_particles_snapshot()
1023  use mod_global_parameters
1024 
1025  character(len=std_len) :: filename
1026 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1027 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
1028 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1029 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1030  type(particle_t), allocatable, dimension(:) :: send_particles
1031  type(particle_t), allocatable, dimension(:) :: receive_particles
1032  double precision, allocatable, dimension(:,:) :: send_payload
1033  double precision, allocatable, dimension(:,:) :: receive_payload
1034  integer :: status(MPI_STATUS_SIZE)
1035  integer,dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1036  integer :: ipe, ipart, iipart, send_n_particles_for_output
1037  logical,save :: file_exists=.false.
1038 
1039  if (.not. write_snapshot) return
1040 
1041  receive_n_particles_for_output_from_ipe(:) = 0
1042 
1043  ! open the snapshot file on the headnode
1044  if (mype .eq. 0) then
1045  write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotnext,'.dat'
1046  INQUIRE(file=filename, exist=file_exists)
1047  if (.not. file_exists) then
1048  open(unit=unitparticles,file=filename,form='unformatted',status='new',access='stream')
1049  else
1050  open(unit=unitparticles,file=filename,form='unformatted',status='replace',access='stream')
1051  end if
1052  write(unitparticles) nparticles,it_particles,npayload
1053  end if
1054  if (npe==1) then
1055  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1056  call append_to_snapshot(particle(ipart)%self,particle(ipart)%payload)
1057  end do
1058  return
1059  end if
1060 
1061  if (mype .ne. 0) then
1062  call mpi_send(nparticles_on_mype,1,mpi_integer,0,mype,icomm,ierrmpi)
1063  ! fill the send_buffer
1064  send_n_particles_for_output = nparticles_on_mype
1065  allocate(send_particles(1:send_n_particles_for_output))
1066  allocate(send_payload(1:npayload,1:send_n_particles_for_output))
1067  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1068  send_particles(iipart) = particle(ipart)%self
1069  send_payload(1:npayload,iipart) = particle(ipart)%payload(1:npayload)
1070  end do
1071  else
1072  ! get number of particles on other ipes
1073  do ipe=1,npe-1
1074  call mpi_recv(receive_n_particles_for_output_from_ipe(ipe),1,mpi_integer,ipe,ipe,icomm,status,ierrmpi)
1075  end do
1076  end if
1077 
1078  if (mype .ne. 0) then
1079  call mpi_send(send_particles,send_n_particles_for_output,type_particle,0,mype,icomm,ierrmpi)
1080  call mpi_send(send_payload,npayload*send_n_particles_for_output,mpi_double_precision,0,mype,icomm,ierrmpi)
1081  deallocate(send_particles)
1082  deallocate(send_payload)
1083  end if
1084 
1085  if (mype==0) then
1086  ! first output own particles (already in receive buffer)
1087  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1088  call append_to_snapshot(particle(ipart)%self,particle(ipart)%payload)
1089  end do
1090  ! now output the particles sent from the other ipes
1091  do ipe=1,npe-1
1092  allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
1093  allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
1094  call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1095  call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1096  do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1097  call append_to_snapshot(receive_particles(ipart),receive_payload(1:npayload,ipart))
1098  end do ! ipart
1099  deallocate(receive_particles)
1100  deallocate(receive_payload)
1101  end do ! ipe
1102  close(unit=unitparticles)
1103  end if ! mype == 0
1104 
1105  end subroutine write_particles_snapshot
1106 
1107  subroutine append_to_snapshot(myparticle,mypayload)
1108 
1109  type(particle_t),intent(in) :: myparticle
1110  double precision :: mypayload(1:npayload)
1111 
1112  write(unitparticles) myparticle%index
1113  write(unitparticles) myparticle%follow
1114  write(unitparticles) myparticle%q
1115  write(unitparticles) myparticle%m
1116  write(unitparticles) myparticle%time
1117  write(unitparticles) myparticle%dt
1118  write(unitparticles) myparticle%x
1119  write(unitparticles) myparticle%u
1120  write(unitparticles) mypayload(1:npayload)
1121 
1122  end subroutine append_to_snapshot
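 ! Record layout note (illustrative): each particle is written to the
 ! stream-access file in the fixed order index, follow, q, m, time, dt,
 ! x(1:3), u(1:3), payload(1:npayload); read_from_snapshot reads the fields
 ! back in exactly this order, so both routines must be kept in sync whenever
 ! particle_t changes.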
1123 
1124  subroutine init_particles_output()
1125  use mod_global_parameters
1126 
1127  character(len=std_len) :: filename
1128  character(len=1024) :: line, strdata
1129  integer :: iipart, ipart, icomp
1130 
1131  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1132  if (particle(ipart)%self%follow) then
1133  filename = make_particle_filename(particle(ipart)%self%time,particle(ipart)%self%index,'individual')
1134  open(unit=unitparticles,file=filename,status='replace')
1135  line=''
1136  do icomp=1, npayload
1137  write(strdata,"(a,i2.2,a)") 'payload',icomp,','
1138  line = trim(line)//trim(strdata)
1139  end do
1140  write(unitparticles,"(a,a,a)") 'time,dt,x1,x2,x3,u1,u2,u3,',trim(line),'ipe,iteration,index'
1141  close(unit=unitparticles)
1142  end if
1143  end do
1144 
1145  end subroutine init_particles_output
1146 
1147  subroutine select_active_particles(end_time)
1149  double precision, intent(in) :: end_time
1150 
1151  integer :: ipart, iipart
1152  logical :: activate
1153  double precision :: t_min_mype
1154 
1155  t_min_mype = bigdouble
1156  nparticles_active_on_mype = 0
1157 
1158  do iipart = 1, nparticles_on_mype
1159  ipart = particles_on_mype(iipart);
1160  activate = (particle(ipart)%self%time < end_time)
1161  t_min_mype = min(t_min_mype, particle(ipart)%self%time)
1162 
1163  if (activate) then
1164  nparticles_active_on_mype = nparticles_active_on_mype + 1
1165  particles_active_on_mype(nparticles_active_on_mype) = ipart
1166  end if
1167  end do
1168 
1169  call mpi_allreduce(t_min_mype, min_particle_time, 1, mpi_double_precision, &
1170  mpi_min, icomm, ierrmpi)
1171 
1172  end subroutine select_active_particles
1173 
1174  subroutine locate_particle(index,igrid_particle,ipe_particle)
1175  ! given the particle's unique index, tell me on which cpu and igrid it is
1176  ! returns -1,-1 if particle was not found
1178 
1179  integer, intent(in) :: index
1180  integer, intent(out) :: igrid_particle, ipe_particle
1181  integer :: iipart,ipart,ipe_has_particle,ipe
1182  logical :: has_particle(0:npe-1)
1183  integer,dimension(0:1) :: buff
1184 
1185  has_particle(:) = .false.
1186  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1187  if (particle(ipart)%self%index == index) then
1188  has_particle(mype) = .true.
1189  exit
1190  end if
1191  end do
1192 
1193  if (has_particle(mype)) then
1194  buff(0) = particle(ipart)%igrid
1195  buff(1) = mype
1196  end if
1197 
1198  if (npe>0) call mpi_allgather(has_particle(mype),1,mpi_logical,has_particle,1,mpi_logical,icomm,ierrmpi)
1199 
1200  ipe_has_particle = -1
1201  do ipe=0,npe-1
1202  if (has_particle(ipe) .eqv. .true.) then
1203  ipe_has_particle = ipe
1204  exit
1205  end if
1206  end do
1207 
1208  if (ipe_has_particle .ne. -1) then
1209  if (npe>0) call mpi_bcast(buff,2,mpi_integer,ipe_has_particle,icomm,ierrmpi)
1210  igrid_particle=buff(0)
1211  ipe_particle = buff(1)
1212  else
1213  igrid_particle=-1
1214  ipe_particle = -1
1215  end if
1216 
1217  end subroutine locate_particle
1218 
1219  subroutine find_particle_ipe(x,igrid_particle,ipe_particle)
1220  use mod_forest, only: tree_node_ptr, tree_root
1221  use mod_slice, only: get_igslice
1223 
1224  double precision, intent(in) :: x(3)
1225  integer, intent(out) :: igrid_particle, ipe_particle
1226  integer :: ig(ndir,nlevelshi), ig_lvl(nlevelshi)
1227  integer :: idim, ic(ndim)
1228  type(tree_node_ptr) :: branch
1229 
1230  ! first check if the particle is in the domain
1231  if (.not. particle_in_domain(x)) then
1232  igrid_particle = -1
1233  ipe_particle = -1
1234  return
1235  end if
1236 
1237  ! get the index on each level
1238  do idim = 1, ndim
1239  call get_igslice(idim,x(idim), ig_lvl)
1240  ig(idim,:) = ig_lvl
1241  end do
1242 
1243  ! traverse the tree until leaf is found
1244  branch=tree_root({ig(^d,1)})
1245  do while (.not.branch%node%leaf)
1246  {ic(^d)=ig(^d,branch%node%level+1) - 2 * branch%node%ig^d +2\}
1247  branch%node => branch%node%child({ic(^d)})%node
1248  end do
1249 
1250  igrid_particle = branch%node%igrid
1251  ipe_particle = branch%node%ipe
1252 
1253  end subroutine find_particle_ipe
1254 
1255  !> Check if particle is inside computational domain
1256  logical function particle_in_domain(x)
1258  double precision, dimension(ndim), intent(in) :: x
1259 
1260  particle_in_domain = all(x >= [ xprobmin^d ]) .and. &
1261  all(x < [ xprobmax^d ]) ! Jannis: as before < instead of <= here, necessary?
1262  end function particle_in_domain
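 ! Usage sketch (illustrative): typical callers reject positions outside the
 ! domain before locating the owning block, e.g.
 !   if (particle_in_domain(x)) call find_particle_ipe(x, igrid_particle, ipe_particle)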
1263 
1264  !> Quick check if particle is still in igrid
1265  logical function particle_in_igrid(ipart, igrid)
1267  integer, intent(in) :: igrid, ipart
1268  double precision :: x(ndim), grid_rmin(ndim), grid_rmax(ndim)
1269 
1270  ! First check if the igrid is still there
1271  if (.not. allocated(ps(igrid)%w)) then
1272  particle_in_igrid = .false.
1273  else
1274  grid_rmin = [ {rnode(rpxmin^d_,igrid)} ]
1275  grid_rmax = [ {rnode(rpxmax^d_,igrid)} ]
1276  x = particle(ipart)%self%x(1:ndim)
1277  particle_in_igrid = all(x >= grid_rmin) .and. all(x < grid_rmax)
1278  end if
1279  end function particle_in_igrid
1280 
1281  subroutine set_neighbor_ipe()
1283 
1284  integer :: igrid, iigrid,ipe
1285  integer :: my_neighbor_type, i^D
1286  logical :: ipe_is_neighbor(0:npe-1)
1287 
1288  ipe_is_neighbor(:) = .false.
1289  do iigrid=1,igridstail; igrid=igrids(iigrid);
1290 
1291  {do i^db=-1,1\}
1292  if (i^d==0|.and.) then
1293  cycle
1294  end if
1295  my_neighbor_type=neighbor_type(i^d,igrid)
1296 
1297  select case (my_neighbor_type)
1298  case (neighbor_boundary) ! boundary
1299  ! do nothing
1300  case (neighbor_coarse) ! fine-coarse
1301  call ipe_fc(i^d,igrid,ipe_is_neighbor)
1302  case (neighbor_sibling) ! same level
1303  call ipe_srl(i^d,igrid,ipe_is_neighbor)
1304  case (neighbor_fine) ! coarse-fine
1305  call ipe_cf(i^d,igrid,ipe_is_neighbor)
1306  end select
1307 
1308  {end do\}
1309 
1310  end do
1311 
1312  ! remove self as neighbor
1313  ipe_is_neighbor(mype) = .false.
1314 
1315  ! Now make the list of neighbors
1316  npe_neighbors = 0
1317  do ipe=0,npe-1
1318  if (ipe_is_neighbor(ipe)) then
1319  npe_neighbors = npe_neighbors + 1
1320  ipe_neighbor(npe_neighbors) = ipe
1321  end if
1322  end do ! ipe
1323 
1324  end subroutine set_neighbor_ipe
1325 
1326  subroutine ipe_fc(i^D,igrid,ipe_is_neighbor)
1328  integer, intent(in) :: i^D, igrid
1329  logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1330 
1331  ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1332 
1333  end subroutine ipe_fc
1334 
1335  subroutine ipe_srl(i^D,igrid,ipe_is_neighbor)
1337  integer, intent(in) :: i^D, igrid
1338  logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1339 
1340  ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1341 
1342  end subroutine ipe_srl
1343 
1344  subroutine ipe_cf(i^D,igrid,ipe_is_neighbor)
1346  integer, intent(in) :: i^D, igrid
1347  logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1348  integer :: ic^D, inc^D
1349 
1350  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1351  inc^db=2*i^db+ic^db
1352  \}
1353  ipe_is_neighbor( neighbor_child(2,inc^d,igrid) ) = .true.
1354 
1355  {end do\}
1356 
1357  end subroutine ipe_cf
1358 
1359  subroutine write_particle_output()
1360  use mod_global_parameters
1361 
1362  character(len=std_len) :: filename
1363  integer :: ipart,iipart
1364 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1365 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1366  type(particle_t), allocatable, dimension(:) :: send_particles
1367  double precision, allocatable, dimension(:,:) :: send_payload
1368  integer :: send_n_particles_for_output, cc
1369  integer :: nout
1370  double precision :: tout
1371 
1372  if (write_individual) then
1373  call output_individual()
1374  end if
1375 
1376  if (write_ensemble) then
1377  send_n_particles_for_output = 0
1378 
1379  do iipart=1,nparticles_on_mype
1380  ipart=particles_on_mype(iipart);
1381 
1382  ! have to send particle to rank zero for output
1383  if (mod(particle(ipart)%self%index,downsample_particles) .eq. 0) then
1384  send_n_particles_for_output = send_n_particles_for_output + 1
1385  end if
1386  end do
1387 
1388  allocate(send_particles(1:send_n_particles_for_output))
1389  allocate(send_payload(npayload,1:send_n_particles_for_output))
1390  cc = 1
1391  do iipart=1,nparticles_on_mype
1392  ipart=particles_on_mype(iipart);
1393 
1394  ! have to send particle to rank zero for output
1395  if (mod(particle(ipart)%self%index,downsample_particles) .eq. 0) then
1396  send_particles(cc) = particle(ipart)%self
1397  send_payload(1:npayload,cc) = particle(ipart)%payload(1:npayload)
1398  cc=cc+1
1399  end if
1400  end do
1401 
1402  call output_ensemble(send_n_particles_for_output,send_particles, &
1403  send_payload,'ensemble')
1404  deallocate(send_particles)
1405  deallocate(send_payload)
1406  end if
1407 
1408  end subroutine write_particle_output
1409 
1410  character(len=128) function make_particle_filename(tout,index,type)
1412 
1413  character(len=*), intent(in) :: type
1414  double precision, intent(in) :: tout
1415  integer, intent(in) :: index
1416  integer :: nout, mysnapshot
1417 
1418  select case(type)
1419  case ('ensemble')
1420  nout = nint(tout / dtsave_particles)
1421  write(make_particle_filename,"(a,a,i6.6,a)") trim(base_filename),'_ensemble',nout,'.csv'
1422  case ('destroy')
1423  write(make_particle_filename,"(a,a)") trim(base_filename),'_destroyed.csv'
1424  case ('individual')
1425  write(make_particle_filename,"(a,a,i6.6,a)") trim(base_filename),'_particle',index,'.csv'
1426  end select
1427 
1428  end function make_particle_filename
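 ! Example output (illustrative), assuming base_filename = 'output/test':
 !   'ensemble'   -> output/test_ensemble000010.csv   (nout = nint(tout/dtsave_particles) = 10)
 !   'destroy'    -> output/test_destroyed.csv
 !   'individual' -> output/test_particle000042.csv   (particle index 42)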
1429 
1430  subroutine output_ensemble(send_n_particles_for_output,send_particles,send_payload,typefile)
1432 
1433  integer, intent(in) :: send_n_particles_for_output
1434  type(particle_t), dimension(send_n_particles_for_output), intent(in) :: send_particles
1435  double precision, dimension(npayload,send_n_particles_for_output), intent(in) :: send_payload
1436  character(len=*), intent(in) :: typefile
1437  character(len=std_len) :: filename
1438  type(particle_t), allocatable, dimension(:) :: receive_particles
1439  double precision, allocatable, dimension(:,:) :: receive_payload
1440  integer :: status(MPI_STATUS_SIZE)
1441  integer,dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1442  integer :: ipe, ipart, nout
1443  logical :: file_exists
1444 
1445  receive_n_particles_for_output_from_ipe(:) = 0
1446 
1447  call mpi_allgather(send_n_particles_for_output, 1, mpi_integer, &
1448  receive_n_particles_for_output_from_ipe, 1, mpi_integer, icomm, ierrmpi)
1449 
1450  ! If there are no particles to be written, skip the output
1451  if (sum(receive_n_particles_for_output_from_ipe(:)) == 0) return
1452 
1453  if (mype > 0) then
1454  call mpi_send(send_particles,send_n_particles_for_output,type_particle,0,mype,icomm,ierrmpi)
1455  call mpi_send(send_payload,npayload*send_n_particles_for_output,mpi_double_precision,0,mype,icomm,ierrmpi)
1456  else
1457  ! Create file and write header
1458  if(typefile=='destroy') then ! Destroyed file
1459  write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1460  trim(typefile) // '.csv'
1462  inquire(file=filename, exist=file_exists)
1463 
1464  if (.not. file_exists) then
1465  open(unit=unitparticles, file=filename)
1466  write(unitparticles,"(a)") trim(csv_header)
1467  else
1468  open(unit=unitparticles, file=filename, access='append')
1469  end if
1470 
1471  else ! Ensemble file
1472  write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1473  trim(typefile) // '_', nint(t_next_output/dtsave_particles),'.csv'
1474  open(unit=unitparticles,file=filename)
1475  write(unitparticles,"(a)") trim(csv_header)
1476  end if
1477 
1478  ! Write own particles
1479  do ipart=1,send_n_particles_for_output
1480  call output_particle(send_particles(ipart),send_payload(1:npayload,ipart),0,unitparticles)
1481  end do
1482 
1483  ! Write particles from other tasks
1484  do ipe=1,npe-1
1485  allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
1486  allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
1487  call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1488  call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1489  do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1490  call output_particle(receive_particles(ipart),receive_payload(1:npayload,ipart),ipe,unitparticles)
1491  end do ! ipart
1492  deallocate(receive_particles)
1493  deallocate(receive_payload)
1494  end do ! ipe
1495 
1496  close(unitparticles)
1497  end if
1498 
1499  end subroutine output_ensemble
1500 
1501  subroutine output_individual()
1503  character(len=std_len) :: filename
1504  integer :: ipart,iipart,ipe
1505  logical :: file_exists
1506 
1507  do iipart=1,nparticles_on_mype
1508  ipart=particles_on_mype(iipart)
1509 
1510  ! should the particle be dumped?
1511  if (particle(ipart)%self%follow) then
1512  write(filename,"(a,a,i6.6,a)") trim(base_filename), '_particle_', &
1513  particle(ipart)%self%index, '.csv'
1514  inquire(file=filename, exist=file_exists)
1515 
1516  ! Create empty file and write header on first iteration, or when the
1517  ! file does not exist yet
1518  if (it_particles == 0 .or. .not. file_exists) then
1519  open(unit=unitparticles, file=filename)
1520  write(unitparticles,"(a)") trim(csv_header)
1521  else
1522  open(unit=unitparticles, file=filename, access='append')
1523  end if
1524 
1525  call output_particle(particle(ipart)%self,&
1526  particle(ipart)%payload(1:npayload),mype,unitparticles)
1527 
1528  close(unitparticles)
1529  end if
1530  end do
1531 
1532  end subroutine output_individual
1533 
1534  subroutine output_particle(myparticle,payload,ipe,file_unit)
1536  use mod_geometry
1537 
1538  type(particle_t),intent(in) :: myparticle
1539  double precision, intent(in) :: payload(npayload)
1540  integer, intent(in) :: ipe
1541  integer, intent(in) :: file_unit
1542  double precision :: x(3), u(3)
1543  integer :: icomp
1544 
1545  ! convert and normalize the position
1546  if (nocartesian) then
1547  select case(coordinate)
1548  case (Cartesian,Cartesian_stretched)
1549  x = myparticle%x*length_convert_factor
1550  case(cylindrical)
1551  x(r_) = myparticle%x(r_)*length_convert_factor
1552  x(z_) = myparticle%x(z_)*length_convert_factor
1553  x(phi_) = myparticle%x(phi_)
1554  case(spherical)
1555  x(:) = myparticle%x(:)
1556  x(1) = x(1)*length_convert_factor
1557  end select
1558  else
1559  call partcoord_to_cartesian(myparticle%x,x)
1560  x(:) = x(:)*length_convert_factor
1561  end if
1562 
1563  u = myparticle%u(1:3) * integrator_velocity_factor
1564 
1565  write(file_unit, csv_format) myparticle%time, myparticle%dt, x(1:3), &
1566  u, payload(1:npayload), ipe, it_particles, &
1567  myparticle%index
1568 
1569  end subroutine output_particle
1570 
1571  !> Convert position to Cartesian coordinates
1572  subroutine partcoord_to_cartesian(xp,xpcart)
1573  ! conversion of particle coordinate from cylindrical/spherical to cartesian coordinates done here
1575  use mod_geometry
1576 
1577  double precision, intent(in) :: xp(1:3)
1578  double precision, intent(out) :: xpcart(1:3)
1579 
1580  select case (coordinate)
1581  case (Cartesian,Cartesian_stretched)
1582  xpcart(1:3) = xp(1:3)
1583  case (cylindrical)
1584  xpcart(1) = xp(r_)*cos(xp(phi_))
1585  xpcart(2) = xp(r_)*sin(xp(phi_))
1586  xpcart(3) = xp(z_)
1587  case (spherical)
1588  xpcart(1) = xp(1)*sin(xp(2))*cos(xp(3))
1589  {^iftwod
1590  xpcart(2) = xp(1)*cos(xp(2))
1591  xpcart(3) = xp(1)*sin(xp(2))*sin(xp(3))}
1592  {^ifthreed
1593  xpcart(2) = xp(1)*sin(xp(2))*sin(xp(3))
1594  xpcart(3) = xp(1)*cos(xp(2))}
1595  case default
1596  write(*,*) "No converter for coordinate=",coordinate
1597  end select
1598 
1599  end subroutine partcoord_to_cartesian
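 ! Worked example (illustrative, cylindrical): a particle at r = 2,
 ! phi = pi/2, z = 1 maps to xpcart = (2*cos(pi/2), 2*sin(pi/2), 1) = (0, 2, 1);
 ! the spherical branch swaps the roles of the second and third Cartesian
 ! components between the 2D and 3D preprocessor variants above.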
1600 
1601  !> Convert to noncartesian coordinates
1602  subroutine partcoord_from_cartesian(xp,xpcart)
1603  ! conversion of particle coordinate from Cartesian to cylindrical/spherical coordinates done here
1605  use mod_geometry
1606 
1607  double precision, intent(in) :: xpcart(1:3)
1608  double precision, intent(out) :: xp(1:3)
1609  double precision :: xx, yy, zz
1610  double precision :: rr, tth, pphi
1611 
1612  select case (coordinate)
1613  case (Cartesian,Cartesian_stretched)
1614  xp(1:3)=xpcart(1:3)
1615  case (cylindrical)
1616  xp(r_) = sqrt(xpcart(1)**2 + xpcart(2)**2)
1617  xp(phi_) = atan2(xpcart(2),xpcart(1))
1618  if (xp(phi_) .lt. 0.d0) xp(phi_) = 2.0d0*dpi + xp(phi_)
1619  xp(z_) = xpcart(3)
1620  case (spherical)
1621  xx = xpcart(1)
1622  {^iftwod
1623  yy = xpcart(3)
1624  zz = xpcart(2)}
1625  {^ifthreed
1626  yy = xpcart(2)
1627  zz = xpcart(3)}
1628  rr = sqrt(xx**2 + yy**2 + zz**2)
1629  tth = acos(zz/rr)
1630  if (yy > 0.d0) then
1631  pphi = acos(xx/sqrt(xx**2+yy**2))
1632  else
1633  pphi = 2.d0*dpi - acos(xx/sqrt(xx**2+yy**2))
1634  endif
1635  xp(1:3) = (/ rr, tth, pphi /)
1636  case default
1637  write(*,*) "No converter for coordinate=",coordinate
1638  end select
1639 
1640  end subroutine partcoord_from_cartesian
1641 
1642  !> Convert vector to Cartesian coordinates
1643  subroutine partvec_to_cartesian(xp,up,upcart)
1644  ! conversion of a generic vector from cylindrical/spherical to cartesian coordinates done here
1646  use mod_geometry
1647 
1648  double precision, intent(in) :: xp(1:3),up(1:3)
1649  double precision, intent(out) :: upcart(1:3)
1650 
1651  select case (coordinate)
1652  case (Cartesian,Cartesian_stretched)
1653  upcart(1:3)=up(1:3)
1654  case (cylindrical)
1655  upcart(1) = cos(xp(phi_)) * up(r_) - sin(xp(phi_)) * up(phi_)
1656  upcart(2) = cos(xp(phi_)) * up(phi_) + sin(xp(phi_)) * up(r_)
1657  upcart(3) = up(z_)
1658  case (spherical)
1659  upcart(1) = up(1)*sin(xp(2))*cos(xp(3)) + up(2)*cos(xp(2))*cos(xp(3)) - up(3)*sin(xp(3))
1660  {^iftwod
1661  upcart(2) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))
1662  upcart(3) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))}
1663  {^ifthreed
1664  upcart(2) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))
1665  upcart(3) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))}
1666  case default
1667  write(*,*) "No converter for coordinate=",coordinate
1668  end select
1669 
1670  end subroutine partvec_to_cartesian
1671 
1672  !> Convert vector from Cartesian coordinates
1673  subroutine partvec_from_cartesian(xp,up,upcart)
1674  ! conversion of a generic vector to cylindrical/spherical from cartesian coordinates done here
1676  use mod_geometry
1677 
1678  double precision, intent(in) :: xp(1:3),upcart(1:3)
1679  double precision, intent(out) :: up(1:3)
1680 
1681  select case (coordinate)
1682  case (cartesian,cartesian_stretched,cartesian_expansion)
1683  up(1:3) = upcart(1:3)
1684  case (cylindrical)
1685  up(r_) = cos(xp(phi_)) * upcart(1) + sin(xp(phi_)) * upcart(2)
1686  up(phi_) = -sin(xp(phi_)) * upcart(1) + cos(xp(phi_)) * upcart(2)
1687  up(z_) = upcart(3)
1688  case (spherical)
1689  {^iftwod
1690  up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(3)*sin(xp(2))*sin(xp(3)) + upcart(2)*cos(xp(2))
1691  up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(3)*cos(xp(2))*sin(xp(3)) - upcart(2)*sin(xp(2))
1692  up(3) = -upcart(1)*sin(xp(3)) + upcart(3)*cos(xp(3))}
1693  {^ifthreed
1694  up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(2)*sin(xp(2))*sin(xp(3)) + upcart(3)*cos(xp(2))
1695  up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(2)*cos(xp(2))*sin(xp(3)) - upcart(3)*sin(xp(2))
1696  up(3) = -upcart(1)*sin(xp(3)) + upcart(2)*cos(xp(3))}
1697  case default
1698  write(*,*) "No converter for coordinate=",coordinate
1699  end select
1700 
1701  end subroutine partvec_from_cartesian
1702 
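The cylindrical branches of partvec_to_cartesian and partvec_from_cartesian are mutually inverse rotations by the azimuthal angle. A minimal standalone sketch (illustrative names and values, not part of the module) that makes the rotation matrix explicit:

! The cylindrical vector conversion is a plane rotation by phi; its inverse
! is the transpose of the same matrix.
program check_cyl_rot
  implicit none
  double precision :: phi, R(2,2), up(2), uc(2), ub(2)
  phi = 1.1d0
  ! columns of R are (cos(phi), sin(phi)) and (-sin(phi), cos(phi))
  R  = reshape((/ cos(phi), sin(phi), -sin(phi), cos(phi) /), (/ 2, 2 /))
  up = (/ 0.4d0, -2.5d0 /)        ! (u_r, u_phi)
  uc = matmul(R, up)              ! (u_x, u_y), as in partvec_to_cartesian
  ub = matmul(transpose(R), uc)   ! back to (u_r, u_phi), as in partvec_from_cartesian
  print *, 'round-trip error:', maxval(abs(ub - up))
end program check_cyl_rot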
1703  !> Fix particle position when crossing the 0,2pi boundary in noncartesian coordinates
1704  subroutine fix_phi_crossing(xp,igrid)
1706  use mod_geometry
1707 
1708  integer, intent(in) :: igrid
1709  double precision, intent(inout) :: xp(1:3)
1710  double precision :: xpmod(1:3)
1711  integer :: phiind
1712 
1713  select case (coordinate)
1714  case (cartesian,cartesian_stretched,cartesian_expansion)
1715  ! Do nothing
1716  return
1717  case (cylindrical)
1718  phiind = phi_
1719  case (spherical)
1720  phiind = 3
1721  end select
1722 
1723  xpmod = xp
1724 
1725  ! Fix phi-boundary crossing
1726  ! Case 1: particle has crossed from ~2pi to ~0
1727  xpmod(phiind) = xp(phiind) + 2.d0*dpi
1728  if ((.not. point_in_igrid_ghostc(xp,igrid,0)) &
1729  .and. (point_in_igrid_ghostc(xpmod,igrid,0))) then
1730  xp(phiind) = xpmod(phiind)
1731  return
1732  end if
1733  ! Case 2: particle has crossed from ~0 to ~2pi
1734  xpmod(phiind) = xp(phiind) - 2.d0*dpi
1735  if ((.not. point_in_igrid_ghostc(xp,igrid,0)) &
1736  .and. (point_in_igrid_ghostc(xpmod,igrid,0))) then
1737  xp(phiind) = xpmod(phiind)
1738  return
1739  end if
1740 
1741  end subroutine fix_phi_crossing
1742 
1743  !> Quick check if particle coordinate is inside igrid
1744  !> (ghost cells included, except the last ngh)
1745  logical function point_in_igrid_ghostc(x, igrid, ngh)
1747  use mod_geometry
1748  integer, intent(in) :: igrid, ngh
1749  double precision, intent(in) :: x(ndim)
1750  double precision :: grid_rmin(ndim), grid_rmax(ndim)
1751 
1752  ! TODO: this does not work with stretched coordinates!
1753  ! TODO: but phi should not be stretched in principle...
1754  grid_rmin = [ {rnode(rpxmin^d_,igrid)-rnode(rpdx^d_,igrid)*(dble(nghostcells-ngh)-0.5d0)} ]
1755  grid_rmax = [ {rnode(rpxmax^d_,igrid)+rnode(rpdx^d_,igrid)*(dble(nghostcells-ngh)-0.5d0)} ]
1756  point_in_igrid_ghostc = all(x >= grid_rmin) .and. all(x < grid_rmax)
1757  end function point_in_igrid_ghostc
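To make the acceptance window of point_in_igrid_ghostc concrete, here is a standalone sketch with assumed numbers (nghostcells=2, ngh=0, cell size 0.1, block spanning [0,1]); it evaluates the same bounds arithmetic as the function above:

! With these assumed values the accepted interval is [-0.15, 1.15), i.e.
! nghostcells-ngh-0.5 = 1.5 cell widths beyond each physical block edge.
program ghost_window
  implicit none
  integer, parameter :: nghostcells = 2, ngh = 0
  double precision, parameter :: xmin = 0.d0, xmax = 1.d0, dx = 0.1d0
  double precision :: lo, hi
  lo = xmin - dx*(dble(nghostcells-ngh)-0.5d0)
  hi = xmax + dx*(dble(nghostcells-ngh)-0.5d0)
  print *, 'accepted x in [', lo, ',', hi, ')'
end program ghost_window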
1758 
1759  subroutine comm_particles()
1760 
1762 
1763  integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
1764  integer :: index
1765  integer :: tag_send, tag_receive, send_buff, rcv_buff
1766  integer :: status(MPI_STATUS_SIZE)
1767  integer, dimension(0:npe-1) :: send_n_particles_to_ipe
1768  integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
1769  type(particle_t), allocatable, dimension(:,:) :: send_particles
1770  type(particle_t), allocatable, dimension(:,:) :: receive_particles
1771  double precision, allocatable, dimension(:,:,:) :: send_payload
1772  double precision, allocatable, dimension(:,:,:) :: receive_payload
1773  integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
1774  integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
1775  integer :: destroy_n_particles_mype
1776  logical :: BC_applied
1777  integer, allocatable, dimension(:) :: sndrqst, rcvrqst
1778  integer, allocatable, dimension(:) :: sndrqst_payload, rcvrqst_payload
1779  integer :: isnd, ircv
1780  integer, parameter :: maxneighbors=56 ! maximum case: coarse-fine in 3D
1781  !-----------------------------------------------------------------------------
1782 
1783  send_n_particles_to_ipe(:) = 0
1784  receive_n_particles_from_ipe(:) = 0
1785  destroy_n_particles_mype = 0
1786 
1787  allocate( particle_index_to_be_sent_to_ipe(nparticles_per_cpu_hi,0:npe-1) )
1788  allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
1789  allocate( sndrqst_payload(1:npe-1), rcvrqst_payload(1:npe-1) )
1790  sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1791  sndrqst_payload = mpi_request_null; rcvrqst_payload = mpi_request_null;
1792 
1793  ! check if and where to send each particle, destroy if necessary
1794  ! !$OMP PARALLEL DO PRIVATE(ipart)
1795  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1796 
1797  ! first check if the particle should be destroyed (user defined criterion)
1798  if (associated(usr_destroy_particle)) then
1799  if (usr_destroy_particle(particle(ipart))) then
1800  ! !$OMP CRITICAL(destroy)
1801  destroy_n_particles_mype = destroy_n_particles_mype + 1
1802  particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1803  ! !$OMP END CRITICAL(destroy)
1804  cycle
1805  end if
1806  end if
1807 
1808  ! is my particle still in the same igrid?
1809  if (.not.particle_in_igrid(ipart,particle(ipart)%igrid)) then
1810  call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
1811 
1812  ! destroy particle if out of domain (signalled by return value of -1)
1813  if (igrid_particle == -1 )then
1814  call apply_periodb(particle(ipart)%self,igrid_particle,ipe_particle,bc_applied)
1815  if (.not. bc_applied .or. igrid_particle == -1) then
1816  ! !$OMP CRITICAL(destroy2)
1817  destroy_n_particles_mype = destroy_n_particles_mype + 1
1818  particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1819  ! !$OMP END CRITICAL(destroy2)
1820  cycle
1821  end if
1822  end if
1823 
1824  ! particle still present
1825  particle(ipart)%igrid = igrid_particle
1826  particle(ipart)%ipe = ipe_particle
1827 
1828  ! if we have more than one core, is it on another cpu?
1829  if (npe .gt. 1 .and. particle(ipart)%ipe .ne. mype) then
1830  ! !$OMP CRITICAL(send)
1831  send_n_particles_to_ipe(ipe_particle) = &
1832  send_n_particles_to_ipe(ipe_particle) + 1
1833  if (send_n_particles_to_ipe(ipe_particle) .gt. nparticles_per_cpu_hi) &
1834  call mpistop('too many particles, increase nparticles_per_cpu_hi')
1835  particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
1836  ! !$OMP END CRITICAL(send)
1837  end if ! ipe_particle
1838 
1839  end if ! particle_in_grid
1840 
1841  end do ! ipart
1842  ! !$OMP END PARALLEL DO
1843 
1844  call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
1845 
1846  ! get out when only one core:
1847  if (npe == 1) return
1848 
1849  ! communicate amount of particles to be sent/received
1850  isnd = 0; ircv = 0;
1851  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1852  tag_send = mype * npe + ipe
1853  tag_receive = ipe * npe + mype
1854  isnd = isnd + 1; ircv = ircv + 1
1855  call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1856  call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1857  end do
1858 
1859  call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1860  call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1861  sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1862 
1863  ! allocate the send and receive buffers,
1864  ! If this is still not good enough, consider optimizing using lists of lists:
1865  allocate(send_particles(maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
1866  allocate(receive_particles(maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
1867  allocate(send_payload(npayload,maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
1868  allocate(receive_payload(npayload,maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
1869 
1870  isnd = 0; ircv = 0;
1871  ! send and receive the data of the particles
1872  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1873  tag_send = mype * npe + ipe
1874  tag_receive = ipe * npe + mype
1875 
1876  ! should i send some particles to ipe?
1877  if (send_n_particles_to_ipe(ipe) .gt. 0) then
1878 
1879  ! create the send buffer
1880  do ipart = 1, send_n_particles_to_ipe(ipe)
1881  send_particles(ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
1882  send_payload(1:npayload,ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
1883  end do ! ipart
1884  isnd = isnd + 1
1885  call mpi_isend(send_particles(:,iipe),send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1886  call mpi_isend(send_payload(:,:,iipe),npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,sndrqst_payload(isnd),ierrmpi)
1887 
1888  end if ! send .gt. 0
1889 
1890  ! should i receive some particles from ipe?
1891  if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1892 
1893  ircv = ircv + 1
1894  call mpi_irecv(receive_particles(:,iipe),receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1895  call mpi_irecv(receive_payload(:,:,iipe),npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,rcvrqst_payload(ircv),ierrmpi)
1896 
1897  end if ! receive .gt. 0
1898  end do ! ipe
1899 
1900  call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1901  call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1902  call mpi_waitall(isnd,sndrqst_payload,mpi_statuses_ignore,ierrmpi)
1903  call mpi_waitall(ircv,rcvrqst_payload,mpi_statuses_ignore,ierrmpi)
1904 
1905  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1906  do ipart = 1, send_n_particles_to_ipe(ipe)
1907  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
1908  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
1909  call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
1910  end do ! ipart
1911  end do
1912 
1913  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1914  if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1915  do ipart = 1, receive_n_particles_from_ipe(ipe)
1916 
1917  index = receive_particles(ipart,iipe)%index
1918  call push_particle_into_particles_on_mype(index)
1919  !if (.not. allocated(particle(index)%self)) &
1920  allocate(particle(index)%self)
1921  particle(index)%self = receive_particles(ipart,iipe)
1922  !if (.not. allocated(particle(index)%payload)) &
1923  allocate(particle(index)%payload(npayload))
1924  particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart,iipe)
1925 
1926  ! since we don't send the igrid, need to re-locate it
1927  call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
1928  particle(index)%igrid = igrid_particle
1929  particle(index)%ipe = ipe_particle
1930 
1931  end do ! ipart
1932 
1933  end if ! receive .gt. 0
1934  end do ! ipe
1935 
1936  end subroutine comm_particles
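comm_particles first exchanges the particle counts with nonblocking sends and receives, and only then posts the particle and payload messages sized by those counts. A minimal two-rank sketch of this count-then-data idiom, outside AMRVAC (program name, tags and counts are arbitrary):

! Count-then-data exchange between two ranks, mirroring the structure of
! comm_particles: nonblocking count exchange, wait, then sized data exchange.
program count_then_data
  use mpi
  implicit none
  integer :: ierr, rank, nproc, other, ns, nr, req(2)
  double precision, allocatable :: sbuf(:), rbuf(:)
  call MPI_INIT(ierr)
  call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, nproc, ierr)
  if (nproc == 2) then
    other = 1 - rank
    ns = rank + 3                               ! number of items this rank sends
    call MPI_ISEND(ns, 1, MPI_INTEGER, other, 0, MPI_COMM_WORLD, req(1), ierr)
    call MPI_IRECV(nr, 1, MPI_INTEGER, other, 0, MPI_COMM_WORLD, req(2), ierr)
    call MPI_WAITALL(2, req, MPI_STATUSES_IGNORE, ierr)
    allocate(sbuf(ns), rbuf(nr))
    sbuf = dble(rank)
    call MPI_ISEND(sbuf, ns, MPI_DOUBLE_PRECISION, other, 1, MPI_COMM_WORLD, req(1), ierr)
    call MPI_IRECV(rbuf, nr, MPI_DOUBLE_PRECISION, other, 1, MPI_COMM_WORLD, req(2), ierr)
    call MPI_WAITALL(2, req, MPI_STATUSES_IGNORE, ierr)
    print *, 'rank', rank, 'received', nr, 'values'
  end if
  call MPI_FINALIZE(ierr)
end program count_then_data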
1937  !=============================================================================
1938  subroutine comm_particles_global()
1939 
1940  ! does not destroy particles, unlike comm_particles
1941 
1943 
1944  integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
1945  integer :: index
1946  integer :: tag_send, tag_receive, send_buff, rcv_buff
1947  integer :: status(MPI_STATUS_SIZE)
1948  integer, dimension(0:npe-1) :: send_n_particles_to_ipe
1949  integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
1950 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1951 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
1952 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1953 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1954  type(particle_t), allocatable, dimension(:) :: send_particles
1955  type(particle_t), allocatable, dimension(:) :: receive_particles
1956  double precision, allocatable, dimension(:,:) :: send_payload
1957  double precision, allocatable, dimension(:,:) :: receive_payload
1958  integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
1959  logical :: BC_applied
1960  integer, allocatable, dimension(:) :: sndrqst, rcvrqst
1961  integer :: isnd, ircv
1962  !-----------------------------------------------------------------------------
1963  send_n_particles_to_ipe(:) = 0
1964  receive_n_particles_from_ipe(:) = 0
1965 
1966  allocate( particle_index_to_be_sent_to_ipe(nparticles_per_cpu_hi,0:npe-1) )
1967  allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
1968  sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1969 
1970  ! check if and where to send each particle, relocate all of them (in case grid has changed)
1971  ! !$OMP PARALLEL DO PRIVATE(ipart)
1972  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1973 
1974  call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
1975 
1976  particle(ipart)%igrid = igrid_particle
1977  particle(ipart)%ipe = ipe_particle
1978 
1979  ! if we have more than one core, is it on another cpu?
1980  if (particle(ipart)%ipe .ne. mype) then
1981  ! !$OMP CRITICAL(send)
1982  send_n_particles_to_ipe(ipe_particle) = &
1983  send_n_particles_to_ipe(ipe_particle) + 1
1984  if (send_n_particles_to_ipe(ipe_particle) .gt. nparticles_per_cpu_hi) &
1985  call mpistop('G: too many particles, increase nparticles_per_cpu_hi')
1986  particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
1987  ! !$OMP END CRITICAL(send)
1988  end if ! ipe_particle
1989 
1990  end do ! ipart
1991  ! !$OMP END PARALLEL DO
1992 
1993  ! get out when only one core:
1994  if (npe == 1) then
1995  deallocate(sndrqst, rcvrqst)
1996  deallocate(particle_index_to_be_sent_to_ipe)
1997  return
1998  end if
1999 
2000  ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2001  ! communicate amount of particles to be sent/received
2002  ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2003  ! opedit: I think this was the main bottleneck.
2004  ! 31.12.2017: made it nonblocking (easy, least invasive)
2005  ! If this continues to be problematic, better to
2006  ! send particles with block in coarsen/refine/loadbalance. (best).
2007 
2008  isnd = 0; ircv = 0;
2009  do ipe=0,npe-1; if (ipe .eq. mype) cycle;
2010 
2011  tag_send = mype * npe + ipe; tag_receive = ipe * npe + mype;
2012  isnd = isnd + 1; ircv = ircv + 1;
2013  call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer, &
2014  ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2015  call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer, &
2016  ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2017  end do
2018 
2019  call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
2020  call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
2021  deallocate(sndrqst, rcvrqst)
2022 
2023 
2024  ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2025  ! send and receive the data of the particles
2026  ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2027 
2028  do ipe=0,npe-1
2029  if (ipe .eq. mype) cycle;
2030  tag_send = mype * npe + ipe
2031  tag_receive = ipe * npe + mype
2032 
2033  ! should i send some particles to ipe?
2034  if (send_n_particles_to_ipe(ipe) .gt. 0) then
2035 
2036  ! create the send buffer
2037  allocate(send_particles(1:send_n_particles_to_ipe(ipe)))
2038  allocate(send_payload(1:npayload,1:send_n_particles_to_ipe(ipe)))
2039  do ipart = 1, send_n_particles_to_ipe(ipe)
2040  send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
2041  send_payload(1:npayload,ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
2042  end do ! ipart
2043 
2044  call mpi_send(send_particles,send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,ierrmpi)
2045  call mpi_send(send_payload,npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,ierrmpi)
2046  do ipart = 1, send_n_particles_to_ipe(ipe)
2047  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2048  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2049  call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2050  end do ! ipart
2051  deallocate(send_particles)
2052  deallocate(send_payload)
2053 
2054  end if ! send .gt. 0
2055 
2056  ! should i receive some particles from ipe?
2057  if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2058  allocate(receive_particles(1:receive_n_particles_from_ipe(ipe)))
2059  allocate(receive_payload(1:npayload,1:receive_n_particles_from_ipe(ipe)))
2060 
2061  call mpi_recv(receive_particles,receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,status,ierrmpi)
2062  call mpi_recv(receive_payload,npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,status,ierrmpi)
2063  do ipart = 1, receive_n_particles_from_ipe(ipe)
2064 
2065  index = receive_particles(ipart)%index
2066  !if (.not. allocated(particle(index)%self)) &
2067  allocate(particle(index)%self)
2068  particle(index)%self = receive_particles(ipart)
2069  !if (.not. allocated(particle(index)%payload)) &
2070  allocate(particle(index)%payload(npayload))
2071  particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart)
2072  call push_particle_into_particles_on_mype(index)
2073 
2074  ! since we don't send the igrid, need to re-locate it
2075  call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2076  particle(index)%igrid = igrid_particle
2077  particle(index)%ipe = ipe_particle
2078 
2079  end do ! ipart
2080  deallocate(receive_particles)
2081  deallocate(receive_payload)
2082 
2083  end if ! receive .gt. 0
2084  end do ! ipe
2085  deallocate(particle_index_to_be_sent_to_ipe)
2086 
2087  end subroutine comm_particles_global
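The message tags used above are built as mype*npe + ipe, which is distinct for every ordered (sender, receiver) pair, so traffic between two given ranks cannot be confused with messages involving other ranks. A tiny standalone illustration (npe = 4 assumed):

! Prints a distinct tag for every ordered pair of ranks when npe = 4.
program tag_demo
  implicit none
  integer, parameter :: npe = 4
  integer :: mype, ipe
  do mype = 0, npe-1
    do ipe = 0, npe-1
      if (ipe /= mype) print '(a,i2,a,i2,a,i4)', 'from ', mype, ' to ', ipe, ' tag ', mype*npe + ipe
    end do
  end do
end program tag_demo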
2088 
2089 ! subroutine comm_particles_old()
2090 ! use mod_global_parameters
2091 !
2092 ! integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2093 ! integer :: index
2094 ! integer :: tag_send, tag_receive, tag_send_p, tag_receive_p, send_buff, rcv_buff
2095 ! integer :: status(MPI_STATUS_SIZE)
2096 ! integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2097 ! integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2098 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2099 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2100 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2101 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2102 ! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
2103 ! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_received_from_ipe
2104 ! integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
2105 ! integer :: destroy_n_particles_mype
2106 ! logical :: BC_applied
2107 ! integer, allocatable, dimension(:) :: sndrqst, rcvrqst
2108 ! integer :: isnd, ircv
2109 ! integer,dimension(npe_neighbors,nparticles_per_cpu_hi) :: payload_index
2110 ! send_n_particles_to_ipe(:) = 0
2111 ! receive_n_particles_from_ipe(:) = 0
2112 ! destroy_n_particles_mype = 0
2113 !
2114 ! ! check if and where to send each particle, destroy if necessary
2115 ! do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2116 !
2117 ! ! first check if the particle should be destroyed because t>tmax in a fixed snapshot
2118 ! if ( .not.time_advance .and. particle(ipart)%self%time .gt. tmax_particles ) then
2119 ! destroy_n_particles_mype = destroy_n_particles_mype + 1
2120 ! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2121 ! cycle
2122 ! end if
2123 !
2124 ! ! Then check whether to destroy due to a user-defined condition
2125 ! if (associated(usr_destroy_particle)) then
2126 ! if (usr_destroy_particle(particle(ipart))) then
2127 ! destroy_n_particles_mype = destroy_n_particles_mype + 1
2128 ! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2129 ! cycle
2130 ! end if
2131 ! end if
2132 !
2133 ! ! is my particle still in the same igrid?
2134 ! if (.not.particle_in_igrid(ipart,particle(ipart)%igrid)) then
2135 ! call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2136 !
2137 ! ! destroy particle if out of domain (signalled by return value of -1)
2138 ! if (igrid_particle == -1 )then
2139 ! call apply_periodB(particle(ipart)%self,igrid_particle,ipe_particle,BC_applied)
2140 ! if (.not. BC_applied .or. igrid_particle == -1) then
2141 ! destroy_n_particles_mype = destroy_n_particles_mype + 1
2142 ! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2143 ! cycle
2144 ! end if
2145 ! end if
2146 !
2147 ! ! particle still present
2148 ! particle(ipart)%igrid = igrid_particle
2149 ! particle(ipart)%ipe = ipe_particle
2150 !
2151 ! ! if we have more than one core, is it on another cpu?
2152 ! if (npe .gt. 1 .and. particle(ipart)%ipe .ne. mype) then
2153 ! send_n_particles_to_ipe(ipe_particle) = &
2154 ! send_n_particles_to_ipe(ipe_particle) + 1
2155 ! particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2156 ! end if ! ipe_particle
2157 !
2158 ! end if ! particle_in_grid
2159 !
2160 ! end do ! ipart
2161 !
2162 ! call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
2163 !
2164 ! ! get out when only one core:
2165 ! if (npe == 1) return
2166 !
2167 ! ! communicate amount of particles to be sent/received
2168 ! allocate( sndrqst(1:npe_neighbors), rcvrqst(1:npe_neighbors) )
2169 ! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2170 ! isnd = 0; ircv = 0;
2171 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2172 ! tag_send = mype * npe + ipe
2173 ! tag_receive = ipe * npe + mype
2174 ! isnd = isnd + 1; ircv = ircv + 1;
2175 ! call MPI_ISEND(send_n_particles_to_ipe(ipe),1,MPI_INTEGER, &
2176 ! ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2177 ! call MPI_IRECV(receive_n_particles_from_ipe(ipe),1,MPI_INTEGER, &
2178 ! ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2179 ! end do
2180 ! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2181 ! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2182 ! deallocate( sndrqst, rcvrqst )
2183 !
2184 ! ! Communicate index of the particles to be sent/received
2185 ! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2186 ! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2187 ! isnd = 0; ircv = 0;
2188 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2189 ! if (send_n_particles_to_ipe(ipe) > 0) then
2190 ! tag_send = mype * npe + ipe
2191 ! isnd = isnd + 1
2192 ! call MPI_ISEND(particle_index_to_be_sent_to_ipe(:,ipe),send_n_particles_to_ipe(ipe),MPI_INTEGER, &
2193 ! ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2194 ! end if
2195 ! if (receive_n_particles_from_ipe(ipe) > 0) then
2196 ! tag_receive = ipe * npe + mype
2197 ! ircv = ircv + 1
2198 ! call MPI_IRECV(particle_index_to_be_received_from_ipe(:,ipe),receive_n_particles_from_ipe(ipe),MPI_INTEGER, &
2199 ! ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2200 ! end if
2201 ! end do
2202 ! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2203 ! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2204 ! deallocate( sndrqst, rcvrqst )
2205 !
2206 ! ! Send and receive the data of the particles
2207 ! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2208 ! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2209 ! isnd = 0; ircv = 0;
2210 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2211 !
2212 ! ! should i send some particles to ipe?
2213 ! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2214 !
2215 ! do ipart = 1, send_n_particles_to_ipe(ipe)
2216 ! tag_send = mype * npe + ipe + ipart
2217 ! isnd = isnd+1
2218 ! call MPI_ISEND(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self, &
2219 ! 1,type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2220 ! end do ! ipart
2221 ! end if ! send .gt. 0
2222 !
2223 ! ! should i receive some particles from ipe?
2224 ! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2225 !
2226 ! do ipart = 1, receive_n_particles_from_ipe(ipe)
2227 ! tag_receive = ipe * npe + mype + ipart
2228 ! ircv = ircv+1
2229 ! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2230 ! allocate(particle(index)%self)
2231 ! call MPI_IRECV(particle(index)%self,1,type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2232 ! end do ! ipart
2233 !
2234 ! end if ! receive .gt. 0
2235 ! end do ! ipe
2236 ! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2237 ! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2238 ! deallocate( sndrqst, rcvrqst )
2239 !
2240 ! ! Send and receive the payloads
2241 ! ! NON-BLOCKING: one communication for each particle. Needed for whatever reason
2242 ! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2243 ! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2244 ! isnd = 0; ircv = 0;
2245 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2246 !
2247 ! ! should i send some particles to ipe?
2248 ! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2249 !
2250 ! do ipart = 1, send_n_particles_to_ipe(ipe)
2251 ! tag_send = mype * npe + ipe + ipart
2252 ! isnd = isnd+1
2253 ! call MPI_ISEND(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload), &
2254 ! npayload,MPI_DOUBLE_PRECISION,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2255 ! end do ! ipart
2256 ! end if ! send .gt. 0
2257 !
2258 ! ! should i receive some particles from ipe?
2259 ! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2260 ! do ipart = 1, receive_n_particles_from_ipe(ipe)
2261 ! tag_receive = ipe * npe + mype + ipart
2262 ! ircv = ircv+1
2263 ! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2264 ! allocate(particle(index)%payload(npayload))
2265 ! call MPI_IRECV(particle(index)%payload(1:npayload),npayload, &
2266 ! MPI_DOUBLE_PRECISION,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2267 ! end do ! ipart
2268 ! end if ! receive .gt. 0
2269 ! end do ! ipe
2270 ! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2271 ! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2272 ! deallocate( sndrqst, rcvrqst )
2273 !
2274 ! ! Deallocate sent particles
2275 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2276 ! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2277 ! do ipart = 1, send_n_particles_to_ipe(ipe)
2278 ! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2279 ! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2280 ! call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2281 ! end do ! ipart
2282 ! end if ! send .gt. 0
2283 ! end do
2284 !
2285 ! ! Relocate received particles
2286 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2287 ! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2288 ! do ipart = 1, receive_n_particles_from_ipe(ipe)
2289 ! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2290 ! call push_particle_into_particles_on_mype(index)
2291 ! ! since we don't send the igrid, need to re-locate it
2292 ! call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2293 ! particle(index)%igrid = igrid_particle
2294 ! particle(index)%ipe = ipe_particle
2295 ! end do ! ipart
2296 ! end if ! receive .gt. 0
2297 ! end do ! ipe
2298 !
2299 ! end subroutine comm_particles_old
2300 !
2301 ! subroutine comm_particles_global_old()
2302 ! use mod_global_parameters
2303 !
2304 ! integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2305 ! integer :: index
2306 ! integer :: tag_send, tag_receive, send_buff, rcv_buff
2307 ! integer :: status(MPI_STATUS_SIZE)
2308 ! integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2309 ! integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2310 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2311 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2312 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2313 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2314 ! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
2315 ! integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
2316 ! integer :: destroy_n_particles_mype
2317 ! logical :: BC_applied
2318 !
2319 ! send_n_particles_to_ipe(:) = 0
2320 ! receive_n_particles_from_ipe(:) = 0
2321 ! destroy_n_particles_mype = 0
2322 !
2323 ! ! check if and where to send each particle, relocate all of them (in case grid has changed)
2324 ! do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2325 !
2326 ! call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2327 !
2328 ! particle(ipart)%igrid = igrid_particle
2329 ! particle(ipart)%ipe = ipe_particle
2330 !
2331 ! ! if we have more than one core, is it on another cpu?
2332 ! if (particle(ipart)%ipe .ne. mype) then
2333 ! send_n_particles_to_ipe(ipe_particle) = &
2334 ! send_n_particles_to_ipe(ipe_particle) + 1
2335 ! particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2336 ! end if ! ipe_particle
2337 !
2338 ! end do ! ipart
2339 !
2340 ! ! get out when only one core:
2341 ! if (npe == 1) return
2342 !
2343 ! ! communicate amount of particles to be sent/received
2344 ! do ipe=0,npe-1;if (ipe .eq. mype) cycle;
2345 ! tag_send = mype * npe + ipe
2346 ! tag_receive = ipe * npe + mype
2347 ! call MPI_SEND(send_n_particles_to_ipe(ipe),1,MPI_INTEGER,ipe,tag_send,icomm,ierrmpi)
2348 ! call MPI_RECV(receive_n_particles_from_ipe(ipe),1,MPI_INTEGER,ipe,tag_receive,icomm,status,ierrmpi)
2349 ! end do
2350 !
2351 ! ! send and receive the data of the particles
2352 ! do ipe=0,npe-1;if (ipe .eq. mype) cycle;
2353 ! tag_send = mype * npe + ipe
2354 ! tag_receive = ipe * npe + mype
2355 !
2356 ! ! should i send some particles to ipe?
2357 ! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2358 !
2359 ! ! create the send buffer
2360 ! do ipart = 1, send_n_particles_to_ipe(ipe)
2361 ! send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
2362 ! send_payload(1:npayload,ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
2363 ! end do ! ipart
2364 ! call MPI_SEND(send_particles,send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,ierrmpi)
2365 ! call MPI_SEND(send_payload,npayload*send_n_particles_to_ipe(ipe),MPI_DOUBLE_PRECISION,ipe,tag_send,icomm,ierrmpi)
2366 ! do ipart = 1, send_n_particles_to_ipe(ipe)
2367 ! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2368 ! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2369 ! call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2370 ! end do ! ipart
2371 !
2372 ! end if ! send .gt. 0
2373 !
2374 ! ! should i receive some particles from ipe?
2375 ! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2376 !
2377 ! call MPI_RECV(receive_particles,receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,status,ierrmpi)
2378 ! call MPI_RECV(receive_payload,npayload*receive_n_particles_from_ipe(ipe),MPI_DOUBLE_PRECISION,ipe,tag_receive,icomm,status,ierrmpi)
2379 !
2380 ! do ipart = 1, receive_n_particles_from_ipe(ipe)
2381 !
2382 ! index = receive_particles(ipart)%index
2383 ! allocate(particle(index)%self)
2384 ! particle(index)%self = receive_particles(ipart)
2385 ! allocate(particle(index)%payload(npayload))
2386 ! particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart)
2387 ! call push_particle_into_particles_on_mype(index)
2388 !
2389 ! ! since we don't send the igrid, need to re-locate it
2390 ! call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2391 ! particle(index)%igrid = igrid_particle
2392 ! particle(index)%ipe = ipe_particle
2393 !
2394 ! end do ! ipart
2395 !
2396 ! end if ! receive .gt. 0
2397 ! end do ! ipe
2398 !
2399 ! end subroutine comm_particles_global_old
2400 
2401  subroutine apply_periodb(particle,igrid_particle,ipe_particle,BC_applied)
2403 
2404  type(particle_t), intent(inout) :: particle
2405  integer, intent(inout) :: igrid_particle, ipe_particle
2406  logical,intent(out) :: BC_applied
2407  integer :: idim
2408 
2409  bc_applied = .false.
2410  ! get out if we don't have any periodic BC:
2411  if (.not. any(periodb(1:ndim))) return
2412 
2413  ! go through dimensions and try to re-inject the particle at the other side
2414  do idim=1,ndim
2415 
2416  if (.not. periodb(idim)) cycle
2417 
2418  select case(idim)
2419  {case (^d)
2420  if (particle%x(^d) .lt. xprobmin^d) then
2421  particle%x(^d) = particle%x(^d) + (xprobmax^d - xprobmin^d)
2422  bc_applied = .true.
2423  end if
2424  if (particle%x(^d) .ge. xprobmax^d) then
2425  particle%x(^d) = particle%x(^d) - (xprobmax^d - xprobmin^d)
2426  bc_applied = .true.
2427  end if
2428  \}
2429  end select
2430 
2431  end do
2432 
2433  call find_particle_ipe(particle%x,igrid_particle,ipe_particle)
2434 
2435  end subroutine apply_periodb
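For a single periodic dimension, the branch-based wrap in apply_periodb shifts the coordinate by one domain length; for displacements of at most one period this is equivalent to a modulo reduction, as in this standalone sketch (assumed bounds):

! Wrap x into [xmin, xmax) with a modulo; apply_periodb achieves the same
! for a particle that has left through a periodic boundary.
program wrap_demo
  implicit none
  double precision, parameter :: xmin = 0.d0, xmax = 2.d0
  double precision :: x
  x = -0.3d0                                 ! left through the lower edge
  x = xmin + modulo(x - xmin, xmax - xmin)   ! re-enters at 1.7
  print *, x
end program wrap_demo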
2436 
2437  !> clean up destroyed particles on all cores
2438  subroutine destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed)
2440 
2441  integer, intent(in) :: destroy_n_particles_mype
2442  integer, dimension(1:destroy_n_particles_mype), intent(in) :: particle_index_to_be_destroyed
2443  type(particle_t), dimension(destroy_n_particles_mype):: destroy_particles_mype
2444  double precision, dimension(npayload,destroy_n_particles_mype):: destroy_payload_mype
2445  integer :: iipart,ipart,destroy_n_particles
2446 
2447  destroy_n_particles = 0
2448 
2449  ! append the particles to the list of destroyed particles
2450  do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
2451  destroy_particles_mype(iipart) = particle(ipart)%self
2452  destroy_payload_mype(1:npayload,iipart) = particle(ipart)%payload(1:npayload)
2453  end do
2454 
2455  call output_ensemble(destroy_n_particles_mype,destroy_particles_mype,destroy_payload_mype,'destroy')
2456 
2457  if (npe > 1) then
2458  call mpi_allreduce(destroy_n_particles_mype,destroy_n_particles,1,mpi_integer,mpi_sum,icomm,ierrmpi)
2459  else
2460  destroy_n_particles = destroy_n_particles_mype
2461  end if
2462 
2463  nparticles = nparticles - destroy_n_particles
2464 
2465  do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
2466  particle(ipart)%igrid = -1
2467  particle(ipart)%ipe = -1
2468  if(time_advance) then
2469  write(*,*) particle(ipart)%self%time, ': particle',ipart,' has left at it ',it_particles,' on pe', mype
2470  write(*,*) particle(ipart)%self%time, ': particles remaining:',nparticles, '(total)', nparticles_on_mype-1, 'on pe', mype
2471  endif
2472  deallocate(particle(ipart)%self)
2473  deallocate(particle(ipart)%payload)
2474  call pull_particle_from_particles_on_mype(ipart)
2475  end do
2476 
2477  end subroutine destroy_particles
2478 
2479  subroutine push_particle_into_particles_on_mype(ipart)
2480  implicit none
2481 
2482  integer, intent(in) :: ipart
2483 
2484  nparticles_on_mype = nparticles_on_mype + 1
2485  particles_on_mype(nparticles_on_mype) = ipart
2486 
2487  end subroutine push_particle_into_particles_on_mype
2488 
2489  subroutine pull_particle_from_particles_on_mype(ipart)
2490  implicit none
2491 
2492  integer, intent(in) :: ipart
2493  integer :: i
2494 
2495  do i=1,nparticles_on_mype
2496  if (particles_on_mype(i) == ipart) then
2497  particles_on_mype(i:nparticles_on_mype-1) = particles_on_mype(i+1:nparticles_on_mype)
2498  exit
2499  end if
2500  end do
2501 
2502  nparticles_on_mype = nparticles_on_mype - 1
2503 
2504  end subroutine pull_particle_from_particles_on_mype
2505 
2506 end module mod_particle_base
subroutine, public mpistop(message)
Exit MPI-AMRVAC with an error message.
Definition: mod_comm_lib.t:208
Module for physical and numeric constants.
Definition: mod_constants.t:2
Module with basic grid data structures.
Definition: mod_forest.t:2
type(tree_node_ptr), dimension(:^d &), allocatable, save tree_root
Pointers to the coarse grid.
Definition: mod_forest.t:29
Module with geometry-related routines (e.g., divergence, curl)
Definition: mod_geometry.t:2
integer coordinate
Definition: mod_geometry.t:7
integer, parameter spherical
Definition: mod_geometry.t:11
integer, parameter cartesian
Definition: mod_geometry.t:8
integer, parameter cylindrical
Definition: mod_geometry.t:10
integer, parameter cartesian_expansion
Definition: mod_geometry.t:12
integer, parameter cartesian_stretched
Definition: mod_geometry.t:9
subroutine curlvector(qvec, ixIL, ixOL, curlvec, idirmin, idirmin0, ndir0, fourthorder)
Calculate curl of a vector qvec within ixL Options to employ standard second order CD evaluations use...
Definition: mod_geometry.t:663
This module contains definitions of global parameters and variables and some generic functions/subrou...
type(state), pointer block
Block pointer for using one block and its previous state.
logical nocartesian
IO switches for conversion.
integer ixghi
Upper index of grid block arrays.
integer, parameter stretch_uni
Unidirectional stretching from a side.
integer domain_nx
number of cells for each dimension in level-one mesh
integer, parameter unitpar
file handle for IO
double precision global_time
The global simulation time.
double precision time_max
End time for the simulation.
double precision xstretch
physical extent of stretched border in symmetric stretching
integer snapshotini
Resume from the snapshot with this index.
integer, parameter ndim
Number of spatial dimensions for grid variables.
integer, dimension(:,:), allocatable nstretchedblocks
(even) number of (symmetrically) stretched blocks per level and dimension
character(len=std_len), dimension(:), allocatable par_files
Which par files are used as input.
integer icomm
The MPI communicator.
integer mype
The rank of the current MPI task.
integer block_nx
number of cells for each dimension in grid block excluding ghostcells
integer, dimension(:), allocatable, parameter d
double precision length_convert_factor
integer ndir
Number of spatial dimensions (components) for vector variables.
integer ixm
the mesh range of a physical block without ghost cells
integer ierrmpi
A global MPI error return code.
double precision, dimension(:,:), allocatable qstretch
Stretching factors and first cell size for each AMR level and dimension.
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
integer npe
The number of MPI tasks.
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
logical, dimension(ndim) periodb
True for dimensions with periodic boundaries.
double precision c_norm
Normalised speed of light.
logical b0field
split magnetic field as background B0 field
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
integer nghostcells
Number of ghost cells surrounding a grid.
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
integer, dimension(ndim) stretch_type
What kind of stretching is used per dimension.
character(len=std_len) base_filename
Base file name for simulation output, which will be followed by a number.
integer, parameter stretch_symm
Symmetric stretching around the center.
integer r_
Indices for cylindrical coordinates FOR TESTS, negative value when not used:
double precision, dimension(:,:), allocatable dxfirst
integer, dimension(:,:), allocatable node
double precision, dimension(ndim) dxlevel
integer, parameter ixglo
Lower index of grid block arrays (always 1)
Module with shared functionality for all the particle movers.
subroutine get_maxwellian_velocity(v, velocity)
subroutine partcoord_from_cartesian(xp, xpcart)
Convert to noncartesian coordinates.
double precision, dimension(3) integrator_velocity_factor
Normalization factor for velocity in the integrator.
integer nparticles_per_cpu_hi
Maximum number of particles in one processor.
integer nparticleshi
Maximum number of particles.
pure subroutine limit_dt_endtime(t_left, dt_p)
integer num_particles
Number of particles.
subroutine fix_phi_crossing(xp, igrid)
Fix particle position when crossing the 0,2pi boundary in noncartesian coordinates.
double precision const_dt_particles
If positive, a constant time step for the particles.
subroutine output_individual()
integer n_output_destroyed
Output count for ensembles of destroyed particles.
subroutine ipe_cf(iD, igrid, ipe_is_neighbor)
integer it_particles
Iteration number of particles.
subroutine fields_from_mhd(igrid, w_mhd, w_part)
Determine fields from MHD variables.
subroutine particles_params_read(files)
Read this module's parameters from a file.
double precision min_particle_time
Minimum time of all particles.
subroutine output_ensemble(send_n_particles_for_output, send_particles, send_payload, typefile)
subroutine get_uniform_pos(x)
logical write_individual
Whether to write individual particle output (followed particles)
subroutine write_particle_output()
subroutine init_particles_com()
Initialize communicators for particles.
logical function particle_in_igrid(ipart, igrid)
Quick check if particle is still in igrid.
integer, dimension(:), allocatable particles_on_mype
Array of identity numbers of particles in current processor.
double precision dtsave_particles
Time interval to save particles.
subroutine output_particle(myparticle, payload, ipe, file_unit)
subroutine update_gridvars()
update grid variables for particles
integer ngridvars
Number of variables for grid field.
subroutine get_efield(igrid, x, tloc, e)
double precision particles_etah
subroutine destroy_particles(destroy_n_particles_mype, particle_index_to_be_destroyed)
clean up destroyed particles on all cores
integer nusrpayload
Number of user-defined payload variables for a particle.
subroutine select_active_particles(end_time)
subroutine write_particles_snapshot()
double precision particles_eta
Resistivity.
subroutine particles_finish()
Finalize the particles module.
logical write_snapshot
Whether to write particle snapshots.
pure subroutine get_lfac(u, lfac)
Get Lorentz factor from relativistic momentum.
subroutine find_particle_ipe(x, igrid_particle, ipe_particle)
subroutine pull_particle_from_particles_on_mype(ipart)
subroutine init_particles_output()
double precision dtheta
pure subroutine get_lfac_from_velocity(v, lfac)
Get Lorentz factor from velocity.
integer integrator
Integrator to be used for particles.
integer, dimension(:), allocatable ep
Variable index for electric field.
subroutine handle_particles()
Let particles evolve in time. The routine also handles grid variables and output.
subroutine partvec_from_cartesian(xp, up, upcart)
Convert vector from Cartesian coordinates.
subroutine fill_gridvars_default
This routine fills the particle grid variables with the default for mhd, i.e. only E and B.
character(len=400) csv_header
Header string used in CSV files.
subroutine ipe_fc(iD, igrid, ipe_is_neighbor)
subroutine finish_gridvars()
Deallocate grid variables for particles.
subroutine particle_get_current(w, ixIL, ixOL, idirmin, current)
Calculate idirmin and the idirmin:3 components of the common current array make sure that dxlevel(^D)...
subroutine get_bfield(igrid, x, tloc, b)
integer npayload
Number of total payload variables for a particle.
integer, parameter unitparticles
integer, dimension(:), allocatable particles_active_on_mype
Array of identity numbers of active particles in current processor.
logical relativistic
Use a relativistic particle mover?
character(len=name_len) integrator_type_particles
String describing the particle integrator type.
subroutine set_neighbor_ipe()
subroutine time_spent_on_particles()
subroutine get_vec(ix, igrid, x, tloc, vec)
logical write_ensemble
Whether to write ensemble output.
integer downsample_particles
Particle downsampling factor in CSV output.
subroutine locate_particle(index, igrid_particle, ipe_particle)
subroutine push_particle_into_particles_on_mype(ipart)
subroutine read_from_snapshot()
integer nparticles_active_on_mype
Number of active particles in current processor.
procedure(sub_define_additional_gridvars), pointer particles_define_additional_gridvars
subroutine partcoord_to_cartesian(xp, xpcart)
Convert position to Cartesian coordinates.
integer nparticles
Identity number and total number of particles.
integer nparticles_on_mype
Number of particles in current processor.
integer, dimension(:), allocatable bp
Variable index for magnetic field.
subroutine comm_particles()
double precision tmax_particles
Time limit of particles.
type(particle_ptr), dimension(:), allocatable particle
procedure(fun_destroy), pointer usr_destroy_particle
integer, dimension(:), allocatable vp
Variable index for fluid velocity.
procedure(sub_integrate), pointer particles_integrate
subroutine cross(a, b, c)
subroutine apply_periodb(particle, igrid_particle, ipe_particle, BC_applied)
procedure(sub_fill_gridvars), pointer particles_fill_gridvars
double precision t_next_output
Time to write next particle output.
subroutine advance_particles(end_time, steps_taken)
Routine handles the particle evolution.
subroutine partvec_to_cartesian(xp, up, upcart)
Convert vector to Cartesian coordinates.
integer ndefpayload
Number of default payload variables for a particle.
logical function point_in_igrid_ghostc(x, igrid, ngh)
Quick check if particle coordinate is inside igrid (ghost cells included, except the last ngh)
subroutine particle_base_init()
Give initial values to parameters.
integer, dimension(:), allocatable ipe_neighbor
integer igrid_working
set the current igrid for the particle integrator:
character(len=60) csv_format
Format string used in CSV files.
integer, dimension(:), allocatable jp
Variable index for current.
subroutine append_to_snapshot(myparticle, mypayload)
integer ipart_working
set the current ipart for the particle integrator:
double precision particles_cfl
Particle CFL safety factor.
subroutine ipe_srl(iD, igrid, ipe_is_neighbor)
procedure(sub_fill_additional_gridvars), pointer particles_fill_additional_gridvars
logical function particle_in_domain(x)
Check if particle is inside computational domain.
character(len=name_len) physics_type_particles
String describing the particle physics type.
subroutine init_gridvars()
Initialize grid variables for particles.
character(len=128) function make_particle_filename(tout, index, type)
subroutine comm_particles_global()
subroutine read_particles_snapshot(file_exists)
integer rhop
Variable index for density.
subroutine interpolate_var(igrid, ixIL, ixOL, gf, x, xloc, gfloc)
This module defines the procedures of a physics module. It contains function pointers for the various...
Definition: mod_physics.t:4
Module for pseudo random number generation. The internal pseudo random generator is the xoroshiro128p...
Definition: mod_random.t:3
Writes D-1 slice, can do so in various formats, depending on slice_type.
Definition: mod_slice.t:2
subroutine get_igslice(dir, x, igslice)
Definition: mod_slice.t:1109
double precision tpartc_int_0
Definition: mod_timing.t:11
double precision timeio_tot
Definition: mod_timing.t:8
double precision tpartc_com
Definition: mod_timing.t:10
double precision tpartc
Definition: mod_timing.t:10
double precision tpartc0
Definition: mod_timing.t:11
double precision tpartc_grid_0
Definition: mod_timing.t:11
double precision tpartc_io
Definition: mod_timing.t:10
double precision timeloop
Definition: mod_timing.t:9
double precision tpartc_com0
Definition: mod_timing.t:11
double precision tpartc_grid
Definition: mod_timing.t:10
double precision tpartc_io_0
Definition: mod_timing.t:11
double precision tpartc_int
Definition: mod_timing.t:10
Module with all the methods that users can customize in AMRVAC.
procedure(particle_analytic), pointer usr_particle_analytic
procedure(particle_fields), pointer usr_particle_fields
Pointer to a tree_node.
Definition: mod_forest.t:6