91 integer,
allocatable ::
bp(:)
93 integer,
allocatable ::
ep(:)
95 integer,
allocatable ::
vp(:)
97 integer,
allocatable ::
jp(:)
102 double precision,
allocatable :: payload(:)
103 integer :: igrid, ipe
112 double precision :: q
114 double precision :: m
116 double precision :: time
118 double precision :: dt
120 double precision :: x(3)
122 double precision :: u(3)
144 integer,
intent(inout) :: ngridvars
151 double precision,
intent(in) :: end_time
185 character(len=*),
intent(in) :: files(:)
195 do n = 1,
size(files)
196 open(
unitpar, file=trim(files(n)), status=
"old")
197 read(
unitpar, particles_list,
end=111)
207 integer,
parameter :: i8 = selected_int_kind(18)
208 integer(i8) :: seed(2)
209 character(len=20) :: strdata
229 dtheta = 2.0d0*dpi / 60.0d0
248 seed = [310952_i8, 8948923749821_i8]
249 call rng%set_seed(seed)
260 csv_header =
' time, dt, x1, x2, x3, u1, u2, u3,'
264 write(strdata,
"(a,a,a)")
' ', trim(prim_wnames(n)),
','
269 write(strdata,
"(a,i2.2,a)")
' pl', n,
','
276 write(strdata,
"(a,i2.2,a)")
' usrpl', n,
','
284 'i8.7,", ",i11.10,", ",i8.7)'
292 integer :: oldtypes(0:7), offsets(0:7), blocklengths(0:7)
294 oldtypes(0) = mpi_logical
295 oldtypes(1) = mpi_integer
296 oldtypes(2:7) = mpi_double_precision
303 offsets(1) = size_logical * blocklengths(0)
304 offsets(2) = offsets(1) + size_int * blocklengths(1)
305 offsets(3) = offsets(2) + size_double * blocklengths(2)
306 offsets(4) = offsets(3) + size_double * blocklengths(3)
307 offsets(5) = offsets(4) + size_double * blocklengths(4)
308 offsets(6) = offsets(5) + size_double * blocklengths(5)
309 offsets(7) = offsets(6) + size_double * blocklengths(6)
317 double precision,
intent(out) :: v(3)
318 double precision,
intent(in) :: velocity
319 double precision :: vtmp(3), vnorm
321 vtmp(1) = velocity *
rng%normal()
322 vtmp(2) = velocity *
rng%normal()
323 vtmp(3) = velocity *
rng%normal()
325 v =
rng%sphere(vnorm)
330 double precision,
intent(out) :: x(3)
332 call rng%unif_01_vec(x)
334 {^
d&x(^
d) = xprobmin^
d + x(^
d) * (xprobmax^
d - xprobmin^
d)\}
342 integer :: igrid, iigrid
344 do iigrid=1,igridstail; igrid=igrids(iigrid);
345 allocate(gridvars(igrid)%w(ixg^t,1:
ngridvars))
346 gridvars(igrid)%w = 0.0d0
349 allocate(gridvars(igrid)%wold(ixg^t,1:
ngridvars))
350 gridvars(igrid)%wold = 0.0d0
365 integer :: iigrid, igrid
367 do iigrid=1,igridstail; igrid=igrids(iigrid);
368 deallocate(gridvars(igrid)%w)
379 integer :: igrid, iigrid
380 double precision :: E(ixG^T, ndir)
381 double precision :: B(ixG^T, ndir)
383 do iigrid=1,igridstail; igrid=igrids(iigrid);
386 gridvars(igrid)%w(ixg^t,
ep(:)) = e
387 gridvars(igrid)%w(ixg^t,
bp(:)) = b
395 gridvars(igrid)%wold(ixg^t,
ep(:)) = e
396 gridvars(igrid)%wold(ixg^t,
bp(:)) = b
407 integer,
intent(in) :: igrid
408 double precision,
intent(in) :: w_mhd(ixG^T,nw)
409 double precision,
intent(inout) :: w_part(ixG^T,ngridvars)
410 integer :: idirmin, idir
411 double precision :: current(ixG^T,7-2*ndir:3)
412 double precision :: w(ixG^T,1:nw)
416 w(ixg^t,1:nw) = w_mhd(ixg^t,1:nw)
417 w_part(ixg^t,
bp(1):
bp(3)) = 0.0d0
418 w_part(ixg^t,
ep(1):
ep(3)) = 0.0d0
420 call phys_to_primitive(ixg^
ll,ixg^
ll,w,ps(igrid)%x)
425 w_part(ixg^t,
bp(idir))=w(ixg^t,iw_mag(idir))+ps(igrid)%B0(ixg^t,idir,0)
428 w_part(ixg^t,
bp(:)) = w(ixg^t,iw_mag(:))
435 w_part(ixg^t,
jp(:)) = current(ixg^t,:)
438 w_part(ixg^t,
ep(1)) = w_part(ixg^t,
bp(2)) * &
439 w(ixg^t,iw_mom(3)) - w_part(ixg^t,
bp(3)) * &
441 w_part(ixg^t,
ep(2)) = w_part(ixg^t,
bp(3)) * &
442 w(ixg^t,iw_mom(1)) - w_part(ixg^t,
bp(1)) * &
444 w_part(ixg^t,
ep(3)) = w_part(ixg^t,
bp(1)) * &
445 w(ixg^t,iw_mom(2)) - w_part(ixg^t,
bp(2)) * &
451 (current(ixg^t,2) * w_part(ixg^t,
bp(3)) - current(ixg^t,3) * w_part(ixg^t,
bp(2)))
453 (-current(ixg^t,1) * w_part(ixg^t,
bp(3)) + current(ixg^t,3) * w_part(ixg^t,
bp(1)))
455 (current(ixg^t,1) * w_part(ixg^t,
bp(2)) - current(ixg^t,2) * w_part(ixg^t,
bp(1)))
467 integer :: ixO^L, idirmin, ixI^L
468 double precision :: w(ixI^S,1:nw)
471 double precision :: current(ixI^S,7-2*ndir:3),bvec(ixI^S,1:ndir)
477 bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))+
block%B0(ixi^s,idir,0)
481 bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))
485 call curlvector(bvec,ixi^l,ixo^l,current,idirmin,idirmin0,ndir)
495 integer :: steps_taken, nparticles_left
536 if (nparticles_left == 0 .and.
convert)
call mpistop(
'No particles left')
550 double precision,
intent(in) :: end_time
551 integer,
intent(out) :: steps_taken
563 if (n_active == 0)
exit
567 steps_taken = steps_taken + 1
580 double precision,
intent(in) :: t_left
581 double precision,
intent(inout) :: dt_p
582 double precision :: n_steps
583 double precision,
parameter :: eps = 1d-10
585 n_steps = t_left / dt_p
587 if (n_steps < 1+eps)
then
590 else if (n_steps < 2-eps)
then
592 dt_p = 0.5d0 * t_left
609 integer,
intent(in) :: ix(ndir)
610 integer,
intent(in) :: igrid
611 double precision,
dimension(3),
intent(in) :: x
612 double precision,
intent(in) :: tloc
613 double precision,
dimension(ndir),
intent(out) :: vec
614 double precision,
dimension(ndir) :: vec1, vec2
615 double precision :: td
623 ps(igrid)%x(ixg^t,1:
ndim),x,vec(idir))
628 ps(igrid)%x(ixg^t,1:
ndim),x,vec1(idir))
630 ps(igrid)%x(ixg^t,1:
ndim),x,vec2(idir))
633 vec(:) = vec1(:) * (1.0d0 - td) + vec2(:) * td
641 double precision,
dimension(3),
intent(in) :: u
642 double precision,
intent(out) :: lfac
645 lfac = sqrt(1.0d0 + sum(u(:)**2)/
c_norm**2)
654 double precision,
dimension(3),
intent(in) :: v
655 double precision,
intent(out) :: lfac
658 lfac = 1.0d0 / sqrt(1.0d0 - sum(v(:)**2)/
c_norm**2)
668 double precision,
dimension(ndir),
intent(in) :: a,b
669 double precision,
dimension(ndir),
intent(out) :: c
675 c(1) = a(2)*b(3) - a(3)*b(2)
676 c(2) = a(3)*b(1) - a(1)*b(3)
677 c(3) = a(1)*b(2) - a(2)*b(1)
683 call mpistop(
'geometry not implemented in cross(a,b,c)')
686 call mpistop(
"cross in particles needs to be run with three components!")
694 integer,
intent(in) :: igrid,ixI^L, ixO^L
695 double precision,
intent(in) :: gf(ixI^S)
696 double precision,
intent(in) :: x(ixI^S,1:ndim)
697 double precision,
intent(in) :: xloc(1:3)
698 double precision,
intent(out) :: gfloc
699 double precision :: xd^D
701 double precision :: c00, c10
704 double precision :: c0, c1, c00, c10, c01, c11
706 integer :: ic^D, ic1^D, ic2^D, idir
714 if (x({ic^dd},^d) .lt. xloc(^d))
then
723 if(ic1^d.lt.
ixglo^d+1 .or. ic2^d.gt.
ixghi^d-1)
then
724 print *,
'direction: ',^d
725 print *,
'position: ',xloc(1:ndim)
726 print *,
'indices:', ic1^d,ic2^d
727 call mpistop(
'Trying to interpolate from out of grid!')
732 xd1 = (xloc(1)-x(ic11,1)) / (x(ic21,1) - x(ic11,1))
733 gfloc = gf(ic11) * (1.0d0 - xd1) + gf(ic21) * xd1
736 xd1 = (xloc(1)-x(ic11,ic12,1)) / (x(ic21,ic12,1) - x(ic11,ic12,1))
737 xd2 = (xloc(2)-x(ic11,ic12,2)) / (x(ic11,ic22,2) - x(ic11,ic12,2))
738 c00 = gf(ic11,ic12) * (1.0d0 - xd1) + gf(ic21,ic12) * xd1
739 c10 = gf(ic11,ic22) * (1.0d0 - xd1) + gf(ic21,ic22) * xd1
740 gfloc = c00 * (1.0d0 - xd2) + c10 * xd2
743 xd1 = (xloc(1)-x(ic11,ic12,ic13,1)) / (x(ic21,ic12,ic13,1) - x(ic11,ic12,ic13,1))
744 xd2 = (xloc(2)-x(ic11,ic12,ic13,2)) / (x(ic11,ic22,ic13,2) - x(ic11,ic12,ic13,2))
745 xd3 = (xloc(3)-x(ic11,ic12,ic13,3)) / (x(ic11,ic12,ic23,3) - x(ic11,ic12,ic13,3))
747 c00 = gf(ic11,ic12,ic13) * (1.0d0 - xd1) + gf(ic21,ic12,ic13) * xd1
748 c10 = gf(ic11,ic22,ic13) * (1.0d0 - xd1) + gf(ic21,ic22,ic13) * xd1
749 c01 = gf(ic11,ic12,ic23) * (1.0d0 - xd1) + gf(ic21,ic12,ic23) * xd1
750 c11 = gf(ic11,ic22,ic23) * (1.0d0 - xd1) + gf(ic21,ic22,ic23) * xd1
752 c0 = c00 * (1.0d0 - xd2) + c10 * xd2
753 c1 = c01 * (1.0d0 - xd2) + c11 * xd2
755 gfloc = c0 * (1.0d0 - xd3) + c1 * xd3
764 double precision :: tpartc_avg, tpartc_int_avg, tpartc_io_avg, tpartc_com_avg, tpartc_grid_avg
773 tpartc_avg = tpartc_avg/dble(
npe)
774 tpartc_int_avg = tpartc_int_avg/dble(
npe)
775 tpartc_io_avg = tpartc_io_avg/dble(
npe)
776 tpartc_com_avg = tpartc_com_avg/dble(
npe)
777 tpartc_grid_avg = tpartc_grid_avg/dble(
npe)
778 write(*,
'(a,f12.3,a)')
' Particle handling took : ',
tpartc,
' sec'
779 write(*,
'(a,f12.2,a)')
' Percentage: ',100.0d0*
tpartc/
timeloop,
' %'
780 write(*,
'(a,f12.3,a)')
' Particle IO took : ',tpartc_io_avg,
' sec'
781 write(*,
'(a,f12.2,a)')
' Percentage: ',100.0d0*tpartc_io_avg/
timeloop,
' %'
782 write(*,
'(a,f12.3,a)')
' Particle COM took : ',tpartc_com_avg,
' sec'
783 write(*,
'(a,f12.2,a)')
' Percentage: ',100.0d0*tpartc_com_avg/
timeloop,
' %'
784 write(*,
'(a,f12.3,a)')
' Particle integration took : ',tpartc_int_avg,
' sec'
785 write(*,
'(a,f12.2,a)')
' Percentage: ',100.0d0*tpartc_int_avg/
timeloop,
' %'
786 write(*,
'(a,f12.3,a)')
' Particle init grid took : ',tpartc_grid_avg,
' sec'
787 write(*,
'(a,f12.2,a)')
' Percentage: ',100.0d0*tpartc_grid_avg/
timeloop,
' %'
795 logical,
intent(out) :: file_exists
796 character(len=std_len) :: filename
797 integer :: mynpayload, mynparticles, pos
812 INQUIRE(file=filename, exist=file_exists)
813 if (.not. file_exists)
then
814 write(*,*)
'WARNING: File '//trim(filename)//
' with particle data does not exist.'
815 write(*,*)
'Initialising particles from user or default routines'
817 open(unit=
unitparticles,file=filename,form=
'unformatted',action=
'read',status=
'unknown',access=
'stream')
820 call mpistop(
'npayload in restart file does not match npayload in mod_particles')
824 call mpi_bcast(file_exists,1,mpi_logical,0,
icomm,
ierrmpi)
825 if (.not. file_exists)
return
835 mynparticles = mynparticles + 1
838 call mpi_bcast(mynparticles,1,mpi_integer,0,
icomm,
ierrmpi)
848 integer :: index,igrid_particle,ipe_particle
867 particle(index)%igrid = igrid_particle
877 character(len=std_len) :: filename
878 type(
particle_t),
dimension(nparticles_per_cpu_hi) :: send_particles
879 type(
particle_t),
dimension(nparticles_per_cpu_hi) :: receive_particles
880 double precision,
dimension(npayload,nparticles_per_cpu_hi) :: send_payload
881 double precision,
dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
882 integer :: status(MPI_STATUS_SIZE)
883 integer,
dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
884 integer :: ipe, ipart, iipart, send_n_particles_for_output
885 logical,
save :: file_exists=.false.
889 receive_n_particles_for_output_from_ipe(:) = 0
892 if (
mype .eq. 0)
then
894 INQUIRE(file=filename, exist=file_exists)
895 if (.not. file_exists)
then
896 open(unit=
unitparticles,file=filename,form=
'unformatted',status=
'new',access=
'stream')
898 open(unit=
unitparticles,file=filename,form=
'unformatted',status=
'replace',access=
'stream')
910 if (
mype .ne. 0)
then
915 send_particles(iipart) =
particle(ipart)%self
921 call mpi_recv(receive_n_particles_for_output_from_ipe(ipe),1,mpi_integer,ipe,ipe,
icomm,status,
ierrmpi)
925 if (
mype .ne. 0)
then
938 call mpi_recv(receive_payload,
npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,
icomm,status,
ierrmpi)
939 do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
951 double precision :: mypayload(1:npayload)
968 character(len=std_len) :: filename
969 character(len=1024) :: line, strdata
970 integer :: iipart, ipart, icomp
973 if (
particle(ipart)%self%follow)
then
978 write(strdata,
"(a,i2.2,a)")
'payload',icomp,
','
979 line = trim(line)//trim(strdata)
981 write(
unitparticles,
"(a,a,a)")
'time,dt,x1,x2,x3,u1,u2,u3,',trim(line),
'ipe,iteration,index'
990 double precision,
intent(in) :: end_time
992 integer :: ipart, iipart
994 double precision :: t_min_mype
996 t_min_mype = bigdouble
1001 activate = (
particle(ipart)%self%time < end_time)
1002 t_min_mype = min(t_min_mype,
particle(ipart)%self%time)
1020 integer,
intent(in) :: index
1021 integer,
intent(out) :: igrid_particle, ipe_particle
1022 integer :: iipart,ipart,ipe_has_particle,ipe
1023 logical :: has_particle(0:npe-1)
1024 integer,
dimension(0:1) :: buff
1026 has_particle(:) = .false.
1028 if (
particle(ipart)%self%index == index)
then
1029 has_particle(
mype) = .true.
1034 if (has_particle(
mype))
then
1039 if (npe>0)
call mpi_allgather(has_particle(
mype),1,mpi_logical,has_particle,1,mpi_logical,
icomm,
ierrmpi)
1041 ipe_has_particle = -1
1043 if (has_particle(ipe) .eqv. .true.)
then
1044 ipe_has_particle = ipe
1049 if (ipe_has_particle .ne. -1)
then
1050 if (npe>0)
call mpi_bcast(buff,2,mpi_integer,ipe_has_particle,
icomm,
ierrmpi)
1051 igrid_particle=buff(0)
1052 ipe_particle = buff(1)
1065 double precision,
intent(in) :: x(3)
1066 integer,
intent(out) :: igrid_particle, ipe_particle
1067 integer :: ig(ndir,nlevelshi), ig_lvl(nlevelshi)
1068 integer :: idim, ic(ndim)
1086 do while (.not.branch%node%leaf)
1087 {ic(^
d)=ig(^
d,branch%node%level+1) - 2 * branch%node%ig^
d +2\}
1088 branch%node => branch%node%child({ic(^
d)})%node
1091 igrid_particle = branch%node%igrid
1092 ipe_particle = branch%node%ipe
1099 double precision,
dimension(ndim),
intent(in) :: x
1102 all(x < [ xprobmax^
d ])
1108 integer,
intent(in) :: igrid, ipart
1109 double precision :: x(
ndim), grid_rmin(
ndim), grid_rmax(
ndim)
1112 if (.not.
allocated(ps(igrid)%w))
then
1125 integer :: igrid, iigrid,ipe
1126 integer :: my_neighbor_type, i^D
1127 logical :: ipe_is_neighbor(0:npe-1)
1129 ipe_is_neighbor(:) = .false.
1130 do iigrid=1,igridstail; igrid=igrids(iigrid);
1133 if (i^d==0|.and.)
then
1136 my_neighbor_type=neighbor_type(i^d,igrid)
1138 select case (my_neighbor_type)
1139 case (neighbor_boundary)
1141 case (neighbor_coarse)
1142 call ipe_fc(i^d,igrid,ipe_is_neighbor)
1143 case (neighbor_sibling)
1144 call ipe_srl(i^d,igrid,ipe_is_neighbor)
1145 case (neighbor_fine)
1146 call ipe_cf(i^d,igrid,ipe_is_neighbor)
1154 ipe_is_neighbor(mype) = .false.
1159 if (ipe_is_neighbor(ipe))
then
1169 integer,
intent(in) :: i^D, igrid
1170 logical,
intent(inout) :: ipe_is_neighbor(0:npe-1)
1172 ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1178 integer,
intent(in) :: i^D, igrid
1179 logical,
intent(inout) :: ipe_is_neighbor(0:npe-1)
1181 ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1187 integer,
intent(in) :: i^D, igrid
1188 logical,
intent(inout) :: ipe_is_neighbor(0:npe-1)
1189 integer :: ic^D, inc^D
1191 {
do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1194 ipe_is_neighbor( neighbor_child(2,inc^d,igrid) ) = .true.
1203 character(len=std_len) :: filename
1204 integer :: ipart,iipart
1205 type(
particle_t),
dimension(nparticles_per_cpu_hi) :: send_particles
1206 double precision,
dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1207 integer :: send_n_particles_for_output
1209 double precision :: tout
1216 send_n_particles_for_output = 0
1223 send_n_particles_for_output = send_n_particles_for_output + 1
1224 send_particles(send_n_particles_for_output) =
particle(ipart)%self
1230 send_payload,
'ensemble')
1238 character(len=*),
intent(in) :: type
1239 double precision,
intent(in) :: tout
1240 integer,
intent(in) :: index
1241 integer :: nout, mysnapshot
1255 subroutine output_ensemble(send_n_particles_for_output,send_particles,send_payload,typefile)
1258 integer,
intent(in) :: send_n_particles_for_output
1259 type(
particle_t),
dimension(send_n_particles_for_output),
intent(in) :: send_particles
1260 double precision,
dimension(npayload,send_n_particles_for_output),
intent(in) :: send_payload
1261 character(len=*),
intent(in) :: typefile
1262 character(len=std_len) :: filename
1263 type(
particle_t),
dimension(nparticles_per_cpu_hi) :: receive_particles
1264 double precision,
dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1265 integer :: status(MPI_STATUS_SIZE)
1266 integer,
dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1267 integer :: ipe, ipart, nout
1268 logical :: file_exists
1270 receive_n_particles_for_output_from_ipe(:) = 0
1272 call mpi_allgather(send_n_particles_for_output, 1, mpi_integer, &
1273 receive_n_particles_for_output_from_ipe, 1, mpi_integer,
icomm,
ierrmpi)
1276 if (sum(receive_n_particles_for_output_from_ipe(:)) == 0)
return
1283 if(typefile==
'destroy')
then
1284 write(filename,
"(a,a,i6.6,a)") trim(
base_filename) //
'_', &
1285 trim(typefile) //
'.csv'
1287 inquire(file=filename, exist=file_exists)
1289 if (.not. file_exists)
then
1297 write(filename,
"(a,a,i6.6,a)") trim(
base_filename) //
'_', &
1305 do ipart=1,send_n_particles_for_output
1311 call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),
type_particle,ipe,ipe,
icomm,status,
ierrmpi)
1312 call mpi_recv(receive_payload,
npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,
icomm,status,
ierrmpi)
1313 do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1325 character(len=std_len) :: filename
1326 integer :: ipart,iipart,ipe
1327 logical :: file_exists
1333 if (
particle(ipart)%self%follow)
then
1334 write(filename,
"(a,a,i6.6,a)") trim(
base_filename),
'_particle_', &
1336 inquire(file=filename, exist=file_exists)
1361 double precision,
intent(in) :: payload(npayload)
1362 integer,
intent(in) :: ipe
1363 integer,
intent(in) :: file_unit
1364 double precision :: x(3), u(3)
1377 x(:) = myparticle%x(:)
1387 write(file_unit,
csv_format) myparticle%time, myparticle%dt, x(1:3), &
1401 double precision,
intent(in) :: xp(1:3)
1402 double precision,
intent(out) :: xpcart(1:3)
1408 xpcart(1)=xp(1)*cos(xp(
phi_))
1409 xpcart(2)=xp(1)*sin(xp(
phi_))
1412 xpcart(1)=xp(1)*sin(xp(2))*cos(xp(3))
1414 xpcart(2)=xp(1)*cos(xp(2))
1415 xpcart(3)=xp(1)*sin(xp(2))*sin(xp(3))}
1417 xpcart(2)=xp(1)*sin(xp(2))*sin(xp(3))
1418 xpcart(3)=xp(1)*cos(xp(2))}
1420 write(*,*)
"No converter for coordinate=",
coordinate
1428 integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
1430 integer :: tag_send, tag_receive, tag_send_p, tag_receive_p, send_buff, rcv_buff
1431 integer :: status(MPI_STATUS_SIZE)
1432 integer,
dimension(0:npe-1) :: send_n_particles_to_ipe
1433 integer,
dimension(0:npe-1) :: receive_n_particles_from_ipe
1434 type(
particle_t),
dimension(nparticles_per_cpu_hi) :: send_particles
1435 type(
particle_t),
dimension(nparticles_per_cpu_hi) :: receive_particles
1436 double precision,
dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1437 double precision,
dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1438 integer,
dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
1439 integer,
dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_received_from_ipe
1440 integer,
dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
1441 integer :: destroy_n_particles_mype
1442 logical :: BC_applied
1443 integer,
allocatable,
dimension(:) :: sndrqst, rcvrqst
1444 integer :: isnd, ircv
1445 integer,
dimension(npe_neighbors,nparticles_per_cpu_hi) :: payload_index
1446 send_n_particles_to_ipe(:) = 0
1447 receive_n_particles_from_ipe(:) = 0
1448 destroy_n_particles_mype = 0
1455 destroy_n_particles_mype = destroy_n_particles_mype + 1
1456 particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1463 destroy_n_particles_mype = destroy_n_particles_mype + 1
1464 particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1474 if (igrid_particle == -1 )
then
1476 if (.not. bc_applied .or. igrid_particle == -1)
then
1477 destroy_n_particles_mype = destroy_n_particles_mype + 1
1478 particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1484 particle(ipart)%igrid = igrid_particle
1489 send_n_particles_to_ipe(ipe_particle) = &
1490 send_n_particles_to_ipe(ipe_particle) + 1
1491 particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
1498 call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
1501 if (
npe == 1)
return
1505 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1509 tag_receive = ipe *
npe +
mype
1510 isnd = isnd + 1; ircv = ircv + 1;
1511 call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer, &
1513 call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer, &
1516 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,
ierrmpi)
1517 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,
ierrmpi)
1518 deallocate( sndrqst, rcvrqst )
1522 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1525 if (send_n_particles_to_ipe(ipe) > 0)
then
1528 call mpi_isend(particle_index_to_be_sent_to_ipe(:,ipe),send_n_particles_to_ipe(ipe),mpi_integer, &
1531 if (receive_n_particles_from_ipe(ipe) > 0)
then
1532 tag_receive = ipe *
npe +
mype
1534 call mpi_irecv(particle_index_to_be_received_from_ipe(:,ipe),receive_n_particles_from_ipe(ipe),mpi_integer, &
1538 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,
ierrmpi)
1539 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,
ierrmpi)
1540 deallocate( sndrqst, rcvrqst )
1544 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1549 if (send_n_particles_to_ipe(ipe) .gt. 0)
then
1551 do ipart = 1, send_n_particles_to_ipe(ipe)
1552 tag_send =
mype *
npe + ipe + ipart
1554 call mpi_isend(
particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self, &
1560 if (receive_n_particles_from_ipe(ipe) .gt. 0)
then
1562 do ipart = 1, receive_n_particles_from_ipe(ipe)
1563 tag_receive = ipe *
npe +
mype + ipart
1565 index = particle_index_to_be_received_from_ipe(ipart,ipe)
1572 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,
ierrmpi)
1573 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,
ierrmpi)
1574 deallocate( sndrqst, rcvrqst )
1579 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1584 if (send_n_particles_to_ipe(ipe) .gt. 0)
then
1586 do ipart = 1, send_n_particles_to_ipe(ipe)
1587 tag_send =
mype *
npe + ipe + ipart
1589 call mpi_isend(
particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:
npayload), &
1595 if (receive_n_particles_from_ipe(ipe) .gt. 0)
then
1596 do ipart = 1, receive_n_particles_from_ipe(ipe)
1597 tag_receive = ipe *
npe +
mype + ipart
1599 index = particle_index_to_be_received_from_ipe(ipart,ipe)
1602 mpi_double_precision,ipe,tag_receive,
icomm,rcvrqst(ircv),
ierrmpi)
1606 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,
ierrmpi)
1607 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,
ierrmpi)
1608 deallocate( sndrqst, rcvrqst )
1612 if (send_n_particles_to_ipe(ipe) .gt. 0)
then
1613 do ipart = 1, send_n_particles_to_ipe(ipe)
1614 deallocate(
particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
1615 deallocate(
particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
1623 if (receive_n_particles_from_ipe(ipe) .gt. 0)
then
1624 do ipart = 1, receive_n_particles_from_ipe(ipe)
1625 index = particle_index_to_be_received_from_ipe(ipart,ipe)
1629 particle(index)%igrid = igrid_particle
1640 integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
1642 integer :: tag_send, tag_receive, send_buff, rcv_buff
1643 integer :: status(MPI_STATUS_SIZE)
1644 integer,
dimension(0:npe-1) :: send_n_particles_to_ipe
1645 integer,
dimension(0:npe-1) :: receive_n_particles_from_ipe
1646 type(
particle_t),
dimension(nparticles_per_cpu_hi) :: send_particles
1647 type(
particle_t),
dimension(nparticles_per_cpu_hi) :: receive_particles
1648 double precision,
dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1649 double precision,
dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1650 integer,
dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
1651 integer,
dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
1652 integer :: destroy_n_particles_mype
1653 logical :: BC_applied
1655 send_n_particles_to_ipe(:) = 0
1656 receive_n_particles_from_ipe(:) = 0
1657 destroy_n_particles_mype = 0
1664 particle(ipart)%igrid = igrid_particle
1669 send_n_particles_to_ipe(ipe_particle) = &
1670 send_n_particles_to_ipe(ipe_particle) + 1
1671 particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
1677 if (
npe == 1)
return
1680 do ipe=0,
npe-1;
if (ipe .eq.
mype) cycle;
1682 tag_receive = ipe *
npe +
mype
1683 call mpi_send(send_n_particles_to_ipe(ipe),1,mpi_integer,ipe,tag_send,
icomm,
ierrmpi)
1684 call mpi_recv(receive_n_particles_from_ipe(ipe),1,mpi_integer,ipe,tag_receive,
icomm,status,
ierrmpi)
1688 do ipe=0,
npe-1;
if (ipe .eq.
mype) cycle;
1690 tag_receive = ipe *
npe +
mype
1693 if (send_n_particles_to_ipe(ipe) .gt. 0)
then
1696 do ipart = 1, send_n_particles_to_ipe(ipe)
1697 send_particles(ipart) =
particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
1701 call mpi_send(send_payload,
npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,
icomm,
ierrmpi)
1702 do ipart = 1, send_n_particles_to_ipe(ipe)
1703 deallocate(
particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
1704 deallocate(
particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
1711 if (receive_n_particles_from_ipe(ipe) .gt. 0)
then
1714 call mpi_recv(receive_payload,
npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,
icomm,status,
ierrmpi)
1716 do ipart = 1, receive_n_particles_from_ipe(ipe)
1718 index = receive_particles(ipart)%index
1720 particle(index)%self = receive_particles(ipart)
1727 particle(index)%igrid = igrid_particle
1741 integer,
intent(inout) :: igrid_particle, ipe_particle
1742 logical,
intent(out) :: BC_applied
1745 bc_applied = .false.
1752 if (.not.
periodb(idim)) cycle
1756 if (particle%x(^
d) .lt. xprobmin^
d)
then
1757 particle%x(^
d) = particle%x(^
d) + (xprobmax^
d - xprobmin^
d)
1760 if (particle%x(^
d) .ge. xprobmax^
d)
then
1761 particle%x(^
d) = particle%x(^
d) - (xprobmax^
d - xprobmin^
d)
1777 integer,
intent(in) :: destroy_n_particles_mype
1778 integer,
dimension(1:destroy_n_particles_mype),
intent(in) :: particle_index_to_be_destroyed
1779 type(
particle_t),
dimension(destroy_n_particles_mype):: destroy_particles_mype
1780 double precision,
dimension(npayload,destroy_n_particles_mype):: destroy_payload_mype
1781 integer :: iipart,ipart,destroy_n_particles
1783 destroy_n_particles = 0
1786 do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
1787 destroy_particles_mype(iipart) =
particle(ipart)%self
1791 call output_ensemble(destroy_n_particles_mype,destroy_particles_mype,destroy_payload_mype,
'destroy')
1794 call mpi_allreduce(destroy_n_particles_mype,destroy_n_particles,1,mpi_integer,mpi_sum,
icomm,
ierrmpi)
1796 destroy_n_particles = destroy_n_particles_mype
1801 do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
1809 deallocate(
particle(ipart)%payload)
1818 integer,
intent(in) :: ipart
1828 integer,
intent(in) :: ipart
subroutine mpistop(message)
Exit MPI-AMRVAC with an error message.
Module for physical and numeric constants.
Module with basic grid data structures.
type(tree_node_ptr), dimension(:^d &), allocatable, save tree_root
Pointers to the coarse grid.
Module with geometry-related routines (e.g., divergence, curl)
integer, parameter spherical
integer, parameter cartesian
integer, parameter cylindrical
integer, parameter cartesian_expansion
integer, parameter cartesian_stretched
subroutine curlvector(qvec, ixIL, ixOL, curlvec, idirmin, idirmin0, ndir0, fourthorder)
Calculate curl of a vector qvec within ixL. Options to employ standard second-order CD evaluations; use...
This module contains definitions of global parameters and variables and some generic functions/subrou...
type(state), pointer block
Block pointer for using one block and its previous state.
logical nocartesian
IO switches for conversion.
integer ixghi
Upper index of grid block arrays.
integer, parameter unitpar
file handle for IO
double precision global_time
The global simulation time.
double precision time_max
End time for the simulation.
integer snapshotini
Resume from the snapshot with this index.
integer, parameter ndim
Number of spatial dimensions for grid variables.
integer, parameter rpxmin
character(len=std_len), dimension(:), allocatable par_files
Which par files are used as input.
integer icomm
The MPI communicator.
integer mype
The rank of the current MPI task.
integer, dimension(:), allocatable, parameter d
double precision length_convert_factor
integer ndir
Number of spatial dimensions (components) for vector variables.
integer ixm
the mesh range (within a block with ghost cells)
integer ierrmpi
A global MPI error return code.
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
integer npe
The number of MPI tasks.
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
logical, dimension(ndim) periodb
True for dimensions with periodic boundaries.
double precision c_norm
Normalised speed of light.
logical b0field
split magnetic field as background B0 field
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
integer nghostcells
Number of ghost cells surrounding a grid.
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
integer, parameter rpxmax
character(len=std_len) base_filename
Base file name for simulation output, which will be followed by a number.
integer r_
Indices for cylindrical coordinates FOR TESTS, negative value when not used:
double precision, dimension(ndim) dxlevel
integer, parameter ixglo
Lower index of grid block arrays (always 1)
Module with shared functionality for all the particle movers.
subroutine get_maxwellian_velocity(v, velocity)
double precision, dimension(3) integrator_velocity_factor
Normalization factor for velocity in the integrator.
integer nparticles_per_cpu_hi
Maximum number of particles in one processor.
integer nparticleshi
Maximum number of particles.
pure subroutine limit_dt_endtime(t_left, dt_p)
integer num_particles
Number of particles.
double precision const_dt_particles
If positive, a constant time step for the particles.
subroutine output_individual()
integer n_output_destroyed
Output count for ensembles of destroyed particles.
subroutine ipe_cf(iD, igrid, ipe_is_neighbor)
integer it_particles
Iteration number of particles.
subroutine fields_from_mhd(igrid, w_mhd, w_part)
Determine fields from MHD variables.
subroutine particles_params_read(files)
Read this module's parameters from a file.
double precision min_particle_time
Minimum time of all particles.
subroutine output_ensemble(send_n_particles_for_output, send_particles, send_payload, typefile)
subroutine get_uniform_pos(x)
logical write_individual
Whether to write individual particle output (followed particles)
subroutine write_particle_output()
subroutine init_particles_com()
Initialise communicators for particles.
logical function particle_in_igrid(ipart, igrid)
Quick check if particle is still in igrid.
integer, dimension(:), allocatable particles_on_mype
Array of identity numbers of particles in current processor.
double precision dtsave_particles
Time interval to save particles.
subroutine output_particle(myparticle, payload, ipe, file_unit)
integer ngridvars
Number of variables for grid field.
double precision particles_etah
subroutine destroy_particles(destroy_n_particles_mype, particle_index_to_be_destroyed)
clean up destroyed particles on all cores
integer nusrpayload
Number of user-defined payload variables for a particle.
subroutine select_active_particles(end_time)
subroutine write_particles_snapshot()
double precision particles_eta
Resistivity.
subroutine particles_finish()
Finalize the particles module.
logical write_snapshot
Whether to write particle snapshots.
pure subroutine get_lfac(u, lfac)
Get Lorentz factor from relativistic momentum.
subroutine find_particle_ipe(x, igrid_particle, ipe_particle)
subroutine pull_particle_from_particles_on_mype(ipart)
subroutine init_particles_output()
pure subroutine get_lfac_from_velocity(v, lfac)
Get Lorentz factor from velocity.
integer integrator
Integrator to be used for particles.
integer, dimension(:), allocatable ep
Variable index for electric field.
subroutine handle_particles()
Let particles evolve in time. The routine also handles grid variables and output.
subroutine fill_gridvars_default
This routine fills the particle grid variables with the default for mhd, i.e. only E and B.
character(len=400) csv_header
Header string used in CSV files.
subroutine ipe_fc(iD, igrid, ipe_is_neighbor)
subroutine finish_gridvars()
Deallocate grid variables for particles.
subroutine particle_get_current(w, ixIL, ixOL, idirmin, current)
Calculate idirmin and the idirmin:3 components of the common current array; make sure that dxlevel(^D)...
integer npayload
Number of total payload variables for a particle.
integer, parameter unitparticles
integer, dimension(:), allocatable particles_active_on_mype
Array of identity numbers of active particles in current processor.
logical relativistic
Use a relativistic particle mover?
character(len=name_len) integrator_type_particles
String describing the particle integrator type.
subroutine set_neighbor_ipe()
subroutine time_spent_on_particles()
subroutine get_vec(ix, igrid, x, tloc, vec)
logical write_ensemble
Whether to write ensemble output.
integer downsample_particles
Particle downsampling factor in CSV output.
subroutine locate_particle(index, igrid_particle, ipe_particle)
subroutine push_particle_into_particles_on_mype(ipart)
subroutine read_from_snapshot()
integer nparticles_active_on_mype
Number of active particles in current processor.
procedure(sub_define_additional_gridvars), pointer particles_define_additional_gridvars
subroutine partcoord_to_cartesian(xp, xpcart)
Convert to Cartesian coordinates.
integer nparticles
Identity number and total number of particles.
integer nparticles_on_mype
Number of particles in current processor.
integer, dimension(:), allocatable bp
Variable index for magnetic field.
subroutine comm_particles()
double precision tmax_particles
Time limit of particles.
type(particle_ptr), dimension(:), allocatable particle
procedure(fun_destroy), pointer usr_destroy_particle
integer, dimension(:), allocatable vp
Variable index for fluid velocity.
procedure(sub_integrate), pointer particles_integrate
subroutine cross(a, b, c)
subroutine apply_periodb(particle, igrid_particle, ipe_particle, BC_applied)
procedure(sub_fill_gridvars), pointer particles_fill_gridvars
double precision t_next_output
Time to write next particle output.
subroutine advance_particles(end_time, steps_taken)
Routine handles the particle evolution.
integer ndefpayload
Number of default payload variables for a particle.
subroutine particle_base_init()
Give initial values to parameters.
integer, dimension(:), allocatable ipe_neighbor
integer igrid_working
Set the current igrid for the particle integrator.
character(len=60) csv_format
Format string used in CSV files.
integer n_output_ensemble
Output count for ensembles.
integer, dimension(:), allocatable jp
Variable index for current.
subroutine append_to_snapshot(myparticle, mypayload)
integer ipart_working
Set the current ipart for the particle integrator.
double precision particles_cfl
Particle CFL safety factor.
subroutine ipe_srl(iD, igrid, ipe_is_neighbor)
procedure(sub_fill_additional_gridvars), pointer particles_fill_additional_gridvars
logical function particle_in_domain(x)
Check if particle is inside computational domain.
character(len=name_len) physics_type_particles
String describing the particle physics type.
subroutine init_gridvars()
Initialize grid variables for particles.
character(len=128) function make_particle_filename(tout, index, type)
subroutine comm_particles_global()
subroutine read_particles_snapshot(file_exists)
subroutine interpolate_var(igrid, ixIL, ixOL, gf, x, xloc, gfloc)
This module defines the procedures of a physics module. It contains function pointers for the various...
Module for pseudo random number generation. The internal pseudo random generator is the xoroshiro128p...
Writes D-1 slice, can do so in various formats, depending on slice_type.
subroutine get_igslice(dir, x, igslice)
double precision tpartc_int_0
double precision timeio_tot
double precision tpartc_com
double precision tpartc_grid_0
double precision tpartc_io
double precision timeloop
double precision tpartc_com0
double precision tpartc_grid
double precision tpartc_io_0
double precision tpartc_int
Module with all the methods that users can customize in AMRVAC.
procedure(particle_analytic), pointer usr_particle_analytic
procedure(particle_fields), pointer usr_particle_fields