integer, allocatable :: bp(:)
integer, allocatable :: ep(:)
integer, allocatable :: vp(:)
integer, allocatable :: jp(:)
double precision, allocatable :: payload(:)
integer :: igrid, ipe
double precision :: q
double precision :: m
double precision :: time
double precision :: dt
double precision :: x(3)
double precision :: u(3)
integer, intent(inout) :: ngridvars
double precision, intent(in) :: end_time
character(len=*), intent(in) :: files(:)
do n = 1, size(files)
   open(unitpar, file=trim(files(n)), status="old")
   read(unitpar, particles_list, end=111)
integer, parameter :: i8 = selected_int_kind(18)
integer(i8) :: seed(2)
character(len=20) :: strdata

dtheta = 2.0d0*dpi / 60.0d0
seed = [310952_i8, 8948923749821_i8]
call rng%set_seed(seed)
csv_header = ' time, dt, x1, x2, x3, u1, u2, u3,'
write(strdata, "(a,a,a)") ' ', trim(prim_wnames(n)), ','
write(strdata, "(a,i2.2,a)") ' pl', n, ','
write(strdata, "(a,i2.2,a)") ' usrpl', n, ','
'i8.7,", ",i11.10,", ",i8.7)'
integer :: oldtypes(0:7), offsets(0:7), blocklengths(0:7)

oldtypes(0) = mpi_logical
oldtypes(1) = mpi_integer
oldtypes(2:7) = mpi_double_precision

offsets(1) = size_logical * blocklengths(0)
offsets(2) = offsets(1) + size_int * blocklengths(1)
offsets(3) = offsets(2) + size_double * blocklengths(2)
offsets(4) = offsets(3) + size_double * blocklengths(3)
offsets(5) = offsets(4) + size_double * blocklengths(4)
offsets(6) = offsets(5) + size_double * blocklengths(5)
offsets(7) = offsets(6) + size_double * blocklengths(6)
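These blocklengths/offsets/oldtypes arrays describe the memory layout of particle_t to MPI. A minimal sketch of the registration step such arrays feed into, assuming the legacy mpi_type_struct interface and the type_particle handle that the send/receive calls later in this listing use as their datatype:

! Sketch only: build and commit the MPI datatype for particle_t from the
! 8 blocks (0:7) assembled above; type_particle is then usable as the
! datatype argument of mpi_send/mpi_recv for whole particles.
call mpi_type_struct(8, blocklengths, offsets, oldtypes, type_particle, ierrmpi)
call mpi_type_commit(type_particle, ierrmpi)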
double precision, intent(out) :: v(3)
double precision, intent(in) :: velocity
double precision :: vtmp(3), vnorm

vtmp(1) = velocity * rng%normal()
vtmp(2) = velocity * rng%normal()
vtmp(3) = velocity * rng%normal()
vnorm = norm2(vtmp)
v = rng%sphere(vnorm)
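Three independent normal deviates of width velocity form an isotropic Gaussian vector, so the magnitude vnorm follows a Maxwellian speed distribution; rng%sphere(vnorm) then redraws a uniformly random direction with that magnitude, so speed and direction are sampled independently.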
double precision, intent(out) :: x(3)

call rng%unif_01_vec(x)
{^d&x(^d) = xprobmin^d + x(^d) * (xprobmax^d - xprobmin^d)\}
integer :: igrid, iigrid

do iigrid=1,igridstail; igrid=igrids(iigrid);
   allocate(gridvars(igrid)%w(ixg^t,1:ngridvars),gridvars(igrid)%wold(ixg^t,1:ngridvars))
   gridvars(igrid)%w = 0.0d0
   gridvars(igrid)%wold = 0.0d0
integer :: iigrid, igrid

do iigrid=1,igridstail; igrid=igrids(iigrid);
   deallocate(gridvars(igrid)%w,gridvars(igrid)%wold)
integer :: igrid, iigrid
double precision :: E(ixG^T, ndir)
double precision :: B(ixG^T, ndir)

do iigrid=1,igridstail; igrid=igrids(iigrid);
   gridvars(igrid)%w(ixg^t,ep(:)) = e
   gridvars(igrid)%w(ixg^t,bp(:)) = b
integer, intent(in) :: igrid
double precision, intent(in) :: w_mhd(ixG^T,nw)
double precision, intent(inout) :: w_part(ixG^T,ngridvars)
integer :: idirmin, idir
double precision :: current(ixG^T,7-2*ndir:3)
double precision :: w(ixG^T,1:nw)
w(ixg^t,1:nw) = w_mhd(ixg^t,1:nw)
w_part(ixg^t,bp(1):bp(3)) = 0.0d0
w_part(ixg^t,ep(1):ep(3)) = 0.0d0

call phys_to_primitive(ixg^ll,ixg^ll,w,ps(igrid)%x)

w_part(ixg^t,rhop) = w(ixg^t,iw_rho)
w_part(ixg^t,vp(:)) = w(ixg^t,iw_mom(:))
w_part(ixg^t,bp(idir))=w(ixg^t,iw_mag(idir))+ps(igrid)%B0(ixg^t,idir,0)
w_part(ixg^t,bp(:)) = w(ixg^t,iw_mag(:))
w_part(ixg^t,jp(:)) = current(ixg^t,:)
w_part(ixg^t,ep(1)) = w_part(ixg^t,bp(2)) * &
     w(ixg^t,iw_mom(3)) - w_part(ixg^t,bp(3)) * &
     w(ixg^t,iw_mom(2))
w_part(ixg^t,ep(2)) = w_part(ixg^t,bp(3)) * &
     w(ixg^t,iw_mom(1)) - w_part(ixg^t,bp(1)) * &
     w(ixg^t,iw_mom(3))
w_part(ixg^t,ep(3)) = w_part(ixg^t,bp(1)) * &
     w(ixg^t,iw_mom(2)) - w_part(ixg^t,bp(2)) * &
     w(ixg^t,iw_mom(1))
w_part(ixg^t,ep(r_)) = w_part(ixg^t,bp(phi_)) * &
     w(ixg^t,iw_mom(z_)) - w_part(ixg^t,bp(z_)) * &
     w(ixg^t,iw_mom(phi_))
w_part(ixg^t,ep(phi_)) = w_part(ixg^t,bp(z_)) * &
     w(ixg^t,iw_mom(r_)) - w_part(ixg^t,bp(r_)) * &
     w(ixg^t,iw_mom(z_))
w_part(ixg^t,ep(z_)) = w_part(ixg^t,bp(r_)) * &
     w(ixg^t,iw_mom(phi_)) - w_part(ixg^t,bp(phi_)) * &
     w(ixg^t,iw_mom(r_))
(current(ixg^t,2) * w_part(ixg^t,bp(3)) - current(ixg^t,3) * w_part(ixg^t,bp(2)))
(-current(ixg^t,1) * w_part(ixg^t,bp(3)) + current(ixg^t,3) * w_part(ixg^t,bp(1)))
(current(ixg^t,1) * w_part(ixg^t,bp(2)) - current(ixg^t,2) * w_part(ixg^t,bp(1)))
(current(ixg^t,phi_) * w_part(ixg^t,bp(z_)) - current(ixg^t,z_) * w_part(ixg^t,bp(phi_)))
(-current(ixg^t,r_) * w_part(ixg^t,bp(z_)) + current(ixg^t,z_) * w_part(ixg^t,bp(r_)))
(current(ixg^t,r_) * w_part(ixg^t,bp(phi_)) - current(ixg^t,phi_) * w_part(ixg^t,bp(r_)))
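For reference, the triple products above implement the convective electric field of ideal MHD, E = -v x B, written component-wise as E_1 = B_2 v_3 - B_3 v_2 and cyclic permutations, with (r_, phi_, z_) playing the role of (1,2,3) in the cylindrical branch. The (J x B) combinations are the matching components of a Hall-type correction; the prefactor (presumably built from particles_etah and the local density) is not visible in this excerpt.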
integer :: ixO^L, idirmin, ixI^L
double precision :: w(ixI^S,1:nw)
double precision :: current(ixI^S,7-2*ndir:3),bvec(ixI^S,1:ndir)

bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))+block%B0(ixi^s,idir,0)
bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))
call curlvector(bvec,ixi^l,ixo^l,current,idirmin,idirmin0,ndir)
integer :: igrid, iigrid

do iigrid=1,igridstail; igrid=igrids(iigrid);
   gridvars(igrid)%wold=gridvars(igrid)%w
integer :: steps_taken, nparticles_left

if (nparticles_left == 0 .and. convert) call mpistop('No particles left')
double precision, intent(in) :: end_time
integer, intent(out) :: steps_taken

if (n_active == 0) exit
steps_taken = steps_taken + 1
double precision, intent(in) :: t_left
double precision, intent(inout) :: dt_p
double precision :: n_steps
double precision, parameter :: eps = 1d-10

n_steps = t_left / dt_p
if (n_steps < 1+eps) then
   dt_p = t_left
else if (n_steps < 2-eps) then
   dt_p = 0.5d0 * t_left
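In words: if the remaining time t_left fits in a single step to within eps, the step is set to cover it exactly; if it would take between one and two steps, the remainder is split into two equal halves, so the integrator never has to finish with a vanishingly small step.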
integer, intent(in) :: ix(ndir)
integer, intent(in) :: igrid
double precision, dimension(3), intent(in) :: x
double precision, intent(in) :: tloc
double precision, dimension(ndir), intent(out) :: vec
double precision, dimension(ndir) :: vec1, vec2
double precision :: td
ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
vec(:) = vec(:) * (1.0d0 - td) + vec2(:) * td
integer, intent(in) :: igrid
double precision, dimension(3), intent(in) :: x
double precision, intent(in) :: tloc
double precision, dimension(ndir), intent(out) :: b
double precision, dimension(ndir) :: vec1, vec2, vec
double precision :: td
ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
vec(:) = vec(:) * (1.0d0 - td) + vec2(:) * td
integer, intent(in) :: igrid
double precision, dimension(3), intent(in) :: x
double precision, intent(in) :: tloc
double precision, dimension(ndir), intent(out) :: e
double precision, dimension(ndir) :: vec1, vec2, vec
double precision, dimension(ndir) :: vfluid, b
double precision, dimension(ndir) :: current
double precision :: rho = 1.d0, rho2 = 1.d0
double precision :: td

call get_vec(jp, igrid, x, tloc, current)
rho = rho * (1.0d0 - td) + rho2 * td
double precision, dimension(3), intent(in) :: u
double precision, intent(out) :: lfac

lfac = sqrt(1.0d0 + sum(u(:)**2)/c_norm**2)
double precision, dimension(3), intent(in) :: v
double precision, intent(out) :: lfac

lfac = 1.0d0 / sqrt(1.0d0 - sum(v(:)**2)/c_norm**2)
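The two forms are consistent: with relativistic momentum per unit mass u = \gamma v, substituting into \gamma = \sqrt{1 + u^2/c^2} gives \gamma^2 = 1 + \gamma^2 v^2/c^2, hence \gamma^2 (1 - v^2/c^2) = 1 and \gamma = 1/\sqrt{1 - v^2/c^2}.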
double precision, dimension(ndir), intent(in) :: a,b
double precision, dimension(ndir), intent(out) :: c

c(1) = a(2)*b(3) - a(3)*b(2)
c(2) = a(3)*b(1) - a(1)*b(3)
c(3) = a(1)*b(2) - a(2)*b(1)
call mpistop('geometry not implemented in cross(a,b,c)')
call mpistop("cross in particles needs to be run with three components!")
integer, intent(in) :: igrid,ixI^L, ixO^L
double precision, intent(in) :: gf(ixI^S)
double precision, intent(in) :: x(ixI^S,1:ndim)
double precision, intent(in) :: xloc(1:3)
double precision, intent(out) :: gfloc
double precision :: xd^D, myq, dx1
double precision :: c00, c10
double precision :: c0, c1, c00, c10, c01, c11
integer :: ic^D, ic1^D, ic2^D, idir
ic^d = ceiling(dlog((xloc(^d)-xprobmin^d)*(qstretch(ps(igrid)%level,^d)-one)/&
if(xloc(^d)<xprobmin^d+xstretch^d) then
else if(xloc(^d)>xprobmax^d-xstretch^d) then
   ic^d = ceiling(dlog((xloc(^d)-xprobmax^d+xstretch^d)*(qstretch(ps(igrid)%level,^d)-one)/&
if (x({ic^dd},^d) .lt. xloc(^d)) then
if(ic1^d.lt.ixglo^d+1 .or. ic2^d.gt.ixghi^d-1) then
print *, 'direction: ',^d
print *, 'position: ',xloc(1:ndim)
print *, 'indices:', ic1^d,ic2^d
call mpistop('Trying to interpolate from out of grid!')
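The ceiling/log expressions invert the geometric stretching rule. If cell widths grow as \Delta x_i = \Delta x_1 q^{i-1} with q = qstretch, the distance from the stretched boundary is x - x_{\min} = \Delta x_1 (q^i - 1)/(q - 1), so the cell index follows as i = \lceil \log(1 + (x - x_{\min})(q - 1)/\Delta x_1) / \log q \rceil. The continuation lines are truncated in this excerpt, so take this as the intended relation rather than a literal transcription.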
xd1 = (xloc(1)-x(ic11,1)) / (x(ic21,1) - x(ic11,1))
gfloc = gf(ic11) * (1.0d0 - xd1) + gf(ic21) * xd1

xd1 = (xloc(1)-x(ic11,ic12,1)) / (x(ic21,ic12,1) - x(ic11,ic12,1))
xd2 = (xloc(2)-x(ic11,ic12,2)) / (x(ic11,ic22,2) - x(ic11,ic12,2))
c00 = gf(ic11,ic12) * (1.0d0 - xd1) + gf(ic21,ic12) * xd1
c10 = gf(ic11,ic22) * (1.0d0 - xd1) + gf(ic21,ic22) * xd1
gfloc = c00 * (1.0d0 - xd2) + c10 * xd2

xd1 = (xloc(1)-x(ic11,ic12,ic13,1)) / (x(ic21,ic12,ic13,1) - x(ic11,ic12,ic13,1))
xd2 = (xloc(2)-x(ic11,ic12,ic13,2)) / (x(ic11,ic22,ic13,2) - x(ic11,ic12,ic13,2))
xd3 = (xloc(3)-x(ic11,ic12,ic13,3)) / (x(ic11,ic12,ic23,3) - x(ic11,ic12,ic13,3))
c00 = gf(ic11,ic12,ic13) * (1.0d0 - xd1) + gf(ic21,ic12,ic13) * xd1
c10 = gf(ic11,ic22,ic13) * (1.0d0 - xd1) + gf(ic21,ic22,ic13) * xd1
c01 = gf(ic11,ic12,ic23) * (1.0d0 - xd1) + gf(ic21,ic12,ic23) * xd1
c11 = gf(ic11,ic22,ic23) * (1.0d0 - xd1) + gf(ic21,ic22,ic23) * xd1
c0 = c00 * (1.0d0 - xd2) + c10 * xd2
c1 = c01 * (1.0d0 - xd2) + c11 * xd2
gfloc = c0 * (1.0d0 - xd3) + c1 * xd3
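The 1D/2D/3D branches follow the same multilinear pattern: collapse one axis at a time using the normalized in-cell distances xd1, xd2, xd3. A self-contained sketch of the 3D step on a single cell (a hypothetical helper, not part of this module):

pure function trilinear(g, xd1, xd2, xd3) result(gloc)
  ! g(i,j,k) holds the eight corner values (indices 1:2 per direction);
  ! xd1..xd3 are normalized coordinates in [0,1] within the cell.
  double precision, intent(in) :: g(2,2,2), xd1, xd2, xd3
  double precision :: gloc
  double precision :: c00, c10, c01, c11, c0, c1
  c00 = g(1,1,1)*(1.0d0-xd1) + g(2,1,1)*xd1   ! collapse x at (y1,z1)
  c10 = g(1,2,1)*(1.0d0-xd1) + g(2,2,1)*xd1   ! collapse x at (y2,z1)
  c01 = g(1,1,2)*(1.0d0-xd1) + g(2,1,2)*xd1   ! collapse x at (y1,z2)
  c11 = g(1,2,2)*(1.0d0-xd1) + g(2,2,2)*xd1   ! collapse x at (y2,z2)
  c0  = c00*(1.0d0-xd2) + c10*xd2             ! collapse y at z1
  c1  = c01*(1.0d0-xd2) + c11*xd2             ! collapse y at z2
  gloc = c0*(1.0d0-xd3) + c1*xd3              ! collapse z
end function trilinear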
double precision :: tpartc_avg, tpartc_int_avg, tpartc_io_avg, tpartc_com_avg, tpartc_grid_avg

tpartc_avg      = tpartc_avg/dble(npe)
tpartc_int_avg  = tpartc_int_avg/dble(npe)
tpartc_io_avg   = tpartc_io_avg/dble(npe)
tpartc_com_avg  = tpartc_com_avg/dble(npe)
tpartc_grid_avg = tpartc_grid_avg/dble(npe)
write(*,'(a,f12.3,a)') ' Particle handling took : ', tpartc, ' sec'
write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc/(timeloop+smalldouble), ' %'
write(*,'(a,f12.3,a)') ' Particle IO took : ', tpartc_io_avg, ' sec'
write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc_io_avg/(timeloop+smalldouble), ' %'
write(*,'(a,f12.3,a)') ' Particle COM took : ', tpartc_com_avg, ' sec'
write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc_com_avg/(timeloop+smalldouble), ' %'
write(*,'(a,f12.3,a)') ' Particle integration took : ', tpartc_int_avg, ' sec'
write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc_int_avg/(timeloop+smalldouble), ' %'
write(*,'(a,f12.3,a)') ' Particle init grid took : ', tpartc_grid_avg, ' sec'
write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc_grid_avg/(timeloop+smalldouble), ' %'
logical, intent(out) :: file_exists
character(len=std_len) :: filename
integer :: mynpayload, mynparticles, pos

INQUIRE(file=filename, exist=file_exists)
if (.not. file_exists) then
   write(*,*) 'WARNING: File '//trim(filename)//' with particle data does not exist.'
   write(*,*) 'Initialising particles from user or default routines'
open(unit=unitparticles,file=filename,form='unformatted',action='read',status='unknown',access='stream')
call mpistop('npayload in restart file does not match npayload in mod_particles')
call mpi_bcast(file_exists,1,mpi_logical,0,icomm,ierrmpi)
if (.not. file_exists) return
mynparticles = mynparticles + 1
call mpi_bcast(mynparticles,1,mpi_integer,0,icomm,ierrmpi)

integer :: index,igrid_particle,ipe_particle
particle(index)%igrid = igrid_particle

character(len=std_len) :: filename
type(particle_t), allocatable, dimension(:) :: send_particles
type(particle_t), allocatable, dimension(:) :: receive_particles
double precision, allocatable, dimension(:,:) :: send_payload
double precision, allocatable, dimension(:,:) :: receive_payload
integer :: status(MPI_STATUS_SIZE)
integer, dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
integer :: ipe, ipart, iipart, send_n_particles_for_output
logical, save :: file_exists=.false.
receive_n_particles_for_output_from_ipe(:) = 0

if (mype .eq. 0) then
   INQUIRE(file=filename, exist=file_exists)
   if (.not. file_exists) then
      open(unit=unitparticles,file=filename,form='unformatted',status='new',access='stream')
   else
      open(unit=unitparticles,file=filename,form='unformatted',status='replace',access='stream')
if (mype .ne. 0) then
   allocate(send_particles(1:send_n_particles_for_output))
   allocate(send_payload(1:npayload,1:send_n_particles_for_output))
   send_particles(iipart) = particle(ipart)%self
call mpi_recv(receive_n_particles_for_output_from_ipe(ipe),1,mpi_integer,ipe,ipe,icomm,status,ierrmpi)
if (mype .ne. 0) then
   deallocate(send_particles)
   deallocate(send_payload)
allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
deallocate(receive_particles)
deallocate(receive_payload)

double precision :: mypayload(1:npayload)

character(len=std_len) :: filename
character(len=1024) :: line, strdata
integer :: iipart, ipart, icomp
if (particle(ipart)%self%follow) then
   write(strdata,"(a,i2.2,a)") 'payload',icomp,','
   line = trim(line)//trim(strdata)
write(unitparticles,"(a,a,a)") 'time,dt,x1,x2,x3,u1,u2,u3,',trim(line),'ipe,iteration,index'
double precision, intent(in) :: end_time
integer :: ipart, iipart
double precision :: t_min_mype

t_min_mype = bigdouble
activate = (particle(ipart)%self%time < end_time)
t_min_mype = min(t_min_mype, particle(ipart)%self%time)
integer, intent(in) :: index
integer, intent(out) :: igrid_particle, ipe_particle
integer :: iipart,ipart,ipe_has_particle,ipe
logical :: has_particle(0:npe-1)
integer, dimension(0:1) :: buff

has_particle(:) = .false.
if (particle(ipart)%self%index == index) then
   has_particle(mype) = .true.
if (has_particle(mype)) then
if (npe>0) call mpi_allgather(has_particle(mype),1,mpi_logical,has_particle,1,mpi_logical,icomm,ierrmpi)

ipe_has_particle = -1
if (has_particle(ipe) .eqv. .true.) then
   ipe_has_particle = ipe
if (ipe_has_particle .ne. -1) then
   if (npe>0) call mpi_bcast(buff,2,mpi_integer,ipe_has_particle,icomm,ierrmpi)
   igrid_particle=buff(0)
   ipe_particle = buff(1)
double precision, intent(in) :: x(3)
integer, intent(out) :: igrid_particle, ipe_particle
integer :: ig(ndir,nlevelshi), ig_lvl(nlevelshi)
integer :: idim, ic(ndim)
do while (.not.branch%node%leaf)
   {ic(^d)=ig(^d,branch%node%level+1) - 2 * branch%node%ig^d +2\}
   branch%node => branch%node%child({ic(^d)})%node
igrid_particle = branch%node%igrid
ipe_particle = branch%node%ipe
double precision, dimension(ndim), intent(in) :: x
all(x < [ xprobmax^d ])
integer, intent(in) :: igrid, ipart
double precision :: x(ndim), grid_rmin(ndim), grid_rmax(ndim)

if (.not. allocated(ps(igrid)%w)) then
integer :: igrid, iigrid,ipe
integer :: my_neighbor_type, i^D
logical :: ipe_is_neighbor(0:npe-1)

ipe_is_neighbor(:) = .false.
do iigrid=1,igridstail; igrid=igrids(iigrid);
   if (i^d==0|.and.) then
   my_neighbor_type=neighbor_type(i^d,igrid)
   select case (my_neighbor_type)
   case (neighbor_boundary)
   case (neighbor_coarse)
      call ipe_fc(i^d,igrid,ipe_is_neighbor)
   case (neighbor_sibling)
      call ipe_srl(i^d,igrid,ipe_is_neighbor)
   case (neighbor_fine)
      call ipe_cf(i^d,igrid,ipe_is_neighbor)

ipe_is_neighbor(mype) = .false.
if (ipe_is_neighbor(ipe)) then
integer, intent(in) :: i^D, igrid
logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.

integer, intent(in) :: i^D, igrid
logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.

integer, intent(in) :: i^D, igrid
logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
integer :: ic^D, inc^D
{do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
   ipe_is_neighbor( neighbor_child(2,inc^d,igrid) ) = .true.
character(len=std_len) :: filename
integer :: ipart,iipart
type(particle_t), allocatable, dimension(:) :: send_particles
double precision, allocatable, dimension(:,:) :: send_payload
integer :: send_n_particles_for_output, cc
double precision :: tout
send_n_particles_for_output = 0
send_n_particles_for_output = send_n_particles_for_output + 1
allocate(send_particles(1:send_n_particles_for_output))
allocate(send_payload(npayload,1:send_n_particles_for_output))
send_particles(cc) = particle(ipart)%self
send_payload,'ensemble')
deallocate(send_particles)
deallocate(send_payload)
character(len=*), intent(in) :: type
double precision, intent(in) :: tout
integer, intent(in) :: index
integer :: nout, mysnapshot
subroutine output_ensemble(send_n_particles_for_output,send_particles,send_payload,typefile)
  integer, intent(in) :: send_n_particles_for_output
  type(particle_t), dimension(send_n_particles_for_output), intent(in) :: send_particles
  double precision, dimension(npayload,send_n_particles_for_output), intent(in) :: send_payload
  character(len=*), intent(in) :: typefile
  character(len=std_len) :: filename
  type(particle_t), allocatable, dimension(:) :: receive_particles
  double precision, allocatable, dimension(:,:) :: receive_payload
  integer :: status(MPI_STATUS_SIZE)
  integer, dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
  integer :: ipe, ipart, nout
  logical :: file_exists
receive_n_particles_for_output_from_ipe(:) = 0

call mpi_allgather(send_n_particles_for_output, 1, mpi_integer, &
     receive_n_particles_for_output_from_ipe, 1, mpi_integer, icomm, ierrmpi)

if (sum(receive_n_particles_for_output_from_ipe(:)) == 0) return
if(typefile=='destroy') then
   write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
        trim(typefile) // '.csv'
   inquire(file=filename, exist=file_exists)
   if (.not. file_exists) then
write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
do ipart=1,send_n_particles_for_output
allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
deallocate(receive_particles)
deallocate(receive_payload)
character(len=std_len) :: filename
integer :: ipart,iipart,ipe
logical :: file_exists

if (particle(ipart)%self%follow) then
   write(filename,"(a,a,i6.6,a)") trim(base_filename), '_particle_', &
   inquire(file=filename, exist=file_exists)
double precision, intent(in) :: payload(npayload)
integer, intent(in) :: ipe
integer, intent(in) :: file_unit
double precision :: x(3), u(3)

x(:) = myparticle%x(:)
write(file_unit,csv_format) myparticle%time, myparticle%dt, x(1:3), &
double precision, intent(in) :: xp(1:3)
double precision, intent(out) :: xpcart(1:3)

xpcart(1:3) = xp(1:3)
xpcart(1) = xp(r_)*cos(xp(phi_))
xpcart(2) = xp(r_)*sin(xp(phi_))
xpcart(1) = xp(1)*sin(xp(2))*cos(xp(3))
xpcart(2) = xp(1)*cos(xp(2))
xpcart(3) = xp(1)*sin(xp(2))*sin(xp(3))}
xpcart(2) = xp(1)*sin(xp(2))*sin(xp(3))
xpcart(3) = xp(1)*cos(xp(2))}
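The braced alternatives encode two axis conventions for spherical coordinates (r, theta, phi): one branch maps (x,y,z) = r (sin\theta cos\phi, cos\theta, sin\theta sin\phi), i.e. the polar axis along the second Cartesian direction, while the other uses the conventional (x,y,z) = r (sin\theta cos\phi, sin\theta sin\phi, cos\theta).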
write(*,*) "No converter for coordinate=", coordinate
double precision, intent(in) :: xpcart(1:3)
double precision, intent(out) :: xp(1:3)
double precision :: xx, yy, zz
double precision :: rr, tth, pphi

xp(r_) = sqrt(xpcart(1)**2 + xpcart(2)**2)
xp(phi_) = atan2(xpcart(2),xpcart(1))
if (xp(phi_) .lt. 0.d0) xp(phi_) = 2.0d0*dpi + xp(phi_)
rr = sqrt(xx**2 + yy**2 + zz**2)
pphi = acos(xx/sqrt(xx**2+yy**2))
pphi = 2.d0*dpi - acos(xx/sqrt(xx**2+yy**2))
xp(1:3) = (/ rr, tth, pphi /)
write(*,*) "No converter for coordinate=", coordinate
double precision, intent(in) :: xp(1:3),up(1:3)
double precision, intent(out) :: upcart(1:3)

upcart(1) = up(1)*sin(xp(2))*cos(xp(3)) + up(2)*cos(xp(2))*cos(xp(3)) - up(3)*sin(xp(3))
upcart(2) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))
upcart(3) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))}
upcart(2) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))
upcart(3) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))}
write(*,*) "No converter for coordinate=", coordinate
double precision, intent(in) :: xp(1:3),upcart(1:3)
double precision, intent(out) :: up(1:3)

up(1:3) = upcart(1:3)
up(r_) = cos(xp(phi_)) * upcart(1) + sin(xp(phi_)) * upcart(2)
up(phi_) = -sin(xp(phi_)) * upcart(1) + cos(xp(phi_)) * upcart(2)
up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(3)*sin(xp(2))*sin(xp(3)) + upcart(2)*cos(xp(2))
up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(3)*cos(xp(2))*sin(xp(3)) - upcart(2)*sin(xp(2))
up(3) = -upcart(1)*sin(xp(3)) + upcart(3)*cos(xp(3))}
up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(2)*sin(xp(2))*sin(xp(3)) + upcart(3)*cos(xp(2))
up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(2)*cos(xp(2))*sin(xp(3)) - upcart(3)*sin(xp(2))
up(3) = -upcart(1)*sin(xp(3)) + upcart(2)*cos(xp(3))}
write(*,*) "No converter for coordinate=", coordinate
integer, intent(in) :: igrid
double precision, intent(inout) :: xp(1:3)
double precision :: xpmod(1:3)

xpmod(phiind) = xp(phiind) + 2.d0*dpi
xp(phiind) = xpmod(phiind)
xpmod(phiind) = xp(phiind) - 2.d0*dpi
xp(phiind) = xpmod(phiind)
integer, intent(in) :: igrid, ngh
double precision, intent(in) :: x(ndim)
double precision :: grid_rmin(ndim), grid_rmax(ndim)
integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
integer :: tag_send, tag_receive, send_buff, rcv_buff
integer :: status(MPI_STATUS_SIZE)
integer, dimension(0:npe-1) :: send_n_particles_to_ipe
integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
type(particle_t), allocatable, dimension(:,:) :: send_particles
type(particle_t), allocatable, dimension(:,:) :: receive_particles
double precision, allocatable, dimension(:,:,:) :: send_payload
double precision, allocatable, dimension(:,:,:) :: receive_payload
integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
integer :: destroy_n_particles_mype
logical :: BC_applied
integer, allocatable, dimension(:) :: sndrqst, rcvrqst
integer, allocatable, dimension(:) :: sndrqst_payload, rcvrqst_payload
integer :: isnd, ircv
integer, parameter :: maxneighbors=56
send_n_particles_to_ipe(:) = 0
receive_n_particles_from_ipe(:) = 0
destroy_n_particles_mype = 0

allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
allocate( sndrqst_payload(1:npe-1), rcvrqst_payload(1:npe-1) )
sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
sndrqst_payload = mpi_request_null; rcvrqst_payload = mpi_request_null;
destroy_n_particles_mype = destroy_n_particles_mype + 1
particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
if (igrid_particle == -1 ) then
if (.not. bc_applied .or. igrid_particle == -1) then
   destroy_n_particles_mype = destroy_n_particles_mype + 1
   particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
particle(ipart)%igrid = igrid_particle
send_n_particles_to_ipe(ipe_particle) = &
     send_n_particles_to_ipe(ipe_particle) + 1
call mpistop('too many particles, increase nparticles_per_cpu_hi')
particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
if (npe == 1) return

tag_receive = ipe * npe + mype
isnd = isnd + 1; ircv = ircv + 1
call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
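The counts are exchanged and waited on before any particle data moves, so each rank can size its receive buffers; the particle and payload isend/irecv pairs that follow reuse the same tag convention (tag_send and tag_receive built from the sender/receiver rank pair), keeping messages from different partners distinct.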
sndrqst = mpi_request_null; rcvrqst = mpi_request_null;

allocate(send_particles(maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
allocate(receive_particles(maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
allocate(send_payload(npayload,maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
allocate(receive_payload(npayload,maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
tag_receive = ipe * npe + mype
if (send_n_particles_to_ipe(ipe) .gt. 0) then
   do ipart = 1, send_n_particles_to_ipe(ipe)
      send_particles(ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
   call mpi_isend(send_particles(:,iipe),send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
   call mpi_isend(send_payload(:,:,iipe),npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,sndrqst_payload(isnd),ierrmpi)
if (receive_n_particles_from_ipe(ipe) .gt. 0) then
   call mpi_irecv(receive_particles(:,iipe),receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
   call mpi_irecv(receive_payload(:,:,iipe),npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,rcvrqst_payload(ircv),ierrmpi)
call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
call mpi_waitall(isnd,sndrqst_payload,mpi_statuses_ignore,ierrmpi)
call mpi_waitall(ircv,rcvrqst_payload,mpi_statuses_ignore,ierrmpi)
do ipart = 1, send_n_particles_to_ipe(ipe)
   deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
   deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)

if (receive_n_particles_from_ipe(ipe) .gt. 0) then
   do ipart = 1, receive_n_particles_from_ipe(ipe)
      index = receive_particles(ipart,iipe)%index
      particle(index)%self = receive_particles(ipart,iipe)
      particle(index)%igrid = igrid_particle
integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
integer :: tag_send, tag_receive, send_buff, rcv_buff
integer :: status(MPI_STATUS_SIZE)
integer, dimension(0:npe-1) :: send_n_particles_to_ipe
integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
type(particle_t), allocatable, dimension(:) :: send_particles
type(particle_t), allocatable, dimension(:) :: receive_particles
double precision, allocatable, dimension(:,:) :: send_payload
double precision, allocatable, dimension(:,:) :: receive_payload
integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
logical :: BC_applied
integer, allocatable, dimension(:) :: sndrqst, rcvrqst
integer :: isnd, ircv
send_n_particles_to_ipe(:) = 0
receive_n_particles_from_ipe(:) = 0

allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
sndrqst = mpi_request_null; rcvrqst = mpi_request_null;

particle(ipart)%igrid = igrid_particle
send_n_particles_to_ipe(ipe_particle) = &
     send_n_particles_to_ipe(ipe_particle) + 1
call mpistop('G: too many particles increase nparticles_per_cpu_hi')
particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart

deallocate(sndrqst, rcvrqst)
deallocate(particle_index_to_be_sent_to_ipe)
do ipe=0,npe-1; if (ipe .eq. mype) cycle;
   isnd = isnd + 1; ircv = ircv + 1;
   call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer, &
   call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer, &
call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
deallocate(sndrqst, rcvrqst)
if (ipe .eq. mype) cycle;
tag_receive = ipe * npe + mype
if (send_n_particles_to_ipe(ipe) .gt. 0) then
   allocate(send_particles(1:send_n_particles_to_ipe(ipe)))
   allocate(send_payload(1:npayload,1:send_n_particles_to_ipe(ipe)))
   do ipart = 1, send_n_particles_to_ipe(ipe)
      send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
   call mpi_send(send_payload,npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,ierrmpi)
   do ipart = 1, send_n_particles_to_ipe(ipe)
      deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
      deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
   deallocate(send_particles)
   deallocate(send_payload)
if (receive_n_particles_from_ipe(ipe) .gt. 0) then
   allocate(receive_particles(1:receive_n_particles_from_ipe(ipe)))
   allocate(receive_payload(1:npayload,1:receive_n_particles_from_ipe(ipe)))
   call mpi_recv(receive_payload,npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,status,ierrmpi)
   do ipart = 1, receive_n_particles_from_ipe(ipe)
      index = receive_particles(ipart)%index
      particle(index)%self = receive_particles(ipart)
      particle(index)%igrid = igrid_particle
   deallocate(receive_particles)
   deallocate(receive_payload)

deallocate(particle_index_to_be_sent_to_ipe)
integer, intent(inout) :: igrid_particle, ipe_particle
logical, intent(out) :: BC_applied

bc_applied = .false.
if (.not. periodb(idim)) cycle
if (particle%x(^d) .lt. xprobmin^d) then
   particle%x(^d) = particle%x(^d) + (xprobmax^d - xprobmin^d)
if (particle%x(^d) .ge. xprobmax^d) then
   particle%x(^d) = particle%x(^d) - (xprobmax^d - xprobmin^d)
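The two branches wrap a coordinate across a periodic direction by one full domain length. A branch-free equivalent, sketched with generic names xp/xmin/xmax rather than the module's ^D macro form:

! Sketch: modulo-based periodic wrap; Fortran's modulo takes the sign of
! the divisor, so the result always lands in [xmin, xmax).
xp = xmin + modulo(xp - xmin, xmax - xmin)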
integer, intent(in) :: destroy_n_particles_mype
integer, dimension(1:destroy_n_particles_mype), intent(in) :: particle_index_to_be_destroyed
type(particle_t), dimension(destroy_n_particles_mype) :: destroy_particles_mype
double precision, dimension(npayload,destroy_n_particles_mype) :: destroy_payload_mype
integer :: iipart,ipart,destroy_n_particles

destroy_n_particles = 0
do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
   destroy_particles_mype(iipart) = particle(ipart)%self
call output_ensemble(destroy_n_particles_mype,destroy_particles_mype,destroy_payload_mype,'destroy')
call mpi_allreduce(destroy_n_particles_mype,destroy_n_particles,1,mpi_integer,mpi_sum,icomm,ierrmpi)
destroy_n_particles = destroy_n_particles_mype
do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
   deallocate(particle(ipart)%payload)
integer, intent(in) :: ipart

integer, intent(in) :: ipart
subroutine, public mpistop(message)
Exit MPI-AMRVAC with an error message.
Module for physical and numeric constants.
Module with basic grid data structures.
type(tree_node_ptr), dimension(:^d &), allocatable, save tree_root
Pointers to the coarse grid.
Module with geometry-related routines (e.g., divergence, curl)
integer, parameter spherical
integer, parameter cartesian
integer, parameter cylindrical
integer, parameter cartesian_expansion
integer, parameter cartesian_stretched
subroutine curlvector(qvec, ixIL, ixOL, curlvec, idirmin, idirmin0, ndir0, fourthorder)
Calculate curl of a vector qvec within ixL. Options to employ standard second order CD evaluations use...
This module contains definitions of global parameters and variables and some generic functions/subrou...
type(state), pointer block
Block pointer for using one block and its previous state.
logical nocartesian
IO switches for conversion.
integer ixghi
Upper index of grid block arrays.
integer, parameter stretch_uni
Unidirectional stretching from a side.
integer domain_nx
number of cells for each dimension in level-one mesh
integer, parameter unitpar
file handle for IO
double precision global_time
The global simulation time.
double precision time_max
End time for the simulation.
double precision xstretch
physical extent of stretched border in symmetric stretching
integer snapshotini
Resume from the snapshot with this index.
integer, parameter ndim
Number of spatial dimensions for grid variables.
integer, parameter rpxmin
integer, dimension(:,:), allocatable nstretchedblocks
(even) number of (symmetrically) stretched blocks per level and dimension
character(len=std_len), dimension(:), allocatable par_files
Which par files are used as input.
integer icomm
The MPI communicator.
integer mype
The rank of the current MPI task.
integer block_nx
number of cells for each dimension in grid block excluding ghostcells
integer, dimension(:), allocatable, parameter d
double precision length_convert_factor
integer ndir
Number of spatial dimensions (components) for vector variables.
integer ixm
the mesh range of a physical block without ghost cells
integer ierrmpi
A global MPI error return code.
double precision, dimension(:,:), allocatable qstretch
Stretching factors and first cell size for each AMR level and dimension.
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
integer npe
The number of MPI tasks.
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
logical, dimension(ndim) periodb
True for dimensions with periodic boundaries.
double precision c_norm
Normalised speed of light.
logical b0field
split magnetic field as background B0 field
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
integer nghostcells
Number of ghost cells surrounding a grid.
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
integer, dimension(ndim) stretch_type
What kind of stretching is used per dimension.
integer, parameter rpxmax
character(len=std_len) base_filename
Base file name for simulation output, which will be followed by a number.
integer, parameter stretch_symm
Symmetric stretching around the center.
integer r_
Indices for cylindrical coordinates FOR TESTS, negative value when not used:
double precision, dimension(:,:), allocatable dxfirst
integer, dimension(:,:), allocatable node
double precision, dimension(ndim) dxlevel
integer, parameter ixglo
Lower index of grid block arrays (always 1)
Module with shared functionality for all the particle movers.
subroutine get_maxwellian_velocity(v, velocity)
subroutine partcoord_from_cartesian(xp, xpcart)
Convert to noncartesian coordinates.
double precision, dimension(3) integrator_velocity_factor
Normalization factor for velocity in the integrator.
integer nparticles_per_cpu_hi
Maximum number of particles in one processor.
integer nparticleshi
Maximum number of particles.
pure subroutine limit_dt_endtime(t_left, dt_p)
integer num_particles
Number of particles.
subroutine fix_phi_crossing(xp, igrid)
Fix particle position when crossing the 0/2pi boundary in noncartesian coordinates.
double precision const_dt_particles
If positive, a constant time step for the particles.
subroutine output_individual()
integer n_output_destroyed
Output count for ensembles of destroyed particles.
subroutine ipe_cf(iD, igrid, ipe_is_neighbor)
integer it_particles
Iteration number of particles.
subroutine fields_from_mhd(igrid, w_mhd, w_part)
Determine fields from MHD variables.
subroutine particles_params_read(files)
Read this module's parameters from a file.
double precision min_particle_time
Minimum time of all particles.
subroutine output_ensemble(send_n_particles_for_output, send_particles, send_payload, typefile)
subroutine get_uniform_pos(x)
logical write_individual
Whether to write individual particle output (followed particles)
subroutine write_particle_output()
subroutine init_particles_com()
Initialize communicators for particles.
logical function particle_in_igrid(ipart, igrid)
Quick check if particle is still in igrid.
integer, dimension(:), allocatable particles_on_mype
Array of identity numbers of particles in current processor.
double precision dtsave_particles
Time interval to save particles.
subroutine output_particle(myparticle, payload, ipe, file_unit)
subroutine update_gridvars()
update grid variables for particles
integer ngridvars
Number of variables for grid field.
subroutine get_efield(igrid, x, tloc, e)
double precision particles_etah
subroutine destroy_particles(destroy_n_particles_mype, particle_index_to_be_destroyed)
clean up destroyed particles on all cores
integer nusrpayload
Number of user-defined payload variables for a particle.
subroutine select_active_particles(end_time)
subroutine write_particles_snapshot()
double precision particles_eta
Resistivity.
subroutine particles_finish()
Finalize the particles module.
logical write_snapshot
Whether to write particle snapshots.
pure subroutine get_lfac(u, lfac)
Get Lorentz factor from relativistic momentum.
subroutine find_particle_ipe(x, igrid_particle, ipe_particle)
subroutine pull_particle_from_particles_on_mype(ipart)
subroutine init_particles_output()
pure subroutine get_lfac_from_velocity(v, lfac)
Get Lorentz factor from velocity.
integer integrator
Integrator to be used for particles.
integer, dimension(:), allocatable ep
Variable index for electric field.
subroutine handle_particles()
Let particles evolve in time. The routine also handles grid variables and output.
subroutine partvec_from_cartesian(xp, up, upcart)
Convert vector from Cartesian coordinates.
subroutine fill_gridvars_default
This routine fills the particle grid variables with the default for mhd, i.e. only E and B.
character(len=400) csv_header
Header string used in CSV files.
subroutine ipe_fc(iD, igrid, ipe_is_neighbor)
subroutine finish_gridvars()
Deallocate grid variables for particles.
subroutine particle_get_current(w, ixIL, ixOL, idirmin, current)
Calculate idirmin and the idirmin:3 components of the common current array; make sure that dxlevel(^D)...
subroutine get_bfield(igrid, x, tloc, b)
integer npayload
Number of total payload variables for a particle.
integer, parameter unitparticles
integer, dimension(:), allocatable particles_active_on_mype
Array of identity numbers of active particles in current processor.
logical relativistic
Use a relativistic particle mover?
character(len=name_len) integrator_type_particles
String describing the particle integrator type.
subroutine set_neighbor_ipe()
subroutine time_spent_on_particles()
subroutine get_vec(ix, igrid, x, tloc, vec)
logical write_ensemble
Whether to write ensemble output.
integer downsample_particles
Particle downsampling factor in CSV output.
subroutine locate_particle(index, igrid_particle, ipe_particle)
subroutine push_particle_into_particles_on_mype(ipart)
subroutine read_from_snapshot()
integer nparticles_active_on_mype
Number of active particles in current processor.
procedure(sub_define_additional_gridvars), pointer particles_define_additional_gridvars
subroutine partcoord_to_cartesian(xp, xpcart)
Convert position to Cartesian coordinates.
integer nparticles
Identity number and total number of particles.
integer nparticles_on_mype
Number of particles in current processor.
integer, dimension(:), allocatable bp
Variable index for magnetic field.
subroutine comm_particles()
double precision tmax_particles
Time limit of particles.
type(particle_ptr), dimension(:), allocatable particle
procedure(fun_destroy), pointer usr_destroy_particle
integer, dimension(:), allocatable vp
Variable index for fluid velocity.
procedure(sub_integrate), pointer particles_integrate
subroutine cross(a, b, c)
subroutine apply_periodb(particle, igrid_particle, ipe_particle, BC_applied)
procedure(sub_fill_gridvars), pointer particles_fill_gridvars
double precision t_next_output
Time to write next particle output.
subroutine advance_particles(end_time, steps_taken)
Routine handles the particle evolution.
subroutine partvec_to_cartesian(xp, up, upcart)
Convert vector to Cartesian coordinates.
integer ndefpayload
Number of default payload variables for a particle.
logical function point_in_igrid_ghostc(x, igrid, ngh)
Quick check if particle coordinate is inside igrid (ghost cells included, except the last ngh)
subroutine particle_base_init()
Give initial values to parameters.
integer, dimension(:), allocatable ipe_neighbor
integer igrid_working
set the current igrid for the particle integrator:
character(len=60) csv_format
Format string used in CSV files.
integer, dimension(:), allocatable jp
Variable index for current.
subroutine append_to_snapshot(myparticle, mypayload)
integer ipart_working
set the current ipart for the particle integrator:
double precision particles_cfl
Particle CFL safety factor.
subroutine ipe_srl(iD, igrid, ipe_is_neighbor)
procedure(sub_fill_additional_gridvars), pointer particles_fill_additional_gridvars
logical function particle_in_domain(x)
Check if particle is inside computational domain.
character(len=name_len) physics_type_particles
String describing the particle physics type.
subroutine init_gridvars()
Initialize grid variables for particles.
character(len=128) function make_particle_filename(tout, index, type)
subroutine comm_particles_global()
subroutine read_particles_snapshot(file_exists)
integer rhop
Variable index for density.
subroutine interpolate_var(igrid, ixIL, ixOL, gf, x, xloc, gfloc)
This module defines the procedures of a physics module. It contains function pointers for the various...
Module for pseudo random number generation. The internal pseudo random generator is the xoroshiro128p...
Writes a D-1 slice; can do so in various formats, depending on slice_type.
subroutine get_igslice(dir, x, igslice)
double precision tpartc_int_0
double precision timeio_tot
double precision tpartc_com
double precision tpartc_grid_0
double precision tpartc_io
double precision timeloop
double precision tpartc_com0
double precision tpartc_grid
double precision tpartc_io_0
double precision tpartc_int
Module with all the methods that users can customize in AMRVAC.
procedure(particle_analytic), pointer usr_particle_analytic
procedure(particle_fields), pointer usr_particle_fields