! module variable indices into the particle grid-variable array:
integer, allocatable :: bp(:)  !< Variable index for magnetic field
integer, allocatable :: ep(:)  !< Variable index for electric field
integer, allocatable :: vp(:)  !< Variable index for fluid velocity
integer, allocatable :: jp(:)  !< Variable index for current
! components of the particle_ptr and particle_t types:
double precision, allocatable :: payload(:)
integer :: igrid, ipe
double precision :: q      !< charge
double precision :: m      !< mass
double precision :: time
double precision :: dt
double precision :: x(3)   !< position
double precision :: u(3)   !< velocity or relativistic momentum
! interface arguments for additional gridvars and the integrator:
integer, intent(inout) :: ngridvars
double precision, intent(in) :: end_time

! particles_params_read(files): read this module's parameters from the par files
character(len=*), intent(in) :: files(:)

do n = 1, size(files)
  open(unitpar, file=trim(files(n)), status="old")
  read(unitpar, particles_list, end=111)

! particle_base_init: give initial values to parameters and seed the RNG
integer, parameter :: i8 = selected_int_kind(18)
integer(i8) :: seed(2)
character(len=20) :: strdata

dtheta = 2.0d0*dpi / 60.0d0
seed = [310952_i8, 8948923749821_i8]
call rng%set_seed(seed)
! build the CSV header and format strings for particle output:
csv_header = ' time, dt, x1, x2, x3, u1, u2, u3,'
write(strdata,"(a,a,a)") ' ', trim(prim_wnames(n)), ','
write(strdata,"(a,i2.2,a)") ' pl', n, ','
write(strdata,"(a,i2.2,a)") ' usrpl', n, ','
! tail of csv_format:
'i8.7,", ",i11.10,", ",i8.7)'
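The header and the run-time format string are assembled column by column, one comma-separated label per payload variable. A minimal self-contained sketch of the same pattern (column names and the two-payload count are illustrative, not the module's):

program build_csv_strings
  implicit none
  character(len=400) :: header
  character(len=60)  :: fmt
  character(len=20)  :: strdata
  double precision   :: row(10)
  integer :: n

  ! start from the fixed columns, then append one label per payload
  header = ' time, dt, x1, x2, x3, u1, u2, u3,'
  do n = 1, 2
    write(strdata, "(a,i2.2,a)") ' pl', n, ','
    header = trim(header) // trim(strdata)
  end do
  header = trim(header) // ' ipe, iteration, index'

  ! matching run-time format: 10 reals followed by ipe, iteration, index
  fmt = '(10(es14.6,", "),i8.7,", ",i11.10,", ",i8.7)'

  row = 0.0d0
  print '(a)', trim(header)
  write(*, fmt) row, 0, 1, 42
end program build_csv_strings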
! init_particles_com: build the MPI derived type matching particle_t
integer :: oldtypes(0:7), offsets(0:7), blocklengths(0:7)

oldtypes(0) = mpi_logical
oldtypes(1) = mpi_integer
oldtypes(2:7) = mpi_double_precision

offsets(1) = size_logical * blocklengths(0)
offsets(2) = offsets(1) + size_int * blocklengths(1)
offsets(3) = offsets(2) + size_double * blocklengths(2)
offsets(4) = offsets(3) + size_double * blocklengths(3)
offsets(5) = offsets(4) + size_double * blocklengths(4)
offsets(6) = offsets(5) + size_double * blocklengths(5)
offsets(7) = offsets(6) + size_double * blocklengths(6)
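The offsets accumulate the byte displacement of each field, and the three arrays are exactly what an MPI struct-type constructor consumes. A reduced standalone sketch of the same idea (field layout is illustrative; a fully portable version would use mpi_get_address on a sample record instead of summed extents):

program particle_mpi_type
  use mpi
  implicit none
  ! reduced record: one logical, one integer, eight doubles
  integer :: blocklengths(0:2), oldtypes(0:2)
  integer(kind=mpi_address_kind) :: offsets(0:2), lb, ext_l, ext_i
  integer :: type_particle, ierr

  call mpi_init(ierr)

  call mpi_type_get_extent(mpi_logical, lb, ext_l, ierr)
  call mpi_type_get_extent(mpi_integer, lb, ext_i, ierr)

  blocklengths = [1, 1, 8]
  oldtypes     = [mpi_logical, mpi_integer, mpi_double_precision]
  offsets(0)   = 0
  offsets(1)   = offsets(0) + ext_l * blocklengths(0)
  offsets(2)   = offsets(1) + ext_i * blocklengths(1)

  call mpi_type_create_struct(3, blocklengths, offsets, oldtypes, &
       type_particle, ierr)
  call mpi_type_commit(type_particle, ierr)

  ! type_particle can now move whole records in mpi_send/mpi_recv

  call mpi_type_free(type_particle, ierr)
  call mpi_finalize(ierr)
end program particle_mpi_type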
! get_maxwellian_velocity(v, velocity): draw an isotropic Maxwellian velocity
double precision, intent(out) :: v(3)
double precision, intent(in) :: velocity
double precision :: vtmp(3), vnorm

vtmp(1) = velocity * rng%normal()
vtmp(2) = velocity * rng%normal()
vtmp(3) = velocity * rng%normal()
v = rng%sphere(vnorm)
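The pattern: three independent Gaussian draws fix the speed, a uniform point on the sphere fixes the direction. A standalone sketch using the intrinsic RNG with a Box-Muller transform in place of the module's xoroshiro128+ generator (rng%normal and rng%sphere are the module's API; everything below is illustrative):

program maxwellian_draw
  implicit none
  double precision, parameter :: dpi = acos(-1.0d0)
  double precision :: v(3), vtmp(3), vnorm, velocity
  integer :: i

  velocity = 1.0d0
  do i = 1, 3
    vtmp(i) = velocity * normal()
  end do
  vnorm = norm2(vtmp)

  ! isotropic direction scaled by the sampled speed
  v = vnorm * unit_sphere()
  print *, 'sampled velocity:', v

contains

  ! one standard normal deviate via Box-Muller
  double precision function normal()
    double precision :: u1, u2
    call random_number(u1); call random_number(u2)
    normal = sqrt(-2.0d0*log(max(u1, tiny(u1)))) * cos(2.0d0*dpi*u2)
  end function normal

  ! uniform point on the unit sphere
  function unit_sphere() result(n)
    double precision :: n(3), mu, phi
    call random_number(mu);  mu  = 2.0d0*mu - 1.0d0   ! cos(theta) uniform in [-1,1]
    call random_number(phi); phi = 2.0d0*dpi*phi
    n = [sqrt(1.0d0-mu**2)*cos(phi), sqrt(1.0d0-mu**2)*sin(phi), mu]
  end function unit_sphere
end program maxwellian_draw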
! get_uniform_pos(x): uniform random position in the full domain
! ({^D&...\} is AMRVAC's dimension-loop preprocessor syntax)
double precision, intent(out) :: x(3)

call rng%unif_01_vec(x)
{^d&x(^d) = xprobmin^d + x(^d) * (xprobmax^d - xprobmin^d)\}
! init_gridvars: allocate and zero the particle grid variables
integer :: igrid, iigrid

do iigrid=1,igridstail; igrid=igrids(iigrid);
  allocate(gridvars(igrid)%w(ixg^t,1:ngridvars),gridvars(igrid)%wold(ixg^t,1:ngridvars))
  gridvars(igrid)%w = 0.0d0
  gridvars(igrid)%wold = 0.0d0

! finish_gridvars: deallocate them again
integer :: iigrid, igrid

do iigrid=1,igridstail; igrid=igrids(iigrid);
  deallocate(gridvars(igrid)%w,gridvars(igrid)%wold)

! fill_gridvars_default: store E and B (the mhd default)
double precision :: E(ixG^T, ndir)
double precision :: B(ixG^T, ndir)
integer :: igrid, iigrid

do iigrid=1,igridstail; igrid=igrids(iigrid);
  gridvars(igrid)%w(ixg^t,ep(:)) = e
  gridvars(igrid)%w(ixg^t,bp(:)) = b
! fields_from_mhd(igrid, w_mhd, w_part): determine fields from MHD variables
integer, intent(in) :: igrid
double precision, intent(in) :: w_mhd(ixG^T,nw)
double precision, intent(inout) :: w_part(ixG^T,ngridvars)
double precision :: current(ixG^T,7-2*ndir:3)
double precision :: w(ixG^T,1:nw)
integer :: idirmin, idir

w(ixg^t,1:nw) = w_mhd(ixg^t,1:nw)
w_part(ixg^t,bp(1):bp(3)) = 0.0d0
w_part(ixg^t,ep(1):ep(3)) = 0.0d0
call phys_to_primitive(ixg^ll,ixg^ll,w,ps(igrid)%x)

! density and fluid velocity
w_part(ixg^t,rhop) = w(ixg^t,iw_rho)
w_part(ixg^t,vp(:)) = w(ixg^t,iw_mom(:))

! magnetic field, with or without a split background B0, and the current
w_part(ixg^t,bp(idir))=w(ixg^t,iw_mag(idir))+ps(igrid)%B0(ixg^t,idir,0)
w_part(ixg^t,bp(:)) = w(ixg^t,iw_mag(:))
w_part(ixg^t,jp(:)) = current(ixg^t,:)

! ideal electric field from v and B, Cartesian components
w_part(ixg^t,ep(1)) = w_part(ixg^t,bp(2)) * &
     w(ixg^t,iw_mom(3)) - w_part(ixg^t,bp(3)) * &
w_part(ixg^t,ep(2)) = w_part(ixg^t,bp(3)) * &
     w(ixg^t,iw_mom(1)) - w_part(ixg^t,bp(1)) * &
w_part(ixg^t,ep(3)) = w_part(ixg^t,bp(1)) * &
     w(ixg^t,iw_mom(2)) - w_part(ixg^t,bp(2)) * &

! same in cylindrical (r_, phi_, z_) components
w_part(ixg^t,ep(r_)) = w_part(ixg^t,bp(phi_)) * &
     w(ixg^t,iw_mom(z_)) - w_part(ixg^t,bp(z_)) * &
w_part(ixg^t,ep(phi_)) = w_part(ixg^t,bp(z_)) * &
     w(ixg^t,iw_mom(r_)) - w_part(ixg^t,bp(r_)) * &
w_part(ixg^t,ep(z_)) = w_part(ixg^t,bp(r_)) * &
     w(ixg^t,iw_mom(phi_)) - w_part(ixg^t,bp(phi_)) * &

! non-ideal terms: cross products of the current with the magnetic field
(current(ixg^t,2) * w_part(ixg^t,bp(3)) - current(ixg^t,3) * w_part(ixg^t,bp(2)))
(-current(ixg^t,1) * w_part(ixg^t,bp(3)) + current(ixg^t,3) * w_part(ixg^t,bp(1)))
(current(ixg^t,1) * w_part(ixg^t,bp(2)) - current(ixg^t,2) * w_part(ixg^t,bp(1)))
(current(ixg^t,phi_) * w_part(ixg^t,bp(z_)) - current(ixg^t,z_) * w_part(ixg^t,bp(phi_)))
(-current(ixg^t,r_) * w_part(ixg^t,bp(z_)) + current(ixg^t,z_) * w_part(ixg^t,bp(r_)))
(current(ixg^t,r_) * w_part(ixg^t,bp(phi_)) - current(ixg^t,phi_) * w_part(ixg^t,bp(r_)))
! particle_get_current(w, ixI^L, ixO^L, idirmin, current): J = curl(B)
integer :: ixO^L, idirmin, ixI^L
double precision :: w(ixI^S,1:nw)
double precision :: current(ixI^S,7-2*ndir:3),bvec(ixI^S,1:ndir)

bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))+block%B0(ixi^s,idir,0)
bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))
call curlvector(bvec,ixi^l,ixo^l,current,idirmin,idirmin0,ndir)

! update_gridvars: save the previous state before refilling
integer :: igrid, iigrid

do iigrid=1,igridstail; igrid=igrids(iigrid);
  gridvars(igrid)%wold=gridvars(igrid)%w
! handle_particles: evolve particles, handle grid variables and output
integer :: steps_taken, nparticles_left

if (nparticles_left == 0 .and. convert) call mpistop('No particles left')

! advance_particles(end_time, steps_taken): integrate all active particles
double precision, intent(in) :: end_time
integer, intent(out) :: steps_taken

if (n_active == 0) exit
steps_taken = steps_taken + 1
! limit_dt_endtime(t_left, dt_p): do not overshoot the output/end time
double precision, intent(in) :: t_left
double precision, intent(inout) :: dt_p
double precision :: n_steps
double precision, parameter :: eps = 1d-10

n_steps = t_left / dt_p

if (n_steps < 1+eps) then
else if (n_steps < 2-eps) then
  dt_p = 0.5d0 * t_left
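The logic: if less than about one step remains, take exactly the remaining time; if less than two, split the remainder into two equal steps so the final step never becomes vanishingly small. A complete sketch of that rule (the body of the first branch is not shown above and is assumed to be dt_p = t_left):

pure subroutine limit_dt_endtime(t_left, dt_p)
  implicit none
  double precision, intent(in)    :: t_left
  double precision, intent(inout) :: dt_p
  double precision :: n_steps
  double precision, parameter :: eps = 1d-10

  n_steps = t_left / dt_p

  if (n_steps < 1+eps) then
    dt_p = t_left            ! last step: land exactly on the end time (assumed)
  else if (n_steps < 2-eps) then
    dt_p = 0.5d0 * t_left    ! two roughly equal steps instead of one long + one tiny
  end if
end subroutine limit_dt_endtime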
! get_vec(ix, igrid, x, tloc, vec): interpolate a vector field in space and time
integer, intent(in) :: ix(ndir)
integer, intent(in) :: igrid
double precision, dimension(3), intent(in) :: x
double precision, intent(in) :: tloc
double precision, dimension(ndir), intent(out) :: vec
double precision, dimension(ndir) :: vec1, vec2
double precision :: td, bb

! spatial interpolation at the old and new grid states
     ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
     ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))

! linear interpolation in time
vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td

! rescale the magnitude using the interpolated norm
if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
     sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1)*td &
     +sum(gridvars(igrid)%wold(ixg^t,ix(:))**2,dim=ndim+1)*(1.d0-td), &
     ps(igrid)%x(ixg^t,1:ndim),x,bb)

if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
     sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1), &
     ps(igrid)%x(ixg^t,1:ndim),x,bb)
vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
! get_bfield(igrid, x, tloc, b): the same interpolation specialised to B
integer, intent(in) :: igrid
double precision, dimension(3), intent(in) :: x
double precision, intent(in) :: tloc
double precision, dimension(ndir), intent(out) :: b
double precision, dimension(ndir) :: vec1, vec2, vec
double precision :: td, bb

     ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
     ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td

if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
     sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1)*td &
     +sum(gridvars(igrid)%wold(ixg^t,bp(:))**2,dim=ndim+1)*(1.d0-td), &
     ps(igrid)%x(ixg^t,1:ndim),x,bb)

if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
     sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1), &
     ps(igrid)%x(ixg^t,1:ndim),x,bb)
vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
! get_efield(igrid, x, tloc, e): electric field from fluid velocity, B and current
integer, intent(in) :: igrid
double precision, dimension(3), intent(in) :: x
double precision, intent(in) :: tloc
double precision, dimension(ndir), intent(out) :: e
double precision, dimension(ndir) :: vec1, vec2, vec
double precision, dimension(ndir) :: vfluid, b
double precision, dimension(ndir) :: current
double precision :: rho = 1.d0, rho2 = 1.d0
double precision :: td

call get_vec(jp, igrid, x, tloc, current)
rho = rho2 * (1.0d0 - td) + rho * td
! get_lfac(u, lfac): Lorentz factor from relativistic momentum
double precision, dimension(3), intent(in) :: u
double precision, intent(out) :: lfac

lfac = sqrt(1.0d0 + sum(u(:)**2)/c_norm**2)

! get_lfac_from_velocity(v, lfac): Lorentz factor from velocity
double precision, dimension(3), intent(in) :: v
double precision, intent(out) :: lfac

lfac = 1.0d0 / sqrt(1.0d0 - sum(v(:)**2)/c_norm**2)
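The two definitions are consistent: with u = γv, sqrt(1 + u²/c²) equals 1/sqrt(1 − v²/c²). A small numerical check (c_norm set to 1 here purely for illustration):

program lfac_check
  implicit none
  double precision, parameter :: c_norm = 1.0d0
  double precision :: v(3), u(3), lfac_v, lfac_u

  v = [0.3d0, 0.4d0, 0.0d0]                  ! |v| = 0.5 c
  lfac_v = 1.0d0 / sqrt(1.0d0 - sum(v**2)/c_norm**2)

  u = lfac_v * v                             ! relativistic momentum per unit mass
  lfac_u = sqrt(1.0d0 + sum(u**2)/c_norm**2)

  print *, 'gamma from v:', lfac_v
  print *, 'gamma from u:', lfac_u           ! identical up to roundoff
end program lfac_check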
! cross(a, b, c): vector cross product, Cartesian case
double precision, dimension(ndir), intent(in) :: a,b
double precision, dimension(ndir), intent(out) :: c

c(1) = a(2)*b(3) - a(3)*b(2)
c(2) = a(3)*b(1) - a(1)*b(3)
c(3) = a(1)*b(2) - a(2)*b(1)

call mpistop('geometry not implemented in cross(a,b,c)')
call mpistop("cross in particles needs to be run with three components!")
! interpolate_var(igrid, ixI^L, ixO^L, gf, x, xloc, gfloc): linear interpolation
integer, intent(in) :: igrid,ixI^L, ixO^L
double precision, intent(in) :: gf(ixI^S)
double precision, intent(in) :: x(ixI^S,1:ndim)
double precision, intent(in) :: xloc(1:3)
double precision, intent(out) :: gfloc
double precision :: xd^D, myq, dx1
! declarations from the 2D and 3D preprocessor branches:
double precision :: c00, c10
double precision :: c0, c1, c00, c10, c01, c11
integer :: ic^D, ic1^D, ic2^D, idir

! cell search on stretched grids
ic^d = ceiling(dlog((xloc(^d)-xprobmin^d)*(qstretch(ps(igrid)%level,^d)-one)/&
if(xloc(^d)<xprobmin^d+xstretch^d) then
else if(xloc(^d)>xprobmax^d-xstretch^d) then
ic^d = ceiling(dlog((xloc(^d)-xprobmax^d+xstretch^d)*(qstretch(ps(igrid)%level,^d)-one)/&

! pick the two bracketing cell centres and check they are inside the block
if (x({ic^dd},^d) .lt. xloc(^d)) then
if(ic1^d.lt.ixglo^d+1 .or. ic2^d.gt.ixghi^d-1) then
  print *, 'direction: ',^d
  print *, 'position: ',xloc(1:ndim)
  print *, 'indices:', ic1^d,ic2^d
  call mpistop('Trying to interpolate from out of grid!')

! 1D linear interpolation
xd1 = (xloc(1)-x(ic11,1)) / (x(ic21,1) - x(ic11,1))
gfloc = gf(ic11) * (1.0d0 - xd1) + gf(ic21) * xd1

! 2D bilinear interpolation
xd1 = (xloc(1)-x(ic11,ic12,1)) / (x(ic21,ic12,1) - x(ic11,ic12,1))
xd2 = (xloc(2)-x(ic11,ic12,2)) / (x(ic11,ic22,2) - x(ic11,ic12,2))
c00 = gf(ic11,ic12) * (1.0d0 - xd1) + gf(ic21,ic12) * xd1
c10 = gf(ic11,ic22) * (1.0d0 - xd1) + gf(ic21,ic22) * xd1
gfloc = c00 * (1.0d0 - xd2) + c10 * xd2

! 3D trilinear interpolation
xd1 = (xloc(1)-x(ic11,ic12,ic13,1)) / (x(ic21,ic12,ic13,1) - x(ic11,ic12,ic13,1))
xd2 = (xloc(2)-x(ic11,ic12,ic13,2)) / (x(ic11,ic22,ic13,2) - x(ic11,ic12,ic13,2))
xd3 = (xloc(3)-x(ic11,ic12,ic13,3)) / (x(ic11,ic12,ic23,3) - x(ic11,ic12,ic13,3))

c00 = gf(ic11,ic12,ic13) * (1.0d0 - xd1) + gf(ic21,ic12,ic13) * xd1
c10 = gf(ic11,ic22,ic13) * (1.0d0 - xd1) + gf(ic21,ic22,ic13) * xd1
c01 = gf(ic11,ic12,ic23) * (1.0d0 - xd1) + gf(ic21,ic12,ic23) * xd1
c11 = gf(ic11,ic22,ic23) * (1.0d0 - xd1) + gf(ic21,ic22,ic23) * xd1

c0 = c00 * (1.0d0 - xd2) + c10 * xd2
c1 = c01 * (1.0d0 - xd2) + c11 * xd2

gfloc = c0 * (1.0d0 - xd3) + c1 * xd3
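Trilinear interpolation is exact for any function that is linear in each coordinate separately, which gives a simple self-check. A standalone sketch on a single cell with unit spacing (the test function is illustrative):

program trilinear_check
  implicit none
  double precision :: gf(2,2,2), xd1, xd2, xd3
  double precision :: c00, c10, c01, c11, c0, c1, gfloc
  integer :: i, j, k

  ! fill the 8 corners with a multilinear function
  do k = 1, 2; do j = 1, 2; do i = 1, 2
    gf(i,j,k) = f(dble(i-1), dble(j-1), dble(k-1))
  end do; end do; end do

  ! interpolate at (0.25, 0.5, 0.75): weights are just the local coordinates
  xd1 = 0.25d0; xd2 = 0.5d0; xd3 = 0.75d0

  c00 = gf(1,1,1)*(1-xd1) + gf(2,1,1)*xd1
  c10 = gf(1,2,1)*(1-xd1) + gf(2,2,1)*xd1
  c01 = gf(1,1,2)*(1-xd1) + gf(2,1,2)*xd1
  c11 = gf(1,2,2)*(1-xd1) + gf(2,2,2)*xd1
  c0  = c00*(1-xd2) + c10*xd2
  c1  = c01*(1-xd2) + c11*xd2
  gfloc = c0*(1-xd3) + c1*xd3

  print *, 'interpolated:', gfloc, ' exact:', f(xd1, xd2, xd3)

contains
  double precision function f(x, y, z)
    double precision, intent(in) :: x, y, z
    f = 1 + 2*x + 3*y + 4*z + 5*x*y*z
  end function f
end program trilinear_check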
! time_spent_on_particles: report timers averaged across MPI ranks
double precision :: tpartc_avg, tpartc_int_avg, tpartc_io_avg, tpartc_com_avg, tpartc_grid_avg

tpartc_avg = tpartc_avg/dble(npe)
tpartc_int_avg = tpartc_int_avg/dble(npe)
tpartc_io_avg = tpartc_io_avg/dble(npe)
tpartc_com_avg = tpartc_com_avg/dble(npe)
tpartc_grid_avg = tpartc_grid_avg/dble(npe)

write(*,'(a,f12.3,a)') ' Particle handling took : ',tpartc,' sec'
write(*,'(a,f12.2,a)') ' Percentage: ',100.0d0*tpartc/(timeloop+smalldouble),' %'
write(*,'(a,f12.3,a)') ' Particle IO took : ',tpartc_io_avg,' sec'
write(*,'(a,f12.2,a)') ' Percentage: ',100.0d0*tpartc_io_avg/(timeloop+smalldouble),' %'
write(*,'(a,f12.3,a)') ' Particle COM took : ',tpartc_com_avg,' sec'
write(*,'(a,f12.2,a)') ' Percentage: ',100.0d0*tpartc_com_avg/(timeloop+smalldouble),' %'
write(*,'(a,f12.3,a)') ' Particle integration took : ',tpartc_int_avg,' sec'
write(*,'(a,f12.2,a)') ' Percentage: ',100.0d0*tpartc_int_avg/(timeloop+smalldouble),' %'
write(*,'(a,f12.3,a)') ' Particle init grid took : ',tpartc_grid_avg,' sec'
write(*,'(a,f12.2,a)') ' Percentage: ',100.0d0*tpartc_grid_avg/(timeloop+smalldouble),' %'
! read_particles_snapshot(file_exists): resume particles from an unformatted file
logical, intent(out) :: file_exists
character(len=std_len) :: filename
integer :: mynpayload, mynparticles, pos

INQUIRE(file=filename, exist=file_exists)
if (.not. file_exists) then
  write(*,*) 'WARNING: File '//trim(filename)//' with particle data does not exist.'
  write(*,*) 'Initialising particles from user or default routines'

open(unit=unitparticles,file=filename,form='unformatted',action='read',status='unknown',access='stream')
call mpistop('npayload in restart file does not match npayload in mod_particles')

call mpi_bcast(file_exists,1,mpi_logical,0,icomm,ierrmpi)
if (.not. file_exists) return
mynparticles = mynparticles + 1
call mpi_bcast(mynparticles,1,mpi_integer,0,icomm,ierrmpi)
! read_from_snapshot: read one particle record and place it on its grid
integer :: index,igrid_particle,ipe_particle

particle(index)%igrid = igrid_particle
! write_particles_snapshot: gather all particles on rank 0 and write the .dat file
character(len=std_len) :: filename
type(particle_t), allocatable, dimension(:) :: send_particles
type(particle_t), allocatable, dimension(:) :: receive_particles
double precision, allocatable, dimension(:,:) :: send_payload
double precision, allocatable, dimension(:,:) :: receive_payload
integer :: status(MPI_STATUS_SIZE)
integer, dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
integer :: ipe, ipart, iipart, send_n_particles_for_output
logical, save :: file_exists=.false.
integer :: snapshotnumber

receive_n_particles_for_output_from_ipe(:) = 0

if (mype .eq. 0) then
  write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotnumber,'.dat'
  INQUIRE(file=filename, exist=file_exists)
  if (.not. file_exists) then
    open(unit=unitparticles,file=filename,form='unformatted',status='new',access='stream')
    open(unit=unitparticles,file=filename,form='unformatted',status='replace',access='stream')

! non-root ranks pack and send their particles
if (mype .ne. 0) then
  allocate(send_particles(1:send_n_particles_for_output))
  allocate(send_payload(1:npayload,1:send_n_particles_for_output))
  send_particles(iipart) = particle(ipart)%self

call mpi_recv(receive_n_particles_for_output_from_ipe(ipe),1,mpi_integer,ipe,ipe,icomm,status,ierrmpi)

if (mype .ne. 0) then
  deallocate(send_particles)
  deallocate(send_payload)

! rank 0 receives and appends each rank's particles
allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
deallocate(receive_particles)
deallocate(receive_payload)
! append_to_snapshot(myparticle, mypayload):
double precision :: mypayload(1:npayload)
! init_particles_output: write CSV headers for followed particles
integer :: iipart, ipart, icomp
character(len=std_len) :: filename
character(len=1024) :: line, strdata

if (particle(ipart)%self%follow) then
  write(strdata,"(a,i2.2,a)") 'payload',icomp,','
  line = trim(line)//trim(strdata)
  write(unitparticles,"(a,a,a)") 'time,dt,x1,x2,x3,u1,u2,u3,',trim(line),'ipe,iteration,index'
! select_active_particles(end_time): flag particles still before end_time
double precision, intent(in) :: end_time
double precision :: t_min_mype
integer :: ipart, iipart

t_min_mype = bigdouble
activate = (particle(ipart)%self%time < end_time)
t_min_mype = min(t_min_mype, particle(ipart)%self%time)
! locate_particle(index, igrid_particle, ipe_particle): find which rank and
! grid hold the particle with a given identity number
integer, intent(in) :: index
integer, intent(out) :: igrid_particle, ipe_particle
integer :: iipart,ipart,ipe_has_particle,ipe
integer, dimension(0:1) :: buff
logical :: has_particle(0:npe-1)

has_particle(:) = .false.
if (particle(ipart)%self%index == index) then
  has_particle(mype) = .true.

if (has_particle(mype)) then

if (npe>0) call mpi_allgather(has_particle(mype),1,mpi_logical,has_particle,1,mpi_logical,icomm,ierrmpi)

ipe_has_particle = -1
if (has_particle(ipe) .eqv. .true.) then
  ipe_has_particle = ipe

if (ipe_has_particle .ne. -1) then
  if (npe>0) call mpi_bcast(buff,2,mpi_integer,ipe_has_particle,icomm,ierrmpi)
  igrid_particle=buff(0)
  ipe_particle = buff(1)
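The pattern is a distributed lookup: every rank reports a boolean via MPI_Allgather, the first owner is chosen, and the owner broadcasts its (igrid, ipe) pair. A minimal standalone sketch of the same handshake (here each rank "owns" one integer id equal to its rank; all names are illustrative):

program locate_owner
  use mpi
  implicit none
  integer :: mype, npe, ierr, ipe, owner, wanted
  logical, allocatable :: has_it(:)
  logical :: mine
  integer :: buff(0:1)

  call mpi_init(ierr)
  call mpi_comm_rank(mpi_comm_world, mype, ierr)
  call mpi_comm_size(mpi_comm_world, npe, ierr)
  allocate(has_it(0:npe-1))

  wanted = npe - 1                 ! look for the id held by the last rank
  mine = (mype == wanted)

  ! every rank states whether it holds the wanted id
  call mpi_allgather(mine, 1, mpi_logical, &
       has_it, 1, mpi_logical, mpi_comm_world, ierr)

  owner = -1
  do ipe = 0, npe-1
    if (has_it(ipe)) then
      owner = ipe
      exit
    end if
  end do

  ! the owner broadcasts its payload (a stand-in for igrid/ipe)
  if (owner /= -1) then
    if (mype == owner) buff = [mype, 2*mype]
    call mpi_bcast(buff, 2, mpi_integer, owner, mpi_comm_world, ierr)
    if (mype == 0) print *, 'owner:', owner, 'payload:', buff
  end if

  call mpi_finalize(ierr)
end program locate_owner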
! find_particle_ipe(x, igrid_particle, ipe_particle): walk the AMR tree down to
! the leaf octant containing position x
double precision, intent(in) :: x(3)
integer, intent(out) :: igrid_particle, ipe_particle
integer :: ig(ndir,nlevelshi), ig_lvl(nlevelshi)
integer :: idim, ic(ndim)

do while (.not.branch%node%leaf)
  {ic(^d)=ig(^d,branch%node%level+1) - 2 * branch%node%ig^d +2\}
  branch%node => branch%node%child({ic(^d)})%node

igrid_particle = branch%node%igrid
ipe_particle = branch%node%ipe
! particle_in_domain(x): check if a position lies inside the computational domain
double precision, dimension(ndim), intent(in) :: x

all(x < [ xprobmax^d ])
! particle_in_igrid(ipart, igrid): quick check whether a particle is still in igrid
integer, intent(in) :: igrid, ipart
double precision :: x(ndim), grid_rmin(ndim), grid_rmax(ndim)

if (.not. allocated(ps(igrid)%w)) then
! set_neighbor_ipe: collect the ranks owning any neighbor of a local grid
integer :: igrid, iigrid,ipe
integer :: my_neighbor_type, i^D
logical :: ipe_is_neighbor(0:npe-1)

ipe_is_neighbor(:) = .false.
do iigrid=1,igridstail; igrid=igrids(iigrid);
  if (i^d==0|.and.) then
  my_neighbor_type=neighbor_type(i^d,igrid)
  select case (my_neighbor_type)
  case (neighbor_boundary)
  case (neighbor_coarse)
    call ipe_fc(i^d,igrid,ipe_is_neighbor)
  case (neighbor_sibling)
    call ipe_srl(i^d,igrid,ipe_is_neighbor)
  case (neighbor_fine)
    call ipe_cf(i^d,igrid,ipe_is_neighbor)

ipe_is_neighbor(mype) = .false.
if (ipe_is_neighbor(ipe)) then

! ipe_fc: mark the rank of a coarser neighbor
integer, intent(in) :: i^D, igrid
logical, intent(inout) :: ipe_is_neighbor(0:npe-1)

ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.

! ipe_srl: mark the rank of a same-level neighbor
integer, intent(in) :: i^D, igrid
logical, intent(inout) :: ipe_is_neighbor(0:npe-1)

ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.

! ipe_cf: mark the ranks of finer (child) neighbors
integer, intent(in) :: i^D, igrid
logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
integer :: ic^D, inc^D

{do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
ipe_is_neighbor( neighbor_child(2,inc^d,igrid) ) = .true.
! write_particle_output: collect ensemble output at output times
character(len=std_len) :: filename
type(particle_t), allocatable, dimension(:) :: send_particles
double precision, allocatable, dimension(:,:) :: send_payload
double precision :: tout
integer :: send_n_particles_for_output, cc
integer :: ipart,iipart

send_n_particles_for_output = 0
send_n_particles_for_output = send_n_particles_for_output + 1

allocate(send_particles(1:send_n_particles_for_output))
allocate(send_payload(npayload,1:send_n_particles_for_output))
send_particles(cc) = particle(ipart)%self

     send_payload,'ensemble')
deallocate(send_particles)
deallocate(send_payload)
! make_particle_filename(tout, index, type):
character(len=*), intent(in) :: type
double precision, intent(in) :: tout
integer, intent(in) :: index
integer :: nout, mysnapshot
subroutine output_ensemble(send_n_particles_for_output,send_particles,send_payload,typefile)

integer, intent(in) :: send_n_particles_for_output
type(particle_t), dimension(send_n_particles_for_output), intent(in) :: send_particles
double precision, dimension(npayload,send_n_particles_for_output), intent(in) :: send_payload
character(len=*), intent(in) :: typefile
character(len=std_len) :: filename
type(particle_t), allocatable, dimension(:) :: receive_particles
double precision, allocatable, dimension(:,:) :: receive_payload
integer :: status(MPI_STATUS_SIZE)
integer, dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
integer :: ipe, ipart, nout
logical :: file_exists

receive_n_particles_for_output_from_ipe(:) = 0

call mpi_allgather(send_n_particles_for_output, 1, mpi_integer, &
     receive_n_particles_for_output_from_ipe, 1, mpi_integer, icomm, ierrmpi)

! nothing to write anywhere: return early
if (sum(receive_n_particles_for_output_from_ipe(:)) == 0) return

if(typefile=='destroy') then
  write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
       trim(typefile) // '.csv'
  inquire(file=filename, exist=file_exists)
  if (.not. file_exists) then
write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &

do ipart=1,send_n_particles_for_output

! rank 0 receives every other rank's batch and appends it
allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
deallocate(receive_particles)
deallocate(receive_payload)
! output_individual: CSV output for followed particles
character(len=std_len) :: filename
integer :: ipart,iipart,ipe
logical :: file_exists

if (particle(ipart)%self%follow) then
  write(filename,"(a,a,i6.6,a)") trim(base_filename),'_particle_', &
  inquire(file=filename, exist=file_exists)
! output_particle(myparticle, payload, ipe, file_unit): write one CSV row
double precision, intent(in) :: payload(npayload)
integer, intent(in) :: ipe
integer, intent(in) :: file_unit
double precision :: x(3), u(3)

x(:) = myparticle%x(:)
write(file_unit,csv_format) myparticle%time, myparticle%dt, x(1:3), &
! partcoord_to_cartesian(xp, xpcart): convert position to Cartesian coordinates
double precision, intent(in) :: xp(1:3)
double precision, intent(out) :: xpcart(1:3)

! cartesian: identity
xpcart(1:3) = xp(1:3)

! cylindrical (r_, phi_, z_)
xpcart(1) = xp(r_)*cos(xp(phi_))
xpcart(2) = xp(r_)*sin(xp(phi_))

! spherical: two preprocessor branches with different angular orderings
xpcart(1) = xp(1)*sin(xp(2))*cos(xp(3))
xpcart(2) = xp(1)*cos(xp(2))
xpcart(3) = xp(1)*sin(xp(2))*sin(xp(3))}
xpcart(2) = xp(1)*sin(xp(2))*sin(xp(3))
xpcart(3) = xp(1)*cos(xp(2))}

write(*,*) "No converter for coordinate=",coordinate
! partcoord_from_cartesian(xp, xpcart): convert back to noncartesian coordinates
double precision, intent(in) :: xpcart(1:3)
double precision, intent(out) :: xp(1:3)
double precision :: xx, yy, zz
double precision :: rr, tth, pphi

! cylindrical
xp(r_) = sqrt(xpcart(1)**2 + xpcart(2)**2)
xp(phi_) = atan2(xpcart(2),xpcart(1))
if (xp(phi_) .lt. 0.d0) xp(phi_) = 2.0d0*dpi + xp(phi_)

! spherical
rr = sqrt(xx**2 + yy**2 + zz**2)
pphi = acos(xx/sqrt(xx**2+yy**2))
pphi = 2.d0*dpi - acos(xx/sqrt(xx**2+yy**2))
xp(1:3) = (/ rr, tth, pphi /)

write(*,*) "No converter for coordinate=",coordinate
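Converting to Cartesian and back should reproduce the original coordinates, with phi folded into [0, 2π). A standalone round-trip check for the cylindrical case:

program cyl_roundtrip
  implicit none
  double precision, parameter :: dpi = acos(-1.0d0)
  double precision :: xp(3), xpcart(3), xp2(3)

  xp = [2.0d0, 1.25d0*dpi, 0.3d0]            ! (r, phi, z)

  ! cylindrical -> Cartesian
  xpcart = [xp(1)*cos(xp(2)), xp(1)*sin(xp(2)), xp(3)]

  ! Cartesian -> cylindrical, phi folded into [0, 2*pi)
  xp2(1) = sqrt(xpcart(1)**2 + xpcart(2)**2)
  xp2(2) = atan2(xpcart(2), xpcart(1))
  if (xp2(2) < 0.d0) xp2(2) = 2.0d0*dpi + xp2(2)
  xp2(3) = xpcart(3)

  print *, 'max round-trip error:', maxval(abs(xp2 - xp))
end program cyl_roundtrip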
! partvec_to_cartesian(xp, up, upcart): convert a vector to Cartesian components
double precision, intent(in) :: xp(1:3),up(1:3)
double precision, intent(out) :: upcart(1:3)

! spherical: two preprocessor branches with different angular orderings
upcart(1) = up(1)*sin(xp(2))*cos(xp(3)) + up(2)*cos(xp(2))*cos(xp(3)) - up(3)*sin(xp(3))
upcart(2) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))
upcart(3) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))}
upcart(2) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))
upcart(3) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))}

write(*,*) "No converter for coordinate=",coordinate

! partvec_from_cartesian(xp, up, upcart): the inverse rotation
double precision, intent(in) :: xp(1:3),upcart(1:3)
double precision, intent(out) :: up(1:3)

up(1:3) = upcart(1:3)

! cylindrical
up(r_) = cos(xp(phi_)) * upcart(1) + sin(xp(phi_)) * upcart(2)
up(phi_) = -sin(xp(phi_)) * upcart(1) + cos(xp(phi_)) * upcart(2)

! spherical
up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(3)*sin(xp(2))*sin(xp(3)) + upcart(2)*cos(xp(2))
up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(3)*cos(xp(2))*sin(xp(3)) - upcart(2)*sin(xp(2))
up(3) = -upcart(1)*sin(xp(3)) + upcart(3)*cos(xp(3))}
up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(2)*sin(xp(2))*sin(xp(3)) + upcart(3)*cos(xp(2))
up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(2)*cos(xp(2))*sin(xp(3)) - upcart(3)*sin(xp(2))
up(3) = -upcart(1)*sin(xp(3)) + upcart(2)*cos(xp(3))}

write(*,*) "No converter for coordinate=",coordinate
! fix_phi_crossing(xp, igrid): fix particle position crossing the 0/2pi boundary
integer, intent(in) :: igrid
double precision, intent(inout) :: xp(1:3)
double precision :: xpmod(1:3)

xpmod(phiind) = xp(phiind) + 2.d0*dpi
xp(phiind) = xpmod(phiind)
xpmod(phiind) = xp(phiind) - 2.d0*dpi
xp(phiind) = xpmod(phiind)
! point_in_igrid_ghostc(x, igrid, ngh): is x inside igrid including ghost cells,
! except the outermost ngh layers
integer, intent(in) :: igrid, ngh
double precision, intent(in) :: x(ndim)
double precision :: grid_rmin(ndim), grid_rmax(ndim)
! comm_particles: move particles between ranks after they leave their grid
integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
integer :: tag_send, tag_receive, send_buff, rcv_buff
integer :: status(MPI_STATUS_SIZE)
integer, dimension(0:npe-1) :: send_n_particles_to_ipe
integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
type(particle_t), allocatable, dimension(:,:) :: send_particles
type(particle_t), allocatable, dimension(:,:) :: receive_particles
double precision, allocatable, dimension(:,:,:) :: send_payload
double precision, allocatable, dimension(:,:,:) :: receive_payload
integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
integer :: destroy_n_particles_mype
integer, allocatable, dimension(:) :: sndrqst, rcvrqst
integer, allocatable, dimension(:) :: sndrqst_payload, rcvrqst_payload
integer :: isnd, ircv
integer, parameter :: maxneighbors=56
logical :: BC_applied

send_n_particles_to_ipe(:) = 0
receive_n_particles_from_ipe(:) = 0
destroy_n_particles_mype = 0

allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
allocate( sndrqst_payload(1:npe-1), rcvrqst_payload(1:npe-1) )
sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
sndrqst_payload = mpi_request_null; rcvrqst_payload = mpi_request_null;

! flag particles that left the domain or failed the boundary treatment
destroy_n_particles_mype = destroy_n_particles_mype + 1
particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart

if (igrid_particle == -1 ) then
if (.not. bc_applied .or. igrid_particle == -1) then
  destroy_n_particles_mype = destroy_n_particles_mype + 1
  particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart

particle(ipart)%igrid = igrid_particle

! count how many particles go to each rank
send_n_particles_to_ipe(ipe_particle) = &
     send_n_particles_to_ipe(ipe_particle) + 1
call mpistop('too many particles, increase nparticles_per_cpu_hi')
particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart

call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))

! single rank: nothing to communicate
if (npe == 1) return

! first handshake: exchange the counts with nonblocking sends/receives
tag_receive = ipe * npe + mype
isnd = isnd + 1; ircv = ircv + 1
call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
sndrqst = mpi_request_null; rcvrqst = mpi_request_null;

! second phase: exchange the particle records and payloads themselves
allocate(send_particles(maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
allocate(receive_particles(maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
allocate(send_payload(npayload,maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
allocate(receive_payload(npayload,maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))

tag_receive = ipe * npe + mype

if (send_n_particles_to_ipe(ipe) .gt. 0) then
  do ipart = 1, send_n_particles_to_ipe(ipe)
    send_particles(ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
  call mpi_isend(send_particles(:,iipe),send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
  call mpi_isend(send_payload(:,:,iipe),npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,sndrqst_payload(isnd),ierrmpi)

if (receive_n_particles_from_ipe(ipe) .gt. 0) then
  call mpi_irecv(receive_particles(:,iipe),receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
  call mpi_irecv(receive_payload(:,:,iipe),npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,rcvrqst_payload(ircv),ierrmpi)

call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
call mpi_waitall(isnd,sndrqst_payload,mpi_statuses_ignore,ierrmpi)
call mpi_waitall(ircv,rcvrqst_payload,mpi_statuses_ignore,ierrmpi)

! drop the sent particles locally, then unpack the received ones
do ipart = 1, send_n_particles_to_ipe(ipe)
  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)

if (receive_n_particles_from_ipe(ipe) .gt. 0) then
  do ipart = 1, receive_n_particles_from_ipe(ipe)
    index = receive_particles(ipart,iipe)%index
    particle(index)%self = receive_particles(ipart,iipe)
    particle(index)%igrid = igrid_particle
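The communication is two-phase: a nonblocking count handshake, then nonblocking transfers of the actual records, each phase closed by MPI_Waitall. A reduced standalone sketch of phase one, with plain integers standing in for particle counts (the tag scheme mirrors the one above; everything else is illustrative):

program two_phase_exchange
  use mpi
  implicit none
  integer :: mype, npe, ipe, ierr, isnd, ircv
  integer, allocatable :: n_to(:), n_from(:), sndrqst(:), rcvrqst(:)

  call mpi_init(ierr)
  call mpi_comm_rank(mpi_comm_world, mype, ierr)
  call mpi_comm_size(mpi_comm_world, npe, ierr)

  allocate(n_to(0:npe-1), n_from(0:npe-1))
  allocate(sndrqst(npe), rcvrqst(npe))
  sndrqst = mpi_request_null; rcvrqst = mpi_request_null
  n_to = mype + 1        ! pretend this rank sends mype+1 particles everywhere
  n_from = 0

  ! phase 1: every pair exchanges its counts without blocking
  isnd = 0; ircv = 0
  do ipe = 0, npe-1
    if (ipe == mype) cycle
    isnd = isnd + 1; ircv = ircv + 1
    call mpi_isend(n_to(ipe), 1, mpi_integer, ipe, mype*npe+ipe, &
         mpi_comm_world, sndrqst(isnd), ierr)
    call mpi_irecv(n_from(ipe), 1, mpi_integer, ipe, ipe*npe+mype, &
         mpi_comm_world, rcvrqst(ircv), ierr)
  end do
  call mpi_waitall(isnd, sndrqst, mpi_statuses_ignore, ierr)
  call mpi_waitall(ircv, rcvrqst, mpi_statuses_ignore, ierr)

  ! phase 2 would now size buffers from n_from and post the isend/irecv
  ! of the particle records and payload arrays
  if (mype == 0) print *, 'rank 0 expects:', n_from

  call mpi_finalize(ierr)
end program two_phase_exchange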
! comm_particles_global: redistribute all particles after load balancing
integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
integer :: tag_send, tag_receive, send_buff, rcv_buff
integer :: status(MPI_STATUS_SIZE)
integer, dimension(0:npe-1) :: send_n_particles_to_ipe
integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
type(particle_t), allocatable, dimension(:) :: send_particles
type(particle_t), allocatable, dimension(:) :: receive_particles
double precision, allocatable, dimension(:,:) :: send_payload
double precision, allocatable, dimension(:,:) :: receive_payload
integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
integer, allocatable, dimension(:) :: sndrqst, rcvrqst
integer :: isnd, ircv
logical :: BC_applied

send_n_particles_to_ipe(:) = 0
receive_n_particles_from_ipe(:) = 0

allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
sndrqst = mpi_request_null; rcvrqst = mpi_request_null;

particle(ipart)%igrid = igrid_particle

send_n_particles_to_ipe(ipe_particle) = &
     send_n_particles_to_ipe(ipe_particle) + 1
call mpistop('G: too many particles increase nparticles_per_cpu_hi')
particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart

deallocate(sndrqst, rcvrqst)
deallocate(particle_index_to_be_sent_to_ipe)

! exchange counts, then blocking sends/receives of the records
do ipe=0,npe-1; if (ipe .eq. mype) cycle;
  isnd = isnd + 1; ircv = ircv + 1;
  call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer, &
  call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer, &
call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
deallocate(sndrqst, rcvrqst)

if (ipe .eq. mype) cycle;
tag_receive = ipe * npe + mype

if (send_n_particles_to_ipe(ipe) .gt. 0) then
  allocate(send_particles(1:send_n_particles_to_ipe(ipe)))
  allocate(send_payload(1:npayload,1:send_n_particles_to_ipe(ipe)))
  do ipart = 1, send_n_particles_to_ipe(ipe)
    send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
  call mpi_send(send_payload,npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,ierrmpi)
  do ipart = 1, send_n_particles_to_ipe(ipe)
    deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
    deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
  deallocate(send_particles)
  deallocate(send_payload)

if (receive_n_particles_from_ipe(ipe) .gt. 0) then
  allocate(receive_particles(1:receive_n_particles_from_ipe(ipe)))
  allocate(receive_payload(1:npayload,1:receive_n_particles_from_ipe(ipe)))
  call mpi_recv(receive_payload,npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,status,ierrmpi)
  do ipart = 1, receive_n_particles_from_ipe(ipe)
    index = receive_particles(ipart)%index
    particle(index)%self = receive_particles(ipart)
    particle(index)%igrid = igrid_particle
  deallocate(receive_particles)
  deallocate(receive_payload)

deallocate(particle_index_to_be_sent_to_ipe)
! apply_periodB(particle, igrid_particle, ipe_particle, bc_applied):
! wrap particle positions across periodic boundaries
integer, intent(inout) :: igrid_particle, ipe_particle
logical, intent(out) :: BC_applied

bc_applied = .false.
if (.not. periodb(idim)) cycle

if (particle%x(^d) .lt. xprobmin^d) then
  particle%x(^d) = particle%x(^d) + (xprobmax^d - xprobmin^d)
if (particle%x(^d) .ge. xprobmax^d) then
  particle%x(^d) = particle%x(^d) - (xprobmax^d - xprobmin^d)
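Expanded out of the ^D macros, the wrap is a per-dimension shift by the domain length. A plain-Fortran sketch with the macros written out by hand (names are illustrative):

program wrap_demo
  implicit none
  double precision :: x(2)
  x = [-0.1d0, 1.2d0]
  call wrap_periodic(x, [0.d0, 0.d0], [1.d0, 1.d0], [.true., .true.])
  print *, x     ! -> 0.9, 0.2 (approximately)
contains
  subroutine wrap_periodic(x, xmin, xmax, periodic)
    double precision, intent(inout) :: x(:)
    double precision, intent(in)    :: xmin(:), xmax(:)
    logical, intent(in)             :: periodic(:)
    integer :: idim
    do idim = 1, size(x)
      if (.not. periodic(idim)) cycle
      if (x(idim) <  xmin(idim)) x(idim) = x(idim) + (xmax(idim) - xmin(idim))
      if (x(idim) >= xmax(idim)) x(idim) = x(idim) - (xmax(idim) - xmin(idim))
    end do
  end subroutine wrap_periodic
end program wrap_demo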
! destroy_particles(destroy_n_particles_mype, particle_index_to_be_destroyed):
! clean up destroyed particles on all cores
integer, intent(in) :: destroy_n_particles_mype
integer, dimension(1:destroy_n_particles_mype), intent(in) :: particle_index_to_be_destroyed
type(particle_t), dimension(destroy_n_particles_mype):: destroy_particles_mype
double precision, dimension(npayload,destroy_n_particles_mype):: destroy_payload_mype
integer :: iipart,ipart,destroy_n_particles

destroy_n_particles = 0

! write the destroyed particles out before dropping them
do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
  destroy_particles_mype(iipart) = particle(ipart)%self
call output_ensemble(destroy_n_particles_mype,destroy_particles_mype,destroy_payload_mype,'destroy')

call mpi_allreduce(destroy_n_particles_mype,destroy_n_particles,1,mpi_integer,mpi_sum,icomm,ierrmpi)
destroy_n_particles = destroy_n_particles_mype

do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
  deallocate(particle(ipart)%payload)
! push_particle_into_particles_on_mype(ipart) and
! pull_particle_from_particles_on_mype(ipart):
integer, intent(in) :: ipart
integer, intent(in) :: ipart