  !> Variable index for magnetic field
  integer, allocatable :: bp(:)
  !> Variable index for electric field
  integer, allocatable :: ep(:)
  !> Variable index for fluid velocity
  integer, allocatable :: vp(:)
  !> Variable index for current
  integer, allocatable :: jp(:)
  double precision, allocatable :: payload(:)
  integer :: igrid, ipe

  !> charge
  double precision :: q
  !> mass
  double precision :: m
  !> particle time
  double precision :: time
  !> particle time step
  double precision :: dt
  !> position
  double precision :: x(3)
  !> velocity or momentum
  double precision :: u(3)
    integer, intent(inout)       :: ngridvars

    double precision, intent(in) :: end_time

    character(len=*), intent(in) :: files(:)
    do n = 1, size(files)
      open(unitpar, file=trim(files(n)), status="old")
      read(unitpar, particles_list, end=111)
    integer, parameter :: i8 = selected_int_kind(18)
    integer(i8)        :: seed(2)
    character(len=20)  :: strdata

    dtheta = 2.0d0*dpi / 60.0d0

    seed = [310952_i8, 8948923749821_i8]
    call rng%set_seed(seed)
    csv_header = ' time, dt, x1, x2, x3, u1, u2, u3,'

    write(strdata,"(a,a,a)") ' ', trim(prim_wnames(n)), ','

    write(strdata,"(a,i2.2,a)") ' pl', n, ','

    write(strdata,"(a,i2.2,a)") ' usrpl', n, ','

         'i8.7,", ",i11.10,", ",i8.7)'
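    ! Sketch of the resulting CSV layout (assumed from the header and format
    ! fragments above): one row per output event,
    !   time, dt, x1, x2, x3, u1, u2, u3, <prim_wnames...>, <pl..>, <usrpl..>, ipe, iteration, index
    ! with the trailing 'i8.7,", ",i11.10,", ",i8.7' editing the final
    ! integer columns ipe, iteration and index.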
    integer :: oldtypes(0:7), offsets(0:7), blocklengths(0:7)

    oldtypes(0)   = mpi_logical
    oldtypes(1)   = mpi_integer
    oldtypes(2:7) = mpi_double_precision

    offsets(1) = size_logical * blocklengths(0)
    offsets(2) = offsets(1) + size_int    * blocklengths(1)
    offsets(3) = offsets(2) + size_double * blocklengths(2)
    offsets(4) = offsets(3) + size_double * blocklengths(3)
    offsets(5) = offsets(4) + size_double * blocklengths(4)
    offsets(6) = offsets(5) + size_double * blocklengths(5)
    offsets(7) = offsets(6) + size_double * blocklengths(6)
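    ! Minimal sketch (not the verbatim module code) of how these offsets
    ! would typically complete the MPI registration of the particle type;
    ! the address-kind conversion is an assumption for the modern
    ! mpi_type_create_struct interface:
    !   integer(kind=mpi_address_kind) :: disp(0:7)
    !   disp = int(offsets, kind=mpi_address_kind)
    !   call mpi_type_create_struct(8, blocklengths, disp, oldtypes, type_particle, ierrmpi)
    !   call mpi_type_commit(type_particle, ierrmpi)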
    double precision, intent(out) :: v(3)
    double precision, intent(in)  :: velocity
    double precision              :: vtmp(3), vnorm

    vtmp(1) = velocity * rng%normal()
    vtmp(2) = velocity * rng%normal()
    vtmp(3) = velocity * rng%normal()
    vnorm   = norm2(vtmp)
    v       = rng%sphere(vnorm)
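    ! Sampling note (sketch): three independent normal draws give a speed
    ! |vtmp| that follows the Maxwellian distribution; rng%sphere then
    ! redistributes that speed isotropically over the unit sphere, so the
    ! returned v has Maxwellian magnitude and a uniformly random direction.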
    double precision, intent(out) :: x(3)

    call rng%unif_01_vec(x)

    {^d&x(^d) = xprobmin^d + x(^d) * (xprobmax^d - xprobmin^d)\}
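    ! The {^d& ... \} construct is AMRVAC's LASY dimension loop; for
    ! ndim = 2 the line above expands to (sketch):
    !   x(1) = xprobmin1 + x(1) * (xprobmax1 - xprobmin1)
    !   x(2) = xprobmin2 + x(2) * (xprobmax2 - xprobmin2)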
    integer :: igrid, iigrid

    do iigrid=1,igridstail; igrid=igrids(iigrid);
      allocate(gridvars(igrid)%w(ixg^t,1:ngridvars),gridvars(igrid)%wold(ixg^t,1:ngridvars))
      gridvars(igrid)%w = 0.0d0
      gridvars(igrid)%wold = 0.0d0
    integer :: iigrid, igrid

    do iigrid=1,igridstail; igrid=igrids(iigrid);
      deallocate(gridvars(igrid)%w,gridvars(igrid)%wold)
    double precision :: E(ixG^T, ndir)
    double precision :: B(ixG^T, ndir)
    integer :: igrid, iigrid

    do iigrid=1,igridstail; igrid=igrids(iigrid);
      gridvars(igrid)%w(ixg^t,ep(:)) = e
      gridvars(igrid)%w(ixg^t,bp(:)) = b
    integer, intent(in)             :: igrid
    double precision, intent(in)    :: w_mhd(ixG^T,nw)
    double precision, intent(inout) :: w_part(ixG^T,ngridvars)

    double precision :: current(ixG^T,7-2*ndir:3)
    double precision :: w(ixG^T,1:nw)
    integer :: idirmin, idir

    w(ixg^t,1:nw) = w_mhd(ixg^t,1:nw)
    w_part(ixg^t,bp(1):bp(3)) = 0.0d0
    w_part(ixg^t,ep(1):ep(3)) = 0.0d0

    call phys_to_primitive(ixg^ll,ixg^ll,w,ps(igrid)%x)

    ! density
    w_part(ixg^t,rhop) = w(ixg^t,iw_rho)

    ! fluid velocity
    w_part(ixg^t,vp(:)) = w(ixg^t,iw_mom(:))

    ! magnetic field: with B0 splitting, add the background field
    w_part(ixg^t,bp(idir)) = w(ixg^t,iw_mag(idir)) + ps(igrid)%B0(ixg^t,idir,0)

    w_part(ixg^t,bp(:)) = w(ixg^t,iw_mag(:))

    ! current
    w_part(ixg^t,jp(:)) = current(ixg^t,:)
    w_part(ixg^t,ep(1)) = w_part(ixg^t,bp(2)) * &
         w(ixg^t,iw_mom(3)) - w_part(ixg^t,bp(3)) * w(ixg^t,iw_mom(2))
    w_part(ixg^t,ep(2)) = w_part(ixg^t,bp(3)) * &
         w(ixg^t,iw_mom(1)) - w_part(ixg^t,bp(1)) * w(ixg^t,iw_mom(3))
    w_part(ixg^t,ep(3)) = w_part(ixg^t,bp(1)) * &
         w(ixg^t,iw_mom(2)) - w_part(ixg^t,bp(2)) * w(ixg^t,iw_mom(1))

    w_part(ixg^t,ep(r_)) = w_part(ixg^t,bp(phi_)) * &
         w(ixg^t,iw_mom(z_)) - w_part(ixg^t,bp(z_)) * w(ixg^t,iw_mom(phi_))
    w_part(ixg^t,ep(phi_)) = w_part(ixg^t,bp(z_)) * &
         w(ixg^t,iw_mom(r_)) - w_part(ixg^t,bp(r_)) * w(ixg^t,iw_mom(z_))
    w_part(ixg^t,ep(z_)) = w_part(ixg^t,bp(r_)) * &
         w(ixg^t,iw_mom(phi_)) - w_part(ixg^t,bp(phi_)) * w(ixg^t,iw_mom(r_))
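    ! The six assignments above are the components of E = -v x B
    ! (equivalently B x v), the ideal-MHD convective electric field, written
    ! once with Cartesian indices (1,2,3) and once with cylindrical indices
    ! (r_, phi_, z_).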
         (current(ixg^t,2) * w_part(ixg^t,bp(3)) - current(ixg^t,3) * w_part(ixg^t,bp(2)))

         (-current(ixg^t,1) * w_part(ixg^t,bp(3)) + current(ixg^t,3) * w_part(ixg^t,bp(1)))

         (current(ixg^t,1) * w_part(ixg^t,bp(2)) - current(ixg^t,2) * w_part(ixg^t,bp(1)))

         (current(ixg^t,phi_) * w_part(ixg^t,bp(z_)) - current(ixg^t,z_) * w_part(ixg^t,bp(phi_)))

         (-current(ixg^t,r_) * w_part(ixg^t,bp(z_)) + current(ixg^t,z_) * w_part(ixg^t,bp(r_)))

         (current(ixg^t,r_) * w_part(ixg^t,bp(phi_)) - current(ixg^t,phi_) * w_part(ixg^t,bp(r_)))
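    ! These right-hand-side fragments have the component structure of
    ! current x B (Cartesian above, cylindrical below); the assignment lines
    ! they continue, presumably adding a Hall-type term scaled by
    ! particles_etah to the electric field, are not part of this extract.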
    integer :: ixO^L, idirmin, ixI^L
    double precision :: w(ixI^S,1:nw)

    double precision :: current(ixI^S,7-2*ndir:3),bvec(ixI^S,1:ndir)

        bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))+block%B0(ixi^s,idir,0)

        bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))

    call curlvector(bvec,ixi^l,ixo^l,current,idirmin,idirmin0,ndir)
    integer :: igrid, iigrid

    do iigrid=1,igridstail; igrid=igrids(iigrid);
      gridvars(igrid)%wold=gridvars(igrid)%w
    integer :: steps_taken, nparticles_left

    if (nparticles_left == 0 .and. convert) call mpistop('No particles left')
    double precision, intent(in) :: end_time
    integer, intent(out)         :: steps_taken

      if (n_active == 0) exit

      steps_taken = steps_taken + 1
    double precision, intent(in)    :: t_left
    double precision, intent(inout) :: dt_p
    double precision                :: n_steps
    double precision, parameter     :: eps = 1d-10

    n_steps = t_left / dt_p

    if (n_steps < 1+eps) then

    else if (n_steps < 2-eps) then

      dt_p = 0.5d0 * t_left
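    ! Sketch of the intent: with n_steps = t_left/dt_p, a remainder of less
    ! than about one step is taken in a single step (the elided first
    ! branch, presumably dt_p = t_left), while a remainder between one and
    ! two steps is split evenly (dt_p = 0.5d0*t_left). This avoids finishing
    ! with a vanishingly small step when approaching end_time.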
    integer, intent(in)                            :: ix(ndir)
    integer, intent(in)                            :: igrid
    double precision, dimension(3), intent(in)     :: x
    double precision, intent(in)                   :: tloc
    double precision, dimension(ndir), intent(out) :: vec
    double precision, dimension(ndir)              :: vec1, vec2
    double precision                               :: td, bb

           ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))

           ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))

      vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td
      if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
             sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1)*td &
             +sum(gridvars(igrid)%wold(ixg^t,ix(:))**2,dim=ndim+1)*(1.d0-td), &
             ps(igrid)%x(ixg^t,1:ndim),x,bb)

      if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
             sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1), &
             ps(igrid)%x(ixg^t,1:ndim),x,bb)

        vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
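    ! Rationale (sketch): interpolating vector components individually can
    ! underestimate the magnitude where the field rotates between cells, so
    ! the direction is kept from the componentwise interpolation while the
    ! magnitude is reset from the separately interpolated squared-magnitude
    ! field bb, i.e. vec = vec/|vec| * sqrt(bb).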
    integer, intent(in)                            :: igrid
    double precision, dimension(3), intent(in)     :: x
    double precision, intent(in)                   :: tloc
    double precision, dimension(ndir), intent(out) :: b
    double precision, dimension(ndir)              :: vec1, vec2, vec
    double precision                               :: td, bb

           ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))

           ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))

      vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td

      if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
             sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1)*td &
             +sum(gridvars(igrid)%wold(ixg^t,bp(:))**2,dim=ndim+1)*(1.d0-td), &
             ps(igrid)%x(ixg^t,1:ndim),x,bb)

      if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
             sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1), &
             ps(igrid)%x(ixg^t,1:ndim),x,bb)

        vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
    integer, intent(in)                            :: igrid
    double precision, dimension(3), intent(in)     :: x
    double precision, intent(in)                   :: tloc
    double precision, dimension(ndir), intent(out) :: e
    double precision, dimension(ndir)              :: vec1, vec2, vec
    double precision, dimension(ndir)              :: vfluid, b
    double precision, dimension(ndir)              :: current
    double precision                               :: rho = 1.d0, rho2 = 1.d0
    double precision                               :: td

    call get_vec(jp, igrid, x, tloc, current)

      rho = rho2 * (1.0d0 - td) + rho * td
    double precision, dimension(3), intent(in) :: u
    double precision, intent(out)              :: lfac

      lfac = sqrt(1.0d0 + sum(u(:)**2)/c_norm**2)

    double precision, dimension(3), intent(in) :: v
    double precision, intent(out)              :: lfac

      lfac = 1.0d0 / sqrt(1.0d0 - sum(v(:)**2)/c_norm**2)
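    ! Consistency check (sketch): with relativistic momentum per unit mass
    ! u = lfac*v, the two forms agree, since
    !   1 + u**2/c_norm**2 = 1 + (v**2/c_norm**2)/(1 - v**2/c_norm**2)
    !                      = 1/(1 - v**2/c_norm**2).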
    double precision, dimension(ndir), intent(in)  :: a,b
    double precision, dimension(ndir), intent(out) :: c

      c(1) = a(2)*b(3) - a(3)*b(2)
      c(2) = a(3)*b(1) - a(1)*b(3)
      c(3) = a(1)*b(2) - a(2)*b(1)

      call mpistop('geometry not implemented in cross(a,b,c)')

    call mpistop("cross in particles needs to be run with three components!")
    integer, intent(in)           :: igrid,ixI^L, ixO^L
    double precision, intent(in)  :: gf(ixI^S)
    double precision, intent(in)  :: x(ixI^S,1:ndim)
    double precision, intent(in)  :: xloc(1:3)
    double precision, intent(out) :: gfloc
    double precision              :: xd^D, myq, dx1

    double precision :: c00, c10

    double precision :: c0, c1, c00, c10, c01, c11

    integer :: ic^D, ic1^D, ic2^D, idir

      ic^d = ceiling(dlog((xloc(^d)-xprobmin^d)*(qstretch(ps(igrid)%level,^d)-one)/&

      if(xloc(^d)<xprobmin^d+xstretch^d) then

      else if(xloc(^d)>xprobmax^d-xstretch^d) then
        ic^d = ceiling(dlog((xloc(^d)-xprobmax^d+xstretch^d)*(qstretch(ps(igrid)%level,^d)-one)/&

    if (x({ic^dd},^d) .lt. xloc(^d)) then

    if(ic1^d.lt.ixglo^d+1 .or. ic2^d.gt.ixghi^d-1) then
      print *, 'direction: ',^d
      print *, 'position: ',xloc(1:ndim)
      print *, 'indices:', ic1^d,ic2^d
      call mpistop('Trying to interpolate from out of grid!')
    ! 1D: linear interpolation
    xd1 = (xloc(1)-x(ic11,1)) / (x(ic21,1) - x(ic11,1))
    gfloc = gf(ic11) * (1.0d0 - xd1) + gf(ic21) * xd1

    ! 2D: bilinear interpolation
    xd1 = (xloc(1)-x(ic11,ic12,1)) / (x(ic21,ic12,1) - x(ic11,ic12,1))
    xd2 = (xloc(2)-x(ic11,ic12,2)) / (x(ic11,ic22,2) - x(ic11,ic12,2))
    c00 = gf(ic11,ic12) * (1.0d0 - xd1) + gf(ic21,ic12) * xd1
    c10 = gf(ic11,ic22) * (1.0d0 - xd1) + gf(ic21,ic22) * xd1
    gfloc = c00 * (1.0d0 - xd2) + c10 * xd2

    ! 3D: trilinear interpolation
    xd1 = (xloc(1)-x(ic11,ic12,ic13,1)) / (x(ic21,ic12,ic13,1) - x(ic11,ic12,ic13,1))
    xd2 = (xloc(2)-x(ic11,ic12,ic13,2)) / (x(ic11,ic22,ic13,2) - x(ic11,ic12,ic13,2))
    xd3 = (xloc(3)-x(ic11,ic12,ic13,3)) / (x(ic11,ic12,ic23,3) - x(ic11,ic12,ic13,3))

    c00 = gf(ic11,ic12,ic13) * (1.0d0 - xd1) + gf(ic21,ic12,ic13) * xd1
    c10 = gf(ic11,ic22,ic13) * (1.0d0 - xd1) + gf(ic21,ic22,ic13) * xd1
    c01 = gf(ic11,ic12,ic23) * (1.0d0 - xd1) + gf(ic21,ic12,ic23) * xd1
    c11 = gf(ic11,ic22,ic23) * (1.0d0 - xd1) + gf(ic21,ic22,ic23) * xd1

    c0 = c00 * (1.0d0 - xd2) + c10 * xd2
    c1 = c01 * (1.0d0 - xd2) + c11 * xd2

    gfloc = c0 * (1.0d0 - xd3) + c1 * xd3
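    ! Compact form (sketch): with corner weights w(0) = 1-xd and w(1) = xd
    ! per direction, the trilinear value is
    !   gfloc = sum over corners (i,j,k in {0,1}) of
    !           gf(corner_ijk) * w1(i)*w2(j)*w3(k),
    ! which the c00/c10/c01/c11 -> c0/c1 -> gfloc reduction above evaluates
    ! one direction at a time.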
    double precision :: tpartc_avg, tpartc_int_avg, tpartc_io_avg, tpartc_com_avg, tpartc_grid_avg

      tpartc_avg      = tpartc_avg/dble(npe)
      tpartc_int_avg  = tpartc_int_avg/dble(npe)
      tpartc_io_avg   = tpartc_io_avg/dble(npe)
      tpartc_com_avg  = tpartc_com_avg/dble(npe)
      tpartc_grid_avg = tpartc_grid_avg/dble(npe)
      write(*,'(a,f12.3,a)') ' Particle handling took : ', tpartc, ' sec'
      write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc/(timeloop+smalldouble), ' %'
      write(*,'(a,f12.3,a)') ' Particle IO took : ', tpartc_io_avg, ' sec'
      write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc_io_avg/(timeloop+smalldouble), ' %'
      write(*,'(a,f12.3,a)') ' Particle COM took : ', tpartc_com_avg, ' sec'
      write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc_com_avg/(timeloop+smalldouble), ' %'
      write(*,'(a,f12.3,a)') ' Particle integration took : ', tpartc_int_avg, ' sec'
      write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc_int_avg/(timeloop+smalldouble), ' %'
      write(*,'(a,f12.3,a)') ' Particle init grid took : ', tpartc_grid_avg, ' sec'
      write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc_grid_avg/(timeloop+smalldouble), ' %'
    logical, intent(out)   :: file_exists
    character(len=std_len) :: filename
    integer                :: mynpayload, mynparticles, pos, index_latest

    do index_latest=9999,0,-1
      write(filename,"(a,a,i4.4,a)") trim(restart_from_file(1:pos-8)),'_particles',index_latest,'.dat'
      INQUIRE(file=filename, exist=file_exists)
      if(file_exists) exit

    if (.not. file_exists) then
      write(*,*) 'WARNING: File '//trim(filename)//' with particle data does not exist.'
      write(*,*) 'Initialising particles from user or default routines'

    open(unit=unitparticles,file=filename,form='unformatted',action='read',status='unknown',access='stream')

    call mpistop('npayload in restart file does not match npayload in mod_particles')

    call mpi_bcast(file_exists,1,mpi_logical,0,icomm,ierrmpi)
    if (.not. file_exists) return

      mynparticles = mynparticles + 1

    call mpi_bcast(mynparticles,1,mpi_integer,0,icomm,ierrmpi)
    integer :: index,igrid_particle,ipe_particle

    particle(index)%igrid = igrid_particle
    character(len=std_len)                        :: filename
    type(particle_t), allocatable, dimension(:)   :: send_particles
    type(particle_t), allocatable, dimension(:)   :: receive_particles
    double precision, allocatable, dimension(:,:) :: send_payload
    double precision, allocatable, dimension(:,:) :: receive_payload
    integer                     :: status(MPI_STATUS_SIZE)
    integer, dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
    integer                     :: ipe, ipart, iipart, send_n_particles_for_output
    logical, save               :: file_exists=.false.
    integer                     :: snapshotnumber
    receive_n_particles_for_output_from_ipe(:) = 0

    if (mype .eq. 0) then
      write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotnumber,'.dat'
      INQUIRE(file=filename, exist=file_exists)
      if (.not. file_exists) then
        open(unit=unitparticles,file=filename,form='unformatted',status='new',access='stream')
      else
        open(unit=unitparticles,file=filename,form='unformatted',status='replace',access='stream')

    if (mype .ne. 0) then
      allocate(send_particles(1:send_n_particles_for_output))
      allocate(send_payload(1:npayload,1:send_n_particles_for_output))
        send_particles(iipart) = particle(ipart)%self

      call mpi_recv(receive_n_particles_for_output_from_ipe(ipe),1,mpi_integer,ipe,ipe,icomm,status,ierrmpi)

    if (mype .ne. 0) then
      deallocate(send_particles)
      deallocate(send_payload)

      allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
      allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
      call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
      call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
      do ipart=1,receive_n_particles_for_output_from_ipe(ipe)

      deallocate(receive_particles)
      deallocate(receive_payload)
    double precision :: mypayload(1:npayload)

    integer                :: iipart, ipart, icomp
    character(len=std_len) :: filename
    character(len=1024)    :: line, strdata

      if (particle(ipart)%self%follow) then
          write(strdata,"(a,i2.2,a)") 'payload',icomp,','
          line = trim(line)//trim(strdata)
        write(unitparticles,"(a,a,a)") 'time,dt,x1,x2,x3,u1,u2,u3,',trim(line),'ipe,iteration,index'
    double precision, intent(in) :: end_time

    double precision :: t_min_mype
    integer          :: ipart, iipart

    t_min_mype = bigdouble

      activate = (particle(ipart)%self%time < end_time)
      t_min_mype = min(t_min_mype, particle(ipart)%self%time)
    integer, intent(in)     :: index
    integer, intent(out)    :: igrid_particle, ipe_particle
    integer                 :: iipart,ipart,ipe_has_particle,ipe
    integer, dimension(0:1) :: buff
    logical                 :: has_particle(0:npe-1)

    has_particle(:) = .false.
      if (particle(ipart)%self%index == index) then
        has_particle(mype) = .true.

    if (has_particle(mype)) then

    if (npe>0) call mpi_allgather(has_particle(mype),1,mpi_logical,has_particle,1,mpi_logical,icomm,ierrmpi)

    ipe_has_particle = -1
      if (has_particle(ipe) .eqv. .true.) then
        ipe_has_particle = ipe

    if (ipe_has_particle .ne. -1) then
      if (npe>0) call mpi_bcast(buff,2,mpi_integer,ipe_has_particle,icomm,ierrmpi)
      igrid_particle=buff(0)
      ipe_particle = buff(1)
    double precision, intent(in) :: x(3)
    integer, intent(out)         :: igrid_particle, ipe_particle
    integer :: ig(ndir,nlevelshi), ig_lvl(nlevelshi)
    integer :: idim, ic(ndim)

    do while (.not.branch%node%leaf)
      {ic(^d)=ig(^d,branch%node%level+1) - 2 * branch%node%ig^d +2\}
      branch%node => branch%node%child({ic(^d)})%node

    igrid_particle = branch%node%igrid
    ipe_particle   = branch%node%ipe
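    ! Sketch of the descent: starting from the coarse root block containing
    ! x, the loop walks down the AMR tree; at each level the child index
    ! ic(^d) in {1,2} follows from the next-level grid index relative to the
    ! parent's, until a leaf (an actual grid block) is reached, whose igrid
    ! and ipe identify the particle's block and owning rank.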
    double precision, dimension(ndim), intent(in) :: x

         all(x < [ xprobmax^d ])
    integer, intent(in) :: igrid, ipart
    double precision    :: x(ndim), grid_rmin(ndim), grid_rmax(ndim)

    if (.not. allocated(ps(igrid)%w)) then
    integer :: igrid, iigrid,ipe
    integer :: my_neighbor_type, i^D
    logical :: ipe_is_neighbor(0:npe-1)

    ipe_is_neighbor(:) = .false.
    do iigrid=1,igridstail; igrid=igrids(iigrid);

      if (i^d==0|.and.) then

      my_neighbor_type=neighbor_type(i^d,igrid)

      select case (my_neighbor_type)
      case (neighbor_boundary)

      case (neighbor_coarse)
        call ipe_fc(i^d,igrid,ipe_is_neighbor)
      case (neighbor_sibling)
        call ipe_srl(i^d,igrid,ipe_is_neighbor)
      case (neighbor_fine)
        call ipe_cf(i^d,igrid,ipe_is_neighbor)

    ipe_is_neighbor(mype) = .false.

      if (ipe_is_neighbor(ipe)) then
    integer, intent(in)    :: i^D, igrid
    logical, intent(inout) :: ipe_is_neighbor(0:npe-1)

    ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.

    integer, intent(in)    :: i^D, igrid
    logical, intent(inout) :: ipe_is_neighbor(0:npe-1)

    ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.

    integer, intent(in)    :: i^D, igrid
    logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
    integer                :: ic^D, inc^D

    {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
       ipe_is_neighbor( neighbor_child(2,inc^d,igrid) ) = .true.
    character(len=std_len)                        :: filename
    type(particle_t), allocatable, dimension(:)   :: send_particles
    double precision, allocatable, dimension(:,:) :: send_payload
    double precision :: tout
    integer          :: send_n_particles_for_output, cc
    integer          :: ipart,iipart

    send_n_particles_for_output = 0

      send_n_particles_for_output = send_n_particles_for_output + 1

    allocate(send_particles(1:send_n_particles_for_output))
    allocate(send_payload(npayload,1:send_n_particles_for_output))

      send_particles(cc) = particle(ipart)%self

         send_payload,'ensemble')
    deallocate(send_particles)
    deallocate(send_payload)
    character(len=*), intent(in) :: type
    double precision, intent(in) :: tout
    integer, intent(in)          :: index
    integer                      :: nout, mysnapshot
  subroutine output_ensemble(send_n_particles_for_output,send_particles,send_payload,typefile)

    integer, intent(in) :: send_n_particles_for_output
    type(particle_t), dimension(send_n_particles_for_output), intent(in) :: send_particles
    double precision, dimension(npayload,send_n_particles_for_output), intent(in) :: send_payload
    character(len=*), intent(in)                  :: typefile
    character(len=std_len)                        :: filename
    type(particle_t), allocatable, dimension(:)   :: receive_particles
    double precision, allocatable, dimension(:,:) :: receive_payload
    integer                     :: status(MPI_STATUS_SIZE)
    integer, dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
    integer                     :: ipe, ipart, nout
    logical                     :: file_exists

    receive_n_particles_for_output_from_ipe(:) = 0

    call mpi_allgather(send_n_particles_for_output, 1, mpi_integer, &
         receive_n_particles_for_output_from_ipe, 1, mpi_integer, icomm, ierrmpi)

    if (sum(receive_n_particles_for_output_from_ipe(:)) == 0) return
    if(typefile=='destroy') then
      write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
           trim(typefile) // '.csv'

      inquire(file=filename, exist=file_exists)

      if (.not. file_exists) then

      write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &

    do ipart=1,send_n_particles_for_output

      allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
      allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
      call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
      call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
      do ipart=1,receive_n_particles_for_output_from_ipe(ipe)

      deallocate(receive_particles)
      deallocate(receive_payload)
    character(len=std_len) :: filename
    integer                :: ipart,iipart,ipe
    logical                :: file_exists

      if (particle(ipart)%self%follow) then
        write(filename,"(a,a,i6.6,a)") trim(base_filename),'_particle_', &

        inquire(file=filename, exist=file_exists)
    double precision, intent(in) :: payload(npayload)
    integer, intent(in)          :: ipe
    integer, intent(in)          :: file_unit
    double precision             :: x(3), u(3)

    x(:) = myparticle%x(:)

    write(file_unit, csv_format) myparticle%time, myparticle%dt, x(1:3), &
    double precision, intent(in)  :: xp(1:3)
    double precision, intent(out) :: xpcart(1:3)

      xpcart(1:3) = xp(1:3)

      xpcart(1) = xp(r_)*cos(xp(phi_))
      xpcart(2) = xp(r_)*sin(xp(phi_))

      xpcart(1) = xp(1)*sin(xp(2))*cos(xp(3))
      xpcart(2) = xp(1)*cos(xp(2))
      xpcart(3) = xp(1)*sin(xp(2))*sin(xp(3))}

      xpcart(2) = xp(1)*sin(xp(2))*sin(xp(3))
      xpcart(3) = xp(1)*cos(xp(2))}

      write(*,*) "No converter for coordinate=",coordinate
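    ! Note (sketch): the two spherical branches differ only in which
    ! Cartesian axis carries the polar direction, presumably selected by a
    ! dimensionality preprocessor switch: the first maps the polar angle
    ! onto y (x = r sin(th)cos(phi), y = r cos(th), z = r sin(th)sin(phi)),
    ! the second onto z.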
    double precision, intent(in)  :: xpcart(1:3)
    double precision, intent(out) :: xp(1:3)
    double precision :: xx, yy, zz
    double precision :: rr, tth, pphi

      xp(r_)   = sqrt(xpcart(1)**2 + xpcart(2)**2)
      xp(phi_) = atan2(xpcart(2),xpcart(1))
      if (xp(phi_) .lt. 0.d0) xp(phi_) = 2.0d0*dpi + xp(phi_)

      rr = sqrt(xx**2 + yy**2 + zz**2)

      pphi = acos(xx/sqrt(xx**2+yy**2))

      pphi = 2.d0*dpi - acos(xx/sqrt(xx**2+yy**2))

      xp(1:3) = (/ rr, tth, pphi /)

      write(*,*) "No converter for coordinate=",coordinate
    double precision, intent(in)  :: xp(1:3),up(1:3)
    double precision, intent(out) :: upcart(1:3)

      upcart(1) = up(1)*sin(xp(2))*cos(xp(3)) + up(2)*cos(xp(2))*cos(xp(3)) - up(3)*sin(xp(3))

      upcart(2) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))
      upcart(3) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))}

      upcart(2) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))
      upcart(3) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))}

      write(*,*) "No converter for coordinate=",coordinate
    double precision, intent(in)  :: xp(1:3),upcart(1:3)
    double precision, intent(out) :: up(1:3)

      up(1:3) = upcart(1:3)

      up(r_)   =  cos(xp(phi_)) * upcart(1) + sin(xp(phi_)) * upcart(2)
      up(phi_) = -sin(xp(phi_)) * upcart(1) + cos(xp(phi_)) * upcart(2)

      up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(3)*sin(xp(2))*sin(xp(3)) + upcart(2)*cos(xp(2))
      up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(3)*cos(xp(2))*sin(xp(3)) - upcart(2)*sin(xp(2))
      up(3) = -upcart(1)*sin(xp(3)) + upcart(3)*cos(xp(3))}

      up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(2)*sin(xp(2))*sin(xp(3)) + upcart(3)*cos(xp(2))
      up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(2)*cos(xp(2))*sin(xp(3)) - upcart(3)*sin(xp(2))
      up(3) = -upcart(1)*sin(xp(3)) + upcart(2)*cos(xp(3))}

      write(*,*) "No converter for coordinate=",coordinate
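    ! Design note (sketch): the local cylindrical/spherical basis is
    ! orthonormal, so the from-Cartesian transform above is simply the
    ! transpose of the to-Cartesian transform in partvec_to_cartesian;
    ! applying one after the other recovers the original vector.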
    integer, intent(in)             :: igrid
    double precision, intent(inout) :: xp(1:3)
    double precision                :: xpmod(1:3)

    xpmod(phiind) = xp(phiind) + 2.d0*dpi

      xp(phiind) = xpmod(phiind)

    xpmod(phiind) = xp(phiind) - 2.d0*dpi

      xp(phiind) = xpmod(phiind)
    integer, intent(in)          :: igrid, ngh
    double precision, intent(in) :: x(ndim)
    double precision             :: grid_rmin(ndim), grid_rmax(ndim)
    integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
    integer :: tag_send, tag_receive, send_buff, rcv_buff
    integer :: status(MPI_STATUS_SIZE)
    integer, dimension(0:npe-1)                     :: send_n_particles_to_ipe
    integer, dimension(0:npe-1)                     :: receive_n_particles_from_ipe
    type(particle_t), allocatable, dimension(:,:)   :: send_particles
    type(particle_t), allocatable, dimension(:,:)   :: receive_particles
    double precision, allocatable, dimension(:,:,:) :: send_payload
    double precision, allocatable, dimension(:,:,:) :: receive_payload
    integer, allocatable, dimension(:,:)            :: particle_index_to_be_sent_to_ipe
    integer, dimension(nparticles_per_cpu_hi)       :: particle_index_to_be_destroyed
    integer                                         :: destroy_n_particles_mype
    integer, allocatable, dimension(:)              :: sndrqst, rcvrqst
    integer, allocatable, dimension(:)              :: sndrqst_payload, rcvrqst_payload
    integer                                         :: isnd, ircv
    integer, parameter                              :: maxneighbors=56
    logical                                         :: BC_applied
    send_n_particles_to_ipe(:)      = 0
    receive_n_particles_from_ipe(:) = 0
    destroy_n_particles_mype        = 0

    allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
    allocate( sndrqst_payload(1:npe-1), rcvrqst_payload(1:npe-1) )
    sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
    sndrqst_payload = mpi_request_null; rcvrqst_payload = mpi_request_null;

        destroy_n_particles_mype = destroy_n_particles_mype + 1
        particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart

      if (igrid_particle == -1 ) then

        if (.not. bc_applied .or. igrid_particle == -1) then

          destroy_n_particles_mype = destroy_n_particles_mype + 1
          particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart

      particle(ipart)%igrid = igrid_particle

        send_n_particles_to_ipe(ipe_particle) = &
             send_n_particles_to_ipe(ipe_particle) + 1

        call mpistop('too many particles, increase nparticles_per_cpu_hi')
        particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart

    call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
    if (npe == 1) return

      tag_receive = ipe * npe + mype
      isnd = isnd + 1; ircv = ircv + 1
      call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
      call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)

    call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
    call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
    sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
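    ! Protocol sketch: communication is two-phase. First each pair of
    ! neighbouring ranks exchanges only the particle counts with
    ! nonblocking mpi_isend/mpi_irecv, completed by the mpi_waitall calls
    ! above; only then are buffers sized and the particle structs and
    ! payloads exchanged, so no rank ever posts a receive of unknown length.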
    allocate(send_particles(maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
    allocate(receive_particles(maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
    allocate(send_payload(npayload,maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
    allocate(receive_payload(npayload,maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
      tag_receive = ipe * npe + mype

      if (send_n_particles_to_ipe(ipe) .gt. 0) then

        do ipart = 1, send_n_particles_to_ipe(ipe)
          send_particles(ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self

        call mpi_isend(send_particles(:,iipe),send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
        call mpi_isend(send_payload(:,:,iipe),npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,sndrqst_payload(isnd),ierrmpi)

      if (receive_n_particles_from_ipe(ipe) .gt. 0) then

        call mpi_irecv(receive_particles(:,iipe),receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
        call mpi_irecv(receive_payload(:,:,iipe),npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,rcvrqst_payload(ircv),ierrmpi)

    call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
    call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
    call mpi_waitall(isnd,sndrqst_payload,mpi_statuses_ignore,ierrmpi)
    call mpi_waitall(ircv,rcvrqst_payload,mpi_statuses_ignore,ierrmpi)
      do ipart = 1, send_n_particles_to_ipe(ipe)
        deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
        deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)

      if (receive_n_particles_from_ipe(ipe) .gt. 0) then
        do ipart = 1, receive_n_particles_from_ipe(ipe)

          index = receive_particles(ipart,iipe)%index

          particle(index)%self = receive_particles(ipart,iipe)

          particle(index)%igrid = igrid_particle
    integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
    integer :: tag_send, tag_receive, send_buff, rcv_buff
    integer :: status(MPI_STATUS_SIZE)
    integer, dimension(0:npe-1)                   :: send_n_particles_to_ipe
    integer, dimension(0:npe-1)                   :: receive_n_particles_from_ipe
    type(particle_t), allocatable, dimension(:)   :: send_particles
    type(particle_t), allocatable, dimension(:)   :: receive_particles
    double precision, allocatable, dimension(:,:) :: send_payload
    double precision, allocatable, dimension(:,:) :: receive_payload
    integer, allocatable, dimension(:,:)          :: particle_index_to_be_sent_to_ipe
    integer, allocatable, dimension(:)            :: sndrqst, rcvrqst
    integer                                       :: isnd, ircv
    logical                                       :: BC_applied
    send_n_particles_to_ipe(:)      = 0
    receive_n_particles_from_ipe(:) = 0

    allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
    sndrqst = mpi_request_null; rcvrqst = mpi_request_null;

      particle(ipart)%igrid = igrid_particle

        send_n_particles_to_ipe(ipe_particle) = &
             send_n_particles_to_ipe(ipe_particle) + 1

        call mpistop('G: too many particles increase nparticles_per_cpu_hi')
        particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
    deallocate(sndrqst, rcvrqst)
    deallocate(particle_index_to_be_sent_to_ipe)

    do ipe=0,npe-1; if (ipe .eq. mype) cycle;
      isnd = isnd + 1; ircv = ircv + 1;
      call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer, &
      call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer, &

    call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
    call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
    deallocate(sndrqst, rcvrqst)

      if (ipe .eq. mype) cycle;

      tag_receive = ipe * npe + mype

      if (send_n_particles_to_ipe(ipe) .gt. 0) then

        allocate(send_particles(1:send_n_particles_to_ipe(ipe)))
        allocate(send_payload(1:npayload,1:send_n_particles_to_ipe(ipe)))
        do ipart = 1, send_n_particles_to_ipe(ipe)
          send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self

        call mpi_send(send_payload,npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,ierrmpi)
        do ipart = 1, send_n_particles_to_ipe(ipe)
          deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
          deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)

        deallocate(send_particles)
        deallocate(send_payload)

      if (receive_n_particles_from_ipe(ipe) .gt. 0) then
        allocate(receive_particles(1:receive_n_particles_from_ipe(ipe)))
        allocate(receive_payload(1:npayload,1:receive_n_particles_from_ipe(ipe)))

        call mpi_recv(receive_payload,npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,status,ierrmpi)
        do ipart = 1, receive_n_particles_from_ipe(ipe)

          index = receive_particles(ipart)%index

          particle(index)%self = receive_particles(ipart)

          particle(index)%igrid = igrid_particle

        deallocate(receive_particles)
        deallocate(receive_payload)

    deallocate(particle_index_to_be_sent_to_ipe)
    integer, intent(inout) :: igrid_particle, ipe_particle
    logical, intent(out)   :: BC_applied

    bc_applied = .false.

      if (.not. periodb(idim)) cycle

      if (particle%x(^d) .lt. xprobmin^d) then
        particle%x(^d) = particle%x(^d) + (xprobmax^d - xprobmin^d)

      if (particle%x(^d) .ge. xprobmax^d) then
        particle%x(^d) = particle%x(^d) - (xprobmax^d - xprobmin^d)
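    ! Note (sketch): the wrap shifts the position by exactly one domain
    ! length per direction, which assumes a particle crosses at most one
    ! period per step; the particle CFL limit (particles_cfl) makes this a
    ! reasonable assumption.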
    integer, intent(in)                                            :: destroy_n_particles_mype
    integer, dimension(1:destroy_n_particles_mype), intent(in)     :: particle_index_to_be_destroyed
    type(particle_t), dimension(destroy_n_particles_mype)          :: destroy_particles_mype
    double precision, dimension(npayload,destroy_n_particles_mype) :: destroy_payload_mype
    integer                                                        :: iipart,ipart,destroy_n_particles

    destroy_n_particles = 0

    do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
      destroy_particles_mype(iipart) = particle(ipart)%self

    call output_ensemble(destroy_n_particles_mype,destroy_particles_mype,destroy_payload_mype,'destroy')

      call mpi_allreduce(destroy_n_particles_mype,destroy_n_particles,1,mpi_integer,mpi_sum,icomm,ierrmpi)

      destroy_n_particles = destroy_n_particles_mype

    do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);

      deallocate(particle(ipart)%payload)
    integer, intent(in) :: ipart

    integer, intent(in) :: ipart
subroutine, public mpistop(message)
Exit MPI-AMRVAC with an error message.
Module for physical and numeric constants.
Module with basic grid data structures.
type(tree_node_ptr), dimension(:^d &), allocatable, save tree_root
Pointers to the coarse grid.
Module with geometry-related routines (e.g., divergence, curl)
integer, parameter spherical
integer, parameter cartesian
integer, parameter cylindrical
integer, parameter cartesian_expansion
integer, parameter cartesian_stretched
subroutine curlvector(qvec, ixil, ixol, curlvec, idirmin, idirmin0, ndir0, fourthorder)
Calculate curl of a vector qvec within ixL. Options to employ standard second-order CD evaluations use...
This module contains definitions of global parameters and variables and some generic functions/subrou...
type(state), pointer block
Block pointer for using one block and its previous state.
logical nocartesian
IO switches for conversion.
integer ixghi
Upper index of grid block arrays.
integer, parameter stretch_uni
Unidirectional stretching from a side.
integer domain_nx
number of cells for each dimension in level-one mesh
integer, parameter unitpar
file handle for IO
double precision global_time
The global simulation time.
double precision time_max
End time for the simulation.
double precision xstretch
physical extent of stretched border in symmetric stretching
integer, parameter ndim
Number of spatial dimensions for grid variables.
integer, parameter rpxmin
integer, dimension(:,:), allocatable nstretchedblocks
(even) number of (symmetrically) stretched blocks per level and dimension
character(len=std_len), dimension(:), allocatable par_files
Which par files are used as input.
integer icomm
The MPI communicator.
integer mype
The rank of the current MPI task.
integer block_nx
number of cells for each dimension in grid block excluding ghostcells
double precision, dimension(:), allocatable, parameter d
double precision dt
global time step
double precision length_convert_factor
Conversion factor for length unit.
integer ndir
Number of spatial dimensions (components) for vector variables.
integer ixm
the mesh range of a physical block without ghost cells
integer ierrmpi
A global MPI error return code.
double precision, dimension(:,:), allocatable qstretch
Stretching factors and first cell size for each AMR level and dimension.
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
integer npe
The number of MPI tasks.
logical time_advance
do time evolving
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
logical, dimension(ndim) periodb
True for dimensions with periodic boundaries.
double precision c_norm
Normalised speed of light.
logical b0field
split magnetic field as background B0 field
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
integer nghostcells
Number of ghost cells surrounding a grid.
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
integer, dimension(ndim) stretch_type
What kind of stretching is used per dimension.
double precision, dimension(^nd) dxlevel
store unstretched cell size of current level
integer, parameter rpxmax
character(len=std_len) base_filename
Base file name for simulation output, which will be followed by a number.
integer, parameter stretch_symm
Symmetric stretching around the center.
integer r_
Indices for cylindrical coordinates FOR TESTS, negative value when not used.
double precision, dimension(:,:), allocatable dxfirst
integer, dimension(:,:), allocatable node
integer, parameter ixglo
Lower index of grid block arrays (always 1)
Module with shared functionality for all the particle movers.
subroutine get_maxwellian_velocity(v, velocity)
subroutine partcoord_from_cartesian(xp, xpcart)
Convert to noncartesian coordinates.
double precision, dimension(3) integrator_velocity_factor
Normalization factor for velocity in the integrator.
integer nparticles_per_cpu_hi
Maximum number of particles in one processor.
subroutine particle_get_current(w, ixil, ixol, idirmin, current)
Calculate idirmin and the idirmin:3 components of the common current array; make sure that dxlevel(^D)...
integer nparticleshi
Maximum number of particles.
pure subroutine limit_dt_endtime(t_left, dt_p)
integer num_particles
Number of particles.
subroutine fix_phi_crossing(xp, igrid)
Fix particle position when crossing the 0,2pi boundary in noncartesian coordinates.
double precision const_dt_particles
If positive, a constant time step for the particles.
subroutine output_individual()
integer n_output_destroyed
Output count for ensembles of destroyed particles.
integer it_particles
Iteration number of particles.
subroutine fields_from_mhd(igrid, w_mhd, w_part)
Determine fields from MHD variables.
subroutine particles_params_read(files)
Read this module's parameters from a file.
double precision min_particle_time
Minimum time of all particles.
subroutine output_ensemble(send_n_particles_for_output, send_particles, send_payload, typefile)
subroutine get_uniform_pos(x)
logical write_individual
Whether to write individual particle output (followed particles)
subroutine write_particle_output()
subroutine init_particles_com()
Initialize communicators for particles.
logical function particle_in_igrid(ipart, igrid)
Quick check if particle is still in igrid.
integer, dimension(:), allocatable particles_on_mype
Array of identity numbers of particles in current processor.
double precision dtsave_particles
Time interval to save particles.
subroutine output_particle(myparticle, payload, ipe, file_unit)
subroutine update_gridvars()
update grid variables for particles
integer ngridvars
Number of variables for grid field.
subroutine ipe_fc(id, igrid, ipe_is_neighbor)
subroutine get_efield(igrid, x, tloc, e)
double precision particles_etah
subroutine destroy_particles(destroy_n_particles_mype, particle_index_to_be_destroyed)
clean up destroyed particles on all cores
integer nusrpayload
Number of user-defined payload variables for a particle.
subroutine select_active_particles(end_time)
subroutine write_particles_snapshot()
double precision particles_eta
Resistivity.
subroutine particles_finish()
Finalize the particles module.
logical write_snapshot
Whether to write particle snapshots.
pure subroutine get_lfac(u, lfac)
Get Lorentz factor from relativistic momentum.
subroutine find_particle_ipe(x, igrid_particle, ipe_particle)
subroutine pull_particle_from_particles_on_mype(ipart)
subroutine init_particles_output()
pure subroutine get_lfac_from_velocity(v, lfac)
Get Lorentz factor from velocity.
integer integrator
Integrator to be used for particles.
integer, dimension(:), allocatable ep
Variable index for electric field.
subroutine handle_particles()
Let particles evolve in time. The routine also handles grid variables and output.
subroutine partvec_from_cartesian(xp, up, upcart)
Convert vector from Cartesian coordinates.
subroutine fill_gridvars_default
This routine fills the particle grid variables with the default for mhd, i.e. only E and B.
character(len=400) csv_header
Header string used in CSV files.
subroutine finish_gridvars()
Deallocate grid variables for particles.
subroutine ipe_srl(id, igrid, ipe_is_neighbor)
subroutine get_bfield(igrid, x, tloc, b)
integer npayload
Number of total payload variables for a particle.
integer, parameter unitparticles
integer, dimension(:), allocatable particles_active_on_mype
Array of identity numbers of active particles in current processor.
logical relativistic
Use a relativistic particle mover?
character(len=name_len) integrator_type_particles
String describing the particle integrator type.
subroutine set_neighbor_ipe()
subroutine time_spent_on_particles()
subroutine get_vec(ix, igrid, x, tloc, vec)
logical write_ensemble
Whether to write ensemble output.
character(len=name_len) interp_type_particles
String describing the particle interpolation type.
integer downsample_particles
Particle downsampling factor in CSV output.
subroutine locate_particle(index, igrid_particle, ipe_particle)
subroutine push_particle_into_particles_on_mype(ipart)
subroutine read_from_snapshot()
integer nparticles_active_on_mype
Number of active particles in current processor.
procedure(sub_define_additional_gridvars), pointer particles_define_additional_gridvars
subroutine partcoord_to_cartesian(xp, xpcart)
Convert position to Cartesian coordinates.
integer nparticles
Identity number and total number of particles.
integer nparticles_on_mype
Number of particles in current processor.
integer, dimension(:), allocatable bp
Variable index for magnetic field.
subroutine comm_particles()
double precision tmax_particles
Time limit of particles.
type(particle_ptr), dimension(:), allocatable particle
procedure(fun_destroy), pointer usr_destroy_particle
integer, dimension(:), allocatable vp
Variable index for fluid velocity.
procedure(sub_integrate), pointer particles_integrate
subroutine cross(a, b, c)
procedure(sub_fill_gridvars), pointer particles_fill_gridvars
double precision t_next_output
Time to write next particle output.
subroutine advance_particles(end_time, steps_taken)
Routine handles the particle evolution.
subroutine partvec_to_cartesian(xp, up, upcart)
Convert vector to Cartesian coordinates.
integer ndefpayload
Number of default payload variables for a particle.
logical function point_in_igrid_ghostc(x, igrid, ngh)
Quick check if particle coordinate is inside igrid (ghost cells included, except the last ngh)
subroutine particle_base_init()
Give initial values to parameters.
integer, dimension(:), allocatable ipe_neighbor
integer igrid_working
Set the current igrid for the particle integrator.
character(len=60) csv_format
Format string used in CSV files.
integer, dimension(:), allocatable jp
Variable index for current.
subroutine append_to_snapshot(myparticle, mypayload)
subroutine apply_periodb(particle, igrid_particle, ipe_particle, bc_applied)
integer ipart_working
Set the current ipart for the particle integrator.
subroutine interpolate_var(igrid, ixil, ixol, gf, x, xloc, gfloc)
double precision particles_cfl
Particle CFL safety factor.
procedure(sub_fill_additional_gridvars), pointer particles_fill_additional_gridvars
subroutine ipe_cf(id, igrid, ipe_is_neighbor)
logical function particle_in_domain(x)
Check if particle is inside computational domain.
character(len=name_len) physics_type_particles
String describing the particle physics type.
subroutine init_gridvars()
Initialize grid variables for particles.
character(len=128) function make_particle_filename(tout, index, type)
subroutine comm_particles_global()
subroutine read_particles_snapshot(file_exists)
integer rhop
Variable index for density.
This module defines the procedures of a physics module. It contains function pointers for the various...
Module for pseudo random number generation. The internal pseudo random generator is the xoroshiro128p...
Writes D-1 slice, can do so in various formats, depending on slice_type.
subroutine get_igslice(dir, x, igslice)
double precision tpartc_int_0
double precision timeio_tot
double precision tpartc_com
double precision tpartc_grid_0
double precision tpartc_io
double precision timeloop
double precision tpartc_com0
double precision tpartc_grid
double precision tpartc_io_0
double precision tpartc_int
Module with all the methods that users can customize in AMRVAC.
procedure(particle_analytic), pointer usr_particle_analytic
procedure(particle_fields), pointer usr_particle_fields
Random number generator type, which contains the state.