73 integer, allocatable :: bp(:)
75 integer, allocatable :: ep(:)
77 integer, allocatable :: vp(:)
79 integer, allocatable :: jp(:)
105 double precision, allocatable :: payload(:)
106 integer :: igrid, ipe
115 double precision :: q
117 double precision :: m
119 double precision :: time
121 double precision :: dt
123 double precision :: x(3)
125 double precision :: u(3)
147 integer, intent(inout) :: ngridvars
154 double precision, intent(in) :: end_time
188 character(len=*), intent(in) :: files(:)
199 do n = 1, size(files)
200    open(unitpar, file=trim(files(n)), status="old")
201    read(unitpar, particles_list, end=111)
211 integer, parameter :: i8 = selected_int_kind(18)
212 integer(i8) :: seed(2)
214 character(len=20) :: strdata
236 dtheta = 2.0d0*dpi / 60.0d0
255 seed = [310952_i8, 8948923749821_i8]
256 call rng%set_seed(seed)
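
The two 64-bit integers above seed the xoroshiro128+ generator provided by mod_random. A minimal stand-alone sketch of the same seeding and sampling calls that appear in this listing (set_seed, unif_01_vec, normal); the rng_t type name is an assumption taken from mod_random:

    program rng_demo
      use mod_random                     ! AMRVAC pseudo-random module (xoroshiro128+)
      implicit none
      integer, parameter :: i8 = selected_int_kind(18)
      type(rng_t)      :: rng
      integer(i8)      :: seed(2)
      double precision :: r(3)

      seed = [310952_i8, 8948923749821_i8]   ! same two-word seed pattern as above
      call rng%set_seed(seed)

      call rng%unif_01_vec(r)            ! three uniform deviates in [0,1)
      print *, r, rng%normal()           ! plus one standard normal deviate
    end program rng_demo
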
267 csv_header = ' time, dt, x1, x2, x3, u1, u2, u3,'
271 write(strdata,"(a,a,a)") ' ', trim(prim_wnames(n)), ','
276 write(strdata,"(a,i2.2,a)") ' pl', n, ','
283 write(strdata,"(a,i2.2,a)") ' usrpl', n, ','
291 'i8.7,", ",i11.10,", ",i8.7)'
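
Lines 271-283 append one comma-separated column label per primitive variable and payload field to csv_header; line 291 is the tail of the matching csv_format string (fixed-width integers for ipe, iteration and index). A toy sketch of the same string-building pattern, with hypothetical labels:

    program csv_header_demo
      implicit none
      character(len=400) :: header
      character(len=20)  :: strdata
      integer :: n

      header = ' time, dt, x1, x2, x3, u1, u2, u3,'
      do n = 1, 2
        write(strdata,"(a,i2.2,a)") ' pl', n, ','   ! payload column labels pl01, pl02
        header = trim(header)//trim(strdata)
      end do
      print *, trim(header)
    end program csv_header_demo
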
299 integer :: oldtypes(0:7), offsets(0:7), blocklengths(0:7)
301 oldtypes(0) = mpi_logical
302 oldtypes(1) = mpi_integer
303 oldtypes(2:7) = mpi_double_precision
310 offsets(1) = size_logical * blocklengths(0)
311 offsets(2) = offsets(1) + size_int * blocklengths(1)
312 offsets(3) = offsets(2) + size_double * blocklengths(2)
313 offsets(4) = offsets(3) + size_double * blocklengths(3)
314 offsets(5) = offsets(4) + size_double * blocklengths(4)
315 offsets(6) = offsets(5) + size_double * blocklengths(5)
316 offsets(7) = offsets(6) + size_double * blocklengths(6)
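
The offsets above accumulate the byte layout of particle_t (one logical, one integer, then double-precision blocks) so the type can be registered with MPI. A minimal sketch of how such a layout is typically committed as a derived MPI datatype, assuming the standard mpi_type_create_struct call and assuming block lengths 1,1,1,1,1,1,3,3 for follow, index, q, m, time, dt, x(3) and u(3); the module's actual call may differ:

    program mpi_struct_demo
      use mpi
      implicit none
      integer :: blocklengths(0:7), oldtypes(0:7), type_particle, ierr
      integer(kind=mpi_address_kind) :: offsets(0:7)
      integer :: size_logical, size_int, size_double, i

      call mpi_init(ierr)
      call mpi_type_size(mpi_logical, size_logical, ierr)
      call mpi_type_size(mpi_integer, size_int, ierr)
      call mpi_type_size(mpi_double_precision, size_double, ierr)

      ! follow, index, q, m, time, dt, x(3), u(3)
      blocklengths  = [1, 1, 1, 1, 1, 1, 3, 3]
      oldtypes(0)   = mpi_logical
      oldtypes(1)   = mpi_integer
      oldtypes(2:7) = mpi_double_precision

      offsets(0) = 0
      offsets(1) = size_logical * blocklengths(0)
      do i = 2, 7
        offsets(i) = offsets(i-1) + merge(size_int, size_double, i == 2) * blocklengths(i-1)
      end do

      call mpi_type_create_struct(8, blocklengths, offsets, oldtypes, type_particle, ierr)
      call mpi_type_commit(type_particle, ierr)
      call mpi_type_free(type_particle, ierr)
      call mpi_finalize(ierr)
    end program mpi_struct_demo
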
324 double precision, intent(out) :: v(3)
325 double precision, intent(in) :: velocity
326 double precision :: vtmp(3), vnorm
328 vtmp(1) = velocity * rng%normal()
329 vtmp(2) = velocity * rng%normal()
330 vtmp(3) = velocity * rng%normal()
332 v = rng%sphere(vnorm)
337 double precision, intent(out) :: x(3)
339 call rng%unif_01_vec(x)
341 {^d&x(^d) = xprobmin^d + x(^d) * (xprobmax^d - xprobmin^d)\}
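
The {^d&...\} construct is AMRVAC dimension-loop syntax that the preprocessor expands into one statement per dimension. A plain-Fortran sketch of what line 341 does in 3D, with hypothetical domain bounds and random_number standing in for rng%unif_01_vec:

    program uniform_pos_demo
      implicit none
      double precision :: x(3)
      double precision :: xprobmin(3), xprobmax(3)

      xprobmin = [0.0d0, 0.0d0, 0.0d0]   ! hypothetical domain bounds
      xprobmax = [1.0d0, 2.0d0, 4.0d0]

      call random_number(x)              ! uniform deviates in [0,1)

      ! Expansion of {^d&x(^d) = xprobmin^d + x(^d)*(xprobmax^d - xprobmin^d)\}
      x(1) = xprobmin(1) + x(1)*(xprobmax(1) - xprobmin(1))
      x(2) = xprobmin(2) + x(2)*(xprobmax(2) - xprobmin(2))
      x(3) = xprobmin(3) + x(3)*(xprobmax(3) - xprobmin(3))

      print *, x
    end program uniform_pos_demo
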
350 integer :: igrid, iigrid
354 do iigrid=1,igridstail; igrid=igrids(iigrid);
355    allocate(gridvars(igrid)%w(ixg^t,1:ngridvars),gridvars(igrid)%wold(ixg^t,1:ngridvars))
356 gridvars(igrid)%w = 0.0d0
357 gridvars(igrid)%wold = 0.0d0
373 integer :: iigrid, igrid
375 do iigrid=1,igridstail; igrid=igrids(iigrid);
376 deallocate(gridvars(igrid)%w,gridvars(igrid)%wold)
386 double precision :: E(ixG^T, ndir)
387 double precision :: B(ixG^T, ndir)
388 integer :: igrid, iigrid
390 do iigrid=1,igridstail; igrid=igrids(iigrid);
393    gridvars(igrid)%w(ixg^t,ep(:)) = e
394    gridvars(igrid)%w(ixg^t,bp(:)) = b
406 integer, intent(in) :: igrid
407 double precision, intent(in) :: w_mhd(ixG^T,nw)
408 double precision, intent(inout) :: w_part(ixG^T,ngridvars)
410 double precision :: current(ixG^T,7-2*ndir:3)
411 double precision :: w(ixG^T,1:nw)
412 integer :: idirmin, idir
416 w(ixg^t,1:nw) = w_mhd(ixg^t,1:nw)
417 w_part(ixg^t,bp(1):bp(3)) = 0.0d0
418 w_part(ixg^t,ep(1):ep(3)) = 0.0d0
420 call phys_to_primitive(ixg^ll,ixg^ll,w,ps(igrid)%x)
423 w_part(ixg^t,rhop) = w(ixg^t,iw_rho)
426 w_part(ixg^t,vp(:)) = w(ixg^t,iw_mom(:))
431 w_part(ixg^t,bp(idir))=w(ixg^t,iw_mag(idir))+ps(igrid)%B0(ixg^t,idir,0)
434 w_part(ixg^t,bp(:)) = w(ixg^t,iw_mag(:))
440 w_part(ixg^t,jp(:)) = current(ixg^t,:)
445 w_part(ixg^t,ep(1)) = w_part(ixg^t,bp(2)) * &
446      w(ixg^t,iw_mom(3)) - w_part(ixg^t,bp(3)) * &
448 w_part(ixg^t,ep(2)) = w_part(ixg^t,bp(3)) * &
449      w(ixg^t,iw_mom(1)) - w_part(ixg^t,bp(1)) * &
451 w_part(ixg^t,ep(3)) = w_part(ixg^t,bp(1)) * &
452      w(ixg^t,iw_mom(2)) - w_part(ixg^t,bp(2)) * &
455 w_part(ixg^t,ep(r_)) = w_part(ixg^t,bp(phi_)) * &
456      w(ixg^t,iw_mom(z_)) - w_part(ixg^t,bp(z_)) * &
458 w_part(ixg^t,ep(phi_)) = w_part(ixg^t,bp(z_)) * &
459      w(ixg^t,iw_mom(r_)) - w_part(ixg^t,bp(r_)) * &
461 w_part(ixg^t,ep(z_)) = w_part(ixg^t,bp(r_)) * &
462      w(ixg^t,iw_mom(phi_)) - w_part(ixg^t,bp(phi_)) * &
471      (current(ixg^t,2) * w_part(ixg^t,bp(3)) - current(ixg^t,3) * w_part(ixg^t,bp(2)))
473      (-current(ixg^t,1) * w_part(ixg^t,bp(3)) + current(ixg^t,3) * w_part(ixg^t,bp(1)))
475      (current(ixg^t,1) * w_part(ixg^t,bp(2)) - current(ixg^t,2) * w_part(ixg^t,bp(1)))
478      (current(ixg^t,phi_) * w_part(ixg^t,bp(z_)) - current(ixg^t,z_) * w_part(ixg^t,bp(phi_)))
480      (-current(ixg^t,r_) * w_part(ixg^t,bp(z_)) + current(ixg^t,z_) * w_part(ixg^t,bp(r_)))
482      (current(ixg^t,r_) * w_part(ixg^t,bp(phi_)) - current(ixg^t,phi_) * w_part(ixg^t,bp(r_)))
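
Lines 445-462 assemble the convective electric field E = -v x B component by component (the trailing continuation lines are elided in this excerpt), and lines 471-482 are the J x B-type resistive corrections. A single-point sketch of the ideal part, stripped of the grid machinery:

    program convective_efield
      implicit none
      double precision :: v(3), b(3), e(3)

      v = [1.0d0, 0.0d0, 0.0d0]           ! fluid velocity
      b = [0.0d0, 0.0d0, 1.0d0]           ! magnetic field

      ! Ideal-MHD electric field: E = -v x B = B x v
      e(1) = b(2)*v(3) - b(3)*v(2)
      e(2) = b(3)*v(1) - b(1)*v(3)
      e(3) = b(1)*v(2) - b(2)*v(1)

      print *, e                          ! expect (0, 1, 0)
    end program convective_efield
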
494 integer :: ixO^L, idirmin, ixI^L
495 double precision :: w(ixI^S,1:nw)
497 double precision :: current(ixI^S,7-2*ndir:3),bvec(ixI^S,1:ndir)
505 bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))+block%B0(ixi^s,idir,0)
509 bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))
513 call curlvector(bvec,ixi^l,ixo^l,current,idirmin,idirmin0,ndir)
522 integer :: igrid, iigrid
526 do iigrid=1,igridstail; igrid=igrids(iigrid);
527 gridvars(igrid)%wold=gridvars(igrid)%w
545 integer :: steps_taken, nparticles_left
585 if (nparticles_left == 0 .and. convert) call mpistop('No particles left')
597 double precision, intent(in) :: end_time
598 integer, intent(out) :: steps_taken
610 if (n_active == 0) exit
614 steps_taken = steps_taken + 1
627 double precision, intent(in) :: t_left
628 double precision, intent(inout) :: dt_p
629 double precision :: n_steps
630 double precision, parameter :: eps = 1d-10
632 n_steps = t_left / dt_p
634 if (n_steps < 1+eps) then
637 else if (n_steps < 2-eps) then
639    dt_p = 0.5d0 * t_left
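
The limiter snaps the particle time step to the time remaining: with less than about one step left, the elided branch at line 635 presumably takes the whole remainder, and with less than two steps left the remainder is split evenly so the integration never ends on a vanishingly small step. A self-contained sketch under that assumption:

    pure subroutine limit_dt_endtime(t_left, dt_p)
      double precision, intent(in)    :: t_left
      double precision, intent(inout) :: dt_p
      double precision :: n_steps
      double precision, parameter :: eps = 1d-10

      n_steps = t_left / dt_p

      if (n_steps < 1+eps) then
        dt_p = t_left           ! finish in one step (assumed elided branch)
      else if (n_steps < 2-eps) then
        dt_p = 0.5d0 * t_left   ! two equal steps instead of one plus a sliver
      end if
    end subroutine limit_dt_endtime

Two equal half-steps are preferable to a full step followed by a tiny remainder, which could otherwise destabilize the particle mover.
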
650 integer, intent(in) :: ix(ndir)
651 integer, intent(in) :: igrid
652 double precision, dimension(3), intent(in) :: x
653 double precision, intent(in) :: tloc
654 double precision, dimension(ndir), intent(out) :: vec
655 double precision, dimension(ndir) :: vec1, vec2
656 double precision :: td, bb
664      ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
670      ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
675 vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td
681 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
683      sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1)*td &
684      +sum(gridvars(igrid)%wold(ixg^t,ix(:))**2,dim=ndim+1)*(1.d0-td), &
685      ps(igrid)%x(ixg^t,1:ndim),x,bb)
688 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
690      sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1), &
691      ps(igrid)%x(ixg^t,1:ndim),x,bb)
693 vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
704 integer, intent(in) :: igrid
705 double precision, dimension(3), intent(in) :: x
706 double precision, intent(in) :: tloc
707 double precision, dimension(ndir), intent(out) :: b
708 double precision, dimension(ndir) :: vec1, vec2, vec
709 double precision :: td, bb
719      ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
725      ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
730 vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td
736 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
738      sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1)*td &
739      +sum(gridvars(igrid)%wold(ixg^t,bp(:))**2,dim=ndim+1)*(1.d0-td), &
740      ps(igrid)%x(ixg^t,1:ndim),x,bb)
743 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
745      sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1), &
746      ps(igrid)%x(ixg^t,1:ndim),x,bb)
748 vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
764 integer, intent(in) :: igrid
765 double precision, dimension(3), intent(in) :: x
766 double precision, intent(in) :: tloc
767 double precision, dimension(ndir), intent(out) :: e
768 double precision, dimension(ndir) :: vec1, vec2, vec
769 double precision, dimension(ndir) :: vfluid, b
770 double precision, dimension(ndir) :: current
771 double precision :: rho = 1.d0, rho2 = 1.d0
772 double precision :: td
785 call get_vec(jp, igrid, x, tloc, current)
792 rho = rho2 * (1.0d0 - td) + rho * td
816 double precision, dimension(3), intent(in) :: u
817 double precision, intent(out) :: lfac
820 lfac = sqrt(1.0d0 + sum(u(:)**2)/c_norm**2)
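
get_lfac evaluates gamma = sqrt(1 + |u|^2/c^2) from the normalized momentum u = gamma*v, while get_lfac_from_velocity below evaluates gamma = 1/sqrt(1 - |v|^2/c^2) from the velocity. A quick consistency check of the two formulas, assuming c_norm = 1 as the normalization:

    program lfac_check
      implicit none
      double precision, parameter :: c_norm = 1.0d0
      double precision :: v(3), u(3), lfac_v, lfac_u

      v = [0.6d0, 0.0d0, 0.0d0]                         ! |v| = 0.6 c
      lfac_v = 1.0d0 / sqrt(1.0d0 - sum(v**2)/c_norm**2)
      u = lfac_v * v                                    ! momentum per unit mass
      lfac_u = sqrt(1.0d0 + sum(u**2)/c_norm**2)

      print *, lfac_v, lfac_u   ! both 1.25
    end program lfac_check
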
830 double precision, dimension(3), intent(in) :: v
831 double precision, intent(out) :: lfac
834 lfac = 1.0d0 / sqrt(1.0d0 - sum(v(:)**2)/c_norm**2)
844 double precision, dimension(ndir), intent(in) :: a,b
845 double precision, dimension(ndir), intent(out) :: c
851 c(1) = a(2)*b(3) - a(3)*b(2)
852 c(2) = a(3)*b(1) - a(1)*b(3)
853 c(3) = a(1)*b(2) - a(2)*b(1)
859 call mpistop('geometry not implemented in cross(a,b,c)')
862 call mpistop("cross in particles needs to be run with three components!")
870 integer, intent(in) :: igrid,ixI^L, ixO^L
871 double precision, intent(in) :: gf(ixI^S)
872 double precision, intent(in) :: x(ixI^S,1:ndim)
873 double precision, intent(in) :: xloc(1:3)
874 double precision, intent(out) :: gfloc
875 double precision :: xd^D, myq, dx1
877 double precision :: c00, c10
880 double precision :: c0, c1, c00, c10, c01, c11
882 integer :: ic^D, ic1^D, ic2^D, idir
887 ic^d = ceiling(dlog((xloc(^d)-xprobmin^d)*(qstretch(ps(igrid)%level,^d)-one)/&
892 if(xloc(^d)<xprobmin^d+xstretch^d) then
897 else if(xloc(^d)>xprobmax^d-xstretch^d) then
899    ic^d = ceiling(dlog((xloc(^d)-xprobmax^d+xstretch^d)*(qstretch(ps(igrid)%level,^d)-one)/&
915 if (x({ic^dd},^d) .lt. xloc(^d)) then
924 if(ic1^d.lt.ixglo^d+1 .or. ic2^d.gt.ixghi^d-1) then
925    print *, 'direction: ',^d
926    print *, 'position: ',xloc(1:ndim)
927    print *, 'indices:', ic1^d,ic2^d
928    call mpistop('Trying to interpolate from out of grid!')
933 xd1 = (xloc(1)-x(ic11,1)) / (x(ic21,1) - x(ic11,1))
934 gfloc = gf(ic11) * (1.0d0 - xd1) + gf(ic21) * xd1
937 xd1 = (xloc(1)-x(ic11,ic12,1)) / (x(ic21,ic12,1) - x(ic11,ic12,1))
938 xd2 = (xloc(2)-x(ic11,ic12,2)) / (x(ic11,ic22,2) - x(ic11,ic12,2))
939 c00 = gf(ic11,ic12) * (1.0d0 - xd1) + gf(ic21,ic12) * xd1
940 c10 = gf(ic11,ic22) * (1.0d0 - xd1) + gf(ic21,ic22) * xd1
941 gfloc = c00 * (1.0d0 - xd2) + c10 * xd2
944 xd1 = (xloc(1)-x(ic11,ic12,ic13,1)) / (x(ic21,ic12,ic13,1) - x(ic11,ic12,ic13,1))
945 xd2 = (xloc(2)-x(ic11,ic12,ic13,2)) / (x(ic11,ic22,ic13,2) - x(ic11,ic12,ic13,2))
946 xd3 = (xloc(3)-x(ic11,ic12,ic13,3)) / (x(ic11,ic12,ic23,3) - x(ic11,ic12,ic13,3))
948 c00 = gf(ic11,ic12,ic13) * (1.0d0 - xd1) + gf(ic21,ic12,ic13) * xd1
949 c10 = gf(ic11,ic22,ic13) * (1.0d0 - xd1) + gf(ic21,ic22,ic13) * xd1
950 c01 = gf(ic11,ic12,ic23) * (1.0d0 - xd1) + gf(ic21,ic12,ic23) * xd1
951 c11 = gf(ic11,ic22,ic23) * (1.0d0 - xd1) + gf(ic21,ic22,ic23) * xd1
953 c0 = c00 * (1.0d0 - xd2) + c10 * xd2
954 c1 = c01 * (1.0d0 - xd2) + c11 * xd2
956 gfloc = c0 * (1.0d0 - xd3) + c1 * xd3
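
Lines 944-956 are textbook trilinear interpolation: collapse the eight corner values along x1, then along x2, then along x3. A self-contained check on a unit cube; the interpolant is exact for a trilinear function:

    program trilinear_demo
      implicit none
      double precision :: gf(0:1,0:1,0:1)
      double precision :: xd1, xd2, xd3
      double precision :: c00, c10, c01, c11, c0, c1, gfloc
      integer :: i, j, k

      ! Sample f(x,y,z) = 1 + 2x + 3y + 4z at the cube corners
      do k = 0, 1; do j = 0, 1; do i = 0, 1
        gf(i,j,k) = 1.0d0 + 2.0d0*i + 3.0d0*j + 4.0d0*k
      end do; end do; end do

      xd1 = 0.25d0; xd2 = 0.5d0; xd3 = 0.75d0

      ! Same reduction order as interpolate_var: x1, then x2, then x3
      c00 = gf(0,0,0)*(1.0d0-xd1) + gf(1,0,0)*xd1
      c10 = gf(0,1,0)*(1.0d0-xd1) + gf(1,1,0)*xd1
      c01 = gf(0,0,1)*(1.0d0-xd1) + gf(1,0,1)*xd1
      c11 = gf(0,1,1)*(1.0d0-xd1) + gf(1,1,1)*xd1
      c0  = c00*(1.0d0-xd2) + c10*xd2
      c1  = c01*(1.0d0-xd2) + c11*xd2
      gfloc = c0*(1.0d0-xd3) + c1*xd3

      print *, gfloc, 1.0d0 + 2.0d0*xd1 + 3.0d0*xd2 + 4.0d0*xd3   ! both 6.0
    end program trilinear_demo
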
965 double precision :: tpartc_avg, tpartc_int_avg, tpartc_io_avg, tpartc_com_avg, tpartc_grid_avg
974 tpartc_avg = tpartc_avg/dble(npe)
975 tpartc_int_avg = tpartc_int_avg/dble(npe)
976 tpartc_io_avg = tpartc_io_avg/dble(npe)
977 tpartc_com_avg = tpartc_com_avg/dble(npe)
978 tpartc_grid_avg = tpartc_grid_avg/dble(npe)
979 write(*,'(a,f12.3,a)') ' Particle handling took : ', tpartc, ' sec'
980 write(*,'(a,f12.2,a)') ' Percentage: ', 100.0d0*tpartc/(timeloop+smalldouble), ' %'
981 write(*,'(a,f12.3,a)') ' Particle IO took : ',tpartc_io_avg,' sec'
982 write(*,'(a,f12.2,a)') ' Percentage: ',100.0d0*tpartc_io_avg/(timeloop+smalldouble),' %'
983 write(*,'(a,f12.3,a)') ' Particle COM took : ',tpartc_com_avg,' sec'
984 write(*,'(a,f12.2,a)') ' Percentage: ',100.0d0*tpartc_com_avg/(timeloop+smalldouble),' %'
985 write(*,'(a,f12.3,a)') ' Particle integration took : ',tpartc_int_avg,' sec'
986 write(*,'(a,f12.2,a)') ' Percentage: ',100.0d0*tpartc_int_avg/(timeloop+smalldouble),' %'
987 write(*,'(a,f12.3,a)') ' Particle init grid took : ',tpartc_grid_avg,' sec'
988 write(*,'(a,f12.2,a)') ' Percentage: ',100.0d0*tpartc_grid_avg/(timeloop+smalldouble),' %'
996 logical, intent(out) :: file_exists
997 character(len=std_len) :: filename
998 integer :: mynpayload, mynparticles, pos
1013 INQUIRE(file=filename, exist=file_exists)
1014 if (.not. file_exists) then
1015    write(*,*) 'WARNING: File '//trim(filename)//' with particle data does not exist.'
1016    write(*,*) 'Initialising particles from user or default routines'
1018 open(unit=unitparticles,file=filename,form='unformatted',action='read',status='unknown',access='stream')
1021 call mpistop('npayload in restart file does not match npayload in mod_particles')
1025 call mpi_bcast(file_exists,1,mpi_logical,0,icomm,ierrmpi)
1026 if (.not. file_exists) return
1036 mynparticles = mynparticles + 1
1039 call mpi_bcast(mynparticles,1,mpi_integer,0,icomm,ierrmpi)
1049 integer :: index,igrid_particle,ipe_particle
1068 particle(index)%igrid = igrid_particle
1078 character(len=std_len) :: filename
1083 type(particle_t), allocatable, dimension(:) :: send_particles
1084 type(particle_t), allocatable, dimension(:) :: receive_particles
1085 double precision, allocatable, dimension(:,:) :: send_payload
1086 double precision, allocatable, dimension(:,:) :: receive_payload
1087 integer :: status(MPI_STATUS_SIZE)
1088 integer, dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1089 integer :: ipe, ipart, iipart, send_n_particles_for_output
1090 logical, save :: file_exists=.false.
1091 integer :: snapshotnumber
1101 receive_n_particles_for_output_from_ipe(:) = 0
1104 if (mype .eq. 0) then
1105    write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotnumber,'.dat'
1106    INQUIRE(file=filename, exist=file_exists)
1107    if (.not. file_exists) then
1108       open(unit=unitparticles,file=filename,form='unformatted',status='new',access='stream')
1110       open(unit=unitparticles,file=filename,form='unformatted',status='replace',access='stream')
1121 if (mype .ne. 0) then
1125    allocate(send_particles(1:send_n_particles_for_output))
1126    allocate(send_payload(1:npayload,1:send_n_particles_for_output))
1128    send_particles(iipart) = particle(ipart)%self
1134 call mpi_recv(receive_n_particles_for_output_from_ipe(ipe),1,mpi_integer,ipe,ipe,icomm,status,ierrmpi)
1138 if (mype .ne. 0) then
1141    deallocate(send_particles)
1142    deallocate(send_payload)
1152    allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
1153    allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
1154    call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1155    call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1156 do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1159 deallocate(receive_particles)
1160 deallocate(receive_payload)
1170 double precision :: mypayload(1:npayload)
1187 integer :: iipart, ipart, icomp
1188 character(len=std_len) :: filename
1189 character(len=1024) :: line, strdata
1192 if (particle(ipart)%self%follow) then
1197    write(strdata,"(a,i2.2,a)") 'payload',icomp,','
1198    line = trim(line)//trim(strdata)
1200 write(unitparticles,"(a,a,a)") 'time,dt,x1,x2,x3,u1,u2,u3,',trim(line),'ipe,iteration,index'
1209 double precision, intent(in) :: end_time
1211 double precision :: t_min_mype
1212 integer :: ipart, iipart
1215 t_min_mype = bigdouble
1220 activate = (particle(ipart)%self%time < end_time)
1221 t_min_mype = min(t_min_mype, particle(ipart)%self%time)
1239 integer, intent(in) :: index
1240 integer, intent(out) :: igrid_particle, ipe_particle
1241 integer :: iipart,ipart,ipe_has_particle,ipe
1242 integer, dimension(0:1) :: buff
1243 logical :: has_particle(0:npe-1)
1245 has_particle(:) = .false.
1247 if (particle(ipart)%self%index == index) then
1248    has_particle(mype) = .true.
1253 if (has_particle(mype)) then
1258 if (npe>0) call mpi_allgather(has_particle(mype),1,mpi_logical,has_particle,1,mpi_logical,icomm,ierrmpi)
1260 ipe_has_particle = -1
1262 if (has_particle(ipe) .eqv. .true.) then
1263    ipe_has_particle = ipe
1268 if (ipe_has_particle .ne. -1) then
1269    if (npe>0) call mpi_bcast(buff,2,mpi_integer,ipe_has_particle,icomm,ierrmpi)
1270 igrid_particle=buff(0)
1271 ipe_particle = buff(1)
1284 double precision, intent(in) :: x(3)
1285 integer, intent(out) :: igrid_particle, ipe_particle
1286 integer :: ig(ndir,nlevelshi), ig_lvl(nlevelshi)
1287 integer :: idim, ic(ndim)
1305 do while (.not.branch%node%leaf)
1306    {ic(^d)=ig(^d,branch%node%level+1) - 2 * branch%node%ig^d +2\}
1307    branch%node => branch%node%child({ic(^d)})%node
1310 igrid_particle = branch%node%igrid
1311 ipe_particle = branch%node%ipe
1318 double precision, dimension(ndim), intent(in) :: x
1321 all(x < [ xprobmax^d ])
1327 integer, intent(in) :: igrid, ipart
1328 double precision :: x(ndim), grid_rmin(ndim), grid_rmax(ndim)
1331 if (.not. allocated(ps(igrid)%w)) then
1344 integer :: igrid, iigrid,ipe
1345 integer :: my_neighbor_type, i^D
1346 logical :: ipe_is_neighbor(0:npe-1)
1348 ipe_is_neighbor(:) = .false.
1349 do iigrid=1,igridstail; igrid=igrids(iigrid);
1352    if (i^d==0|.and.) then
1355 my_neighbor_type=neighbor_type(i^d,igrid)
1357 select case (my_neighbor_type)
1358 case (neighbor_boundary)
1360 case (neighbor_coarse)
1361 call ipe_fc(i^d,igrid,ipe_is_neighbor)
1362 case (neighbor_sibling)
1363 call ipe_srl(i^d,igrid,ipe_is_neighbor)
1364 case (neighbor_fine)
1365 call ipe_cf(i^d,igrid,ipe_is_neighbor)
1373 ipe_is_neighbor(mype) = .false.
1378 if (ipe_is_neighbor(ipe)) then
1388 integer, intent(in) :: i^D, igrid
1389 logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1391 ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1397 integer, intent(in) :: i^D, igrid
1398 logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1400 ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1406 integer, intent(in) :: i^D, igrid
1407 logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1408 integer :: ic^D, inc^D
1410 {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1413 ipe_is_neighbor( neighbor_child(2,inc^d,igrid) ) = .true.
1422 character(len=std_len) :: filename
1425 type(particle_t), allocatable, dimension(:) :: send_particles
1426 double precision, allocatable, dimension(:,:) :: send_payload
1427 double precision :: tout
1428 integer :: send_n_particles_for_output, cc
1430 integer :: ipart,iipart
1437 send_n_particles_for_output = 0
1444    send_n_particles_for_output = send_n_particles_for_output + 1
1448 allocate(send_particles(1:send_n_particles_for_output))
1449 allocate(send_payload(npayload,1:send_n_particles_for_output))
1456    send_particles(cc) = particle(ipart)%self
1463      send_payload,'ensemble')
1464 deallocate(send_particles)
1465 deallocate(send_payload)
1473 character(len=*), intent(in) :: type
1474 double precision, intent(in) :: tout
1475 integer, intent(in) :: index
1476 integer :: nout, mysnapshot
1490 subroutine output_ensemble(send_n_particles_for_output,send_particles,send_payload,typefile)
1493 integer, intent(in) :: send_n_particles_for_output
1494 type(particle_t), dimension(send_n_particles_for_output), intent(in) :: send_particles
1495 double precision, dimension(npayload,send_n_particles_for_output), intent(in) :: send_payload
1496 character(len=*), intent(in) :: typefile
1497 character(len=std_len) :: filename
1498 type(particle_t), allocatable, dimension(:) :: receive_particles
1499 double precision, allocatable, dimension(:,:) :: receive_payload
1500 integer :: status(MPI_STATUS_SIZE)
1501 integer, dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1502 integer :: ipe, ipart, nout
1503 logical :: file_exists
1505 receive_n_particles_for_output_from_ipe(:) = 0
1507 call mpi_allgather(send_n_particles_for_output, 1, mpi_integer, &
1508      receive_n_particles_for_output_from_ipe, 1, mpi_integer, icomm, ierrmpi)
1511 if (sum(receive_n_particles_for_output_from_ipe(:)) == 0) return
1518 if(typefile=='destroy') then
1519    write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1520         trim(typefile) // '.csv'
1522    inquire(file=filename, exist=file_exists)
1524    if (.not. file_exists) then
1532    write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1539 do ipart=1,send_n_particles_for_output
1545 allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
1546    allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
1547    call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1548    call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1549 do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1552 deallocate(receive_particles)
1553 deallocate(receive_payload)
1563 character(len=std_len) :: filename
1564 integer :: ipart,iipart,ipe
1565 logical :: file_exists
1571 if (particle(ipart)%self%follow) then
1572    write(filename,"(a,a,i6.6,a)") trim(base_filename),'_particle_', &
1574 inquire(file=filename, exist=file_exists)
1599 double precision, intent(in) :: payload(npayload)
1600 integer, intent(in) :: ipe
1601 integer, intent(in) :: file_unit
1602 double precision :: x(3), u(3)
1615 x(:) = myparticle%x(:)
1625 write(file_unit,csv_format) myparticle%time, myparticle%dt, x(1:3), &
1637 double precision, intent(in) :: xp(1:3)
1638 double precision, intent(out) :: xpcart(1:3)
1642 xpcart(1:3) = xp(1:3)
1644 xpcart(1) = xp(r_)*cos(xp(phi_))
1645 xpcart(2) = xp(r_)*sin(xp(phi_))
1648 xpcart(1) = xp(1)*sin(xp(2))*cos(xp(3))
1650 xpcart(2) = xp(1)*cos(xp(2))
1651 xpcart(3) = xp(1)*sin(xp(2))*sin(xp(3))}
1653 xpcart(2) = xp(1)*sin(xp(2))*sin(xp(3))
1654 xpcart(3) = xp(1)*cos(xp(2))}
1656 write(*,*) "No converter for coordinate=",coordinate
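
The two spherical branches differ only in which Cartesian axis plays the polar role (the {...} groups are dimension-dependent code). A standalone check of the standard (r, theta, phi) -> (x, y, z) mapping from the second branch:

    program sph_to_cart
      implicit none
      double precision :: dpi, xp(3), xpcart(3)

      dpi = acos(-1.0d0)
      xp  = [2.0d0, 0.5d0*dpi, 0.0d0]    ! r = 2, theta = pi/2, phi = 0

      ! Standard mapping, as in the second spherical branch above
      xpcart(1) = xp(1)*sin(xp(2))*cos(xp(3))
      xpcart(2) = xp(1)*sin(xp(2))*sin(xp(3))
      xpcart(3) = xp(1)*cos(xp(2))

      print *, xpcart                    ! expect (2, 0, ~0)
    end program sph_to_cart
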
1667 double precision, intent(in) :: xpcart(1:3)
1668 double precision, intent(out) :: xp(1:3)
1669 double precision :: xx, yy, zz
1670 double precision :: rr, tth, pphi
1676 xp(r_) = sqrt(xpcart(1)**2 + xpcart(2)**2)
1677 xp(phi_) = atan2(xpcart(2),xpcart(1))
1678 if (xp(phi_) .lt. 0.d0) xp(phi_) = 2.0d0*dpi + xp(phi_)
1688 rr = sqrt(xx**2 + yy**2 + zz**2)
1691 pphi = acos(xx/sqrt(xx**2+yy**2))
1693 pphi = 2.d0*dpi - acos(xx/sqrt(xx**2+yy**2))
1695 xp(1:3) = (/ rr, tth, pphi /)
1697 write(*,*) "No converter for coordinate=",coordinate
1708 double precision, intent(in) :: xp(1:3),up(1:3)
1709 double precision, intent(out) :: upcart(1:3)
1719 upcart(1) = up(1)*sin(xp(2))*cos(xp(3)) + up(2)*cos(xp(2))*cos(xp(3)) - up(3)*sin(xp(3))
1721 upcart(2) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))
1722 upcart(3) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))}
1724 upcart(2) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))
1725 upcart(3) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))}
1727 write(*,*) "No converter for coordinate=",coordinate
1738 double precision, intent(in) :: xp(1:3),upcart(1:3)
1739 double precision, intent(out) :: up(1:3)
1743 up(1:3) = upcart(1:3)
1745 up(r_) = cos(xp(phi_)) * upcart(1) + sin(xp(phi_)) * upcart(2)
1746 up(phi_) = -sin(xp(phi_)) * upcart(1) + cos(xp(phi_)) * upcart(2)
1750 up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(3)*sin(xp(2))*sin(xp(3)) + upcart(2)*cos(xp(2))
1751 up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(3)*cos(xp(2))*sin(xp(3)) - upcart(2)*sin(xp(2))
1752 up(3) = -upcart(1)*sin(xp(3)) + upcart(3)*cos(xp(3))}
1754 up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(2)*sin(xp(2))*sin(xp(3)) + upcart(3)*cos(xp(2))
1755 up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(2)*cos(xp(2))*sin(xp(3)) - upcart(3)*sin(xp(2))
1756 up(3) = -upcart(1)*sin(xp(3)) + upcart(2)*cos(xp(3))}
1758 write(*,*) "No converter for coordinate=",coordinate
1768 integer, intent(in) :: igrid
1769 double precision, intent(inout) :: xp(1:3)
1770 double precision :: xpmod(1:3)
1787 xpmod(phiind) = xp(phiind) + 2.d0*dpi
1790 xp(phiind) = xpmod(phiind)
1794 xpmod(phiind) = xp(phiind) - 2.d0*dpi
1797 xp(phiind) = xpmod(phiind)
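
fix_phi_crossing shifts the azimuthal coordinate by 2*pi when a particle steps over the 0/2*pi seam; the conditions guarding lines 1787 and 1794 are elided in this excerpt. A sketch of the basic wrap, assuming the usual [0, 2*pi) convention:

    program phi_wrap_demo
      implicit none
      double precision :: dpi, phi

      dpi = acos(-1.0d0)
      phi = -0.1d0                        ! just stepped backwards over the seam

      if (phi < 0.0d0)      phi = phi + 2.0d0*dpi
      if (phi >= 2.0d0*dpi) phi = phi - 2.0d0*dpi

      print *, phi                        ! ~6.183, back inside [0, 2*pi)
    end program phi_wrap_demo
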
1808 integer, intent(in) :: igrid, ngh
1809 double precision, intent(in) :: x(ndim)
1810 double precision :: grid_rmin(ndim), grid_rmax(ndim)
1823 integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
1825 integer :: tag_send, tag_receive, send_buff, rcv_buff
1826 integer :: status(MPI_STATUS_SIZE)
1827 integer, dimension(0:npe-1) :: send_n_particles_to_ipe
1828 integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
1829 type(particle_t), allocatable, dimension(:,:) :: send_particles
1830 type(particle_t), allocatable, dimension(:,:) :: receive_particles
1831 double precision, allocatable, dimension(:,:,:) :: send_payload
1832 double precision, allocatable, dimension(:,:,:) :: receive_payload
1833 integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
1834 integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
1835 integer :: destroy_n_particles_mype
1836 integer, allocatable, dimension(:) :: sndrqst, rcvrqst
1837 integer, allocatable, dimension(:) :: sndrqst_payload, rcvrqst_payload
1838 integer :: isnd, ircv
1839 integer, parameter :: maxneighbors=56
1840 logical :: BC_applied
1843 send_n_particles_to_ipe(:) = 0
1844 receive_n_particles_from_ipe(:) = 0
1845 destroy_n_particles_mype = 0
1848 allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
1849 allocate( sndrqst_payload(1:npe-1), rcvrqst_payload(1:npe-1) )
1850 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1851 sndrqst_payload = mpi_request_null; rcvrqst_payload = mpi_request_null;
1861 destroy_n_particles_mype = destroy_n_particles_mype + 1
1862 particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1873 if (igrid_particle == -1 ) then
1875    if (.not. bc_applied .or. igrid_particle == -1) then
1877 destroy_n_particles_mype = destroy_n_particles_mype + 1
1878 particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1885 particle(ipart)%igrid = igrid_particle
1891 send_n_particles_to_ipe(ipe_particle) = &
1892 send_n_particles_to_ipe(ipe_particle) + 1
1894 call mpistop('too many particles, increase nparticles_per_cpu_hi')
1895 particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
1904 call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
1907 if (npe == 1) return
1913 tag_receive = ipe * npe + mype
1914 isnd = isnd + 1; ircv = ircv + 1
1915 call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1916 call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1919 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1920 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1921 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
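
Lines 1913-1921 are the standard nonblocking count exchange: each rank posts one mpi_isend and one mpi_irecv per neighbour, and a single mpi_waitall completes the round without imposing any message ordering. A minimal two-rank sketch of the same pattern:

    program count_exchange
      use mpi
      implicit none
      integer :: mype, npe, other, ierr
      integer :: my_count, their_count
      integer :: rqst(2)

      call mpi_init(ierr)
      call mpi_comm_rank(mpi_comm_world, mype, ierr)
      call mpi_comm_size(mpi_comm_world, npe, ierr)

      if (npe == 2) then
        other = 1 - mype
        my_count = 10 + mype   ! pretend particle counts

        ! Post both operations, then wait once (as in comm_particles)
        call mpi_isend(my_count, 1, mpi_integer, other, 0, mpi_comm_world, rqst(1), ierr)
        call mpi_irecv(their_count, 1, mpi_integer, other, 0, mpi_comm_world, rqst(2), ierr)
        call mpi_waitall(2, rqst, mpi_statuses_ignore, ierr)

        print *, 'rank', mype, 'receives count', their_count
      end if
      call mpi_finalize(ierr)
    end program count_exchange
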
1925 allocate(send_particles(maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
1926 allocate(receive_particles(maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
1927 allocate(send_payload(npayload,maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
1928 allocate(receive_payload(npayload,maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
1934 tag_receive = ipe * npe + mype
1937 if (send_n_particles_to_ipe(ipe) .gt. 0) then
1940    do ipart = 1, send_n_particles_to_ipe(ipe)
1941       send_particles(ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
1945    call mpi_isend(send_particles(:,iipe),send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1946    call mpi_isend(send_payload(:,:,iipe),npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,sndrqst_payload(isnd),ierrmpi)
1951 if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1954    call mpi_irecv(receive_particles(:,iipe),receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1955    call mpi_irecv(receive_payload(:,:,iipe),npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,rcvrqst_payload(ircv),ierrmpi)
1960 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1961 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1962 call mpi_waitall(isnd,sndrqst_payload,mpi_statuses_ignore,ierrmpi)
1963 call mpi_waitall(ircv,rcvrqst_payload,mpi_statuses_ignore,ierrmpi)
1966 do ipart = 1, send_n_particles_to_ipe(ipe)
1967    deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
1968    deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
1974 if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1975 do ipart = 1, receive_n_particles_from_ipe(ipe)
1977 index = receive_particles(ipart,iipe)%index
1981 particle(index)%self = receive_particles(ipart,iipe)
1988 particle(index)%igrid = igrid_particle
2004 integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2006 integer :: tag_send, tag_receive, send_buff, rcv_buff
2007 integer :: status(MPI_STATUS_SIZE)
2008 integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2009 integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2014 type(particle_t), allocatable, dimension(:) :: send_particles
2015 type(particle_t), allocatable, dimension(:) :: receive_particles
2016 double precision, allocatable, dimension(:,:) :: send_payload
2017 double precision, allocatable, dimension(:,:) :: receive_payload
2018 integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
2019 integer, allocatable, dimension(:) :: sndrqst, rcvrqst
2020 integer :: isnd, ircv
2021 logical :: BC_applied
2023 send_n_particles_to_ipe(:) = 0
2024 receive_n_particles_from_ipe(:) = 0
2027 allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
2028 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
2036 particle(ipart)%igrid = igrid_particle
2042 send_n_particles_to_ipe(ipe_particle) = &
2043 send_n_particles_to_ipe(ipe_particle) + 1
2045 call mpistop('G: too many particles increase nparticles_per_cpu_hi')
2046 particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2055 deallocate(sndrqst, rcvrqst)
2056 deallocate(particle_index_to_be_sent_to_ipe)
2069 do ipe=0,npe-1; if (ipe .eq. mype) cycle;
2072 isnd = isnd + 1; ircv = ircv + 1;
2073 call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer, &
2075 call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer, &
2079 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
2080 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
2081 deallocate(sndrqst, rcvrqst)
2089 if (ipe .eq. mype) cycle;
2091 tag_receive = ipe * npe + mype
2094 if (send_n_particles_to_ipe(ipe) .gt. 0) then
2097 allocate(send_particles(1:send_n_particles_to_ipe(ipe)))
2098 allocate(send_payload(1:npayload,1:send_n_particles_to_ipe(ipe)))
2099 do ipart = 1, send_n_particles_to_ipe(ipe)
2100    send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
2105 call mpi_send(send_payload,npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,ierrmpi)
2106 do ipart = 1, send_n_particles_to_ipe(ipe)
2107    deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2108    deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2111 deallocate(send_particles)
2112 deallocate(send_payload)
2117 if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2118    allocate(receive_particles(1:receive_n_particles_from_ipe(ipe)))
2119    allocate(receive_payload(1:npayload,1:receive_n_particles_from_ipe(ipe)))
2122    call mpi_recv(receive_payload,npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,status,ierrmpi)
2123 do ipart = 1, receive_n_particles_from_ipe(ipe)
2125 index = receive_particles(ipart)%index
2128 particle(index)%self = receive_particles(ipart)
2136 particle(index)%igrid = igrid_particle
2140 deallocate(receive_particles)
2141 deallocate(receive_payload)
2145 deallocate(particle_index_to_be_sent_to_ipe)
2465 integer, intent(inout) :: igrid_particle, ipe_particle
2466 logical, intent(out) :: BC_applied
2469 bc_applied = .false.
2476 if (.not. periodb(idim)) cycle
2480 if (particle%x(^d) .lt. xprobmin^d) then
2481    particle%x(^d) = particle%x(^d) + (xprobmax^d - xprobmin^d)
2484 if (particle%x(^d) .ge. xprobmax^d) then
2485    particle%x(^d) = particle%x(^d) - (xprobmax^d - xprobmin^d)
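
apply_periodb translates a particle that has left a periodic edge by the domain width in that dimension. The same shift in one dimension, with hypothetical bounds:

    program periodic_wrap
      implicit none
      double precision, parameter :: xprobmin1 = 0.0d0, xprobmax1 = 1.0d0
      double precision :: x

      x = 1.05d0                          ! particle just left the right edge
      if (x <  xprobmin1) x = x + (xprobmax1 - xprobmin1)
      if (x >= xprobmax1) x = x - (xprobmax1 - xprobmin1)
      print *, x                          ! 0.05
    end program periodic_wrap
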
2501 integer, intent(in) :: destroy_n_particles_mype
2502 integer, dimension(1:destroy_n_particles_mype), intent(in) :: particle_index_to_be_destroyed
2503 type(particle_t), dimension(destroy_n_particles_mype) :: destroy_particles_mype
2504 double precision, dimension(npayload,destroy_n_particles_mype) :: destroy_payload_mype
2505 integer :: iipart,ipart,destroy_n_particles
2507 destroy_n_particles = 0
2510 do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
2511    destroy_particles_mype(iipart) = particle(ipart)%self
2515 call output_ensemble(destroy_n_particles_mype,destroy_particles_mype,destroy_payload_mype,'destroy')
2518 call mpi_allreduce(destroy_n_particles_mype,destroy_n_particles,1,mpi_integer,mpi_sum,icomm,ierrmpi)
2520 destroy_n_particles = destroy_n_particles_mype
2525 do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
2533 deallocate(particle(ipart)%payload)
2542 integer, intent(in) :: ipart
2552 integer, intent(in) :: ipart