MPI-AMRVAC  3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_particle_base.t
1 !> Module with shared functionality for all the particle movers
2 module mod_particle_base
3  use mod_global_parameters, only: name_len, std_len
4  use mod_physics
5  use mod_random
6  use mod_constants
7  use mod_comm_lib, only: mpistop
8 
9  !> Normalization factor for velocity in the integrator
10  double precision :: integrator_velocity_factor(3)
11  !> Time limit of particles
12  double precision :: tmax_particles
13  !> Minimum time of all particles
14  double precision :: min_particle_time
15  !> Time interval to save particles
16  double precision :: dtsave_particles
17  !> If positive, a constant time step for the particles
18  double precision :: const_dt_particles
19  !> Particle CFL safety factor
20  double precision :: particles_cfl
21  !> Time to write next particle output
22  double precision :: t_next_output
23  !> Resistivity
24  double precision :: particles_eta, particles_etah
25  double precision :: dtheta
26  !> Particle downsampling factor in CSV output
27  integer :: downsample_particles
28  !> Maximum number of particles
29  integer :: nparticleshi
30  !> Maximum number of particles in one processor
31  integer :: nparticles_per_cpu_hi
32  !> Number of default payload variables for a particle
33  integer :: ndefpayload
34  !> Number of user-defined payload variables for a particle
35  integer :: nusrpayload
36  !> Number of total payload variables for a particle
37  integer :: npayload
38  !> Number of variables for grid field
39  integer :: ngridvars
40  !> Number of particles
41  integer :: num_particles
42  !> Identity number and total number of particles
43  integer :: nparticles
44  !> Iteration number of particles
45  integer :: it_particles
46  !> Output count for ensembles of destroyed particles
47  integer :: n_output_destroyed
48 
49  ! these two save the list of neighboring cpus:
50  integer, allocatable :: ipe_neighbor(:)
51  integer :: npe_neighbors
52 
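 !> MPI datatype describing particle_t (built in init_particles_com)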
53  integer :: type_particle
54  !> set the current igrid for the particle integrator:
55  integer :: igrid_working
56  !> set the current ipart for the particle integrator:
57  integer :: ipart_working
58 
59  integer, parameter :: unitparticles=15
60 
61  !> Array of identity numbers of particles in current processor
62  integer, dimension(:), allocatable :: particles_on_mype
63  !> Array of identity numbers of active particles in current processor
64  integer, dimension(:), allocatable :: particles_active_on_mype
65  !> Number of particles in current processor
66  integer :: nparticles_on_mype
67  !> Number of active particles in current processor
68  integer :: nparticles_active_on_mype
69  !> Integrator to be used for particles
70  integer :: integrator
71 
72  !> Variable index for magnetic field
73  integer, allocatable :: bp(:)
74  !> Variable index for electric field
75  integer, allocatable :: ep(:)
76  !> Variable index for fluid velocity
77  integer, allocatable :: vp(:)
78  !> Variable index for current
79  integer, allocatable :: jp(:)
80  !> Variable index for density
81  integer :: rhop
82  !> Use a relativistic particle mover?
83  logical :: relativistic
84  !> Whether to write individual particle output (followed particles)
85  logical :: write_individual
86  !> Whether to write ensemble output
87  logical :: write_ensemble
88  !> Whether to write particle snapshots
89  logical :: write_snapshot
90  logical :: losses
91  !> String describing the particle interpolation type
92  character(len=name_len) :: interp_type_particles = ""
93  !> String describing the particle physics type
94  character(len=name_len) :: physics_type_particles = ""
95  !> String describing the particle integrator type
96  character(len=name_len) :: integrator_type_particles = ""
97  !> Header string used in CSV files
98  character(len=400) :: csv_header
99  !> Format string used in CSV files
100  character(len=60) :: csv_format
101 
102  type particle_ptr
103  type(particle_t), pointer :: self
104  !> extra information carried by the particle
105  double precision, allocatable :: payload(:)
106  integer :: igrid, ipe
107  end type particle_ptr
108 
109  type particle_t
110  !> follow the history of the particle
111  logical :: follow
112  !> identity number
113  integer :: index
114  !> charge
115  double precision :: q
116  !> mass
117  double precision :: m
118  !> time
119  double precision :: time
120  !> time step
121  double precision :: dt
122  !> coordinates
123  double precision :: x(3)
124  !> velocity, momentum, or special ones
125  double precision :: u(3)
126  end type particle_t
127 
128  ! Array containing all particles
129  type(particle_ptr), dimension(:), allocatable :: particle
130 
131  ! The pseudo-random number generator
132  type(rng_t) :: rng
133 
134  ! Pointers for user-defined stuff
135  procedure(sub_fill_gridvars), pointer :: particles_fill_gridvars => null()
136  procedure(sub_define_additional_gridvars), pointer :: particles_define_additional_gridvars => null()
137  procedure(sub_fill_additional_gridvars), pointer :: particles_fill_additional_gridvars => null()
138  procedure(sub_integrate), pointer :: particles_integrate => null()
139  procedure(fun_destroy), pointer :: usr_destroy_particle => null()
140 
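 ! A specific particle physics module is expected to point these procedures at its
 ! own implementations during initialization (sketch, routine names illustrative):
 !   particles_fill_gridvars => my_fill_gridvars
 !   particles_integrate     => my_integrate_particles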
141  abstract interface
142 
143  subroutine sub_fill_gridvars
144  end subroutine sub_fill_gridvars
145 
146  subroutine sub_define_additional_gridvars(ngridvars)
147  integer, intent(inout) :: ngridvars
148  end subroutine sub_define_additional_gridvars
149 
150  subroutine sub_fill_additional_gridvars
151  end subroutine sub_fill_additional_gridvars
152 
153  subroutine sub_integrate(end_time)
154  double precision, intent(in) :: end_time
155  end subroutine sub_integrate
156 
157  function fun_destroy(myparticle)
158  import particle_ptr
159  logical :: fun_destroy
160  type(particle_ptr), intent(in) :: myparticle
161  end function fun_destroy
162 
163  end interface
164 
165 contains
166 
167  !> Finalize the particles module
168  subroutine particles_finish()
170 
172 
173  ! Clean up particle type
174  call mpi_type_free(type_particle,ierrmpi)
175 
176  ! Clean up variables
177  deallocate(particle)
178  deallocate(ipe_neighbor)
179  deallocate(particles_on_mype)
180  deallocate(particles_active_on_mype)
181  deallocate(gridvars)
182 
183  end subroutine particles_finish
184 
185  !> Read this module's parameters from a file
186  subroutine particles_params_read(files)
187  use mod_global_parameters, only: unitpar
188  character(len=*), intent(in) :: files(:)
189  integer :: n
190 
191  namelist /particles_list/ physics_type_particles, nparticleshi, nparticles_per_cpu_hi, &
198 
199  do n = 1, size(files)
200  open(unitpar, file=trim(files(n)), status="old")
201  read(unitpar, particles_list, end=111)
202 111 close(unitpar)
203  end do
204 
205  end subroutine particles_params_read
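 ! Illustrative &particles_list block for a .par file (values are arbitrary
 ! examples; only parameters visible in the namelist declaration above are shown):
 !   &particles_list
 !     physics_type_particles = 'advect'
 !     nparticleshi = 100000
 !     nparticles_per_cpu_hi = 10000
 !   /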
206 
207  !> Give initial values to parameters
208  subroutine particle_base_init()
210  integer :: n
211  integer, parameter :: i8 = selected_int_kind(18)
212  integer(i8) :: seed(2)
213  integer :: idir
214  character(len=20) :: strdata
215 
216  physics_type_particles = 'advect'
217  nparticleshi = 10000000
218  nparticles_per_cpu_hi = 1000000
219  num_particles = 1000
220  ndefpayload = 1
221  nusrpayload = 0
222  tmax_particles = bigdouble
223  dtsave_particles = bigdouble
224  const_dt_particles = -bigdouble
225  particles_cfl = 0.5
226  write_individual = .true.
227  write_ensemble = .true.
228  write_snapshot = .true.
229  ! by default, do not write .dat particle snapshots when running in static fields
230  if (.not. time_advance) write_snapshot = .false.
232  relativistic = .false.
233  particles_eta = -1.d0
234  particles_etah = -1.d0
235  t_next_output = 0.0d0
236  dtheta = 2.0d0*dpi / 60.0d0
237  losses = .false.
238  nparticles = 0
239  it_particles = 0
243  integrator_velocity_factor(:) = 1.0d0
244  integrator_type_particles = 'Boris'
245  interp_type_particles = 'default'
246 
248 
249  ! If sampling, ndefpayload = nw:
250  if (physics_type_particles == 'sample') ndefpayload = nw
251  ! Total number of payloads
252  npayload = ndefpayload + nusrpayload
253 
254  ! initialise the random number generator
255  seed = [310952_i8, 8948923749821_i8]
256  call rng%set_seed(seed)
257 
258  allocate(particle(1:nparticleshi))
259  allocate(ipe_neighbor(0:npe-1))
262 
263  particles_on_mype(:) = 0
264  particles_active_on_mype(:) = 0
265 
266  ! Generate header for CSV files
267  csv_header = ' time, dt, x1, x2, x3, u1, u2, u3,'
268  ! If sampling, name the payloads like the fluid quantities
269  if (physics_type_particles == 'sample') then
270  do n = 1, nw
271  write(strdata,"(a,a,a)") ' ', trim(prim_wnames(n)), ','
272  csv_header = trim(csv_header) // trim(strdata)
273  end do
274  else ! Otherwise, payloads are called pl01, pl02, ...
275  do n = 1, ndefpayload
276  write(strdata,"(a,i2.2,a)") ' pl', n, ','
277  csv_header = trim(csv_header) // trim(strdata)
278  end do
279  end if
280  ! User-defined payloads are called usrpl01, usrpl02, ...
281  if (nusrpayload > 0) then
282  do n = 1, nusrpayload
283  write(strdata,"(a,i2.2,a)") ' usrpl', n, ','
284  csv_header = trim(csv_header) // trim(strdata)
285  end do
286  end if
287  csv_header = trim(csv_header) // ' ipe, iteration, index'
288 
289  ! Generate format string for CSV files
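 ! (8 + npayload floating-point columns: time, dt, x(3), u(3) and the payloads,
 ! followed by the integer columns ipe, iteration, index)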
290  write(csv_format, '(A,I0,A,A)') '(', 8 + npayload, '(es14.6,", ")', &
291  'i8.7,", ",i11.10,", ",i8.7)'
292 
293  end subroutine particle_base_init
294 
295  !> Initialize communicators for particles
296  subroutine init_particles_com()
298 
299  integer :: oldtypes(0:7), offsets(0:7), blocklengths(0:7)
300 
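 ! Build an MPI struct type mirroring particle_t: follow (logical), index (integer),
 ! q, m, time, dt (doubles), and the two double triplets x(3) and u(3)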
301  oldtypes(0) = mpi_logical
302  oldtypes(1) = mpi_integer
303  oldtypes(2:7) = mpi_double_precision
304 
305  blocklengths(0:5)=1
306  blocklengths(6)=3
307  blocklengths(7)=3
308 
309  offsets(0) = 0
310  offsets(1) = size_logical * blocklengths(0)
311  offsets(2) = offsets(1) + size_int * blocklengths(1)
312  offsets(3) = offsets(2) + size_double * blocklengths(2)
313  offsets(4) = offsets(3) + size_double * blocklengths(3)
314  offsets(5) = offsets(4) + size_double * blocklengths(4)
315  offsets(6) = offsets(5) + size_double * blocklengths(5)
316  offsets(7) = offsets(6) + size_double * blocklengths(6)
317 
318  call mpi_type_struct(8,blocklengths,offsets,oldtypes,type_particle,ierrmpi)
319  call mpi_type_commit(type_particle,ierrmpi)
320 
321  end subroutine init_particles_com
322 
323  subroutine get_maxwellian_velocity(v, velocity)
324  double precision, intent(out) :: v(3)
325  double precision, intent(in) :: velocity
326  double precision :: vtmp(3), vnorm
327 
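 ! Sample a Maxwellian speed from three independent normal deviates and assign
 ! it to a random direction on the sphere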
328  vtmp(1) = velocity * rng%normal()
329  vtmp(2) = velocity * rng%normal()
330  vtmp(3) = velocity * rng%normal()
331  vnorm = norm2(vtmp)
332  v = rng%sphere(vnorm)
333  end subroutine get_maxwellian_velocity
334 
335  subroutine get_uniform_pos(x)
337  double precision, intent(out) :: x(3)
338 
339  call rng%unif_01_vec(x)
340 
341  {^d&x(^d) = xprobmin^d + x(^d) * (xprobmax^d - xprobmin^d)\}
342  x(ndim+1:) = 0.0d0
343  end subroutine get_uniform_pos
344 
345  !> Initialize grid variables for particles
346  subroutine init_gridvars()
348  use mod_timing
349 
350  integer :: igrid, iigrid
351 
352  tpartc_grid_0=mpi_wtime()
353 
354  do iigrid=1,igridstail; igrid=igrids(iigrid);
355  allocate(gridvars(igrid)%w(ixg^t,1:ngridvars),gridvars(igrid)%wold(ixg^t,1:ngridvars))
356  gridvars(igrid)%w = 0.0d0
357  gridvars(igrid)%wold = 0.0d0
358  end do
359 
360  call particles_fill_gridvars()
361  if (associated(particles_define_additional_gridvars)) then
362  call particles_fill_additional_gridvars()
363  end if
364 
365  tpartc_grid = tpartc_grid + (mpi_wtime()-tpartc_grid_0)
366 
367  end subroutine init_gridvars
368 
369  !> Deallocate grid variables for particles
370  subroutine finish_gridvars()
372 
373  integer :: iigrid, igrid
374 
375  do iigrid=1,igridstail; igrid=igrids(iigrid);
376  deallocate(gridvars(igrid)%w,gridvars(igrid)%wold)
377  end do
378 
379  end subroutine finish_gridvars
380 
381  !> This routine fills the particle grid variables with the default for mhd, i.e. only E and B
382  subroutine fill_gridvars_default()
385 
386  double precision :: E(ixG^T, ndir)
387  double precision :: B(ixG^T, ndir)
388  integer :: igrid, iigrid
389 
390  do iigrid=1,igridstail; igrid=igrids(iigrid);
391  if (associated(usr_particle_fields)) then ! FILL ONLY E AND B
392  call usr_particle_fields(ps(igrid)%w, ps(igrid)%x, e, b)
393  gridvars(igrid)%w(ixg^t,ep(:)) = e
394  gridvars(igrid)%w(ixg^t,bp(:)) = b
395  else
396  call fields_from_mhd(igrid, ps(igrid)%w, gridvars(igrid)%w)
397  end if
398  end do
399 
400  end subroutine fill_gridvars_default
401 
402  !> Determine fields from MHD variables
403  subroutine fields_from_mhd(igrid, w_mhd, w_part)
405  use mod_geometry
406  integer, intent(in) :: igrid
407  double precision, intent(in) :: w_mhd(ixG^T,nw)
408  double precision, intent(inout) :: w_part(ixG^T,ngridvars)
409 
410  double precision :: current(ixG^T,7-2*ndir:3)
411  double precision :: w(ixG^T,1:nw)
412  integer :: idirmin, idir
413 
414  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
415 
416  w(ixg^t,1:nw) = w_mhd(ixg^t,1:nw)
417  w_part(ixg^t,bp(1):bp(3)) = 0.0d0
418  w_part(ixg^t,ep(1):ep(3)) = 0.0d0
419 
420  call phys_to_primitive(ixg^ll,ixg^ll,w,ps(igrid)%x)
421 
422  ! fill with density:
423  w_part(ixg^t,rhop) = w(ixg^t,iw_rho)
424 
425  ! fill with velocity:
426  w_part(ixg^t,vp(:)) = w(ixg^t,iw_mom(:))
427 
428  ! fill with magnetic field:
429  if (b0field) then
430  do idir = 1, ndir
431  w_part(ixg^t,bp(idir))=w(ixg^t,iw_mag(idir))+ps(igrid)%B0(ixg^t,idir,0)
432  end do
433  else
434  w_part(ixg^t,bp(:)) = w(ixg^t,iw_mag(:))
435  end if
436 
437  ! fill with current
438  current = zero
439  call particle_get_current(w,ixg^ll,ixg^llim^d^lsub1,idirmin,current)
440  w_part(ixg^t,jp(:)) = current(ixg^t,:)
441 
442  ! fill with electric field
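 ! (resistive Ohm's law: E = -v x B + eta*J; the Hall term etah/rho * J x B is
 ! added below when particles_etah > 0)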
443  select case (coordinate)
444  case (Cartesian,Cartesian_stretched,Cartesian_expansion)
445  w_part(ixg^t,ep(1)) = w_part(ixg^t,bp(2)) * &
446  w(ixg^t,iw_mom(3)) - w_part(ixg^t,bp(3)) * &
447  w(ixg^t,iw_mom(2)) + particles_eta * current(ixg^t,1)
448  w_part(ixg^t,ep(2)) = w_part(ixg^t,bp(3)) * &
449  w(ixg^t,iw_mom(1)) - w_part(ixg^t,bp(1)) * &
450  w(ixg^t,iw_mom(3)) + particles_eta * current(ixg^t,2)
451  w_part(ixg^t,ep(3)) = w_part(ixg^t,bp(1)) * &
452  w(ixg^t,iw_mom(2)) - w_part(ixg^t,bp(2)) * &
453  w(ixg^t,iw_mom(1)) + particles_eta * current(ixg^t,3)
454  case (cylindrical)
455  w_part(ixg^t,ep(r_)) = w_part(ixg^t,bp(phi_)) * &
456  w(ixg^t,iw_mom(z_)) - w_part(ixg^t,bp(z_)) * &
457  w(ixg^t,iw_mom(phi_)) + particles_eta * current(ixg^t,r_)
458  w_part(ixg^t,ep(phi_)) = w_part(ixg^t,bp(z_)) * &
459  w(ixg^t,iw_mom(r_)) - w_part(ixg^t,bp(r_)) * &
460  w(ixg^t,iw_mom(z_)) + particles_eta * current(ixg^t,phi_)
461  w_part(ixg^t,ep(z_)) = w_part(ixg^t,bp(r_)) * &
462  w(ixg^t,iw_mom(phi_)) - w_part(ixg^t,bp(phi_)) * &
463  w(ixg^t,iw_mom(r_)) + particles_eta * current(ixg^t,z_)
464  end select
465 
466  ! Hall term
467  if (particles_etah > zero) then
468  select case (coordinate)
469  case (Cartesian,Cartesian_stretched,Cartesian_expansion)
470  w_part(ixg^t,ep(1)) = w_part(ixg^t,ep(1)) + particles_etah/w(ixg^t,iw_rho) * &
471  (current(ixg^t,2) * w_part(ixg^t,bp(3)) - current(ixg^t,3) * w_part(ixg^t,bp(2)))
472  w_part(ixg^t,ep(2)) = w_part(ixg^t,ep(2)) + particles_etah/w(ixg^t,iw_rho) * &
473  (-current(ixg^t,1) * w_part(ixg^t,bp(3)) + current(ixg^t,3) * w_part(ixg^t,bp(1)))
474  w_part(ixg^t,ep(3)) = w_part(ixg^t,ep(3)) + particles_etah/w(ixg^t,iw_rho) * &
475  (current(ixg^t,1) * w_part(ixg^t,bp(2)) - current(ixg^t,2) * w_part(ixg^t,bp(1)))
476  case (cylindrical)
477  w_part(ixg^t,ep(r_)) = w_part(ixg^t,ep(r_)) + particles_etah/w(ixg^t,iw_rho) * &
478  (current(ixg^t,phi_) * w_part(ixg^t,bp(z_)) - current(ixg^t,z_) * w_part(ixg^t,bp(phi_)))
479  w_part(ixg^t,ep(phi_)) = w_part(ixg^t,ep(phi_)) + particles_etah/w(ixg^t,iw_rho) * &
480  (-current(ixg^t,r_) * w_part(ixg^t,bp(z_)) + current(ixg^t,z_) * w_part(ixg^t,bp(r_)))
481  w_part(ixg^t,ep(z_)) = w_part(ixg^t,ep(z_)) + particles_etah/w(ixg^t,iw_rho) * &
482  (current(ixg^t,r_) * w_part(ixg^t,bp(phi_)) - current(ixg^t,phi_) * w_part(ixg^t,bp(r_)))
483  end select
484  end if
485 
486  end subroutine fields_from_mhd
487 
488  !> Calculate idirmin and the idirmin:3 components of the common current array
489  !> make sure that dxlevel(^D) is set correctly.
490  subroutine particle_get_current(w,ixI^L,ixO^L,idirmin,current)
492  use mod_geometry
493 
494  integer :: ixO^L, idirmin, ixI^L
495  double precision :: w(ixI^S,1:nw)
496  ! For ndir=2 only 3rd component of J can exist, ndir=1 is impossible for MHD
497  double precision :: current(ixI^S,7-2*ndir:3),bvec(ixI^S,1:ndir)
498  integer :: idir
499  integer :: idirmin0
500 
501  idirmin0 = 7-2*ndir
502 
503  if (b0field) then
504  do idir = 1, ndir
505  bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))+block%B0(ixi^s,idir,0)
506  end do
507  else
508  do idir = 1, ndir
509  bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))
510  end do
511  end if
512 
513  call curlvector(bvec,ixi^l,ixo^l,current,idirmin,idirmin0,ndir)
514 
515  end subroutine particle_get_current
516 
517  !> update grid variables for particles
518  subroutine update_gridvars()
520  use mod_timing
521 
522  integer :: igrid, iigrid
523 
524  tpartc_grid_0=mpi_wtime()
525 
526  do iigrid=1,igridstail; igrid=igrids(iigrid);
527  gridvars(igrid)%wold=gridvars(igrid)%w
528  end do
529 
530  call particles_fill_gridvars()
531  if (associated(particles_define_additional_gridvars)) then
532  call particles_fill_additional_gridvars()
533  end if
534 
535  tpartc_grid = tpartc_grid + (mpi_wtime()-tpartc_grid_0)
536 
537  end subroutine update_gridvars
538 
539  !> Let particles evolve in time. The routine also handles grid variables and
540  !> output.
541  subroutine handle_particles()
543  use mod_timing
544 
545  integer :: steps_taken, nparticles_left
546 
547  tpartc0 = mpi_wtime()
548 
549  call set_neighbor_ipe
550 
551  if (time_advance) then
553  call update_gridvars()
554  else
556  end if
557  tpartc_com0=mpi_wtime()
558  call comm_particles_global()
559  tpartc_com=tpartc_com + (mpi_wtime()-tpartc_com0)
560 
561 
562  ! main integration loop
563  do
564  if (tmax_particles >= t_next_output) then
565  call advance_particles(t_next_output, steps_taken)
566  tpartc_io_0 = mpi_wtime()
567  if (mype .eq. 0 .and. (.not. time_advance)) print*, "Writing particle output at time",t_next_output
568  call write_particle_output()
569  if (.not. time_advance) call write_particles_snapshot()
570  timeio_tot = timeio_tot+(mpi_wtime()-tpartc_io_0)
571  tpartc_io = tpartc_io+(mpi_wtime()-tpartc_io_0)
572 
573  ! If we are using a constant time step, this prevents multiple outputs
574  ! at the same time
577  else
578  call advance_particles(tmax_particles, steps_taken)
579 ! call write_particle_output()
580  exit
581  end if
582 
583  call mpi_allreduce(nparticles_on_mype, nparticles_left, 1, mpi_integer, &
584  mpi_sum, icomm, ierrmpi)
585  if (nparticles_left == 0 .and. convert) call mpistop('No particles left')
586 
587  end do
588 
589  tpartc = tpartc + (mpi_wtime() - tpartc0)
590 
591  end subroutine handle_particles
592 
593  !> Routine handles the particle evolution
594  subroutine advance_particles(end_time, steps_taken)
595  use mod_timing
597  double precision, intent(in) :: end_time !< Advance at most up to this time
598  integer, intent(out) :: steps_taken
599  integer :: n_active
600 
601  steps_taken = 0
602 
603  do
604  call select_active_particles(end_time)
605 
606  ! Determine total number of active particles
607  call mpi_allreduce(nparticles_active_on_mype, n_active, 1, mpi_integer, &
608  mpi_sum, icomm, ierrmpi)
609 
610  if (n_active == 0) exit
611 
612  tpartc_int_0=mpi_wtime()
613  call particles_integrate(end_time)
614  steps_taken = steps_taken + 1
615  tpartc_int=tpartc_int+(mpi_wtime()-tpartc_int_0)
616 
617  tpartc_com0=mpi_wtime()
618  call comm_particles()
619  tpartc_com=tpartc_com + (mpi_wtime()-tpartc_com0)
620 
621  it_particles = it_particles + 1
622  end do
623 
624  end subroutine advance_particles
625 
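 !> Restrict dt_p near the end of integration: take the remaining time t_left in
 !> one step, or split it over two steps to avoid a very small final step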
626  pure subroutine limit_dt_endtime(t_left, dt_p)
627  double precision, intent(in) :: t_left
628  double precision, intent(inout) :: dt_p
629  double precision :: n_steps
630  double precision, parameter :: eps = 1d-10
631 
632  n_steps = t_left / dt_p
633 
634  if (n_steps < 1+eps) then
635  ! Last step
636  dt_p = t_left
637  else if (n_steps < 2-eps) then
638  ! Divide time left over two steps, to prevent one tiny time step
639  dt_p = 0.5d0 * t_left
640  end if
641 
642  end subroutine limit_dt_endtime
643 
644  ! Get a generic vector quantity stored on the grid at the particle position
645  ! NOTE: DON'T GET E OR B IN THIS WAY, USE FUNCTIONS BELOW!
646  subroutine get_vec(ix,igrid,x,tloc,vec)
649 
650  integer,intent(in) :: ix(ndir) !< Indices in gridvars
651  integer,intent(in) :: igrid
652  double precision,dimension(3), intent(in) :: x
653  double precision, intent(in) :: tloc
654  double precision,dimension(ndir), intent(out) :: vec
655  double precision,dimension(ndir) :: vec1, vec2
656  double precision :: td, bb
657  integer :: ic^D,idir
658 
659  if (associated(usr_particle_analytic)) then
660  call usr_particle_analytic(ix, x, tloc, vec)
661  else
662  do idir=1,ndir
663  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,ix(idir)), &
664  ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
665  end do
666 
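 ! For time-dependent fields, interpolate linearly in time between the stored
 ! previous (wold) and current (w) grid states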
667  if (time_advance) then
668  do idir=1,ndir
669  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,ix(idir)), &
670  ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
671  end do
672 
673  ! Interpolate in time
674  td = (tloc - global_time) / dt
675  vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td
676  end if
677 
678  ! Fabio June 2024: Renormalize vector by its grid-evaluated norm
679  if (interp_type_particles=='renormalized') then
680  if (time_advance) then
681  if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
682  call interpolate_var(igrid,ixg^ll,ixm^ll, &
683  sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1)*td &
684  +sum(gridvars(igrid)%wold(ixg^t,ix(:))**2,dim=ndim+1)*(1.d0-td), &
685  ps(igrid)%x(ixg^t,1:ndim),x,bb)
686  end if
687  else
688  if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
689  call interpolate_var(igrid,ixg^ll,ixm^ll, &
690  sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1), &
691  ps(igrid)%x(ixg^t,1:ndim),x,bb)
692  end if
693  vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
694  end if
695  end if
696  end if
697 
698  end subroutine get_vec
699 
700  subroutine get_bfield(igrid,x,tloc,b)
703 
704  integer,intent(in) :: igrid
705  double precision,dimension(3), intent(in) :: x
706  double precision, intent(in) :: tloc
707  double precision,dimension(ndir), intent(out) :: b
708  double precision,dimension(ndir) :: vec1, vec2, vec
709  double precision :: td, bb
710  integer :: ic^D,idir
711 
712  if (associated(usr_particle_analytic)) then
713  call usr_particle_analytic(bp, x, tloc, vec)
714  else if (associated(usr_particle_fields)) then
715  call get_vec(bp, igrid, x, tloc, vec)
716  else
717  do idir=1,ndir
718  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,bp(idir)), &
719  ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
720  end do
721 
722  if (time_advance) then
723  do idir=1,ndir
724  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,bp(idir)), &
725  ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
726  end do
727 
728  ! Interpolate in time
729  td = (tloc - global_time) / dt
730  vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td
731  end if
732 
733  ! Fabio June 2024: Renormalize vector by its grid-evaluated norm
734  if (interp_type_particles=='renormalized') then
735  if (time_advance) then
736  if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
737  call interpolate_var(igrid,ixg^ll,ixm^ll, &
738  sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1)*td &
739  +sum(gridvars(igrid)%wold(ixg^t,bp(:))**2,dim=ndim+1)*(1.d0-td), &
740  ps(igrid)%x(ixg^t,1:ndim),x,bb)
741  end if
742  else
743  if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
744  call interpolate_var(igrid,ixg^ll,ixm^ll, &
745  sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1), &
746  ps(igrid)%x(ixg^t,1:ndim),x,bb)
747  end if
748  vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
749  end if
750  end if
751 
752  end if
753 
754  b(:) = vec(:)
755 
756  end subroutine get_bfield
757 
758  ! Shorthand for getting specifically the E field at the particle position
759  subroutine get_efield(igrid,x,tloc,e)
762  use mod_geometry
763 
764  integer,intent(in) :: igrid
765  double precision,dimension(3), intent(in) :: x
766  double precision, intent(in) :: tloc
767  double precision,dimension(ndir), intent(out) :: e
768  double precision,dimension(ndir) :: vec1, vec2, vec
769  double precision,dimension(ndir) :: vfluid, b
770  double precision,dimension(ndir) :: current
771  double precision :: rho = 1.d0, rho2 = 1.d0
772  double precision :: td
773  integer :: ic^D,idir
774 
775  current(:) = 0.d0
776 
777  if (associated(usr_particle_analytic)) then
778  call usr_particle_analytic(ep, x, tloc, vec)
779  else if (associated(usr_particle_fields)) then
780  call get_vec(ep, igrid, x, tloc, vec)
781  else
782  call get_bfield(igrid, x, tloc, b)
783  call get_vec(vp, igrid, x, tloc, vfluid)
784  if (particles_eta > zero) then
785  call get_vec(jp, igrid, x, tloc, current)
786  end if
787  if (particles_etah > zero) then
788  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,rhop),ps(igrid)%x(ixg^t,1:ndim),x,rho)
789  if (time_advance) then
790  call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,rhop),ps(igrid)%x(ixg^t,1:ndim),x,rho2)
791  td = (tloc - global_time) / dt
792  rho = rho2 * (1.0d0 - td) + rho * td
793  end if
794  end if
795 
796  ! Build E
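 ! E = -vfluid x B + eta*J + (etah/rho) J x B, written out per coordinate system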
797  select case (coordinate)
798  case (Cartesian,Cartesian_stretched,Cartesian_expansion)
799  vec(1) = -vfluid(2)*b(3)+vfluid(3)*b(2) + particles_eta*current(1) + particles_etah/rho*(current(2)*b(3) - current(3)*b(2))
800  vec(2) = vfluid(1)*b(3)-vfluid(3)*b(1) + particles_eta*current(2) + particles_etah/rho*(-current(1)*b(3) + current(3)*b(1))
801  vec(3) = -vfluid(1)*b(2)+vfluid(2)*b(1) + particles_eta*current(3) + particles_etah/rho*(current(1)*b(2) - current(2)*b(1))
802  case (cylindrical)
803  vec(r_) = -vfluid(phi_)*b(z_)+vfluid(z_)*b(phi_) + particles_eta*current(r_) + particles_etah/rho*(current(phi_)*b(z_) - current(z_)*b(phi_))
804  vec(phi_) = vfluid(r_)*b(z_)-vfluid(z_)*b(r_) + particles_eta*current(phi_) + particles_etah/rho*(-current(r_)*b(z_) + current(z_)*b(r_))
805  vec(z_) = -vfluid(r_)*b(phi_)+vfluid(phi_)*b(r_) + particles_eta*current(z_) + particles_etah/rho*(current(r_)*b(phi_) - current(phi_)*b(r_))
806  end select
807  end if
808 
809  e(:) = vec(:)
810 
811  end subroutine get_efield
812 
813  !> Get Lorentz factor from relativistic momentum
814  pure subroutine get_lfac(u,lfac)
815  use mod_global_parameters, only: ndir, c_norm
816  double precision,dimension(3), intent(in) :: u
817  double precision, intent(out) :: lfac
818 
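 ! The formula assumes u = lfac*v (relativistic momentum per unit mass), so
 ! lfac = sqrt(1 + |u|^2/c_norm^2)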
819  if (relativistic) then
820  lfac = sqrt(1.0d0 + sum(u(:)**2)/c_norm**2)
821  else
822  lfac = 1.0d0
823  end if
824 
825  end subroutine get_lfac
826 
827  !> Get Lorentz factor from velocity
828  pure subroutine get_lfac_from_velocity(v,lfac)
829  use mod_global_parameters, only: ndir, c_norm
830  double precision,dimension(3), intent(in) :: v
831  double precision, intent(out) :: lfac
832 
833  if (relativistic) then
834  lfac = 1.0d0 / sqrt(1.0d0 - sum(v(:)**2)/c_norm**2)
835  else
836  lfac = 1.0d0
837  end if
838 
839  end subroutine get_lfac_from_velocity
840 
841  subroutine cross(a,b,c)
843  use mod_geometry
844  double precision, dimension(ndir), intent(in) :: a,b
845  double precision, dimension(ndir), intent(out) :: c
846 
847  ! ndir needs to be three for this to work!!!
848  if(ndir==3) then
849  select case(coordinate)
850  case (Cartesian,Cartesian_stretched,Cartesian_expansion)
851  c(1) = a(2)*b(3) - a(3)*b(2)
852  c(2) = a(3)*b(1) - a(1)*b(3)
853  c(3) = a(1)*b(2) - a(2)*b(1)
854  case (cylindrical)
855  c(r_) = a(phi_)*b(z_) - a(z_)*b(phi_)
856  c(phi_) = a(z_)*b(r_) - a(r_)*b(z_)
857  c(z_) = a(r_)*b(phi_) - a(phi_)*b(r_)
858  case default
859  call mpistop('geometry not implemented in cross(a,b,c)')
860  end select
861  else
862  call mpistop("cross in particles needs to be run with three components!")
863  end if
864 
865  end subroutine cross
866 
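 !> Bi-/tri-linear interpolation of the grid array gf, defined at cell centers x,
 !> to the point xloc; the result is returned in gfloc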
867  subroutine interpolate_var(igrid,ixI^L,ixO^L,gf,x,xloc,gfloc)
869 
870  integer, intent(in) :: igrid,ixI^L, ixO^L
871  double precision, intent(in) :: gf(ixI^S)
872  double precision, intent(in) :: x(ixI^S,1:ndim)
873  double precision, intent(in) :: xloc(1:3)
874  double precision, intent(out) :: gfloc
875  double precision :: xd^D, myq, dx1
876  {^iftwod
877  double precision :: c00, c10
878  }
879  {^ifthreed
880  double precision :: c0, c1, c00, c10, c01, c11
881  }
882  integer :: ic^D, ic1^D, ic2^D, idir
883 
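 ! Find the index ic^D of the cell containing xloc in each direction, taking
 ! uniform or symmetric grid stretching into account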
884  {
885  if(stretch_type(^d)==stretch_uni) then
886  ! uniform stretch from xprobmin
887  ic^d = ceiling(dlog((xloc(^d)-xprobmin^d)*(qstretch(ps(igrid)%level,^d)-one)/&
888  dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
889  (node(pig^d_,igrid)-1)*block_nx^d+nghostcells
890  else if(stretch_type(^d)==stretch_symm) then
891  ! symmetric stretch about 0.5*(xprobmin+xprobmax)
892  if(xloc(^d)<xprobmin^d+xstretch^d) then
893  ! stretch to left from xprobmin+xstretch
894  ic^d = block_nx^d-(int(dlog((xprobmin^d+xstretch^d-xloc(^d))*(qstretch(ps(igrid)%level,^d)-one)/&
895  dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
896  (nstretchedblocks(ps(igrid)%level,^d)/2-node(pig^d_,igrid))*block_nx^d)+nghostcells
897  else if(xloc(^d)>xprobmax^d-xstretch^d) then
898  ! stretch to right from xprobmax-xstretch
899  ic^d = ceiling(dlog((xloc(^d)-xprobmax^d+xstretch^d)*(qstretch(ps(igrid)%level,^d)-one)/&
900  dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
901  ((node(pig^d_,igrid)-1+nstretchedblocks(ps(igrid)%level,^d)/2)*block_nx^d-&
902  domain_nx^d*2**(ps(igrid)%level-1))+nghostcells
903  else
904  ! possible non-stretched central part
905  ic^d = int((xloc(^d)-rnode(rpxmin^d_,igrid))/rnode(rpdx^d_,igrid)) + 1 + nghostcells
906  end if
907  else
908  ! no stretch
909  ic^d = int((xloc(^d)-rnode(rpxmin^d_,igrid))/rnode(rpdx^d_,igrid)) + 1 + nghostcells
910  end if
911  \}
912 
913  ! linear interpolation:
914  {
915  if (x({ic^dd},^d) .lt. xloc(^d)) then
916  ic1^d = ic^d
917  else
918  ic1^d = ic^d -1
919  end if
920  ic2^d = ic1^d + 1
921  \}
922 
923  {^d&
924  if(ic1^d.lt.ixglo^d+1 .or. ic2^d.gt.ixghi^d-1) then
925  print *, 'direction: ',^d
926  print *, 'position: ',xloc(1:ndim)
927  print *, 'indices:', ic1^d,ic2^d
928  call mpistop('Trying to interpolate from out of grid!')
929  end if
930  \}
931 
932  {^ifoned
933  xd1 = (xloc(1)-x(ic11,1)) / (x(ic21,1) - x(ic11,1))
934  gfloc = gf(ic11) * (1.0d0 - xd1) + gf(ic21) * xd1
935  }
936  {^iftwod
937  xd1 = (xloc(1)-x(ic11,ic12,1)) / (x(ic21,ic12,1) - x(ic11,ic12,1))
938  xd2 = (xloc(2)-x(ic11,ic12,2)) / (x(ic11,ic22,2) - x(ic11,ic12,2))
939  c00 = gf(ic11,ic12) * (1.0d0 - xd1) + gf(ic21,ic12) * xd1
940  c10 = gf(ic11,ic22) * (1.0d0 - xd1) + gf(ic21,ic22) * xd1
941  gfloc = c00 * (1.0d0 - xd2) + c10 * xd2
942  }
943  {^ifthreed
944  xd1 = (xloc(1)-x(ic11,ic12,ic13,1)) / (x(ic21,ic12,ic13,1) - x(ic11,ic12,ic13,1))
945  xd2 = (xloc(2)-x(ic11,ic12,ic13,2)) / (x(ic11,ic22,ic13,2) - x(ic11,ic12,ic13,2))
946  xd3 = (xloc(3)-x(ic11,ic12,ic13,3)) / (x(ic11,ic12,ic23,3) - x(ic11,ic12,ic13,3))
947 
948  c00 = gf(ic11,ic12,ic13) * (1.0d0 - xd1) + gf(ic21,ic12,ic13) * xd1
949  c10 = gf(ic11,ic22,ic13) * (1.0d0 - xd1) + gf(ic21,ic22,ic13) * xd1
950  c01 = gf(ic11,ic12,ic23) * (1.0d0 - xd1) + gf(ic21,ic12,ic23) * xd1
951  c11 = gf(ic11,ic22,ic23) * (1.0d0 - xd1) + gf(ic21,ic22,ic23) * xd1
952 
953  c0 = c00 * (1.0d0 - xd2) + c10 * xd2
954  c1 = c01 * (1.0d0 - xd2) + c11 * xd2
955 
956  gfloc = c0 * (1.0d0 - xd3) + c1 * xd3
957  }
958 
959  end subroutine interpolate_var
960 
961  subroutine time_spent_on_particles()
962  use mod_timing
964 
965  double precision :: tpartc_avg, tpartc_int_avg, tpartc_io_avg, tpartc_com_avg, tpartc_grid_avg
966 
967  call mpi_reduce(tpartc,tpartc_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
968  call mpi_reduce(tpartc_int,tpartc_int_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
969  call mpi_reduce(tpartc_io,tpartc_io_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
970  call mpi_reduce(tpartc_com,tpartc_com_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
971  call mpi_reduce(tpartc_grid,tpartc_grid_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
972 
973  if (mype ==0) then
974  tpartc_avg = tpartc_avg/dble(npe)
975  tpartc_int_avg = tpartc_int_avg/dble(npe)
976  tpartc_io_avg = tpartc_io_avg/dble(npe)
977  tpartc_com_avg = tpartc_com_avg/dble(npe)
978  tpartc_grid_avg = tpartc_grid_avg/dble(npe)
979  write(*,'(a,f12.3,a)')' Particle handling took : ',tpartc,' sec'
980  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc/(timeloop+smalldouble),' %'
981  write(*,'(a,f12.3,a)')' Particle IO took : ',tpartc_io_avg,' sec'
982  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_io_avg/(timeloop+smalldouble),' %'
983  write(*,'(a,f12.3,a)')' Particle COM took : ',tpartc_com_avg,' sec'
984  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_com_avg/(timeloop+smalldouble),' %'
985  write(*,'(a,f12.3,a)')' Particle integration took : ',tpartc_int_avg,' sec'
986  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_int_avg/(timeloop+smalldouble),' %'
987  write(*,'(a,f12.3,a)')' Particle init grid took : ',tpartc_grid_avg,' sec'
988  write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_grid_avg/(timeloop+smalldouble),' %'
989  end if
990 
991  end subroutine time_spent_on_particles
992 
993  subroutine read_particles_snapshot(file_exists)
995 
996  logical,intent(out) :: file_exists
997  character(len=std_len) :: filename
998  integer :: mynpayload, mynparticles, pos
999 
1000  ! some initialisations:
1001  nparticles_on_mype = 0
1002  mynparticles = 0
1003  nparticles = 0
1004  it_particles = 0
1005 
1006  ! open the snapshot file on the headnode
1007  file_exists=.false.
1008  if (mype == 0) then
1009 ! write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotini,'.dat'
1010  ! Strip restart_from_filename of the ending
1011  pos = scan(restart_from_file, '.dat', back=.true.)
1012  write(filename,"(a,a,i4.4,a)") trim(restart_from_file(1:pos-8)),'_particles',snapshotini,'.dat'
1013  INQUIRE(file=filename, exist=file_exists)
1014  if (.not. file_exists) then
1015  write(*,*) 'WARNING: File '//trim(filename)//' with particle data does not exist.'
1016  write(*,*) 'Initialising particles from user or default routines'
1017  else
1018  open(unit=unitparticles,file=filename,form='unformatted',action='read',status='unknown',access='stream')
1019  read(unitparticles) nparticles,it_particles,mynpayload
1020  if (mynpayload .ne. npayload) &
1021  call mpistop('npayload in restart file does not match npayload in mod_particles')
1022  end if
1023  end if
1024 
1025  call mpi_bcast(file_exists,1,mpi_logical,0,icomm,ierrmpi)
1026  if (.not. file_exists) return
1027 
1028  call mpi_bcast(nparticles,1,mpi_integer,0,icomm,ierrmpi)
1029  call mpi_bcast(it_particles,1,mpi_integer,0,icomm,ierrmpi)
1030 
1031  do while (mynparticles .lt. nparticles)
1032  if (mype == 0) then
1033  do while (nparticles_on_mype .lt. nparticles_per_cpu_hi &
1034  .and. mynparticles .lt. nparticles)
1035  call read_from_snapshot
1036  mynparticles = mynparticles + 1
1037  end do
1038  end if ! mype==0
1039  call mpi_bcast(mynparticles,1,mpi_integer,0,icomm,ierrmpi)
1041  end do
1042 
1043  if (mype == 0) close(unit=unitparticles)
1044 
1045  end subroutine read_particles_snapshot
1046 
1047  subroutine read_from_snapshot()
1048 
1049  integer :: index,igrid_particle,ipe_particle
1050 
1051  read(unitparticles) index
1052  allocate(particle(index)%self)
1053  allocate(particle(index)%payload(1:npayload))
1054  particle(index)%self%index = index
1055 
1056  read(unitparticles) particle(index)%self%follow
1057  read(unitparticles) particle(index)%self%q
1058  read(unitparticles) particle(index)%self%m
1059  read(unitparticles) particle(index)%self%time
1060  read(unitparticles) particle(index)%self%dt
1061  read(unitparticles) particle(index)%self%x
1062  read(unitparticles) particle(index)%self%u
1063  read(unitparticles) particle(index)%payload(1:npayload)
1064 
1065 ! if (particle(index)%self%follow) print*, 'follow index:', index
1066 
1067  call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
1068  particle(index)%igrid = igrid_particle
1069  particle(index)%ipe = ipe_particle
1070 
1072 
1073  end subroutine read_from_snapshot
1074 
1075  subroutine write_particles_snapshot()
1077 
1078  character(len=std_len) :: filename
1079 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1080 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
1081 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1082 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1083  type(particle_t), allocatable, dimension(:) :: send_particles
1084  type(particle_t), allocatable, dimension(:) :: receive_particles
1085  double precision, allocatable, dimension(:,:) :: send_payload
1086  double precision, allocatable, dimension(:,:) :: receive_payload
1087  integer :: status(MPI_STATUS_SIZE)
1088  integer,dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1089  integer :: ipe, ipart, iipart, send_n_particles_for_output
1090  logical,save :: file_exists=.false.
1091  integer :: snapshotnumber
1092 
1093  if (.not. write_snapshot) return
1094 
1095  if (time_advance) then
1096  snapshotnumber = snapshotnext
1097  else
1098  snapshotnumber = nint(t_next_output/dtsave_particles)
1099  end if
1100 
1101  receive_n_particles_for_output_from_ipe(:) = 0
1102 
1103  ! open the snapshot file on the headnode
1104  if (mype .eq. 0) then
1105  write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotnumber,'.dat'
1106  INQUIRE(file=filename, exist=file_exists)
1107  if (.not. file_exists) then
1108  open(unit=unitparticles,file=filename,form='unformatted',status='new',access='stream')
1109  else
1110  open(unit=unitparticles,file=filename,form='unformatted',status='replace',access='stream')
1111  end if
1112  write(unitparticles) nparticles,it_particles,npayload
1113  end if
1114  if (npe==1) then
1115  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1116  call append_to_snapshot(particle(ipart)%self,particle(ipart)%payload)
1117  end do
1118  return
1119  end if
1120 
1121  if (mype .ne. 0) then
1122  call mpi_send(nparticles_on_mype,1,mpi_integer,0,mype,icomm,ierrmpi)
1123  ! fill the send_buffer
1124  send_n_particles_for_output = nparticles_on_mype
1125  allocate(send_particles(1:send_n_particles_for_output))
1126  allocate(send_payload(1:npayload,1:send_n_particles_for_output))
1127  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1128  send_particles(iipart) = particle(ipart)%self
1129  send_payload(1:npayload,iipart) = particle(ipart)%payload(1:npayload)
1130  end do
1131  else
1132  ! get number of particles on other ipes
1133  do ipe=1,npe-1
1134  call mpi_recv(receive_n_particles_for_output_from_ipe(ipe),1,mpi_integer,ipe,ipe,icomm,status,ierrmpi)
1135  end do
1136  end if
1137 
1138  if (mype .ne. 0) then
1139  call mpi_send(send_particles,send_n_particles_for_output,type_particle,0,mype,icomm,ierrmpi)
1140  call mpi_send(send_payload,npayload*send_n_particles_for_output,mpi_double_precision,0,mype,icomm,ierrmpi)
1141  deallocate(send_particles)
1142  deallocate(send_payload)
1143  end if
1144 
1145  if (mype==0) then
1146  ! first output own particles (already in receive buffer)
1147  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1148  call append_to_snapshot(particle(ipart)%self,particle(ipart)%payload)
1149  end do
1150  ! now output the particles sent from the other ipes
1151  do ipe=1,npe-1
1152  allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
1153  allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
1154  call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1155  call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1156  do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1157  call append_to_snapshot(receive_particles(ipart),receive_payload(1:npayload,ipart))
1158  end do ! ipart
1159  deallocate(receive_particles)
1160  deallocate(receive_payload)
1161  end do ! ipe
1162  close(unit=unitparticles)
1163  end if ! mype == 0
1164 
1165  end subroutine write_particles_snapshot
1166 
1167  subroutine append_to_snapshot(myparticle,mypayload)
1168 
1169  type(particle_t),intent(in) :: myparticle
1170  double precision :: mypayload(1:npayload)
1171 
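 ! The field order below must match the reads in read_from_snapshot:
 ! index, follow, q, m, time, dt, x, u, payload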
1172  write(unitparticles) myparticle%index
1173  write(unitparticles) myparticle%follow
1174  write(unitparticles) myparticle%q
1175  write(unitparticles) myparticle%m
1176  write(unitparticles) myparticle%time
1177  write(unitparticles) myparticle%dt
1178  write(unitparticles) myparticle%x
1179  write(unitparticles) myparticle%u
1180  write(unitparticles) mypayload(1:npayload)
1181 
1182  end subroutine append_to_snapshot
1183 
1184  subroutine init_particles_output()
1186 
1187  integer :: iipart, ipart, icomp
1188  character(len=std_len) :: filename
1189  character(len=1024) :: line, strdata
1190 
1191  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1192  if (particle(ipart)%self%follow) then
1193  filename = make_particle_filename(particle(ipart)%self%time,particle(ipart)%self%index,'individual')
1194  open(unit=unitparticles,file=filename,status='replace')
1195  line=''
1196  do icomp=1, npayload
1197  write(strdata,"(a,i2.2,a)") 'payload',icomp,','
1198  line = trim(line)//trim(strdata)
1199  end do
1200  write(unitparticles,"(a,a,a)") 'time,dt,x1,x2,x3,u1,u2,u3,',trim(line),'ipe,iteration,index'
1201  close(unit=unitparticles)
1202  end if
1203  end do
1204 
1205  end subroutine init_particles_output
1206 
1207  subroutine select_active_particles(end_time)
1209  double precision, intent(in) :: end_time
1210 
1211  double precision :: t_min_mype
1212  integer :: ipart, iipart
1213  logical :: activate
1214 
1215  t_min_mype = bigdouble
1216  nparticles_active_on_mype = 0
1217 
1218  do iipart = 1, nparticles_on_mype
1219  ipart = particles_on_mype(iipart);
1220  activate = (particle(ipart)%self%time < end_time)
1221  t_min_mype = min(t_min_mype, particle(ipart)%self%time)
1222 
1223  if (activate) then
1224  nparticles_active_on_mype = nparticles_active_on_mype + 1
1225  particles_active_on_mype(nparticles_active_on_mype) = ipart
1226  end if
1227  end do
1228 
1229  call mpi_allreduce(t_min_mype, min_particle_time, 1, mpi_double_precision, &
1230  mpi_min, icomm, ierrmpi)
1231 
1232  end subroutine select_active_particles
1233 
1234  subroutine locate_particle(index,igrid_particle,ipe_particle)
1235  ! given the particle's unique index, tell me on which cpu and igrid it is
1236  ! returns -1,-1 if particle was not found
1238 
1239  integer, intent(in) :: index
1240  integer, intent(out) :: igrid_particle, ipe_particle
1241  integer :: iipart,ipart,ipe_has_particle,ipe
1242  integer,dimension(0:1) :: buff
1243  logical :: has_particle(0:npe-1)
1244 
1245  has_particle(:) = .false.
1246  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1247  if (particle(ipart)%self%index == index) then
1248  has_particle(mype) = .true.
1249  exit
1250  end if
1251  end do
1252 
1253  if (has_particle(mype)) then
1254  buff(0) = particle(ipart)%igrid
1255  buff(1) = mype
1256  end if
1257 
1258  if (npe>0) call mpi_allgather(has_particle(mype),1,mpi_logical,has_particle,1,mpi_logical,icomm,ierrmpi)
1259 
1260  ipe_has_particle = -1
1261  do ipe=0,npe-1
1262  if (has_particle(ipe) .eqv. .true.) then
1263  ipe_has_particle = ipe
1264  exit
1265  end if
1266  end do
1267 
1268  if (ipe_has_particle .ne. -1) then
1269  if (npe>0) call mpi_bcast(buff,2,mpi_integer,ipe_has_particle,icomm,ierrmpi)
1270  igrid_particle=buff(0)
1271  ipe_particle = buff(1)
1272  else
1273  igrid_particle=-1
1274  ipe_particle = -1
1275  end if
1276 
1277  end subroutine locate_particle
1278 
1279  subroutine find_particle_ipe(x,igrid_particle,ipe_particle)
1280  use mod_forest, only: tree_node_ptr, tree_root
1281  use mod_slice, only: get_igslice
1283 
1284  double precision, intent(in) :: x(3)
1285  integer, intent(out) :: igrid_particle, ipe_particle
1286  integer :: ig(ndir,nlevelshi), ig_lvl(nlevelshi)
1287  integer :: idim, ic(ndim)
1288  type(tree_node_ptr) :: branch
1289 
1290  ! first check if the particle is in the domain
1291  if (.not. particle_in_domain(x)) then
1292  igrid_particle = -1
1293  ipe_particle = -1
1294  return
1295  end if
1296 
1297  ! get the index on each level
1298  do idim = 1, ndim
1299  call get_igslice(idim,x(idim), ig_lvl)
1300  ig(idim,:) = ig_lvl
1301  end do
1302 
1303  ! traverse the tree until leaf is found
1304  branch=tree_root({ig(^d,1)})
1305  do while (.not.branch%node%leaf)
1306  {ic(^d)=ig(^d,branch%node%level+1) - 2 * branch%node%ig^d +2\}
1307  branch%node => branch%node%child({ic(^d)})%node
1308  end do
1309 
1310  igrid_particle = branch%node%igrid
1311  ipe_particle = branch%node%ipe
1312 
1313  end subroutine find_particle_ipe
1314 
1315  !> Check if particle is inside computational domain
1316  logical function particle_in_domain(x)
1318  double precision, dimension(ndim), intent(in) :: x
1319 
1320  particle_in_domain = all(x >= [ xprobmin^d ]) .and. &
1321  all(x < [ xprobmax^d ]) ! Jannis: as before < instead of <= here, necessary?
1322  end function particle_in_domain
1323 
1324  !> Quick check if particle is still in igrid
1325  logical function particle_in_igrid(ipart, igrid)
1327  integer, intent(in) :: igrid, ipart
1328  double precision :: x(ndim), grid_rmin(ndim), grid_rmax(ndim)
1329 
1330  ! First check if the igrid is still there
1331  if (.not. allocated(ps(igrid)%w)) then
1332  particle_in_igrid = .false.
1333  else
1334  grid_rmin = [ {rnode(rpxmin^d_,igrid)} ]
1335  grid_rmax = [ {rnode(rpxmax^d_,igrid)} ]
1336  x = particle(ipart)%self%x(1:ndim)
1337  particle_in_igrid = all(x >= grid_rmin) .and. all(x < grid_rmax)
1338  end if
1339  end function particle_in_igrid
1340 
1341  subroutine set_neighbor_ipe()
1343 
1344  integer :: igrid, iigrid,ipe
1345  integer :: my_neighbor_type, i^D
1346  logical :: ipe_is_neighbor(0:npe-1)
1347 
1348  ipe_is_neighbor(:) = .false.
1349  do iigrid=1,igridstail; igrid=igrids(iigrid);
1350 
1351  {do i^db=-1,1\}
1352  if (i^d==0|.and.) then
1353  cycle
1354  end if
1355  my_neighbor_type=neighbor_type(i^d,igrid)
1356 
1357  select case (my_neighbor_type)
1358  case (neighbor_boundary) ! boundary
1359  ! do nothing
1360  case (neighbor_coarse) ! fine-coarse
1361  call ipe_fc(i^d,igrid,ipe_is_neighbor)
1362  case (neighbor_sibling) ! same level
1363  call ipe_srl(i^d,igrid,ipe_is_neighbor)
1364  case (neighbor_fine) ! coarse-fine
1365  call ipe_cf(i^d,igrid,ipe_is_neighbor)
1366  end select
1367 
1368  {end do\}
1369 
1370  end do
1371 
1372  ! remove self as neighbor
1373  ipe_is_neighbor(mype) = .false.
1374 
1375  ! Now make the list of neighbors
1376  npe_neighbors = 0
1377  do ipe=0,npe-1
1378  if (ipe_is_neighbor(ipe)) then
1379  npe_neighbors = npe_neighbors + 1
1380  ipe_neighbor(npe_neighbors) = ipe
1381  end if
1382  end do ! ipe
1383 
1384  end subroutine set_neighbor_ipe
1385 
1386  subroutine ipe_fc(i^D,igrid,ipe_is_neighbor)
1388  integer, intent(in) :: i^D, igrid
1389  logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1390 
1391  ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1392 
1393  end subroutine ipe_fc
1394 
1395  subroutine ipe_srl(i^D,igrid,ipe_is_neighbor)
1397  integer, intent(in) :: i^D, igrid
1398  logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1399 
1400  ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1401 
1402  end subroutine ipe_srl
1403 
1404  subroutine ipe_cf(i^D,igrid,ipe_is_neighbor)
1406  integer, intent(in) :: i^D, igrid
1407  logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1408  integer :: ic^D, inc^D
1409 
1410  {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1411  inc^db=2*i^db+ic^db
1412  \}
1413  ipe_is_neighbor( neighbor_child(2,inc^d,igrid) ) = .true.
1414 
1415  {end do\}
1416 
1417  end subroutine ipe_cf
1418 
1419  subroutine write_particle_output()
1421 
1422  character(len=std_len) :: filename
1423 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1424 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1425  type(particle_t), allocatable, dimension(:) :: send_particles
1426  double precision, allocatable, dimension(:,:) :: send_payload
1427  double precision :: tout
1428  integer :: send_n_particles_for_output, cc
1429  integer :: nout
1430  integer :: ipart,iipart
1431 
1432  if (write_individual) then
1433  call output_individual()
1434  end if
1435 
1436  if (write_ensemble) then
1437  send_n_particles_for_output = 0
1438 
1439  do iipart=1,nparticles_on_mype
1440  ipart=particles_on_mype(iipart);
1441 
1442  ! have to send particle to rank zero for output
1443  if (mod(particle(ipart)%self%index,downsample_particles) .eq. 0) then
1444  send_n_particles_for_output = send_n_particles_for_output + 1
1445  end if
1446  end do
1447 
1448  allocate(send_particles(1:send_n_particles_for_output))
1449  allocate(send_payload(npayload,1:send_n_particles_for_output))
1450  cc = 1
1451  do iipart=1,nparticles_on_mype
1452  ipart=particles_on_mype(iipart);
1453 
1454  ! have to send particle to rank zero for output
1455  if (mod(particle(ipart)%self%index,downsample_particles) .eq. 0) then
1456  send_particles(cc) = particle(ipart)%self
1457  send_payload(1:npayload,cc) = particle(ipart)%payload(1:npayload)
1458  cc=cc+1
1459  end if
1460  end do
1461 
1462  call output_ensemble(send_n_particles_for_output,send_particles, &
1463  send_payload,'ensemble')
1464  deallocate(send_particles)
1465  deallocate(send_payload)
1466  end if
1467 
1468  end subroutine write_particle_output
1469 
1470  character(len=128) function make_particle_filename(tout,index,type)
1472 
1473  character(len=*), intent(in) :: type
1474  double precision, intent(in) :: tout
1475  integer, intent(in) :: index
1476  integer :: nout, mysnapshot
1477 
1478  select case(type)
1479  case ('ensemble')
1480  nout = nint(tout / dtsave_particles)
1481  write(make_particle_filename,"(a,a,i6.6,a)") trim(base_filename),'_ensemble',nout,'.csv'
1482  case ('destroy')
1483  write(make_particle_filename,"(a,a)") trim(base_filename),'_destroyed.csv'
1484  case ('individual')
1485  write(make_particle_filename,"(a,a,i6.6,a)") trim(base_filename),'_particle',index,'.csv'
1486  end select
1487 
1488  end function make_particle_filename
1489 
1490  subroutine output_ensemble(send_n_particles_for_output,send_particles,send_payload,typefile)
1492 
1493  integer, intent(in) :: send_n_particles_for_output
1494  type(particle_t), dimension(send_n_particles_for_output), intent(in) :: send_particles
1495  double precision, dimension(npayload,send_n_particles_for_output), intent(in) :: send_payload
1496  character(len=*), intent(in) :: typefile
1497  character(len=std_len) :: filename
1498  type(particle_t), allocatable, dimension(:) :: receive_particles
1499  double precision, allocatable, dimension(:,:) :: receive_payload
1500  integer :: status(MPI_STATUS_SIZE)
1501  integer,dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1502  integer :: ipe, ipart, nout
1503  logical :: file_exists
1504 
1505  receive_n_particles_for_output_from_ipe(:) = 0
1506 
1507  call mpi_allgather(send_n_particles_for_output, 1, mpi_integer, &
1508  receive_n_particles_for_output_from_ipe, 1, mpi_integer, icomm, ierrmpi)
1509 
1510  ! If there are no particles to be written, skip the output
1511  if (sum(receive_n_particles_for_output_from_ipe(:)) == 0) return
1512 
1513  if (mype > 0) then
1514  call mpi_send(send_particles,send_n_particles_for_output,type_particle,0,mype,icomm,ierrmpi)
1515  call mpi_send(send_payload,npayload*send_n_particles_for_output,mpi_double_precision,0,mype,icomm,ierrmpi)
1516  else
1517  ! Create file and write header
1518  if(typefile=='destroy') then ! Destroyed file
1519  write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1520  trim(typefile) // '.csv'
1522  inquire(file=filename, exist=file_exists)
1523 
1524  if (.not. file_exists) then
1525  open(unit=unitparticles, file=filename)
1526  write(unitparticles,"(a)") trim(csv_header)
1527  else
1528  open(unit=unitparticles, file=filename, access='append')
1529  end if
1530 
1531  else ! Ensemble file
1532  write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1533  trim(typefile) // '_', nint(t_next_output/dtsave_particles),'.csv'
1534  open(unit=unitparticles,file=filename)
1535  write(unitparticles,"(a)") trim(csv_header)
1536  end if
1537 
1538  ! Write own particles
1539  do ipart=1,send_n_particles_for_output
1540  call output_particle(send_particles(ipart),send_payload(1:npayload,ipart),0,unitparticles)
1541  end do
1542 
1543  ! Write particles from other tasks
1544  do ipe=1,npe-1
1545  allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
1546  allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
1547  call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1548  call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1549  do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1550  call output_particle(receive_particles(ipart),receive_payload(1:npayload,ipart),ipe,unitparticles)
1551  end do ! ipart
1552  deallocate(receive_particles)
1553  deallocate(receive_payload)
1554  end do ! ipe
1555 
1556  close(unitparticles)
1557  end if
1558 
1559  end subroutine output_ensemble
1560 
1561  subroutine output_individual()
1563  character(len=std_len) :: filename
1564  integer :: ipart,iipart,ipe
1565  logical :: file_exists
1566 
1567  do iipart=1,nparticles_on_mype
1568  ipart=particles_on_mype(iipart)
1569 
1570  ! should the particle be dumped?
1571  if (particle(ipart)%self%follow) then
1572  write(filename,"(a,a,i6.6,a)") trim(base_filename), '_particle_', &
1573  particle(ipart)%self%index, '.csv'
1574  inquire(file=filename, exist=file_exists)
1575 
1576  ! Create empty file and write header on first iteration, or when the
1577  ! file does not exist yet
1578  if (it_particles == 0 .or. .not. file_exists) then
1579  open(unit=unitparticles, file=filename)
1580  write(unitparticles,"(a)") trim(csv_header)
1581  else
1582  open(unit=unitparticles, file=filename, access='append')
1583  end if
1584 
1585  call output_particle(particle(ipart)%self,&
1586  particle(ipart)%payload(1:npayload),mype,unitparticles)
1587 
1588  close(unitparticles)
1589  end if
1590  end do
1591 
1592  end subroutine output_individual
1593 
1594  subroutine output_particle(myparticle,payload,ipe,file_unit)
1596  use mod_geometry
1597 
1598  type(particle_t),intent(in) :: myparticle
1599  double precision, intent(in) :: payload(npayload)
1600  integer, intent(in) :: ipe
1601  integer, intent(in) :: file_unit
1602  double precision :: x(3), u(3)
1603  integer :: icomp
1604 
1605  ! convert and normalize the position
1606  if (nocartesian) then
1607  select case(coordinate)
1608  case (Cartesian,Cartesian_stretched,Cartesian_expansion)
1609  x = myparticle%x*length_convert_factor
1610  case(cylindrical)
1611  x(r_) = myparticle%x(r_)*length_convert_factor
1612  x(z_) = myparticle%x(z_)*length_convert_factor
1613  x(phi_) = myparticle%x(phi_)
1614  case(spherical)
1615  x(:) = myparticle%x(:)
1616  x(1) = x(1)*length_convert_factor
1617  end select
1618  else
1619  call partcoord_to_cartesian(myparticle%x,x)
1620  x(:) = x(:)*length_convert_factor
1621  end if
1622 
1623  u = myparticle%u(1:3) * integrator_velocity_factor
1624 
1625  write(file_unit, csv_format) myparticle%time, myparticle%dt, x(1:3), &
1626  u, payload(1:npayload), ipe, it_particles, &
1627  myparticle%index
1628 
1629  end subroutine output_particle
1630 
1631  !> Convert position to Cartesian coordinates
1632  subroutine partcoord_to_cartesian(xp,xpcart)
1633  ! conversion of particle coordinate from cylindrical/spherical to cartesian coordinates done here
1635  use mod_geometry
1636 
1637  double precision, intent(in) :: xp(1:3)
1638  double precision, intent(out) :: xpcart(1:3)
1639 
1640  select case (coordinate)
1641  case (Cartesian,Cartesian_stretched,Cartesian_expansion)
1642  xpcart(1:3) = xp(1:3)
1643  case (cylindrical)
1644  xpcart(1) = xp(r_)*cos(xp(phi_))
1645  xpcart(2) = xp(r_)*sin(xp(phi_))
1646  xpcart(3) = xp(z_)
1647  case (spherical)
1648  xpcart(1) = xp(1)*sin(xp(2))*cos(xp(3))
1649  {^iftwod
1650  xpcart(2) = xp(1)*cos(xp(2))
1651  xpcart(3) = xp(1)*sin(xp(2))*sin(xp(3))}
1652  {^ifthreed
1653  xpcart(2) = xp(1)*sin(xp(2))*sin(xp(3))
1654  xpcart(3) = xp(1)*cos(xp(2))}
1655  case default
1656  write(*,*) "No converter for coordinate=",coordinate
1657  end select
1658 
1659  end subroutine partcoord_to_cartesian
1660 
1661  !> Convert to noncartesian coordinates
1662  subroutine partcoord_from_cartesian(xp,xpcart)
1663  ! conversion of particle coordinate from Cartesian to cylindrical/spherical coordinates done here
1665  use mod_geometry
1666 
1667  double precision, intent(in) :: xpcart(1:3)
1668  double precision, intent(out) :: xp(1:3)
1669  double precision :: xx, yy, zz
1670  double precision :: rr, tth, pphi
1671 
1672  select case (coordinate)
1673  case (cartesian,cartesian_stretched,cartesian_expansion)
1674  xp(1:3)=xpcart(1:3)
1675  case (cylindrical)
1676  xp(r_) = sqrt(xpcart(1)**2 + xpcart(2)**2)
1677  xp(phi_) = atan2(xpcart(2),xpcart(1))
1678  if (xp(phi_) .lt. 0.d0) xp(phi_) = 2.0d0*dpi + xp(phi_)
1679  xp(z_) = xpcart(3)
1680  case (spherical)
1681  xx = xpcart(1)
1682  {^iftwod
1683  yy = xpcart(3)
1684  zz = xpcart(2)}
1685  {^ifthreed
1686  yy = xpcart(2)
1687  zz = xpcart(3)}
1688  rr = sqrt(xx**2 + yy**2 + zz**2)
1689  tth = acos(zz/rr)
1690  if (yy > 0.d0) then
1691  pphi = acos(xx/sqrt(xx**2+yy**2))
1692  else
1693  pphi = 2.d0*dpi - acos(xx/sqrt(xx**2+yy**2))
1694  endif
1695  xp(1:3) = (/ rr, tth, pphi /)
1696  case default
1697  write(*,*) "No converter for coordinate=",coordinate
1698  end select
1699 
1700  end subroutine partcoord_from_cartesian
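The inverse mapping used here (3D branch shown) keeps the azimuth in [0, 2*pi), consistent with fix_phi_crossing below:

  cylindrical:  \[ r = \sqrt{x^2+y^2}, \qquad \varphi = \operatorname{atan2}(y,x) \bmod 2\pi, \qquad z = z \]
  spherical:    \[ r = \sqrt{x^2+y^2+z^2}, \qquad \theta = \arccos(z/r), \qquad \varphi = \arccos\!\left(\tfrac{x}{\sqrt{x^2+y^2}}\right) \;\text{if } y > 0, \;\; 2\pi - \arccos\!\left(\tfrac{x}{\sqrt{x^2+y^2}}\right) \;\text{otherwise} \]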
1701 
1702  !> Convert vector to Cartesian coordinates
1703  subroutine partvec_to_cartesian(xp,up,upcart)
1704  ! conversion of a generic vector from cylindrical/spherical to cartesian coordinates done here
1706  use mod_geometry
1707 
1708  double precision, intent(in) :: xp(1:3),up(1:3)
1709  double precision, intent(out) :: upcart(1:3)
1710 
1711  select case (coordinate)
1712  case (cartesian,cartesian_stretched,cartesian_expansion)
1713  upcart(1:3)=up(1:3)
1714  case (cylindrical)
1715  upcart(1) = cos(xp(phi_)) * up(r_) - sin(xp(phi_)) * up(phi_)
1716  upcart(2) = cos(xp(phi_)) * up(phi_) + sin(xp(phi_)) * up(r_)
1717  upcart(3) = up(z_)
1718  case (spherical)
1719  upcart(1) = up(1)*sin(xp(2))*cos(xp(3)) + up(2)*cos(xp(2))*cos(xp(3)) - up(3)*sin(xp(3))
1720  {^iftwod
1721  upcart(2) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))
1722  upcart(3) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))}
1723  {^ifthreed
1724  upcart(2) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))
1725  upcart(3) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))}
1726  case default
1727  write(*,*) "No converter for coordinate=",coordinate
1728  end select
1729 
1730  end subroutine partvec_to_cartesian
1731 
1732  !> Convert vector from Cartesian coordinates
1733  subroutine partvec_from_cartesian(xp,up,upcart)
1734  ! conversion of a generic vector to cylindrical/spherical from cartesian coordinates done here
1736  use mod_geometry
1737 
1738  double precision, intent(in) :: xp(1:3),upcart(1:3)
1739  double precision, intent(out) :: up(1:3)
1740 
1741  select case (coordinate)
1742  case (cartesian,cartesian_stretched,cartesian_expansion)
1743  up(1:3) = upcart(1:3)
1744  case (cylindrical)
1745  up(r_) = cos(xp(phi_)) * upcart(1) + sin(xp(phi_)) * upcart(2)
1746  up(phi_) = -sin(xp(phi_)) * upcart(1) + cos(xp(phi_)) * upcart(2)
1747  up(z_) = upcart(3)
1748  case (spherical)
1749  {^iftwod
1750  up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(3)*sin(xp(2))*sin(xp(3)) + upcart(2)*cos(xp(2))
1751  up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(3)*cos(xp(2))*sin(xp(3)) - upcart(2)*sin(xp(2))
1752  up(3) = -upcart(1)*sin(xp(3)) + upcart(3)*cos(xp(3))}
1753  {^ifthreed
1754  up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(2)*sin(xp(2))*sin(xp(3)) + upcart(3)*cos(xp(2))
1755  up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(2)*cos(xp(2))*sin(xp(3)) - upcart(3)*sin(xp(2))
1756  up(3) = -upcart(1)*sin(xp(3)) + upcart(2)*cos(xp(3))}
1757  case default
1758  write(*,*) "No converter for coordinate=",coordinate
1759  end select
1760 
1761  end subroutine partvec_from_cartesian
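Both vector conversions apply the same rotation: partvec_to_cartesian multiplies by the orthogonal matrix R(theta, phi) below (3D spherical case), and partvec_from_cartesian multiplies by its transpose, which equals its inverse:

  \[ \begin{pmatrix} u_x \\ u_y \\ u_z \end{pmatrix} =
     \begin{pmatrix}
       \sin\theta\cos\varphi & \cos\theta\cos\varphi & -\sin\varphi \\
       \sin\theta\sin\varphi & \cos\theta\sin\varphi & \cos\varphi  \\
       \cos\theta            & -\sin\theta           & 0
     \end{pmatrix}
     \begin{pmatrix} u_r \\ u_\theta \\ u_\varphi \end{pmatrix} \]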
1762 
1763  !> Fix particle position when crossing the 0,2pi boundary in noncartesian coordinates
1764  subroutine fix_phi_crossing(xp,igrid)
1766  use mod_geometry
1767 
1768  integer, intent(in) :: igrid
1769  double precision, intent(inout) :: xp(1:3)
1770  double precision :: xpmod(1:3)
1771  integer :: phiind
1772 
1773  select case (coordinate)
1774  case (cartesian,cartesian_stretched,cartesian_expansion)
1775  ! Do nothing
1776  return
1777  case (cylindrical)
1778  phiind = phi_
1779  case (spherical)
1780  phiind = 3
1781  end select
1782 
1783  xpmod = xp
1784 
1785  ! Fix phi-boundary crossing
1786  ! Case 1: particle has crossed from ~2pi to ~0
1787  xpmod(phiind) = xp(phiind) + 2.d0*dpi
1788  if ((.not. point_in_igrid_ghostc(xp,igrid,0)) &
1789  .and. (point_in_igrid_ghostc(xpmod,igrid,0))) then
1790  xp(phiind) = xpmod(phiind)
1791  return
1792  end if
1793  ! Case 2: particle has crossed from ~0 to ~2pi
1794  xpmod(phiind) = xp(phiind) - 2.d0*dpi
1795  if ((.not. point_in_igrid_ghostc(xp,igrid,0)) &
1796  .and. (point_in_igrid_ghostc(xpmod,igrid,0))) then
1797  xp(phiind) = xpmod(phiind)
1798  return
1799  end if
1800 
1801  end subroutine fix_phi_crossing
1802 
1803  !> Quick check if particle coordinate is inside igrid
1804  !> (ghost cells included, except the last ngh)
1805  logical function point_in_igrid_ghostc(x, igrid, ngh)
1807  use mod_geometry
1808  integer, intent(in) :: igrid, ngh
1809  double precision, intent(in) :: x(ndim)
1810  double precision :: grid_rmin(ndim), grid_rmax(ndim)
1811 
1812  ! TODO: this does not work with stretched coordinates!
1813  ! TODO: but phi should not be stretched in principle...
1814  grid_rmin = [ {rnode(rpxmin^d_,igrid)-rnode(rpdx^d_,igrid)*(dble(nghostcells-ngh)-0.5d0)} ]
1815  grid_rmax = [ {rnode(rpxmax^d_,igrid)+rnode(rpdx^d_,igrid)*(dble(nghostcells-ngh)-0.5d0)} ]
1816  point_in_igrid_ghostc = all(x >= grid_rmin) .and. all(x < grid_rmax)
1817  end function point_in_igrid_ghostc
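Written out, the test above accepts a point x when, for every dimension d (with block edges x_d^{min}, x_d^{max}, cell size Delta x_d, and N_{gc} = nghostcells),

  \[ x_d^{\min} - \Delta x_d\left(N_{gc} - n_{gh} - \tfrac{1}{2}\right) \;\le\; x_d \;<\; x_d^{\max} + \Delta x_d\left(N_{gc} - n_{gh} - \tfrac{1}{2}\right), \]

i.e. the block extended outward by (nghostcells - ngh - 1/2) cell widths on each side; as the TODO notes, this restatement assumes a uniform (unstretched) cell size.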
1818 
1819  subroutine comm_particles()
1820 
1821  use mod_global_parameters
1822 
1823  integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
1824  integer :: index
1825  integer :: tag_send, tag_receive, send_buff, rcv_buff
1826  integer :: status(MPI_STATUS_SIZE)
1827  integer, dimension(0:npe-1) :: send_n_particles_to_ipe
1828  integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
1829  type(particle_t), allocatable, dimension(:,:) :: send_particles
1830  type(particle_t), allocatable, dimension(:,:) :: receive_particles
1831  double precision, allocatable, dimension(:,:,:) :: send_payload
1832  double precision, allocatable, dimension(:,:,:) :: receive_payload
1833  integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
1834  integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
1835  integer :: destroy_n_particles_mype
1836  integer, allocatable, dimension(:) :: sndrqst, rcvrqst
1837  integer, allocatable, dimension(:) :: sndrqst_payload, rcvrqst_payload
1838  integer :: isnd, ircv
1839  integer, parameter :: maxneighbors=56 ! maximum case: coarse-fine in 3D
1840  logical :: BC_applied
1841  !-----------------------------------------------------------------------------
1842 
1843  send_n_particles_to_ipe(:) = 0
1844  receive_n_particles_from_ipe(:) = 0
1845  destroy_n_particles_mype = 0
1846 
1847  allocate( particle_index_to_be_sent_to_ipe(nparticles_per_cpu_hi,0:npe-1) )
1848  allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
1849  allocate( sndrqst_payload(1:npe-1), rcvrqst_payload(1:npe-1) )
1850  sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1851  sndrqst_payload = mpi_request_null; rcvrqst_payload = mpi_request_null;
1852 
1853  ! check if and where to send each particle, destroy if necessary
1854  ! !$OMP PARALLEL DO PRIVATE(ipart)
1855  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1856 
1857  ! first check if the particle should be destroyed (user defined criterion)
1858  if (associated(usr_destroy_particle)) then
1859  if (usr_destroy_particle(particle(ipart))) then
1860  ! !$OMP CRITICAL(destroy)
1861  destroy_n_particles_mype = destroy_n_particles_mype + 1
1862  particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1863  ! !$OMP END CRITICAL(destroy)
1864  cycle
1865  end if
1866  end if
1867 
1868  ! is my particle still in the same igrid?
1869  if (.not.particle_in_igrid(ipart,particle(ipart)%igrid)) then
1870  call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
1871 
1872  ! destroy particle if out of domain (signalled by return value of -1)
1873  if (igrid_particle == -1 )then
1874  call apply_periodb(particle(ipart)%self,igrid_particle,ipe_particle,bc_applied)
1875  if (.not. bc_applied .or. igrid_particle == -1) then
1876  ! !$OMP CRITICAL(destroy2)
1877  destroy_n_particles_mype = destroy_n_particles_mype + 1
1878  particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1879  ! !$OMP END CRITICAL(destroy2)
1880  cycle
1881  end if
1882  end if
1883 
1884  ! particle still present
1885  particle(ipart)%igrid = igrid_particle
1886  particle(ipart)%ipe = ipe_particle
1887 
1888  ! if we have more than one core, is it on another cpu?
1889  if (npe .gt. 1 .and. particle(ipart)%ipe .ne. mype) then
1890  ! !$OMP CRITICAL(send)
1891  send_n_particles_to_ipe(ipe_particle) = &
1892  send_n_particles_to_ipe(ipe_particle) + 1
1893  if (send_n_particles_to_ipe(ipe_particle) .gt. nparticles_per_cpu_hi) &
1894  call mpistop('too many particles, increase nparticles_per_cpu_hi')
1895  particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
1896  ! !$OMP END CRITICAL(send)
1897  end if ! ipe_particle
1898 
1899  end if ! particle_in_grid
1900 
1901  end do ! ipart
1902  ! !$OMP END PARALLEL DO
1903 
1904  call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
1905 
1906  ! get out when only one core:
1907  if (npe == 1) return
1908 
1909  ! communicate amount of particles to be sent/received
1910  isnd = 0; ircv = 0;
1911  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1912  tag_send = mype * npe + ipe
1913  tag_receive = ipe * npe + mype
1914  isnd = isnd + 1; ircv = ircv + 1
1915  call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1916  call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1917  end do
1918 
1919  call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1920  call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1921  sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1922 
1923  ! Allocate the send and receive buffers.
1924  ! If this is still not good enough, consider optimizing using lists of lists:
1925  allocate(send_particles(maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
1926  allocate(receive_particles(maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
1927  allocate(send_payload(npayload,maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
1928  allocate(receive_payload(npayload,maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
1929 
1930  isnd = 0; ircv = 0;
1931  ! send and receive the data of the particles
1932  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1933  tag_send = mype * npe + ipe
1934  tag_receive = ipe * npe + mype
1935 
1936  ! should i send some particles to ipe?
1937  if (send_n_particles_to_ipe(ipe) .gt. 0) then
1938 
1939  ! create the send buffer
1940  do ipart = 1, send_n_particles_to_ipe(ipe)
1941  send_particles(ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
1942  send_payload(1:npayload,ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
1943  end do ! ipart
1944  isnd = isnd + 1
1945  call mpi_isend(send_particles(:,iipe),send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1946  call mpi_isend(send_payload(:,:,iipe),npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,sndrqst_payload(isnd),ierrmpi)
1947 
1948  end if ! send .gt. 0
1949 
1950  ! should i receive some particles from ipe?
1951  if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1952 
1953  ircv = ircv + 1
1954  call mpi_irecv(receive_particles(:,iipe),receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1955  call mpi_irecv(receive_payload(:,:,iipe),npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,rcvrqst_payload(ircv),ierrmpi)
1956 
1957  end if ! receive .gt. 0
1958  end do ! ipe
1959 
1960  call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1961  call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1962  call mpi_waitall(isnd,sndrqst_payload,mpi_statuses_ignore,ierrmpi)
1963  call mpi_waitall(ircv,rcvrqst_payload,mpi_statuses_ignore,ierrmpi)
1964 
1965  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1966  do ipart = 1, send_n_particles_to_ipe(ipe)
1967  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
1968  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
1969  call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
1970  end do ! ipart
1971  end do
1972 
1973  do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1974  if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1975  do ipart = 1, receive_n_particles_from_ipe(ipe)
1976 
1977  index = receive_particles(ipart,iipe)%index
1978  call push_particle_into_particles_on_mype(index)
1979  !if (.not. allocated(particle(index)%self)) &
1980  allocate(particle(index)%self)
1981  particle(index)%self = receive_particles(ipart,iipe)
1982  !if (.not. allocated(particle(index)%payload)) &
1983  allocate(particle(index)%payload(npayload))
1984  particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart,iipe)
1985 
1986  ! since we don't send the igrid, need to re-locate it
1987  call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
1988  particle(index)%igrid = igrid_particle
1989  particle(index)%ipe = ipe_particle
1990 
1991  end do ! ipart
1992 
1993  end if ! receive .gt. 0
1994  end do ! ipe
1995 
1996  end subroutine comm_particles
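The count-exchange handshake above relies on tags that are unique per ordered (sender, receiver) pair: the sender posts tag mype*npe + ipe and the receiver matches it with ipe*npe + mype. Below is a minimal, self-contained sketch of that non-blocking pattern (not part of this module; the program name and the dummy counts are illustrative only):

  program count_exchange_sketch
    use mpi
    implicit none
    integer :: mype, npe, ipe, isnd, ircv, ierrmpi
    integer, allocatable :: nsend(:), nrecv(:), sndrqst(:), rcvrqst(:)

    call mpi_init(ierrmpi)
    call mpi_comm_rank(mpi_comm_world, mype, ierrmpi)
    call mpi_comm_size(mpi_comm_world, npe, ierrmpi)

    allocate(nsend(0:npe-1), nrecv(0:npe-1), sndrqst(npe), rcvrqst(npe))
    nsend = mype + 1                  ! dummy "particle counts" to announce
    nrecv = 0
    sndrqst = mpi_request_null; rcvrqst = mpi_request_null
    isnd = 0; ircv = 0

    do ipe = 0, npe-1
      if (ipe == mype) cycle
      isnd = isnd + 1; ircv = ircv + 1
      ! tag mype*npe+ipe on the sender matches tag ipe*npe+mype on the receiver
      call mpi_isend(nsend(ipe), 1, mpi_integer, ipe, mype*npe+ipe, &
                     mpi_comm_world, sndrqst(isnd), ierrmpi)
      call mpi_irecv(nrecv(ipe), 1, mpi_integer, ipe, ipe*npe+mype, &
                     mpi_comm_world, rcvrqst(ircv), ierrmpi)
    end do

    call mpi_waitall(isnd, sndrqst, mpi_statuses_ignore, ierrmpi)
    call mpi_waitall(ircv, rcvrqst, mpi_statuses_ignore, ierrmpi)

    write(*,*) 'rank', mype, 'expects', sum(nrecv), 'incoming items in total'
    call mpi_finalize(ierrmpi)
  end program count_exchange_sketch

The sketch follows the all-ranks loop of comm_particles_global; comm_particles restricts the same pattern to the neighbor list ipe_neighbor.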
1997  !=============================================================================
1998  subroutine comm_particles_global()
1999 
2000  ! unlike comm_particles, this routine does not destroy particles
2001 
2002  use mod_global_parameters
2003 
2004  integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2005  integer :: index
2006  integer :: tag_send, tag_receive, send_buff, rcv_buff
2007  integer :: status(MPI_STATUS_SIZE)
2008  integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2009  integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2010 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2011 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2012 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2013 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2014  type(particle_t), allocatable, dimension(:) :: send_particles
2015  type(particle_t), allocatable, dimension(:) :: receive_particles
2016  double precision, allocatable, dimension(:,:) :: send_payload
2017  double precision, allocatable, dimension(:,:) :: receive_payload
2018  integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
2019  integer, allocatable, dimension(:) :: sndrqst, rcvrqst
2020  integer :: isnd, ircv
2021  logical :: BC_applied
2022  !-----------------------------------------------------------------------------
2023  send_n_particles_to_ipe(:) = 0
2024  receive_n_particles_from_ipe(:) = 0
2025 
2026  allocate( particle_index_to_be_sent_to_ipe(nparticles_per_cpu_hi,0:npe-1) )
2027  allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
2028  sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
2029 
2030  ! check if and where to send each particle, relocate all of them (in case grid has changed)
2031  ! !$OMP PARALLEL DO PRIVATE(ipart)
2032  do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2033 
2034  call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2035 
2036  particle(ipart)%igrid = igrid_particle
2037  particle(ipart)%ipe = ipe_particle
2038 
2039  ! if we have more than one core, is it on another cpu?
2040  if (particle(ipart)%ipe .ne. mype) then
2041  ! !$OMP CRITICAL(send)
2042  send_n_particles_to_ipe(ipe_particle) = &
2043  send_n_particles_to_ipe(ipe_particle) + 1
2044  if (send_n_particles_to_ipe(ipe_particle) .gt. nparticles_per_cpu_hi) &
2045  call mpistop('G: too many particles, increase nparticles_per_cpu_hi')
2046  particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2047  ! !$OMP END CRITICAL(send)
2048  end if ! ipe_particle
2049 
2050  end do ! ipart
2051  ! !$OMP END PARALLEL DO
2052 
2053  ! get out when only one core:
2054  if (npe == 1) then
2055  deallocate(sndrqst, rcvrqst)
2056  deallocate(particle_index_to_be_sent_to_ipe)
2057  return
2058  end if
2059 
2060  ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2061  ! communicate amount of particles to be sent/received
2062  ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2063  ! opedit: I think this was the main bottleneck.
2064  ! 31.12.2017: made it nonblocking (easy, least invasive)
2065  ! If this continues to be problematic, it would be better to
2066  ! send particles together with their block in coarsen/refine/loadbalance (best).
2067 
2068  isnd = 0; ircv = 0;
2069  do ipe=0,npe-1; if (ipe .eq. mype) cycle;
2070 
2071  tag_send = mype * npe + ipe; tag_receive = ipe * npe + mype;
2072  isnd = isnd + 1; ircv = ircv + 1;
2073  call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer, &
2074  ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2075  call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer, &
2076  ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2077  end do
2078 
2079  call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
2080  call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
2081  deallocate(sndrqst, rcvrqst)
2082 
2083 
2084  ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2085  ! send and receive the data of the particles
2086  ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2087 
2088  do ipe=0,npe-1
2089  if (ipe .eq. mype) cycle;
2090  tag_send = mype * npe + ipe
2091  tag_receive = ipe * npe + mype
2092 
2093  ! should i send some particles to ipe?
2094  if (send_n_particles_to_ipe(ipe) .gt. 0) then
2095 
2096  ! create the send buffer
2097  allocate(send_particles(1:send_n_particles_to_ipe(ipe)))
2098  allocate(send_payload(1:npayload,1:send_n_particles_to_ipe(ipe)))
2099  do ipart = 1, send_n_particles_to_ipe(ipe)
2100  send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
2101  send_payload(1:npayload,ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
2102  end do ! ipart
2103 
2104  call mpi_send(send_particles,send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,ierrmpi)
2105  call mpi_send(send_payload,npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,ierrmpi)
2106  do ipart = 1, send_n_particles_to_ipe(ipe)
2107  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2108  deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2109  call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2110  end do ! ipart
2111  deallocate(send_particles)
2112  deallocate(send_payload)
2113 
2114  end if ! send .gt. 0
2115 
2116  ! should i receive some particles from ipe?
2117  if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2118  allocate(receive_particles(1:receive_n_particles_from_ipe(ipe)))
2119  allocate(receive_payload(1:npayload,1:receive_n_particles_from_ipe(ipe)))
2120 
2121  call mpi_recv(receive_particles,receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,status,ierrmpi)
2122  call mpi_recv(receive_payload,npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,status,ierrmpi)
2123  do ipart = 1, receive_n_particles_from_ipe(ipe)
2124 
2125  index = receive_particles(ipart)%index
2126  !if (.not. allocated(particle(index)%self)) &
2127  allocate(particle(index)%self)
2128  particle(index)%self = receive_particles(ipart)
2129  !if (.not. allocated(particle(index)%payload)) &
2130  allocate(particle(index)%payload(npayload))
2131  particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart)
2132  call push_particle_into_particles_on_mype(index)
2133 
2134  ! since we don't send the igrid, need to re-locate it
2135  call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2136  particle(index)%igrid = igrid_particle
2137  particle(index)%ipe = ipe_particle
2138 
2139  end do ! ipart
2140  deallocate(receive_particles)
2141  deallocate(receive_payload)
2142 
2143  end if ! receive .gt. 0
2144  end do ! ipe
2145  deallocate(particle_index_to_be_sent_to_ipe)
2146 
2147  end subroutine comm_particles_global
2148 
2149 ! subroutine comm_particles_old()
2150 ! use mod_global_parameters
2151 !
2152 ! integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2153 ! integer :: index
2154 ! integer :: tag_send, tag_receive, tag_send_p, tag_receive_p, send_buff, rcv_buff
2155 ! integer :: status(MPI_STATUS_SIZE)
2156 ! integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2157 ! integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2158 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2159 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2160 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2161 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2162 ! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
2163 ! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_received_from_ipe
2164 ! integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
2165 ! integer :: destroy_n_particles_mype
2166 ! logical :: BC_applied
2167 ! integer, allocatable, dimension(:) :: sndrqst, rcvrqst
2168 ! integer :: isnd, ircv
2169 ! integer,dimension(npe_neighbors,nparticles_per_cpu_hi) :: payload_index
2170 ! send_n_particles_to_ipe(:) = 0
2171 ! receive_n_particles_from_ipe(:) = 0
2172 ! destroy_n_particles_mype = 0
2173 !
2174 ! ! check if and where to send each particle, destroy if necessary
2175 ! do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2176 !
2177 ! ! first check if the particle should be destroyed because t>tmax in a fixed snapshot
2178 ! if ( .not.time_advance .and. particle(ipart)%self%time .gt. tmax_particles ) then
2179 ! destroy_n_particles_mype = destroy_n_particles_mype + 1
2180 ! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2181 ! cycle
2182 ! end if
2183 !
2184 ! ! Then check whether to destroy due to a user-defined condition
2185 ! if (associated(usr_destroy_particle)) then
2186 ! if (usr_destroy_particle(particle(ipart))) then
2187 ! destroy_n_particles_mype = destroy_n_particles_mype + 1
2188 ! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2189 ! cycle
2190 ! end if
2191 ! end if
2192 !
2193 ! ! is my particle still in the same igrid?
2194 ! if (.not.particle_in_igrid(ipart,particle(ipart)%igrid)) then
2195 ! call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2196 !
2197 ! ! destroy particle if out of domain (signalled by return value of -1)
2198 ! if (igrid_particle == -1 )then
2199 ! call apply_periodB(particle(ipart)%self,igrid_particle,ipe_particle,BC_applied)
2200 ! if (.not. BC_applied .or. igrid_particle == -1) then
2201 ! destroy_n_particles_mype = destroy_n_particles_mype + 1
2202 ! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2203 ! cycle
2204 ! end if
2205 ! end if
2206 !
2207 ! ! particle still present
2208 ! particle(ipart)%igrid = igrid_particle
2209 ! particle(ipart)%ipe = ipe_particle
2210 !
2211 ! ! if we have more than one core, is it on another cpu?
2212 ! if (npe .gt. 1 .and. particle(ipart)%ipe .ne. mype) then
2213 ! send_n_particles_to_ipe(ipe_particle) = &
2214 ! send_n_particles_to_ipe(ipe_particle) + 1
2215 ! particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2216 ! end if ! ipe_particle
2217 !
2218 ! end if ! particle_in_grid
2219 !
2220 ! end do ! ipart
2221 !
2222 ! call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
2223 !
2224 ! ! get out when only one core:
2225 ! if (npe == 1) return
2226 !
2227 ! ! communicate amount of particles to be sent/received
2228 ! allocate( sndrqst(1:npe_neighbors), rcvrqst(1:npe_neighbors) )
2229 ! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2230 ! isnd = 0; ircv = 0;
2231 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2232 ! tag_send = mype * npe + ipe
2233 ! tag_receive = ipe * npe + mype
2234 ! isnd = isnd + 1; ircv = ircv + 1;
2235 ! call MPI_ISEND(send_n_particles_to_ipe(ipe),1,MPI_INTEGER, &
2236 ! ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2237 ! call MPI_IRECV(receive_n_particles_from_ipe(ipe),1,MPI_INTEGER, &
2238 ! ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2239 ! end do
2240 ! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2241 ! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2242 ! deallocate( sndrqst, rcvrqst )
2243 !
2244 ! ! Communicate index of the particles to be sent/received
2245 ! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2246 ! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2247 ! isnd = 0; ircv = 0;
2248 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2249 ! if (send_n_particles_to_ipe(ipe) > 0) then
2250 ! tag_send = mype * npe + ipe
2251 ! isnd = isnd + 1
2252 ! call MPI_ISEND(particle_index_to_be_sent_to_ipe(:,ipe),send_n_particles_to_ipe(ipe),MPI_INTEGER, &
2253 ! ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2254 ! end if
2255 ! if (receive_n_particles_from_ipe(ipe) > 0) then
2256 ! tag_receive = ipe * npe + mype
2257 ! ircv = ircv + 1
2258 ! call MPI_IRECV(particle_index_to_be_received_from_ipe(:,ipe),receive_n_particles_from_ipe(ipe),MPI_INTEGER, &
2259 ! ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2260 ! end if
2261 ! end do
2262 ! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2263 ! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2264 ! deallocate( sndrqst, rcvrqst )
2265 !
2266 ! ! Send and receive the data of the particles
2267 ! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2268 ! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2269 ! isnd = 0; ircv = 0;
2270 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2271 !
2272 ! ! should i send some particles to ipe?
2273 ! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2274 !
2275 ! do ipart = 1, send_n_particles_to_ipe(ipe)
2276 ! tag_send = mype * npe + ipe + ipart
2277 ! isnd = isnd+1
2278 ! call MPI_ISEND(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self, &
2279 ! 1,type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2280 ! end do ! ipart
2281 ! end if ! send .gt. 0
2282 !
2283 ! ! should i receive some particles from ipe?
2284 ! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2285 !
2286 ! do ipart = 1, receive_n_particles_from_ipe(ipe)
2287 ! tag_receive = ipe * npe + mype + ipart
2288 ! ircv = ircv+1
2289 ! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2290 ! allocate(particle(index)%self)
2291 ! call MPI_IRECV(particle(index)%self,1,type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2292 ! end do ! ipart
2293 !
2294 ! end if ! receive .gt. 0
2295 ! end do ! ipe
2296 ! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2297 ! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2298 ! deallocate( sndrqst, rcvrqst )
2299 !
2300 ! ! Send and receive the payloads
2301 ! ! NON-BLOCKING: one communication for each particle. Needed for whatever reason
2302 ! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2303 ! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2304 ! isnd = 0; ircv = 0;
2305 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2306 !
2307 ! ! should i send some particles to ipe?
2308 ! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2309 !
2310 ! do ipart = 1, send_n_particles_to_ipe(ipe)
2311 ! tag_send = mype * npe + ipe + ipart
2312 ! isnd = isnd+1
2313 ! call MPI_ISEND(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload), &
2314 ! npayload,MPI_DOUBLE_PRECISION,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2315 ! end do ! ipart
2316 ! end if ! send .gt. 0
2317 !
2318 ! ! should i receive some particles from ipe?
2319 ! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2320 ! do ipart = 1, receive_n_particles_from_ipe(ipe)
2321 ! tag_receive = ipe * npe + mype + ipart
2322 ! ircv = ircv+1
2323 ! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2324 ! allocate(particle(index)%payload(npayload))
2325 ! call MPI_IRECV(particle(index)%payload(1:npayload),npayload, &
2326 ! MPI_DOUBLE_PRECISION,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2327 ! end do ! ipart
2328 ! end if ! receive .gt. 0
2329 ! end do ! ipe
2330 ! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2331 ! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2332 ! deallocate( sndrqst, rcvrqst )
2333 !
2334 ! ! Deallocate sent particles
2335 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2336 ! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2337 ! do ipart = 1, send_n_particles_to_ipe(ipe)
2338 ! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2339 ! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2340 ! call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2341 ! end do ! ipart
2342 ! end if ! send .gt. 0
2343 ! end do
2344 !
2345 ! ! Relocate received particles
2346 ! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2347 ! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2348 ! do ipart = 1, receive_n_particles_from_ipe(ipe)
2349 ! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2350 ! call push_particle_into_particles_on_mype(index)
2351 ! ! since we don't send the igrid, need to re-locate it
2352 ! call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2353 ! particle(index)%igrid = igrid_particle
2354 ! particle(index)%ipe = ipe_particle
2355 ! end do ! ipart
2356 ! end if ! receive .gt. 0
2357 ! end do ! ipe
2358 !
2359 ! end subroutine comm_particles_old
2360 !
2361 ! subroutine comm_particles_global_old()
2362 ! use mod_global_parameters
2363 !
2364 ! integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2365 ! integer :: index
2366 ! integer :: tag_send, tag_receive, send_buff, rcv_buff
2367 ! integer :: status(MPI_STATUS_SIZE)
2368 ! integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2369 ! integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2370 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2371 ! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2372 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2373 ! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2374 ! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
2375 ! integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
2376 ! integer :: destroy_n_particles_mype
2377 ! logical :: BC_applied
2378 !
2379 ! send_n_particles_to_ipe(:) = 0
2380 ! receive_n_particles_from_ipe(:) = 0
2381 ! destroy_n_particles_mype = 0
2382 !
2383 ! ! check if and where to send each particle, relocate all of them (in case grid has changed)
2384 ! do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2385 !
2386 ! call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2387 !
2388 ! particle(ipart)%igrid = igrid_particle
2389 ! particle(ipart)%ipe = ipe_particle
2390 !
2391 ! ! if we have more than one core, is it on another cpu?
2392 ! if (particle(ipart)%ipe .ne. mype) then
2393 ! send_n_particles_to_ipe(ipe_particle) = &
2394 ! send_n_particles_to_ipe(ipe_particle) + 1
2395 ! particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2396 ! end if ! ipe_particle
2397 !
2398 ! end do ! ipart
2399 !
2400 ! ! get out when only one core:
2401 ! if (npe == 1) return
2402 !
2403 ! ! communicate amount of particles to be sent/received
2404 ! do ipe=0,npe-1;if (ipe .eq. mype) cycle;
2405 ! tag_send = mype * npe + ipe
2406 ! tag_receive = ipe * npe + mype
2407 ! call MPI_SEND(send_n_particles_to_ipe(ipe),1,MPI_INTEGER,ipe,tag_send,icomm,ierrmpi)
2408 ! call MPI_RECV(receive_n_particles_from_ipe(ipe),1,MPI_INTEGER,ipe,tag_receive,icomm,status,ierrmpi)
2409 ! end do
2410 !
2411 ! ! send and receive the data of the particles
2412 ! do ipe=0,npe-1;if (ipe .eq. mype) cycle;
2413 ! tag_send = mype * npe + ipe
2414 ! tag_receive = ipe * npe + mype
2415 !
2416 ! ! should i send some particles to ipe?
2417 ! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2418 !
2419 ! ! create the send buffer
2420 ! do ipart = 1, send_n_particles_to_ipe(ipe)
2421 ! send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
2422 ! send_payload(1:npayload,ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
2423 ! end do ! ipart
2424 ! call MPI_SEND(send_particles,send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,ierrmpi)
2425 ! call MPI_SEND(send_payload,npayload*send_n_particles_to_ipe(ipe),MPI_DOUBLE_PRECISION,ipe,tag_send,icomm,ierrmpi)
2426 ! do ipart = 1, send_n_particles_to_ipe(ipe)
2427 ! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2428 ! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2429 ! call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2430 ! end do ! ipart
2431 !
2432 ! end if ! send .gt. 0
2433 !
2434 ! ! should i receive some particles from ipe?
2435 ! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2436 !
2437 ! call MPI_RECV(receive_particles,receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,status,ierrmpi)
2438 ! call MPI_RECV(receive_payload,npayload*receive_n_particles_from_ipe(ipe),MPI_DOUBLE_PRECISION,ipe,tag_receive,icomm,status,ierrmpi)
2439 !
2440 ! do ipart = 1, receive_n_particles_from_ipe(ipe)
2441 !
2442 ! index = receive_particles(ipart)%index
2443 ! allocate(particle(index)%self)
2444 ! particle(index)%self = receive_particles(ipart)
2445 ! allocate(particle(index)%payload(npayload))
2446 ! particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart)
2447 ! call push_particle_into_particles_on_mype(index)
2448 !
2449 ! ! since we don't send the igrid, need to re-locate it
2450 ! call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2451 ! particle(index)%igrid = igrid_particle
2452 ! particle(index)%ipe = ipe_particle
2453 !
2454 ! end do ! ipart
2455 !
2456 ! end if ! receive .gt. 0
2457 ! end do ! ipe
2458 !
2459 ! end subroutine comm_particles_global_old
2460 
2461  subroutine apply_periodb(particle,igrid_particle,ipe_particle,BC_applied)
2463 
2464  type(particle_t), intent(inout) :: particle
2465  integer, intent(inout) :: igrid_particle, ipe_particle
2466  logical,intent(out) :: BC_applied
2467  integer :: idim
2468 
2469  bc_applied = .false.
2470  ! get out if we don't have any periodic BC:
2471  if (.not. any(periodb(1:ndim))) return
2472 
2473  ! go through dimensions and try to re-inject the particle at the other side
2474  do idim=1,ndim
2475 
2476  if (.not. periodb(idim)) cycle
2477 
2478  select case(idim)
2479  {case (^d)
2480  if (particle%x(^d) .lt. xprobmin^d) then
2481  particle%x(^d) = particle%x(^d) + (xprobmax^d - xprobmin^d)
2482  bc_applied = .true.
2483  end if
2484  if (particle%x(^d) .ge. xprobmax^d) then
2485  particle%x(^d) = particle%x(^d) - (xprobmax^d - xprobmin^d)
2486  bc_applied = .true.
2487  end if
2488  \}
2489  end select
2490 
2491  end do
2492 
2493  call find_particle_ipe(particle%x,igrid_particle,ipe_particle)
2494 
2495  end subroutine apply_periodb
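The periodic wrap applied above is a single shift by the domain length per periodic dimension, which assumes a particle never leaves the domain by more than one full period within one step:

  \[ x_d \;\to\; x_d + (x_d^{\max} - x_d^{\min}) \quad \text{if } x_d < x_d^{\min}, \qquad
     x_d \;\to\; x_d - (x_d^{\max} - x_d^{\min}) \quad \text{if } x_d \ge x_d^{\max}. \]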
2496 
2497  !> clean up destroyed particles on all cores
2498  subroutine destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed)
2500 
2501  integer, intent(in) :: destroy_n_particles_mype
2502  integer, dimension(1:destroy_n_particles_mype), intent(in) :: particle_index_to_be_destroyed
2503  type(particle_t), dimension(destroy_n_particles_mype):: destroy_particles_mype
2504  double precision, dimension(npayload,destroy_n_particles_mype):: destroy_payload_mype
2505  integer :: iipart,ipart,destroy_n_particles
2506 
2507  destroy_n_particles = 0
2508 
2509  ! append the particles to the list of destroyed particles
2510  do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
2511  destroy_particles_mype(iipart) = particle(ipart)%self
2512  destroy_payload_mype(1:npayload,iipart) = particle(ipart)%payload(1:npayload)
2513  end do
2514 
2515  call output_ensemble(destroy_n_particles_mype,destroy_particles_mype,destroy_payload_mype,'destroy')
2516 
2517  if (npe > 1) then
2518  call mpi_allreduce(destroy_n_particles_mype,destroy_n_particles,1,mpi_integer,mpi_sum,icomm,ierrmpi)
2519  else
2520  destroy_n_particles = destroy_n_particles_mype
2521  end if
2522 
2523  nparticles = nparticles - destroy_n_particles
2524 
2525  do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
2526  particle(ipart)%igrid = -1
2527  particle(ipart)%ipe = -1
2528  if(time_advance) then
2529  write(*,*) particle(ipart)%self%time, ': particle',ipart,' has left at iteration ',it_particles,' on pe', mype
2530  write(*,*) particle(ipart)%self%time, ': particles remaining:',nparticles, '(total)', nparticles_on_mype-1, 'on pe', mype
2531  endif
2532  deallocate(particle(ipart)%self)
2533  deallocate(particle(ipart)%payload)
2535  end do
2536 
2537  end subroutine destroy_particles
2538 
2539  subroutine push_particle_into_particles_on_mype(ipart)
2540  implicit none
2541 
2542  integer, intent(in) :: ipart
2543 
2544  nparticles_on_mype = nparticles_on_mype + 1
2545  particles_on_mype(nparticles_on_mype) = ipart
2546 
2547  end subroutine push_particle_into_particles_on_mype
2548 
2549  subroutine pull_particle_from_particles_on_mype(ipart)
2550  implicit none
2551 
2552  integer, intent(in) :: ipart
2553  integer :: i
2554 
2555  do i=1,nparticles_on_mype
2556  if (particles_on_mype(i) == ipart) then
2557  particles_on_mype(i) = particles_on_mype(nparticles_on_mype)
2558  exit
2559  end if
2560  end do
2561 
2562  nparticles_on_mype = nparticles_on_mype - 1
2563 
2564  end subroutine pull_particle_from_particles_on_mype
2565 
2566 end module mod_particle_base