MPI-AMRVAC 3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_particle_base.t
Go to the documentation of this file.
1 !> Module with shared functionality for all the particle movers
3 use mod_global_parameters, only: name_len, std_len
5 use mod_random
7 use mod_comm_lib, only: mpistop
8
9 !> Normalization factor for velocity in the integrator
10 double precision :: integrator_velocity_factor(3)
11 !> Time limit of particles
12 double precision :: tmax_particles
13 !> Minimum time of all particles
14 double precision :: min_particle_time
15 !> Time interval to save particles
16 double precision :: dtsave_particles
17 !> If positive, a constant time step for the particles
18 double precision :: const_dt_particles
19 !> Particle CFL safety factor
20 double precision :: particles_cfl
21 !> Time to write next particle output
22 double precision :: t_next_output
23 !> Resistivity
24 double precision :: particles_eta, particles_etah
25 double precision :: dtheta
26 !> Particle downsampling factor in CSV output
27 integer :: downsample_particles
28 !> Maximum number of particles
29 integer :: nparticleshi
30 !> Maximum number of particles in one processor
31 integer :: nparticles_per_cpu_hi
32 !> Number of default payload variables for a particle
33 integer :: ndefpayload
34 !> Number of user-defined payload variables for a particle
35 integer :: nusrpayload
36 !> Number of total payload variables for a particle
37 integer :: npayload
38 !> Number of variables for grid field
39 integer :: ngridvars
40 !> Number of particles
41 integer :: num_particles
42 !> Identity number and total number of particles
43 integer :: nparticles
44 !> Iteration number of particles
45 integer :: it_particles
46 !> Output count for ensembles of destroyed particles
48
49 ! these two save the list of neighboring cpus:
50 integer, allocatable :: ipe_neighbor(:)
51 integer :: npe_neighbors
52
53 integer :: type_particle
54 !> set the current igrid for the particle integrator:
55 integer :: igrid_working
56 !> set the current ipart for the particle integrator:
57 integer :: ipart_working
58
59 integer, parameter :: unitparticles=15
60
61 !> Array of identity numbers of particles in current processor
62 integer, dimension(:), allocatable :: particles_on_mype
63 !> Array of identity numbers of active particles in current processor
64 integer, dimension(:), allocatable :: particles_active_on_mype
65 !> Number of particles in current processor
67 !> Number of active particles in current processor
69 !> Integrator to be used for particles
70 integer :: integrator
71
72 !> Variable index for magnetic field
73 integer, allocatable :: bp(:)
74 !> Variable index for electric field
75 integer, allocatable :: ep(:)
76 !> Variable index for fluid velocity
77 integer, allocatable :: vp(:)
78 !> Variable index for current
79 integer, allocatable :: jp(:)
80 !> Variable index for density
81 integer :: rhop
82 !> Use a relativistic particle mover?
83 logical :: relativistic
84 !> Whether to write individual particle output (followed particles)
85 logical :: write_individual
86 !> Whether to write ensemble output
87 logical :: write_ensemble
88 !> Whether to write particle snapshots
89 logical :: write_snapshot
90 logical :: losses
91 !> String describing the particle interpolation type
92 character(len=name_len) :: interp_type_particles = ""
93 !> String describing the particle physics type
94 character(len=name_len) :: physics_type_particles = ""
95 !> String describing the particle integrator type
96 character(len=name_len) :: integrator_type_particles = ""
97 !> Header string used in CSV files
98 character(len=400) :: csv_header
99 !> Format string used in CSV files
100 character(len=60) :: csv_format
101
102 type particle_ptr
103 type(particle_t), pointer :: self
104 !> extra information carried by the particle
105 double precision, allocatable :: payload(:)
106 integer :: igrid, ipe
107 end type particle_ptr
108
109 type particle_t
110 !> follow the history of the particle
111 logical :: follow
112 !> identity number
113 integer :: index
114 !> charge
115 double precision :: q
116 !> mass
117 double precision :: m
118 !> time
119 double precision :: time
120 !> time step
121 double precision :: dt
122 !> coordinates
123 double precision :: x(3)
124 !> velocity, momentum, or special ones
125 double precision :: u(3)
126 end type particle_t
127
128 ! Array containing all particles
129 type(particle_ptr), dimension(:), allocatable :: particle
130
131 ! The pseudo-random number generator
132 type(rng_t) :: rng
133
134 ! Pointers for user-defined stuff
135 procedure(sub_fill_gridvars), pointer :: particles_fill_gridvars => null()
136 procedure(sub_define_additional_gridvars), pointer :: particles_define_additional_gridvars => null()
137 procedure(sub_fill_additional_gridvars), pointer :: particles_fill_additional_gridvars => null()
138 procedure(sub_integrate), pointer :: particles_integrate => null()
139 procedure(fun_destroy), pointer :: usr_destroy_particle => null()
140
141 abstract interface
142
143 subroutine sub_fill_gridvars()
144 end subroutine sub_fill_gridvars
145
146 subroutine sub_define_additional_gridvars(ngridvars)
147 integer, intent(inout) :: ngridvars
148 end subroutine sub_define_additional_gridvars
149
150 subroutine sub_fill_additional_gridvars()
151 end subroutine sub_fill_additional_gridvars
152
153 subroutine sub_integrate(end_time)
154 double precision, intent(in) :: end_time
155 end subroutine sub_integrate
156
157 function fun_destroy(myparticle)
158 import particle_ptr
159 logical :: fun_destroy
160 type(particle_ptr), intent(in) :: myparticle
161 end function fun_destroy
162
163 end interface
164
165contains
166
167 !> Finalize the particles module
168 subroutine particles_finish()
170
172
173 ! Clean up particle type
174 call mpi_type_free(type_particle,ierrmpi)
175
176 ! Clean up variables
177 deallocate(particle)
178 deallocate(ipe_neighbor)
179 deallocate(particles_on_mype)
180 deallocate(particles_active_on_mype)
181 deallocate(gridvars)
182
183 end subroutine particles_finish
184
185 !> Read this module's parameters from a file
186 subroutine particles_params_read(files)
188 character(len=*), intent(in) :: files(:)
189 integer :: n
190
191 namelist /particles_list/ physics_type_particles, nparticleshi, nparticles_per_cpu_hi, &
198
199 do n = 1, size(files)
200 open(unitpar, file=trim(files(n)), status="old")
201 read(unitpar, particles_list, end=111)
202111 close(unitpar)
203 end do
204
205 end subroutine particles_params_read
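 ! Example (illustrative, not part of the source): the namelist above is read
 ! from the standard amrvac.par parameter files. A minimal &particles_list
 ! block could look as follows; apart from the three members visible in the
 ! truncated declaration above, the remaining entries are assumed to sit in
 ! its missing continuation lines, since they are given defaults in
 ! particle_base_init below.
 !
 !   &particles_list
 !     physics_type_particles = 'advect'
 !     nparticleshi           = 10000000
 !     num_particles          = 1000
 !     dtsave_particles       = 1.0d-2
 !     write_ensemble         = .true.
 !   /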
206
207 !> Give initial values to parameters
208 subroutine particle_base_init()
210 integer :: n
211 integer, parameter :: i8 = selected_int_kind(18)
212 integer(i8) :: seed(2)
213 integer :: idir
214 character(len=20) :: strdata
215
216 physics_type_particles = 'advect'
217 nparticleshi = 10000000
218 nparticles_per_cpu_hi = 1000000
219 num_particles = 1000
220 ndefpayload = 1
221 nusrpayload = 0
222 tmax_particles = bigdouble
223 dtsave_particles = bigdouble
224 const_dt_particles = -bigdouble
225 particles_cfl = 0.5
226 write_individual = .true.
227 write_ensemble = .true.
228 write_snapshot = .true.
230 relativistic = .false.
231 particles_eta = -1.d0
232 particles_etah = -1.d0
233 t_next_output = 0.0d0
234 dtheta = 2.0d0*dpi / 60.0d0
235 losses = .false.
236 nparticles = 0
237 it_particles = 0
243 interp_type_particles = 'default'
244
246
247 ! If sampling, ndefpayload = nw:
248 if (physics_type_particles == 'sample') ndefpayload = nw
249 ! Total number of payloads
250 npayload = ndefpayload + nusrpayload
251
252 ! initialise the random number generator
253 seed = [310952_i8, 8948923749821_i8]
254 call rng%set_seed(seed)
255
256 allocate(particle(1:nparticleshi))
257 allocate(ipe_neighbor(0:npe-1))
260
261 particles_on_mype(:) = 0
262 particles_active_on_mype(:) = 0
263
264 ! Generate header for CSV files
265 csv_header = ' time, dt, x1, x2, x3, u1, u2, u3,'
266 ! If sampling, name the payloads like the fluid quantities
267 if (physics_type_particles == 'sample') then
268 do n = 1, nw
269 write(strdata,"(a,a,a)") ' ', trim(prim_wnames(n)), ','
270 csv_header = trim(csv_header) // trim(strdata)
271 end do
272 else ! Otherwise, payloads are called pl01, pl02, ...
273 do n = 1, ndefpayload
274 write(strdata,"(a,i2.2,a)") ' pl', n, ','
275 csv_header = trim(csv_header) // trim(strdata)
276 end do
277 end if
278 ! User-defined payloads are called usrpl01, usrpl02, ...
279 if (nusrpayload > 0) then
280 do n = 1, nusrpayload
281 write(strdata,"(a,i2.2,a)") ' usrpl', n, ','
282 csv_header = trim(csv_header) // trim(strdata)
283 end do
284 end if
285 csv_header = trim(csv_header) // ' ipe, iteration, index'
286
287 ! Generate format string for CSV files
288 write(csv_format, '(A,I0,A,A)') '(', 8 + npayload, '(es14.6,", ")', &
289 'i8.7,", ",i11.10,", ",i8.7)'
290
291 end subroutine particle_base_init
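 ! Worked example (illustrative): with the defaults ndefpayload = 1 and
 ! nusrpayload = 0, so npayload = 1, the strings assembled above become
 !
 !   csv_header = ' time, dt, x1, x2, x3, u1, u2, u3, pl01, ipe, iteration, index'
 !   csv_format = '(9(es14.6,", ")i8.7,", ",i11.10,", ",i8.7)'
 !
 ! i.e. 8 + npayload real columns followed by the three integer columns
 ! written by output_particle.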
292
293 !> Initialize communicators for particles
294 subroutine init_particles_com()
296
297 integer :: oldtypes(0:7), offsets(0:7), blocklengths(0:7)
298
299 oldtypes(0) = mpi_logical
300 oldtypes(1) = mpi_integer
301 oldtypes(2:7) = mpi_double_precision
302
303 blocklengths(0:5)=1
304 blocklengths(6)=3
305 blocklengths(7)=3
306
307 offsets(0) = 0
308 offsets(1) = size_logical * blocklengths(0)
309 offsets(2) = offsets(1) + size_int * blocklengths(1)
310 offsets(3) = offsets(2) + size_double * blocklengths(2)
311 offsets(4) = offsets(3) + size_double * blocklengths(3)
312 offsets(5) = offsets(4) + size_double * blocklengths(4)
313 offsets(6) = offsets(5) + size_double * blocklengths(5)
314 offsets(7) = offsets(6) + size_double * blocklengths(6)
315
316 call mpi_type_struct(8,blocklengths,offsets,oldtypes,type_particle,ierrmpi)
317 call mpi_type_commit(type_particle,ierrmpi)
318
319 end subroutine init_particles_com
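 ! Layout sketch (illustrative) of the derived MPI datatype built above; the
 ! eight blocks follow the declaration order of particle_t, and the byte
 ! offsets assume the components are packed contiguously (mpi_type_struct
 ! does not verify this):
 !
 !   block 0: follow  (1 x mpi_logical)
 !   block 1: index   (1 x mpi_integer)
 !   block 2: q       (1 x mpi_double_precision)
 !   block 3: m       (1 x mpi_double_precision)
 !   block 4: time    (1 x mpi_double_precision)
 !   block 5: dt      (1 x mpi_double_precision)
 !   block 6: x(1:3)  (3 x mpi_double_precision)
 !   block 7: u(1:3)  (3 x mpi_double_precision)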
320
321 subroutine get_maxwellian_velocity(v, velocity)
322 double precision, intent(out) :: v(3)
323 double precision, intent(in) :: velocity
324 double precision :: vtmp(3), vnorm
325
326 vtmp(1) = velocity * rng%normal()
327 vtmp(2) = velocity * rng%normal()
328 vtmp(3) = velocity * rng%normal()
329 vnorm = norm2(vtmp)
330 v = rng%sphere(vnorm)
331 end subroutine get_maxwellian_velocity
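 ! Note (illustrative): the three normal deviates give a speed |vtmp| drawn
 ! from the Maxwellian speed distribution f(v) ~ v**2 exp(-v**2/(2*velocity**2)),
 ! and rng%sphere(vnorm) redistributes that speed over a random direction, so
 ! the returned v is an isotropic Maxwellian velocity with per-component rms
 ! value 'velocity'.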
332
333 subroutine get_uniform_pos(x)
335 double precision, intent(out) :: x(3)
336
337 call rng%unif_01_vec(x)
338
339 {^d&x(^d) = xprobmin^d + x(^d) * (xprobmax^d - xprobmin^d)\}
340 x(ndim+1:) = 0.0d0
341 end subroutine get_uniform_pos
342
343 !> Initialize grid variables for particles
344 subroutine init_gridvars()
346 use mod_timing
347
348 integer :: igrid, iigrid
349
350 tpartc_grid_0=mpi_wtime()
351
352 do iigrid=1,igridstail; igrid=igrids(iigrid);
353 allocate(gridvars(igrid)%w(ixg^t,1:ngridvars),gridvars(igrid)%wold(ixg^t,1:ngridvars))
354 gridvars(igrid)%w = 0.0d0
355 gridvars(igrid)%wold = 0.0d0
356 end do
357
359 if (associated(particles_define_additional_gridvars)) then
361 end if
362
363 tpartc_grid = tpartc_grid + (mpi_wtime()-tpartc_grid_0)
364
365 end subroutine init_gridvars
366
367 !> Deallocate grid variables for particles
368 subroutine finish_gridvars()
370
371 integer :: iigrid, igrid
372
373 do iigrid=1,igridstail; igrid=igrids(iigrid);
374 deallocate(gridvars(igrid)%w,gridvars(igrid)%wold)
375 end do
376
377 end subroutine finish_gridvars
378
379 !> This routine fills the particle grid variables with the default for mhd, i.e. only E and B
380 subroutine fill_gridvars_default()
383
384 double precision :: E(ixG^T, ndir)
385 double precision :: B(ixG^T, ndir)
386 integer :: igrid, iigrid
387
388 do iigrid=1,igridstail; igrid=igrids(iigrid);
389 if (associated(usr_particle_fields)) then ! FILL ONLY E AND B
390 call usr_particle_fields(ps(igrid)%w, ps(igrid)%x, e, b)
391 gridvars(igrid)%w(ixg^t,ep(:)) = e
392 gridvars(igrid)%w(ixg^t,bp(:)) = b
393 else
394 call fields_from_mhd(igrid, ps(igrid)%w, gridvars(igrid)%w)
395 end if
396 end do
397
398 end subroutine fill_gridvars_default
399
400 !> Determine fields from MHD variables
401 subroutine fields_from_mhd(igrid, w_mhd, w_part)
403 use mod_geometry
404 integer, intent(in) :: igrid
405 double precision, intent(in) :: w_mhd(ixG^T,nw)
406 double precision, intent(inout) :: w_part(ixG^T,ngridvars)
407
408 double precision :: current(ixG^T,7-2*ndir:3)
409 double precision :: w(ixG^T,1:nw)
410 integer :: idirmin, idir
411
412 ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
413
414 w(ixg^t,1:nw) = w_mhd(ixg^t,1:nw)
415 w_part(ixg^t,bp(1):bp(3)) = 0.0d0
416 w_part(ixg^t,ep(1):ep(3)) = 0.0d0
417
418 call phys_to_primitive(ixg^ll,ixg^ll,w,ps(igrid)%x)
419
420 ! fill with density:
421 w_part(ixg^t,rhop) = w(ixg^t,iw_rho)
422
423 ! fill with velocity:
424 w_part(ixg^t,vp(:)) = w(ixg^t,iw_mom(:))
425
426 ! fill with magnetic field:
427 if (b0field) then
428 do idir = 1, ndir
429 w_part(ixg^t,bp(idir))=w(ixg^t,iw_mag(idir))+ps(igrid)%B0(ixg^t,idir,0)
430 end do
431 else
432 w_part(ixg^t,bp(:)) = w(ixg^t,iw_mag(:))
433 end if
434
435 ! fill with current
436 current = zero
437 call particle_get_current(w,ixg^ll,ixg^llim^d^lsub1,idirmin,current)
438 w_part(ixg^t,jp(:)) = current(ixg^t,:)
439
440 ! fill with electric field
441 select case (coordinate)
443 w_part(ixg^t,ep(1)) = w_part(ixg^t,bp(2)) * &
444 w(ixg^t,iw_mom(3)) - w_part(ixg^t,bp(3)) * &
445 w(ixg^t,iw_mom(2)) + particles_eta * current(ixg^t,1)
446 w_part(ixg^t,ep(2)) = w_part(ixg^t,bp(3)) * &
447 w(ixg^t,iw_mom(1)) - w_part(ixg^t,bp(1)) * &
448 w(ixg^t,iw_mom(3)) + particles_eta * current(ixg^t,2)
449 w_part(ixg^t,ep(3)) = w_part(ixg^t,bp(1)) * &
450 w(ixg^t,iw_mom(2)) - w_part(ixg^t,bp(2)) * &
451 w(ixg^t,iw_mom(1)) + particles_eta * current(ixg^t,3)
452 case (cylindrical)
453 w_part(ixg^t,ep(r_)) = w_part(ixg^t,bp(phi_)) * &
454 w(ixg^t,iw_mom(z_)) - w_part(ixg^t,bp(z_)) * &
455 w(ixg^t,iw_mom(phi_)) + particles_eta * current(ixg^t,r_)
456 w_part(ixg^t,ep(phi_)) = w_part(ixg^t,bp(z_)) * &
457 w(ixg^t,iw_mom(r_)) - w_part(ixg^t,bp(r_)) * &
458 w(ixg^t,iw_mom(z_)) + particles_eta * current(ixg^t,phi_)
459 w_part(ixg^t,ep(z_)) = w_part(ixg^t,bp(r_)) * &
460 w(ixg^t,iw_mom(phi_)) - w_part(ixg^t,bp(phi_)) * &
461 w(ixg^t,iw_mom(r_)) + particles_eta * current(ixg^t,z_)
462 end select
463
464 ! Hall term
465 if (particles_etah > zero) then
466 select case (coordinate)
468 w_part(ixg^t,ep(1)) = w_part(ixg^t,ep(1)) + particles_etah/w(ixg^t,iw_rho) * &
469 (current(ixg^t,2) * w_part(ixg^t,bp(3)) - current(ixg^t,3) * w_part(ixg^t,bp(2)))
470 w_part(ixg^t,ep(2)) = w_part(ixg^t,ep(2)) + particles_etah/w(ixg^t,iw_rho) * &
471 (-current(ixg^t,1) * w_part(ixg^t,bp(3)) + current(ixg^t,3) * w_part(ixg^t,bp(1)))
472 w_part(ixg^t,ep(3)) = w_part(ixg^t,ep(3)) + particles_etah/w(ixg^t,iw_rho) * &
473 (current(ixg^t,1) * w_part(ixg^t,bp(2)) - current(ixg^t,2) * w_part(ixg^t,bp(1)))
474 case (cylindrical)
475 w_part(ixg^t,ep(r_)) = w_part(ixg^t,ep(r_)) + particles_etah/w(ixg^t,iw_rho) * &
476 (current(ixg^t,phi_) * w_part(ixg^t,bp(z_)) - current(ixg^t,z_) * w_part(ixg^t,bp(phi_)))
477 w_part(ixg^t,ep(phi_)) = w_part(ixg^t,ep(phi_)) + particles_etah/w(ixg^t,iw_rho) * &
478 (-current(ixg^t,r_) * w_part(ixg^t,bp(z_)) + current(ixg^t,z_) * w_part(ixg^t,bp(r_)))
479 w_part(ixg^t,ep(z_)) = w_part(ixg^t,ep(z_)) + particles_etah/w(ixg^t,iw_rho) * &
480 (current(ixg^t,r_) * w_part(ixg^t,bp(phi_)) - current(ixg^t,phi_) * w_part(ixg^t,bp(r_)))
481 end select
482 end if
483
484 end subroutine fields_from_mhd
485
486 !> Calculate idirmin and the idirmin:3 components of the common current array
487 !> make sure that dxlevel(^D) is set correctly.
488 subroutine particle_get_current(w,ixI^L,ixO^L,idirmin,current)
490 use mod_geometry
491
492 integer :: ixO^L, idirmin, ixI^L
493 double precision :: w(ixI^S,1:nw)
494 ! For ndir=2 only 3rd component of J can exist, ndir=1 is impossible for MHD
495 double precision :: current(ixI^S,7-2*ndir:3),bvec(ixI^S,1:ndir)
496 integer :: idir
497 integer :: idirmin0
498
499 idirmin0 = 7-2*ndir
500
501 if (b0field) then
502 do idir = 1, ndir
503 bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))+block%B0(ixi^s,idir,0)
504 end do
505 else
506 do idir = 1, ndir
507 bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))
508 end do
509 end if
510
511 call curlvector(bvec,ixi^l,ixo^l,current,idirmin,idirmin0,ndir)
512
513 end subroutine particle_get_current
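 ! Worked note (illustrative): the lower bound 7-2*ndir means that for ndir = 3
 ! all components current(:,1:3) are computed, while for ndir = 2 only
 ! current(:,3) exists, which in Cartesian coordinates is dB2/dx1 - dB1/dx2,
 ! consistent with the comment on the declaration above.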
514
515 !> update grid variables for particles
516 subroutine update_gridvars()
518 use mod_timing
519
520 integer :: igrid, iigrid
521
522 tpartc_grid_0=mpi_wtime()
523
524 do iigrid=1,igridstail; igrid=igrids(iigrid);
525 gridvars(igrid)%wold=gridvars(igrid)%w
526 end do
527
529 if (associated(particles_define_additional_gridvars)) then
531 end if
532
533 tpartc_grid = tpartc_grid + (mpi_wtime()-tpartc_grid_0)
534
535 end subroutine update_gridvars
536
537 !> Let particles evolve in time. The routine also handles grid variables and
538 !> output.
539 subroutine handle_particles()
541 use mod_timing
542
543 integer :: steps_taken, nparticles_left
544
545 tpartc0 = mpi_wtime()
546
548
549 if (time_advance) then
551 call update_gridvars()
552 else
554 end if
555 tpartc_com0=mpi_wtime()
557 tpartc_com=tpartc_com + (mpi_wtime()-tpartc_com0)
558
559
560 ! main integration loop
561 do
562 if (tmax_particles >= t_next_output) then
563 call advance_particles(t_next_output, steps_taken)
564 tpartc_io_0 = mpi_wtime()
566 if(convert .and. write_snapshot) then
567 if(mype==0) print*, "Writing particle output at time",t_next_output
569 end if
570 timeio_tot = timeio_tot+(mpi_wtime()-tpartc_io_0)
571 tpartc_io = tpartc_io+(mpi_wtime()-tpartc_io_0)
572
573 ! If we are using a constant time step, this prevents multiple outputs
574 ! at the same time
577 else
578 call advance_particles(tmax_particles, steps_taken)
579! call write_particle_output()
580 exit
581 end if
582
583 call mpi_allreduce(nparticles_on_mype, nparticles_left, 1, mpi_integer, &
584 mpi_sum, icomm, ierrmpi)
585 if (nparticles_left == 0 .and. convert) call mpistop('No particles left')
586
587 end do
588
589 tpartc = tpartc + (mpi_wtime() - tpartc0)
590
591 end subroutine handle_particles
592
593 !> Routine handles the particle evolution
594 subroutine advance_particles(end_time, steps_taken)
595 use mod_timing
597 double precision, intent(in) :: end_time !< Advance at most up to this time
598 integer, intent(out) :: steps_taken
599 integer :: n_active
600
601 steps_taken = 0
602
603 do
604 call select_active_particles(end_time)
605
606 ! Determine total number of active particles
607 call mpi_allreduce(nparticles_active_on_mype, n_active, 1, mpi_integer, &
608 mpi_sum, icomm, ierrmpi)
609
610 if (n_active == 0) exit
611
612 tpartc_int_0=mpi_wtime()
613 call particles_integrate(end_time)
614 steps_taken = steps_taken + 1
615 tpartc_int=tpartc_int+(mpi_wtime()-tpartc_int_0)
616
617 tpartc_com0=mpi_wtime()
618 call comm_particles()
619 tpartc_com=tpartc_com + (mpi_wtime()-tpartc_com0)
620
622 end do
623
624 end subroutine advance_particles
625
626 pure subroutine limit_dt_endtime(t_left, dt_p)
627 double precision, intent(in) :: t_left
628 double precision, intent(inout) :: dt_p
629 double precision :: n_steps
630 double precision, parameter :: eps = 1d-10
631
632 n_steps = t_left / dt_p
633
634 if (n_steps < 1+eps) then
635 ! Last step
636 dt_p = t_left
637 else if (n_steps < 2-eps) then
638 ! Divide time left over two steps, to prevent one tiny time step
639 dt_p = 0.5d0 * t_left
640 end if
641
642 end subroutine limit_dt_endtime
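 ! Worked example (illustrative): with dt_p = 0.4 and t_left = 0.7 the ratio
 ! n_steps = 1.75, so dt_p is reduced to 0.35 and the remaining interval is
 ! covered in two equal steps rather than one full step plus a tiny leftover;
 ! with t_left = 0.3 the first branch simply sets dt_p = 0.3 for the last step.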
643
644 ! Get a generic vector quantity stored on the grid at the particle position
645 ! NOTE: DON'T GET E OR B IN THIS WAY, USE FUNCTIONS BELOW!
646 subroutine get_vec(ix,igrid,x,tloc,vec)
649
650 integer,intent(in) :: ix(ndir) !< Indices in gridvars
651 integer,intent(in) :: igrid
652 double precision,dimension(3), intent(in) :: x
653 double precision, intent(in) :: tloc
654 double precision,dimension(ndir), intent(out) :: vec
655 double precision,dimension(ndir) :: vec1, vec2
656 double precision :: td, bb
657 integer :: ic^D,idir
658
659 if (associated(usr_particle_analytic)) then
660 call usr_particle_analytic(ix, x, tloc, vec)
661 else
662 do idir=1,ndir
663 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,ix(idir)), &
664 ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
665 end do
666
667 if (time_advance) then
668 do idir=1,ndir
669 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,ix(idir)), &
670 ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
671 end do
672
673 ! Interpolate in time
674 td = (tloc - global_time) / dt
675 vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td
676 end if
677
678 ! Fabio June 2024: Renormalize vector by its grid-evaluated norm
679 if (interp_type_particles=='renormalized') then
680 if (time_advance) then
681 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
682 call interpolate_var(igrid,ixg^ll,ixm^ll, &
683 sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1)*td &
684 +sum(gridvars(igrid)%wold(ixg^t,ix(:))**2,dim=ndim+1)*(1.d0-td), &
685 ps(igrid)%x(ixg^t,1:ndim),x,bb)
686 end if
687 else
688 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
689 call interpolate_var(igrid,ixg^ll,ixm^ll, &
690 sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1), &
691 ps(igrid)%x(ixg^t,1:ndim),x,bb)
692 end if
693 vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
694 end if
695 end if
696 end if
697
698 end subroutine get_vec
699
700 subroutine get_bfield(igrid,x,tloc,b)
703
704 integer,intent(in) :: igrid
705 double precision,dimension(3), intent(in) :: x
706 double precision, intent(in) :: tloc
707 double precision,dimension(ndir), intent(out) :: b
708 double precision,dimension(ndir) :: vec1, vec2, vec
709 double precision :: td, bb
710 integer :: ic^D,idir
711
712 if (associated(usr_particle_analytic)) then
713 call usr_particle_analytic(bp, x, tloc, vec)
714 else if (associated(usr_particle_fields)) then
715 call get_vec(bp, igrid, x, tloc, vec)
716 else
717 do idir=1,ndir
718 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,bp(idir)), &
719 ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
720 end do
721
722 if (time_advance) then
723 do idir=1,ndir
724 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,bp(idir)), &
725 ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
726 end do
727
728 ! Interpolate in time
729 td = (tloc - global_time) / dt
730 vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td
731 end if
732
733 ! Fabio June 2024: Renormalize vector by its grid-evaluated norm
734 if (interp_type_particles=='renormalized') then
735 if (time_advance) then
736 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
737 call interpolate_var(igrid,ixg^ll,ixm^ll, &
738 sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1)*td &
739 +sum(gridvars(igrid)%wold(ixg^t,bp(:))**2,dim=ndim+1)*(1.d0-td), &
740 ps(igrid)%x(ixg^t,1:ndim),x,bb)
741 end if
742 else
743 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
744 call interpolate_var(igrid,ixg^ll,ixm^ll, &
745 sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1), &
746 ps(igrid)%x(ixg^t,1:ndim),x,bb)
747 end if
748 vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
749 end if
750 end if
751
752 end if
753
754 b(:) = vec(:)
755
756 end subroutine get_bfield
757
758 ! Shorthand for getting specifically the E field at the particle position
759 subroutine get_efield(igrid,x,tloc,e)
762 use mod_geometry
763
764 integer,intent(in) :: igrid
765 double precision,dimension(3), intent(in) :: x
766 double precision, intent(in) :: tloc
767 double precision,dimension(ndir), intent(out) :: e
768 double precision,dimension(ndir) :: vec1, vec2, vec
769 double precision,dimension(ndir) :: vfluid, b
770 double precision,dimension(ndir) :: current
771 double precision :: rho = 1.d0, rho2 = 1.d0
772 double precision :: td
773 integer :: ic^D,idir
774
775 current(:) = 0.d0
776
777 if (associated(usr_particle_analytic)) then
778 call usr_particle_analytic(ep, x, tloc, vec)
779 else if (associated(usr_particle_fields)) then
780 call get_vec(ep, igrid, x, tloc, vec)
781 else
782 call get_bfield(igrid, x, tloc, b)
783 call get_vec(vp, igrid, x, tloc, vfluid)
784 if (particles_eta > zero) then
785 call get_vec(jp, igrid, x, tloc, current)
786 end if
787 if (particles_etah > zero) then
788 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,rhop),ps(igrid)%x(ixg^t,1:ndim),x,rho)
789 if (time_advance) then
790 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,rhop),ps(igrid)%x(ixg^t,1:ndim),x,rho2)
791 td = (tloc - global_time) / dt
792 rho = rho2 * (1.0d0 - td) + rho * td
793 end if
794 end if
795
796 ! Build E
797 select case (coordinate)
799 vec(1) = -vfluid(2)*b(3)+vfluid(3)*b(2) + particles_eta*current(1) + particles_etah/rho*(current(2)*b(3) - current(3)*b(2))
800 vec(2) = vfluid(1)*b(3)-vfluid(3)*b(1) + particles_eta*current(2) + particles_etah/rho*(-current(1)*b(3) + current(3)*b(1))
801 vec(3) = -vfluid(1)*b(2)+vfluid(2)*b(1) + particles_eta*current(3) + particles_etah/rho*(current(1)*b(2) - current(2)*b(1))
802 case (cylindrical)
803 vec(r_) = -vfluid(phi_)*b(z_)+vfluid(z_)*b(phi_) + particles_eta*current(r_) + particles_etah/rho*(current(phi_)*b(z_) - current(z_)*b(phi_))
804 vec(phi_) = vfluid(r_)*b(z_)-vfluid(z_)*b(r_) + particles_eta*current(phi_) + particles_etah/rho*(-current(r_)*b(z_) + current(z_)*b(r_))
805 vec(z_) = -vfluid(r_)*b(phi_)+vfluid(phi_)*b(r_) + particles_eta*current(z_) + particles_etah/rho*(current(r_)*b(phi_) - current(phi_)*b(r_))
806 end select
807 end if
808
809 e(:) = vec(:)
810
811 end subroutine get_efield
812
813 !> Get Lorentz factor from relativistic momentum
814 pure subroutine get_lfac(u,lfac)
816 double precision,dimension(3), intent(in) :: u
817 double precision, intent(out) :: lfac
818
819 if (relativistic) then
820 lfac = sqrt(1.0d0 + sum(u(:)**2)/c_norm**2)
821 else
822 lfac = 1.0d0
823 end if
824
825 end subroutine get_lfac
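 ! Note (illustrative): in the relativistic case u is interpreted as the
 ! normalized momentum gamma*v, so gamma = sqrt(1 + |u|**2/c_norm**2); for
 ! example |u| = c_norm gives lfac = sqrt(2). Otherwise lfac = 1.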
826
827 !> Get Lorentz factor from velocity
828 pure subroutine get_lfac_from_velocity(v,lfac)
830 double precision,dimension(3), intent(in) :: v
831 double precision, intent(out) :: lfac
832
833 if (relativistic) then
834 lfac = 1.0d0 / sqrt(1.0d0 - sum(v(:)**2)/c_norm**2)
835 else
836 lfac = 1.0d0
837 end if
838
839 end subroutine get_lfac_from_velocity
840
841 subroutine cross(a,b,c)
843 use mod_geometry
844 double precision, dimension(ndir), intent(in) :: a,b
845 double precision, dimension(ndir), intent(out) :: c
846
847 ! ndir needs to be three for this to work!!!
848 if(ndir==3) then
849 select case(coordinate)
851 c(1) = a(2)*b(3) - a(3)*b(2)
852 c(2) = a(3)*b(1) - a(1)*b(3)
853 c(3) = a(1)*b(2) - a(2)*b(1)
854 case (cylindrical)
855 c(r_) = a(phi_)*b(z_) - a(z_)*b(phi_)
856 c(phi_) = a(z_)*b(r_) - a(r_)*b(z_)
857 c(z_) = a(r_)*b(phi_) - a(phi_)*b(r_)
858 case default
859 call mpistop('geometry not implemented in cross(a,b,c)')
860 end select
861 else
862 call mpistop("cross in particles needs to be run with three components!")
863 end if
864
865 end subroutine cross
866
867 subroutine interpolate_var(igrid,ixI^L,ixO^L,gf,x,xloc,gfloc)
869
870 integer, intent(in) :: igrid,ixI^L, ixO^L
871 double precision, intent(in) :: gf(ixI^S)
872 double precision, intent(in) :: x(ixI^S,1:ndim)
873 double precision, intent(in) :: xloc(1:3)
874 double precision, intent(out) :: gfloc
875 double precision :: xd^D, myq, dx1
876 {^iftwod
877 double precision :: c00, c10
878 }
879 {^ifthreed
880 double precision :: c0, c1, c00, c10, c01, c11
881 }
882 integer :: ic^D, ic1^D, ic2^D, idir
883
884 {
885 if(stretch_type(^d)==stretch_uni) then
886 ! uniform stretch from xprobmin
887 ic^d = ceiling(dlog((xloc(^d)-xprobmin^d)*(qstretch(ps(igrid)%level,^d)-one)/&
888 dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
889 (node(pig^d_,igrid)-1)*block_nx^d+nghostcells
890 else if(stretch_type(^d)==stretch_symm) then
891 ! symmetric stretch about 0.5*(xprobmin+xprobmax)
892 if(xloc(^d)<xprobmin^d+xstretch^d) then
893 ! stretch to left from xprobmin+xstretch
894 ic^d = block_nx^d-(int(dlog((xprobmin^d+xstretch^d-xloc(^d))*(qstretch(ps(igrid)%level,^d)-one)/&
895 dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
896 (nstretchedblocks(ps(igrid)%level,^d)/2-node(pig^d_,igrid))*block_nx^d)+nghostcells
897 else if(xloc(^d)>xprobmax^d-xstretch^d) then
898 ! stretch to right from xprobmax-xstretch
899 ic^d = ceiling(dlog((xloc(^d)-xprobmax^d+xstretch^d)*(qstretch(ps(igrid)%level,^d)-one)/&
900 dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
901 ((node(pig^d_,igrid)-1+nstretchedblocks(ps(igrid)%level,^d)/2)*block_nx^d-&
902 domain_nx^d*2**(ps(igrid)%level-1))+nghostcells
903 else
904 ! possible non-stretched central part
905 ic^d = int((xloc(^d)-rnode(rpxmin^d_,igrid))/rnode(rpdx^d_,igrid)) + 1 + nghostcells
906 end if
907 else
908 ! no stretch
909 ic^d = int((xloc(^d)-rnode(rpxmin^d_,igrid))/rnode(rpdx^d_,igrid)) + 1 + nghostcells
910 end if
911 \}
912
913 ! linear interpolation:
914 {
915 if (x({ic^dd},^d) .lt. xloc(^d)) then
916 ic1^d = ic^d
917 else
918 ic1^d = ic^d -1
919 end if
920 ic2^d = ic1^d + 1
921 \}
922
923 {^d&
924 if(ic1^d.lt.ixglo^d+1 .or. ic2^d.gt.ixghi^d-1) then
925 print *, 'direction: ',^d
926 print *, 'position: ',xloc(1:ndim)
927 print *, 'indices:', ic1^d,ic2^d
928 call mpistop('Trying to interpolate from out of grid!')
929 end if
930 \}
931
932 {^ifoned
933 xd1 = (xloc(1)-x(ic11,1)) / (x(ic21,1) - x(ic11,1))
934 gfloc = gf(ic11) * (1.0d0 - xd1) + gf(ic21) * xd1
935 }
936 {^iftwod
937 xd1 = (xloc(1)-x(ic11,ic12,1)) / (x(ic21,ic12,1) - x(ic11,ic12,1))
938 xd2 = (xloc(2)-x(ic11,ic12,2)) / (x(ic11,ic22,2) - x(ic11,ic12,2))
939 c00 = gf(ic11,ic12) * (1.0d0 - xd1) + gf(ic21,ic12) * xd1
940 c10 = gf(ic11,ic22) * (1.0d0 - xd1) + gf(ic21,ic22) * xd1
941 gfloc = c00 * (1.0d0 - xd2) + c10 * xd2
942 }
943 {^ifthreed
944 xd1 = (xloc(1)-x(ic11,ic12,ic13,1)) / (x(ic21,ic12,ic13,1) - x(ic11,ic12,ic13,1))
945 xd2 = (xloc(2)-x(ic11,ic12,ic13,2)) / (x(ic11,ic22,ic13,2) - x(ic11,ic12,ic13,2))
946 xd3 = (xloc(3)-x(ic11,ic12,ic13,3)) / (x(ic11,ic12,ic23,3) - x(ic11,ic12,ic13,3))
947
948 c00 = gf(ic11,ic12,ic13) * (1.0d0 - xd1) + gf(ic21,ic12,ic13) * xd1
949 c10 = gf(ic11,ic22,ic13) * (1.0d0 - xd1) + gf(ic21,ic22,ic13) * xd1
950 c01 = gf(ic11,ic12,ic23) * (1.0d0 - xd1) + gf(ic21,ic12,ic23) * xd1
951 c11 = gf(ic11,ic22,ic23) * (1.0d0 - xd1) + gf(ic21,ic22,ic23) * xd1
952
953 c0 = c00 * (1.0d0 - xd2) + c10 * xd2
954 c1 = c01 * (1.0d0 - xd2) + c11 * xd2
955
956 gfloc = c0 * (1.0d0 - xd3) + c1 * xd3
957 }
958
959 end subroutine interpolate_var
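 ! Worked example (illustrative, 1D): if the particle sits at xloc(1) = 0.3
 ! between cell centres x(ic11,1) = 0.25 and x(ic21,1) = 0.35, then xd1 = 0.5
 ! and gfloc is the average of gf(ic11) and gf(ic21); the 2D and 3D branches
 ! above apply the same linear weights dimension by dimension (bi-/trilinear
 ! interpolation).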
960
961 subroutine time_spent_on_particles()
962 use mod_timing
964
965 double precision :: tpartc_avg, tpartc_int_avg, tpartc_io_avg, tpartc_com_avg, tpartc_grid_avg
966
967 call mpi_reduce(tpartc,tpartc_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
968 call mpi_reduce(tpartc_int,tpartc_int_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
969 call mpi_reduce(tpartc_io,tpartc_io_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
970 call mpi_reduce(tpartc_com,tpartc_com_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
971 call mpi_reduce(tpartc_grid,tpartc_grid_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
972
973 if (mype ==0) then
974 tpartc_avg = tpartc_avg/dble(npe)
975 tpartc_int_avg = tpartc_int_avg/dble(npe)
976 tpartc_io_avg = tpartc_io_avg/dble(npe)
977 tpartc_com_avg = tpartc_com_avg/dble(npe)
978 tpartc_grid_avg = tpartc_grid_avg/dble(npe)
979 write(*,'(a,f12.3,a)')' Particle handling took : ',tpartc,' sec'
980 write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc/(timeloop+smalldouble),' %'
981 write(*,'(a,f12.3,a)')' Particle IO took : ',tpartc_io_avg,' sec'
982 write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_io_avg/(timeloop+smalldouble),' %'
983 write(*,'(a,f12.3,a)')' Particle COM took : ',tpartc_com_avg,' sec'
984 write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_com_avg/(timeloop+smalldouble),' %'
985 write(*,'(a,f12.3,a)')' Particle integration took : ',tpartc_int_avg,' sec'
986 write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_int_avg/(timeloop+smalldouble),' %'
987 write(*,'(a,f12.3,a)')' Particle init grid took : ',tpartc_grid_avg,' sec'
988 write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_grid_avg/(timeloop+smalldouble),' %'
989 end if
990
991 end subroutine time_spent_on_particles
992
993 subroutine read_particles_snapshot(file_exists)
995
996 logical,intent(out) :: file_exists
997 character(len=std_len) :: filename
998 integer :: mynpayload, mynparticles, pos, index_latest
999
1000 ! some initialisations:
1002 mynparticles = 0
1003 nparticles = 0
1004 it_particles = 0
1005
1006 ! open the snapshot file on the headnode
1007 file_exists=.false.
1008 if (mype == 0) then
1009 ! Strip restart_from_filename of the ending
1010 pos = scan(restart_from_file, '.dat', back=.true.)
1011 do index_latest=9999,0,-1
1012 write(filename,"(a,a,i4.4,a)") trim(restart_from_file(1:pos-8)),'_particles',index_latest,'.dat'
1013 INQUIRE(file=filename, exist=file_exists)
1014 if(file_exists) exit
1015 end do
1016 if (.not. file_exists) then
1017 write(*,*) 'WARNING: File '//trim(filename)//' with particle data does not exist.'
1018 write(*,*) 'Initialising particles from user or default routines'
1019 else
1020 open(unit=unitparticles,file=filename,form='unformatted',action='read',status='unknown',access='stream')
1021 read(unitparticles) nparticles,it_particles,mynpayload
1022 if (mynpayload .ne. npayload) &
1023 call mpistop('npayload in restart file does not match npayload in mod_particles')
1024 end if
1025 end if
1026
1027 call mpi_bcast(file_exists,1,mpi_logical,0,icomm,ierrmpi)
1028 if (.not. file_exists) return
1029
1030 call mpi_bcast(nparticles,1,mpi_integer,0,icomm,ierrmpi)
1031 call mpi_bcast(it_particles,1,mpi_integer,0,icomm,ierrmpi)
1032
1033 do while (mynparticles .lt. nparticles)
1034 if (mype == 0) then
1036 .and. mynparticles .lt. nparticles)
1038 mynparticles = mynparticles + 1
1039 end do
1040 end if ! mype==0
1041 call mpi_bcast(mynparticles,1,mpi_integer,0,icomm,ierrmpi)
1043 end do
1044
1045 if (mype == 0) close(unit=unitparticles)
1046
1048
1049 end subroutine read_particles_snapshot
1050
1051 subroutine read_from_snapshot()
1052
1053 integer :: index,igrid_particle,ipe_particle
1054
1055 read(unitparticles) index
1056 allocate(particle(index)%self)
1057 allocate(particle(index)%payload(1:npayload))
1058 particle(index)%self%index = index
1059
1060 read(unitparticles) particle(index)%self%follow
1061 read(unitparticles) particle(index)%self%q
1062 read(unitparticles) particle(index)%self%m
1063 read(unitparticles) particle(index)%self%time
1064 read(unitparticles) particle(index)%self%dt
1065 read(unitparticles) particle(index)%self%x
1066 read(unitparticles) particle(index)%self%u
1067 read(unitparticles) particle(index)%payload(1:npayload)
1068
1069! if (particle(index)%self%follow) print*, 'follow index:', index
1070
1071 call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
1072 particle(index)%igrid = igrid_particle
1073 particle(index)%ipe = ipe_particle
1074
1076
1077 end subroutine read_from_snapshot
1078
1081
1082 character(len=std_len) :: filename
1083! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1084! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
1085! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1086! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1087 type(particle_t), allocatable, dimension(:) :: send_particles
1088 type(particle_t), allocatable, dimension(:) :: receive_particles
1089 double precision, allocatable, dimension(:,:) :: send_payload
1090 double precision, allocatable, dimension(:,:) :: receive_payload
1091 integer :: status(MPI_STATUS_SIZE)
1092 integer,dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1093 integer :: ipe, ipart, iipart, send_n_particles_for_output
1094 logical,save :: file_exists=.false.
1095 integer :: snapshotnumber
1096
1097 if (time_advance) then
1098 snapshotnumber = snapshotnext
1099 else
1100 snapshotnumber = nint(t_next_output/dtsave_particles)
1101 end if
1102
1103 receive_n_particles_for_output_from_ipe(:) = 0
1104
1105 ! open the snapshot file on the headnode
1106 if (mype .eq. 0) then
1107 write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotnumber,'.dat'
1108 INQUIRE(file=filename, exist=file_exists)
1109 if (.not. file_exists) then
1110 open(unit=unitparticles,file=filename,form='unformatted',status='new',access='stream')
1111 else
1112 open(unit=unitparticles,file=filename,form='unformatted',status='replace',access='stream')
1113 end if
1115 end if
1116 if (npe==1) then
1117 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1118 call append_to_snapshot(particle(ipart)%self,particle(ipart)%payload)
1119 end do
1120 return
1121 end if
1122
1123 if (mype .ne. 0) then
1124 call mpi_send(nparticles_on_mype,1,mpi_integer,0,mype,icomm,ierrmpi)
1125 ! fill the send_buffer
1126 send_n_particles_for_output = nparticles_on_mype
1127 allocate(send_particles(1:send_n_particles_for_output))
1128 allocate(send_payload(1:npayload,1:send_n_particles_for_output))
1129 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1130 send_particles(iipart) = particle(ipart)%self
1131 send_payload(1:npayload,iipart) = particle(ipart)%payload(1:npayload)
1132 end do
1133 else
1134 ! get number of particles on other ipes
1135 do ipe=1,npe-1
1136 call mpi_recv(receive_n_particles_for_output_from_ipe(ipe),1,mpi_integer,ipe,ipe,icomm,status,ierrmpi)
1137 end do
1138 end if
1139
1140 if (mype .ne. 0) then
1141 call mpi_send(send_particles,send_n_particles_for_output,type_particle,0,mype,icomm,ierrmpi)
1142 call mpi_send(send_payload,npayload*send_n_particles_for_output,mpi_double_precision,0,mype,icomm,ierrmpi)
1143 deallocate(send_particles)
1144 deallocate(send_payload)
1145 end if
1146
1147 if (mype==0) then
1148 ! first output own particles (already in receive buffer)
1149 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1150 call append_to_snapshot(particle(ipart)%self,particle(ipart)%payload)
1151 end do
1152 ! now output the particles sent from the other ipes
1153 do ipe=1,npe-1
1154 allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
1155 allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
1156 call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1157 call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1158 do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1159 call append_to_snapshot(receive_particles(ipart),receive_payload(1:npayload,ipart))
1160 end do ! ipart
1161 deallocate(receive_particles)
1162 deallocate(receive_payload)
1163 end do ! ipe
1164 close(unit=unitparticles)
1165 end if ! mype == 0
1166
1167 end subroutine write_particles_snapshot
1168
1169 subroutine append_to_snapshot(myparticle,mypayload)
1170
1171 type(particle_t),intent(in) :: myparticle
1172 double precision :: mypayload(1:npayload)
1173
1174 write(unitparticles) myparticle%index
1175 write(unitparticles) myparticle%follow
1176 write(unitparticles) myparticle%q
1177 write(unitparticles) myparticle%m
1178 write(unitparticles) myparticle%time
1179 write(unitparticles) myparticle%dt
1180 write(unitparticles) myparticle%x
1181 write(unitparticles) myparticle%u
1182 write(unitparticles) mypayload(1:npayload)
1183
1184 end subroutine append_to_snapshot
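 ! Record layout (for reference): with access='stream', each particle is
 ! appended as the byte sequence
 !   index, follow, q, m, time, dt, x(1:3), u(1:3), payload(1:npayload)
 ! which is exactly the order read back by read_from_snapshot above.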
1185
1188
1189 integer :: iipart, ipart, icomp
1190 character(len=std_len) :: filename
1191 character(len=1024) :: line, strdata
1192
1193 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1194 if (particle(ipart)%self%follow) then
1195 filename = make_particle_filename(particle(ipart)%self%time,particle(ipart)%self%index,'individual')
1196 open(unit=unitparticles,file=filename,status='replace')
1197 line=''
1198 do icomp=1, npayload
1199 write(strdata,"(a,i2.2,a)") 'payload',icomp,','
1200 line = trim(line)//trim(strdata)
1201 end do
1202 write(unitparticles,"(a,a,a)") 'time,dt,x1,x2,x3,u1,u2,u3,',trim(line),'ipe,iteration,index'
1203 close(unit=unitparticles)
1204 end if
1205 end do
1206
1207 end subroutine init_particles_output
1208
1209 subroutine select_active_particles(end_time)
1211 double precision, intent(in) :: end_time
1212
1213 double precision :: t_min_mype
1214 integer :: ipart, iipart
1215 logical :: activate
1216
1217 t_min_mype = bigdouble
1219
1220 do iipart = 1, nparticles_on_mype
1221 ipart = particles_on_mype(iipart);
1222 activate = (particle(ipart)%self%time < end_time)
1223 t_min_mype = min(t_min_mype, particle(ipart)%self%time)
1224
1225 if (activate) then
1226 nparticles_active_on_mype = nparticles_active_on_mype + 1
1227 particles_active_on_mype(nparticles_active_on_mype) = ipart
1228 end if
1229 end do
1230
1231 call mpi_allreduce(t_min_mype, min_particle_time, 1, mpi_double_precision, &
1232 mpi_min, icomm, ierrmpi)
1233
1234 end subroutine select_active_particles
1235
1236 subroutine locate_particle(index,igrid_particle,ipe_particle)
1237 ! given the particle's unique index, tell me on which cpu and igrid it is
1238 ! returns -1,-1 if particle was not found
1240
1241 integer, intent(in) :: index
1242 integer, intent(out) :: igrid_particle, ipe_particle
1243 integer :: iipart,ipart,ipe_has_particle,ipe
1244 integer,dimension(0:1) :: buff
1245 logical :: has_particle(0:npe-1)
1246
1247 has_particle(:) = .false.
1248 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1249 if (particle(ipart)%self%index == index) then
1250 has_particle(mype) = .true.
1251 exit
1252 end if
1253 end do
1254
1255 if (has_particle(mype)) then
1256 buff(0) = particle(ipart)%igrid
1257 buff(1) = mype
1258 end if
1259
1260 if (npe>0) call mpi_allgather(has_particle(mype),1,mpi_logical,has_particle,1,mpi_logical,icomm,ierrmpi)
1261
1262 ipe_has_particle = -1
1263 do ipe=0,npe-1
1264 if (has_particle(ipe) .eqv. .true.) then
1265 ipe_has_particle = ipe
1266 exit
1267 end if
1268 end do
1269
1270 if (ipe_has_particle .ne. -1) then
1271 if (npe>0) call mpi_bcast(buff,2,mpi_integer,ipe_has_particle,icomm,ierrmpi)
1272 igrid_particle=buff(0)
1273 ipe_particle = buff(1)
1274 else
1275 igrid_particle=-1
1276 ipe_particle = -1
1277 end if
1278
1279 end subroutine locate_particle
1280
1281 subroutine find_particle_ipe(x,igrid_particle,ipe_particle)
1283 use mod_slice, only: get_igslice
1285
1286 double precision, intent(in) :: x(3)
1287 integer, intent(out) :: igrid_particle, ipe_particle
1288 integer :: ig(ndir,nlevelshi), ig_lvl(nlevelshi)
1289 integer :: idim, ic(ndim)
1290 type(tree_node_ptr) :: branch
1291
1292 ! first check if the particle is in the domain
1293 if (.not. particle_in_domain(x)) then
1294 igrid_particle = -1
1295 ipe_particle = -1
1296 return
1297 end if
1298
1299 ! get the index on each level
1300 do idim = 1, ndim
1301 call get_igslice(idim,x(idim), ig_lvl)
1302 ig(idim,:) = ig_lvl
1303 end do
1304
1305 ! traverse the tree until leaf is found
1306 branch=tree_root({ig(^d,1)})
1307 do while (.not.branch%node%leaf)
1308 {ic(^d)=ig(^d,branch%node%level+1) - 2 * branch%node%ig^d +2\}
1309 branch%node => branch%node%child({ic(^d)})%node
1310 end do
1311
1312 igrid_particle = branch%node%igrid
1313 ipe_particle = branch%node%ipe
1314
1315 end subroutine find_particle_ipe
1316
1317 !> Check if particle is inside computational domain
1318 logical function particle_in_domain(x)
1320 double precision, dimension(ndim), intent(in) :: x
1321
1322 particle_in_domain = all(x >= [ xprobmin^d ]) .and. &
1323 all(x < [ xprobmax^d ]) ! Jannis: as before < instead of <= here, necessary?
1324 end function particle_in_domain
1325
1326 !> Quick check if particle is still in igrid
1327 logical function particle_in_igrid(ipart, igrid)
1329 integer, intent(in) :: igrid, ipart
1330 double precision :: x(ndim), grid_rmin(ndim), grid_rmax(ndim)
1331
1332 ! First check if the igrid is still there
1333 if (.not. allocated(ps(igrid)%w)) then
1334 particle_in_igrid = .false.
1335 else
1336 grid_rmin = [ {rnode(rpxmin^d_,igrid)} ]
1337 grid_rmax = [ {rnode(rpxmax^d_,igrid)} ]
1338 x = particle(ipart)%self%x(1:ndim)
1339 particle_in_igrid = all(x >= grid_rmin) .and. all(x < grid_rmax)
1340 end if
1341 end function particle_in_igrid
1342
1343 subroutine set_neighbor_ipe()
1345
1346 integer :: igrid, iigrid,ipe
1347 integer :: my_neighbor_type, i^D
1348 logical :: ipe_is_neighbor(0:npe-1)
1349
1350 ipe_is_neighbor(:) = .false.
1351 do iigrid=1,igridstail; igrid=igrids(iigrid);
1352
1353 {do i^db=-1,1\}
1354 if (i^d==0|.and.) then
1355 cycle
1356 end if
1357 my_neighbor_type=neighbor_type(i^d,igrid)
1358
1359 select case (my_neighbor_type)
1360 case (neighbor_boundary) ! boundary
1361 ! do nothing
1362 case (neighbor_coarse) ! fine-coarse
1363 call ipe_fc(i^d,igrid,ipe_is_neighbor)
1364 case (neighbor_sibling) ! same level
1365 call ipe_srl(i^d,igrid,ipe_is_neighbor)
1366 case (neighbor_fine) ! coarse-fine
1367 call ipe_cf(i^d,igrid,ipe_is_neighbor)
1368 end select
1369
1370 {end do\}
1371
1372 end do
1373
1374 ! remove self as neighbor
1375 ipe_is_neighbor(mype) = .false.
1376
1377 ! Now make the list of neighbors
1378 npe_neighbors = 0
1379 do ipe=0,npe-1
1380 if (ipe_is_neighbor(ipe)) then
1381 npe_neighbors = npe_neighbors + 1
1382 ipe_neighbor(npe_neighbors) = ipe
1383 end if
1384 end do ! ipe
1385
1386 end subroutine set_neighbor_ipe
1387
1388 subroutine ipe_fc(i^D,igrid,ipe_is_neighbor)
1390 integer, intent(in) :: i^D, igrid
1391 logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1392
1393 ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1394
1395 end subroutine ipe_fc
1396
1397 subroutine ipe_srl(i^D,igrid,ipe_is_neighbor)
1399 integer, intent(in) :: i^D, igrid
1400 logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1401
1402 ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1403
1404 end subroutine ipe_srl
1405
1406 subroutine ipe_cf(i^D,igrid,ipe_is_neighbor)
1408 integer, intent(in) :: i^D, igrid
1409 logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1410 integer :: ic^D, inc^D
1411
1412 {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1413 inc^db=2*i^db+ic^db
1414 \}
1415 ipe_is_neighbor( neighbor_child(2,inc^d,igrid) ) = .true.
1416
1417 {end do\}
1418
1419 end subroutine ipe_cf
1420
1423
1424 character(len=std_len) :: filename
1425! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1426! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1427 type(particle_t), allocatable, dimension(:) :: send_particles
1428 double precision, allocatable, dimension(:,:) :: send_payload
1429 double precision :: tout
1430 integer :: send_n_particles_for_output, cc
1431 integer :: nout
1432 integer :: ipart,iipart
1433
1434 if (write_individual) then
1435 call output_individual()
1436 end if
1437
1438 if (write_ensemble) then
1439 send_n_particles_for_output = 0
1440
1441 do iipart=1,nparticles_on_mype
1442 ipart=particles_on_mype(iipart);
1443
1444 ! have to send particle to rank zero for output
1445 if (mod(particle(ipart)%self%index,downsample_particles) .eq. 0) then
1446 send_n_particles_for_output = send_n_particles_for_output + 1
1447 end if
1448 end do
1449
1450 allocate(send_particles(1:send_n_particles_for_output))
1451 allocate(send_payload(npayload,1:send_n_particles_for_output))
1452 cc = 1
1453 do iipart=1,nparticles_on_mype
1454 ipart=particles_on_mype(iipart);
1455
1456 ! have to send particle to rank zero for output
1457 if (mod(particle(ipart)%self%index,downsample_particles) .eq. 0) then
1458 send_particles(cc) = particle(ipart)%self
1459 send_payload(1:npayload,cc) = particle(ipart)%payload(1:npayload)
1460 cc=cc+1
1461 end if
1462 end do
1463
1464 call output_ensemble(send_n_particles_for_output,send_particles, &
1465 send_payload,'ensemble')
1466 deallocate(send_particles)
1467 deallocate(send_payload)
1468 end if
1469
1470 end subroutine write_particle_output
1471
1472 character(len=128) function make_particle_filename(tout,index,type)
1474
1475 character(len=*), intent(in) :: type
1476 double precision, intent(in) :: tout
1477 integer, intent(in) :: index
1478 integer :: nout, mysnapshot
1479
1480 select case(type)
1481 case ('ensemble')
1482 nout = nint(tout / dtsave_particles)
1483 write(make_particle_filename,"(a,a,i6.6,a)") trim(base_filename),'_ensemble',nout,'.csv'
1484 case ('destroy')
1485 write(make_particle_filename,"(a,a)") trim(base_filename),'_destroyed.csv'
1486 case ('individual')
1487 write(make_particle_filename,"(a,a,i6.6,a)") trim(base_filename),'_particle',index,'.csv'
1488 end select
1489
1490 end function make_particle_filename
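 ! Example filenames (illustrative, with a hypothetical base_filename of
 ! 'output/test' and dtsave_particles = 0.1):
 !   type 'ensemble',   tout = 1.2  ->  output/test_ensemble000012.csv
 !   type 'individual', index = 42  ->  output/test_particle000042.csv
 !   type 'destroy'                 ->  output/test_destroyed.csv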
1491
1492 subroutine output_ensemble(send_n_particles_for_output,send_particles,send_payload,typefile)
1494
1495 integer, intent(in) :: send_n_particles_for_output
1496 type(particle_t), dimension(send_n_particles_for_output), intent(in) :: send_particles
1497 double precision, dimension(npayload,send_n_particles_for_output), intent(in) :: send_payload
1498 character(len=*), intent(in) :: typefile
1499 character(len=std_len) :: filename
1500 type(particle_t), allocatable, dimension(:) :: receive_particles
1501 double precision, allocatable, dimension(:,:) :: receive_payload
1502 integer :: status(MPI_STATUS_SIZE)
1503 integer,dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1504 integer :: ipe, ipart, nout
1505 logical :: file_exists
1506
1507 receive_n_particles_for_output_from_ipe(:) = 0
1508
1509 call mpi_allgather(send_n_particles_for_output, 1, mpi_integer, &
1510 receive_n_particles_for_output_from_ipe, 1, mpi_integer, icomm, ierrmpi)
1511
1512 ! If there are no particles to be written, skip the output
1513 if (sum(receive_n_particles_for_output_from_ipe(:)) == 0) return
1514
1515 if (mype > 0) then
1516 call mpi_send(send_particles,send_n_particles_for_output,type_particle,0,mype,icomm,ierrmpi)
1517 call mpi_send(send_payload,npayload*send_n_particles_for_output,mpi_double_precision,0,mype,icomm,ierrmpi)
1518 else
1519 ! Create file and write header
1520 if(typefile=='destroy') then ! Destroyed file
1521 write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1522 trim(typefile) // '.csv'
1524 inquire(file=filename, exist=file_exists)
1525
1526 if (.not. file_exists) then
1527 open(unit=unitparticles, file=filename)
1528 write(unitparticles,"(a)") trim(csv_header)
1529 else
1530 open(unit=unitparticles, file=filename, access='append')
1531 end if
1532
1533 else ! Ensemble file
1534 write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1535 trim(typefile) // '_', nint(t_next_output/dtsave_particles),'.csv'
1536 open(unit=unitparticles,file=filename)
1537 write(unitparticles,"(a)") trim(csv_header)
1538 end if
1539
1540 ! Write own particles
1541 do ipart=1,send_n_particles_for_output
1542 call output_particle(send_particles(ipart),send_payload(1:npayload,ipart),0,unitparticles)
1543 end do
1544
1545 ! Write particles from other tasks
1546 do ipe=1,npe-1
1547 allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
1548 allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
1549 call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1550 call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1551 do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1552 call output_particle(receive_particles(ipart),receive_payload(1:npayload,ipart),ipe,unitparticles)
1553 end do ! ipart
1554 deallocate(receive_particles)
1555 deallocate(receive_payload)
1556 end do ! ipe
1557
1558 close(unitparticles)
1559 end if
1560
1561 end subroutine output_ensemble
1562
1565 character(len=std_len) :: filename
1566 integer :: ipart,iipart,ipe
1567 logical :: file_exists
1568
1569 do iipart=1,nparticles_on_mype
1570 ipart=particles_on_mype(iipart)
1571
1572 ! should the particle be dumped?
1573 if (particle(ipart)%self%follow) then
1574 write(filename,"(a,a,i6.6,a)") trim(base_filename), '_particle_', &
1575 particle(ipart)%self%index, '.csv'
1576 inquire(file=filename, exist=file_exists)
1577
1578 ! Create empty file and write header on first iteration, or when the
1579 ! file does not exist yet
1580 if (it_particles == 0 .or. .not. file_exists) then
1581 open(unit=unitparticles, file=filename)
1582 write(unitparticles,"(a)") trim(csv_header)
1583 else
1584 open(unit=unitparticles, file=filename, access='append')
1585 end if
1586
1587 call output_particle(particle(ipart)%self,&
1588 particle(ipart)%payload(1:npayload),mype,unitparticles)
1589
1590 close(unitparticles)
1591 end if
1592 end do
1593
1594 end subroutine output_individual
1595
1596 subroutine output_particle(myparticle,payload,ipe,file_unit)
1598 use mod_geometry
1599
1600 type(particle_t),intent(in) :: myparticle
1601 double precision, intent(in) :: payload(npayload)
1602 integer, intent(in) :: ipe
1603 integer, intent(in) :: file_unit
1604 double precision :: x(3), u(3)
1605 integer :: icomp
1606
1607 ! convert and normalize the position
1608 if (nocartesian) then
1609 select case(coordinate)
1611 x = myparticle%x*length_convert_factor
1612 case(cylindrical)
1613 x(r_) = myparticle%x(r_)*length_convert_factor
1614 x(z_) = myparticle%x(z_)*length_convert_factor
1615 x(phi_) = myparticle%x(phi_)
1616 case(spherical)
1617 x(:) = myparticle%x(:)
1618 x(1) = x(1)*length_convert_factor
1619 end select
1620 else
1621 call partcoord_to_cartesian(myparticle%x,x)
1622 x(:) = x(:)*length_convert_factor
1623 end if
1624
1625 u = myparticle%u(1:3) * integrator_velocity_factor
1626
1627 write(file_unit, csv_format) myparticle%time, myparticle%dt, x(1:3), &
1628 u, payload(1:npayload), ipe, it_particles, &
1629 myparticle%index
1630
1631 end subroutine output_particle
1632
1633 !> Convert position to Cartesian coordinates
1634 subroutine partcoord_to_cartesian(xp,xpcart)
1635 ! conversion of particle coordinate from cylindrical/spherical to cartesian coordinates done here
1637 use mod_geometry
1638
1639 double precision, intent(in) :: xp(1:3)
1640 double precision, intent(out) :: xpcart(1:3)
1641
1642 select case (coordinate)
1644 xpcart(1:3) = xp(1:3)
1645 case (cylindrical)
1646 xpcart(1) = xp(r_)*cos(xp(phi_))
1647 xpcart(2) = xp(r_)*sin(xp(phi_))
1648 xpcart(3) = xp(z_)
1649 case (spherical)
1650 xpcart(1) = xp(1)*sin(xp(2))*cos(xp(3))
1651 {^iftwod
1652 xpcart(2) = xp(1)*cos(xp(2))
1653 xpcart(3) = xp(1)*sin(xp(2))*sin(xp(3))}
1654 {^ifthreed
1655 xpcart(2) = xp(1)*sin(xp(2))*sin(xp(3))
1656 xpcart(3) = xp(1)*cos(xp(2))}
1657 case default
1658 write(*,*) "No converter for coordinate=",coordinate
1659 end select
1660
1661 end subroutine partcoord_to_cartesian
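 ! Worked example (illustrative, 3D): a spherical position xp = (r, theta, phi)
 ! = (2, dpi/2, 0) maps to xpcart = (2, 0, 0), and a cylindrical position with
 ! xp(r_) = 1, xp(phi_) = dpi, xp(z_) = 5 maps to xpcart = (-1, 0, 5); in the
 ! 2D spherical branch the second and third Cartesian components are
 ! interchanged, as shown above.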
1662
1663 !> Convert to noncartesian coordinates
1664 subroutine partcoord_from_cartesian(xp,xpcart)
1665 ! conversion of particle coordinate from Cartesian to cylindrical/spherical coordinates done here
1667 use mod_geometry
1668
1669 double precision, intent(in) :: xpcart(1:3)
1670 double precision, intent(out) :: xp(1:3)
1671 double precision :: xx, yy, zz
1672 double precision :: rr, tth, pphi
1673
1674 select case (coordinate)
1676 xp(1:3)=xpcart(1:3)
1677 case (cylindrical)
1678 xp(r_) = sqrt(xpcart(1)**2 + xpcart(2)**2)
1679 xp(phi_) = atan2(xpcart(2),xpcart(1))
1680 if (xp(phi_) .lt. 0.d0) xp(phi_) = 2.0d0*dpi + xp(phi_)
1681 xp(z_) = xpcart(3)
1682 case (spherical)
1683 xx = xpcart(1)
1684 {^iftwod
1685 yy = xpcart(3)
1686 zz = xpcart(2)}
1687 {^ifthreed
1688 yy = xpcart(2)
1689 zz = xpcart(3)}
1690 rr = sqrt(xx**2 + yy**2 + zz**2)
1691 tth = acos(zz/rr)
1692 if (yy > 0.d0) then
1693 pphi = acos(xx/sqrt(xx**2+yy**2))
1694 else
1695 pphi = 2.d0*dpi - acos(xx/sqrt(xx**2+yy**2))
1696 endif
1697 xp(1:3) = (/ rr, tth, pphi /)
1698 case default
1699 write(*,*) "No converter for coordinate=",coordinate
1700 end select
1701
1702 end subroutine partcoord_from_cartesian
1703
1704 !> Convert vector to Cartesian coordinates
1705 subroutine partvec_to_cartesian(xp,up,upcart)
1706 ! conversion of a generic vector from cylindrical/spherical to cartesian coordinates done here
1708 use mod_geometry
1709
1710 double precision, intent(in) :: xp(1:3),up(1:3)
1711 double precision, intent(out) :: upcart(1:3)
1712
1713 select case (coordinate)
1715 upcart(1:3)=up(1:3)
1716 case (cylindrical)
1717 upcart(1) = cos(xp(phi_)) * up(r_) - sin(xp(phi_)) * up(phi_)
1718 upcart(2) = cos(xp(phi_)) * up(phi_) + sin(xp(phi_)) * up(r_)
1719 upcart(3) = up(z_)
1720 case (spherical)
1721 upcart(1) = up(1)*sin(xp(2))*cos(xp(3)) + up(2)*cos(xp(2))*cos(xp(3)) - up(3)*sin(xp(3))
1722 {^iftwod
1723 upcart(2) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))
1724 upcart(3) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))}
1725 {^ifthreed
1726 upcart(2) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))
1727 upcart(3) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))}
1728 case default
1729 write(*,*) "No converter for coordinate=",coordinate
1730 end select
1731
1732 end subroutine partvec_to_cartesian
1733
1734 !> Convert vector from Cartesian coordinates
1735 subroutine partvec_from_cartesian(xp,up,upcart)
1736 ! conversion of a generic vector to cylindrical/spherical from cartesian coordinates done here
1738 use mod_geometry
1739
1740 double precision, intent(in) :: xp(1:3),upcart(1:3)
1741 double precision, intent(out) :: up(1:3)
1742
1743 select case (coordinate)
1745 up(1:3) = upcart(1:3)
1746 case (cylindrical)
1747 up(r_) = cos(xp(phi_)) * upcart(1) + sin(xp(phi_)) * upcart(2)
1748 up(phi_) = -sin(xp(phi_)) * upcart(1) + cos(xp(phi_)) * upcart(2)
1749 up(z_) = upcart(3)
1750 case (spherical)
1751 {^iftwod
1752 up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(3)*sin(xp(2))*sin(xp(3)) + upcart(2)*cos(xp(2))
1753 up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(3)*cos(xp(2))*sin(xp(3)) - upcart(2)*sin(xp(2))
1754 up(3) = -upcart(1)*sin(xp(3)) + upcart(3)*cos(xp(3))}
1755 {^ifthreed
1756 up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(2)*sin(xp(2))*sin(xp(3)) + upcart(3)*cos(xp(2))
1757 up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(2)*cos(xp(2))*sin(xp(3)) - upcart(3)*sin(xp(2))
1758 up(3) = -upcart(1)*sin(xp(3)) + upcart(2)*cos(xp(3))}
1759 case default
1760 write(*,*) "No converter for coordinate=",coordinate
1761 end select
1762
1763 end subroutine partvec_from_cartesian
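For orientation, the following standalone sketch (not from the AMRVAC sources; the program name and all numbers are made up) repeats the algebra of the 3D branches of partvec_to_cartesian and partvec_from_cartesian and checks that the round trip reproduces the input, which it must because (e_r, e_theta, e_phi) is an orthonormal basis.

! Standalone sketch: round-trip check of the 3D spherical vector transform.
program check_vec_roundtrip
  implicit none
  integer, parameter :: dp = kind(1.0d0)
  real(dp) :: xp(3), up(3), upcart(3), uback(3)
  real(dp) :: st, ct, sp, cp

  xp = [2.0_dp, 0.7_dp, 1.3_dp]     ! (r, theta, phi); r is irrelevant here
  up = [0.3_dp, -1.1_dp, 0.8_dp]    ! (u_r, u_theta, u_phi)

  st = sin(xp(2)); ct = cos(xp(2)); sp = sin(xp(3)); cp = cos(xp(3))

  ! forward: same algebra as the 3D branch of partvec_to_cartesian
  upcart(1) = up(1)*st*cp + up(2)*ct*cp - up(3)*sp
  upcart(2) = up(1)*st*sp + up(2)*ct*sp + up(3)*cp
  upcart(3) = up(1)*ct    - up(2)*st

  ! backward: same algebra as the 3D branch of partvec_from_cartesian
  uback(1) =  upcart(1)*st*cp + upcart(2)*st*sp + upcart(3)*ct
  uback(2) =  upcart(1)*ct*cp + upcart(2)*ct*sp - upcart(3)*st
  uback(3) = -upcart(1)*sp    + upcart(2)*cp

  print *, 'max round-trip error:', maxval(abs(uback - up))   ! ~1e-16
end program check_vec_roundtrip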
1764
1765 !> Fix particle position when crossing the 0,2pi boundary in noncartesian coordinates
1766 subroutine fix_phi_crossing(xp,igrid)
1767 use mod_global_parameters
1768 use mod_geometry
1769
1770 integer, intent(in) :: igrid
1771 double precision, intent(inout) :: xp(1:3)
1772 double precision :: xpmod(1:3)
1773 integer :: phiind
1774
1775 select case (coordinate)
1776 case (cartesian,cartesian_stretched,cartesian_expansion)
1777 ! Do nothing
1778 return
1779 case (cylindrical)
1780 phiind = phi_
1781 case (spherical)
1782 phiind = 3
1783 end select
1784
1785 xpmod = xp
1786
1787 ! Fix phi-boundary crossing
1788 ! Case 1: particle has crossed from ~2pi to ~0
1789 xpmod(phiind) = xp(phiind) + 2.d0*dpi
1790 if ((.not. point_in_igrid_ghostc(xp,igrid,0)) &
1791 .and. (point_in_igrid_ghostc(xpmod,igrid,0))) then
1792 xp(phiind) = xpmod(phiind)
1793 return
1794 end if
1795 ! Case 2: particle has crossed from ~0 to ~2pi
1796 xpmod(phiind) = xp(phiind) - 2.d0*dpi
1797 if ((.not. point_in_igrid_ghostc(xp,igrid,0)) &
1798 .and. (point_in_igrid_ghostc(xpmod,igrid,0))) then
1799 xp(phiind) = xpmod(phiind)
1800 return
1801 end if
1802
1803 end subroutine fix_phi_crossing
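A minimal standalone sketch of the decision made above (not from the AMRVAC sources; the program name and all numbers are made up): for a block whose phi-range, ghost cells included, is roughly [1.9*pi - 0.1, 2*pi + 0.1), a particle whose phi has been mapped to about 0.05 by atan2 lies outside the block, but shifting it by +2*pi puts it back inside (case 1 above).

! Standalone sketch: wrapping phi back into a block near the 2*pi boundary.
program check_phi_wrap
  implicit none
  integer, parameter :: dp = kind(1.0d0)
  real(dp), parameter :: pi = acos(-1.0_dp)
  real(dp) :: phimin, phimax, phi

  phimin = 1.9_dp*pi - 0.1_dp       ! block phi-range including ghost zone
  phimax = 2.0_dp*pi + 0.1_dp
  phi    = 0.05_dp                  ! atan2 result after crossing 2*pi

  if (phi < phimin .or. phi >= phimax) then
    if (phi + 2.0_dp*pi >= phimin .and. phi + 2.0_dp*pi < phimax) then
      phi = phi + 2.0_dp*pi         ! case 1: crossed from ~2*pi to ~0
    else if (phi - 2.0_dp*pi >= phimin .and. phi - 2.0_dp*pi < phimax) then
      phi = phi - 2.0_dp*pi         ! case 2: crossed from ~0 to ~2*pi
    end if
  end if

  print *, 'wrapped phi / pi =', phi/pi   ! ~2.016
end program check_phi_wrap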
1804
1805 !> Quick check if particle coordinate is inside igrid
1806 !> (ghost cells included, except the last ngh)
1807 logical function point_in_igrid_ghostc(x, igrid, ngh)
1808 use mod_global_parameters
1809 use mod_geometry
1810 integer, intent(in) :: igrid, ngh
1811 double precision, intent(in) :: x(ndim)
1812 double precision :: grid_rmin(ndim), grid_rmax(ndim)
1813
1814 ! TODO: this does not work with stretched coordinates!
1815 ! TODO: but phi should not be stretched in principle...
1816 grid_rmin = [ {rnode(rpxmin^d_,igrid)-rnode(rpdx^d_,igrid)*(dble(nghostcells-ngh)-0.5d0)} ]
1817 grid_rmax = [ {rnode(rpxmax^d_,igrid)+rnode(rpdx^d_,igrid)*(dble(nghostcells-ngh)-0.5d0)} ]
1818 point_in_igrid_ghostc = all(x >= grid_rmin) .and. all(x < grid_rmax)
1819 end function point_in_igrid_ghostc
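A 1D sketch of the acceptance interval computed above (not from the AMRVAC sources; nghostcells, ngh, dx and the block extent are made up): with nghostcells=2, ngh=0, dx=0.1 and a block spanning [0,1], points are accepted on [-0.15, 1.15), i.e. out to the centre of the outermost ghost cell on each side.

! Standalone sketch: 1D acceptance bounds of point_in_igrid_ghostc.
program check_ghost_bounds
  implicit none
  integer, parameter  :: dp = kind(1.0d0)
  integer, parameter  :: nghostcells = 2, ngh = 0
  real(dp), parameter :: xmin = 0.0_dp, xmax = 1.0_dp, dx = 0.1_dp
  real(dp) :: rmin, rmax

  rmin = xmin - dx*(dble(nghostcells-ngh) - 0.5_dp)   ! -0.15
  rmax = xmax + dx*(dble(nghostcells-ngh) - 0.5_dp)   !  1.15

  print *, 'accepted x in [', rmin, ',', rmax, ')'
  print *, 'x = -0.10 accepted? ', (-0.10_dp >= rmin .and. -0.10_dp < rmax)   ! T
  print *, 'x = -0.20 accepted? ', (-0.20_dp >= rmin .and. -0.20_dp < rmax)   ! F
end program check_ghost_bounds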
1820
1821 subroutine comm_particles()
1822
1823 use mod_global_parameters
1824
1825 integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
1826 integer :: index
1827 integer :: tag_send, tag_receive, send_buff, rcv_buff
1828 integer :: status(MPI_STATUS_SIZE)
1829 integer, dimension(0:npe-1) :: send_n_particles_to_ipe
1830 integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
1831 type(particle_t), allocatable, dimension(:,:) :: send_particles
1832 type(particle_t), allocatable, dimension(:,:) :: receive_particles
1833 double precision, allocatable, dimension(:,:,:) :: send_payload
1834 double precision, allocatable, dimension(:,:,:) :: receive_payload
1835 integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
1836 integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
1837 integer :: destroy_n_particles_mype
1838 integer, allocatable, dimension(:) :: sndrqst, rcvrqst
1839 integer, allocatable, dimension(:) :: sndrqst_payload, rcvrqst_payload
1840 integer :: isnd, ircv
1841 integer, parameter :: maxneighbors=56 ! maximum case: coarse-fine in 3D
1842 logical :: BC_applied
1843 !-----------------------------------------------------------------------------
1844
1845 send_n_particles_to_ipe(:) = 0
1846 receive_n_particles_from_ipe(:) = 0
1847 destroy_n_particles_mype = 0
1848
1849 allocate( particle_index_to_be_sent_to_ipe(nparticles_per_cpu_hi,0:npe-1) )
1850 allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
1851 allocate( sndrqst_payload(1:npe-1), rcvrqst_payload(1:npe-1) )
1852 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1853 sndrqst_payload = mpi_request_null; rcvrqst_payload = mpi_request_null;
1854
1855 ! check if and where to send each particle, destroy if necessary
1856 ! !$OMP PARALLEL DO PRIVATE(ipart)
1857 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1858
1859 ! first check if the particle should be destroyed (user defined criterion)
1860 if (associated(usr_destroy_particle)) then
1861 if (usr_destroy_particle(particle(ipart))) then
1862 ! !$OMP CRITICAL(destroy)
1863 destroy_n_particles_mype = destroy_n_particles_mype + 1
1864 particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1865 ! !$OMP END CRITICAL(destroy)
1866 cycle
1867 end if
1868 end if
1869
1870 ! is my particle still in the same igrid?
1871 if (.not.particle_in_igrid(ipart,particle(ipart)%igrid)) then
1872 call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
1873
1874 ! destroy particle if out of domain (signalled by return value of -1)
1875 if (igrid_particle == -1 )then
1876 call apply_periodb(particle(ipart)%self,igrid_particle,ipe_particle,bc_applied)
1877 if (.not. bc_applied .or. igrid_particle == -1) then
1878 ! !$OMP CRITICAL(destroy2)
1879 destroy_n_particles_mype = destroy_n_particles_mype + 1
1880 particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1881 ! !$OMP END CRITICAL(destroy2)
1882 cycle
1883 end if
1884 end if
1885
1886 ! particle still present
1887 particle(ipart)%igrid = igrid_particle
1888 particle(ipart)%ipe = ipe_particle
1889
1890 ! if we have more than one core, is it on another cpu?
1891 if (npe .gt. 1 .and. particle(ipart)%ipe .ne. mype) then
1892 ! !$OMP CRITICAL(send)
1893 send_n_particles_to_ipe(ipe_particle) = &
1894 send_n_particles_to_ipe(ipe_particle) + 1
1895 if (send_n_particles_to_ipe(ipe_particle) .gt. nparticles_per_cpu_hi) &
1896 call mpistop('too many particles, increase nparticles_per_cpu_hi')
1897 particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
1898 ! !$OMP END CRITICAL(send)
1899 end if ! ipe_particle
1900
1901 end if ! particle_in_grid
1902
1903 end do ! ipart
1904 ! !$OMP END PARALLEL DO
1905
1906 call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
1907
1908 ! get out when only one core:
1909 if (npe == 1) return
1910
1911 ! communicate amount of particles to be sent/received
1912 isnd = 0; ircv = 0;
1913 do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1914 tag_send = mype * npe + ipe
1915 tag_receive = ipe * npe + mype
1916 isnd = isnd + 1; ircv = ircv + 1
1917 call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1918 call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1919 end do
1920
1921 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1922 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1923 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1924
1925 ! allocate the send and receive buffers,
1926 ! If this is still not good enough, consider optimizing using lists of lists:
1927 allocate(send_particles(maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
1928 allocate(receive_particles(maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
1929 allocate(send_payload(npayload,maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
1930 allocate(receive_payload(npayload,maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
1931
1932 isnd = 0; ircv = 0;
1933 ! send and receive the data of the particles
1934 do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1935 tag_send = mype * npe + ipe
1936 tag_receive = ipe * npe + mype
1937
1938 ! should i send some particles to ipe?
1939 if (send_n_particles_to_ipe(ipe) .gt. 0) then
1940
1941 ! create the send buffer
1942 do ipart = 1, send_n_particles_to_ipe(ipe)
1943 send_particles(ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
1944 send_payload(1:npayload,ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
1945 end do ! ipart
1946 isnd = isnd + 1
1947 call mpi_isend(send_particles(:,iipe),send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1948 call mpi_isend(send_payload(:,:,iipe),npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,sndrqst_payload(isnd),ierrmpi)
1949
1950 end if ! send .gt. 0
1951
1952 ! should i receive some particles from ipe?
1953 if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1954
1955 ircv = ircv + 1
1956 call mpi_irecv(receive_particles(:,iipe),receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1957 call mpi_irecv(receive_payload(:,:,iipe),npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,rcvrqst_payload(ircv),ierrmpi)
1958
1959 end if ! receive .gt. 0
1960 end do ! ipe
1961
1962 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1963 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1964 call mpi_waitall(isnd,sndrqst_payload,mpi_statuses_ignore,ierrmpi)
1965 call mpi_waitall(ircv,rcvrqst_payload,mpi_statuses_ignore,ierrmpi)
1966
1967 do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1968 do ipart = 1, send_n_particles_to_ipe(ipe)
1969 deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
1970 deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
1971 call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
1972 end do ! ipart
1973 end do
1974
1975 do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1976 if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1977 do ipart = 1, receive_n_particles_from_ipe(ipe)
1978
1979 index = receive_particles(ipart,iipe)%index
1980 call push_particle_into_particles_on_mype(index)
1981 !if (.not. allocated(particle(index)%self)) &
1982 allocate(particle(index)%self)
1983 particle(index)%self = receive_particles(ipart,iipe)
1984 !if (.not. allocated(particle(index)%payload)) &
1985 allocate(particle(index)%payload(npayload))
1986 particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart,iipe)
1987
1988 ! since we don't send the igrid, need to re-locate it
1989 call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
1990 particle(index)%igrid = igrid_particle
1991 particle(index)%ipe = ipe_particle
1992
1993 end do ! ipart
1994
1995 end if ! receive .gt. 0
1996 end do ! ipe
1997
1998 end subroutine comm_particles
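A minimal standalone MPI program (not from the AMRVAC sources; the program name and the dummy counts are made up) showing the count handshake used above: each rank posts nonblocking sends and receives of one integer per partner and then waits on both sets of requests. The tag mype*npe + ipe is unique for each ordered (sender, receiver) pair, so counts from different partners cannot be confused. For simplicity this sketch talks to every other rank, whereas the routine above only talks to grid neighbours.

! Standalone sketch: nonblocking exchange of per-partner counts with unique tags.
program count_handshake
  use mpi
  implicit none
  integer :: mype, npe, ierrmpi, ipe, isnd, ircv
  integer, allocatable :: nsend(:), nrecv(:), sndrqst(:), rcvrqst(:)

  call MPI_INIT(ierrmpi)
  call MPI_COMM_RANK(MPI_COMM_WORLD, mype, ierrmpi)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, npe, ierrmpi)

  allocate(nsend(0:npe-1), nrecv(0:npe-1), sndrqst(npe), rcvrqst(npe))
  nsend = mype + 1                          ! dummy counts to announce
  nrecv = 0                                 ! self entry stays 0
  sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL

  isnd = 0; ircv = 0
  do ipe = 0, npe-1
    if (ipe == mype) cycle
    isnd = isnd + 1; ircv = ircv + 1
    call MPI_ISEND(nsend(ipe), 1, MPI_INTEGER, ipe, mype*npe+ipe, &
                   MPI_COMM_WORLD, sndrqst(isnd), ierrmpi)
    call MPI_IRECV(nrecv(ipe), 1, MPI_INTEGER, ipe, ipe*npe+mype, &
                   MPI_COMM_WORLD, rcvrqst(ircv), ierrmpi)
  end do
  call MPI_WAITALL(isnd, sndrqst, MPI_STATUSES_IGNORE, ierrmpi)
  call MPI_WAITALL(ircv, rcvrqst, MPI_STATUSES_IGNORE, ierrmpi)

  print *, 'rank', mype, 'received counts:', nrecv
  call MPI_FINALIZE(ierrmpi)
end program count_handshake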
1999 !=============================================================================
2000 subroutine comm_particles_global()
2001
2002 ! unlike comm_particles, this routine does not destroy particles
2003
2005
2006 integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2007 integer :: index
2008 integer :: tag_send, tag_receive, send_buff, rcv_buff
2009 integer :: status(MPI_STATUS_SIZE)
2010 integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2011 integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2012! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2013! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2014! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2015! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2016 type(particle_t), allocatable, dimension(:) :: send_particles
2017 type(particle_t), allocatable, dimension(:) :: receive_particles
2018 double precision, allocatable, dimension(:,:) :: send_payload
2019 double precision, allocatable, dimension(:,:) :: receive_payload
2020 integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
2021 integer, allocatable, dimension(:) :: sndrqst, rcvrqst
2022 integer :: isnd, ircv
2023 logical :: BC_applied
2024 !-----------------------------------------------------------------------------
2025 send_n_particles_to_ipe(:) = 0
2026 receive_n_particles_from_ipe(:) = 0
2027
2028 allocate( particle_index_to_be_sent_to_ipe(nparticles_per_cpu_hi,0:npe-1) )
2029 allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
2030 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
2031
2032 ! check if and where to send each particle, relocate all of them (in case grid has changed)
2033 ! !$OMP PARALLEL DO PRIVATE(ipart)
2034 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2035
2036 call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2037
2038 particle(ipart)%igrid = igrid_particle
2039 particle(ipart)%ipe = ipe_particle
2040
2041 ! if we have more than one core, is it on another cpu?
2042 if (particle(ipart)%ipe .ne. mype) then
2043 ! !$OMP CRITICAL(send)
2044 send_n_particles_to_ipe(ipe_particle) = &
2045 send_n_particles_to_ipe(ipe_particle) + 1
2046 if (send_n_particles_to_ipe(ipe_particle) .gt. nparticles_per_cpu_hi) &
2047 call mpistop('G: too many particles, increase nparticles_per_cpu_hi')
2048 particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2049 ! !$OMP END CRITICAL(send)
2050 end if ! ipe_particle
2051
2052 end do ! ipart
2053 ! !$OMP END PARALLEL DO
2054
2055 ! get out when only one core:
2056 if (npe == 1) then
2057 deallocate(sndrqst, rcvrqst)
2058 deallocate(particle_index_to_be_sent_to_ipe)
2059 return
2060 end if
2061
2062 ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2063 ! communicate amount of particles to be sent/received
2064 ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2065 ! opedit: I think this was the main bottleneck.
2066 ! 31.12.2017: made it nonblocking (easy, least invasive)
2067 ! If this continues to be problematic, better to
2068 ! send particles with block in coarsen/refine/loadbalance. (best).
2069
2070 isnd = 0; ircv = 0;
2071 do ipe=0,npe-1; if (ipe .eq. mype) cycle;
2072
2073 tag_send = mype * npe + ipe; tag_receive = ipe * npe + mype;
2074 isnd = isnd + 1; ircv = ircv + 1;
2075 call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer, &
2076 ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2077 call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer, &
2078 ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2079 end do
2080
2081 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
2082 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
2083 deallocate(sndrqst, rcvrqst)
2084
2085
2086 ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2087 ! send and receive the data of the particles
2088 ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2089
2090 do ipe=0,npe-1
2091 if (ipe .eq. mype) cycle;
2092 tag_send = mype * npe + ipe
2093 tag_receive = ipe * npe + mype
2094
2095 ! should i send some particles to ipe?
2096 if (send_n_particles_to_ipe(ipe) .gt. 0) then
2097
2098 ! create the send buffer
2099 allocate(send_particles(1:send_n_particles_to_ipe(ipe)))
2100 allocate(send_payload(1:npayload,1:send_n_particles_to_ipe(ipe)))
2101 do ipart = 1, send_n_particles_to_ipe(ipe)
2102 send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
2103 send_payload(1:npayload,ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
2104 end do ! ipart
2105
2106 call mpi_send(send_particles,send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,ierrmpi)
2107 call mpi_send(send_payload,npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,ierrmpi)
2108 do ipart = 1, send_n_particles_to_ipe(ipe)
2109 deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2110 deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2111 call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2112 end do ! ipart
2113 deallocate(send_particles)
2114 deallocate(send_payload)
2115
2116 end if ! send .gt. 0
2117
2118 ! should i receive some particles from ipe?
2119 if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2120 allocate(receive_particles(1:receive_n_particles_from_ipe(ipe)))
2121 allocate(receive_payload(1:npayload,1:receive_n_particles_from_ipe(ipe)))
2122
2123 call mpi_recv(receive_particles,receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,status,ierrmpi)
2124 call mpi_recv(receive_payload,npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,status,ierrmpi)
2125 do ipart = 1, receive_n_particles_from_ipe(ipe)
2126
2127 index = receive_particles(ipart)%index
2128 !if (.not. allocated(particle(index)%self)) &
2129 allocate(particle(index)%self)
2130 particle(index)%self = receive_particles(ipart)
2131 !if (.not. allocated(particle(index)%payload)) &
2132 allocate(particle(index)%payload(npayload))
2133 particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart)
2134 call push_particle_into_particles_on_mype(index)
2135
2136 ! since we don't send the igrid, need to re-locate it
2137 call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2138 particle(index)%igrid = igrid_particle
2139 particle(index)%ipe = ipe_particle
2140
2141 end do ! ipart
2142 deallocate(receive_particles)
2143 deallocate(receive_payload)
2144
2145 end if ! receive .gt. 0
2146 end do ! ipe
2147 deallocate(particle_index_to_be_sent_to_ipe)
2148
2149 end subroutine comm_particles_global
2150
2151! subroutine comm_particles_old()
2152! use mod_global_parameters
2153!
2154! integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2155! integer :: index
2156! integer :: tag_send, tag_receive, tag_send_p, tag_receive_p, send_buff, rcv_buff
2157! integer :: status(MPI_STATUS_SIZE)
2158! integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2159! integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2160! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2161! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2162! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2163! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2164! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
2165! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_received_from_ipe
2166! integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
2167! integer :: destroy_n_particles_mype
2168! logical :: BC_applied
2169! integer, allocatable, dimension(:) :: sndrqst, rcvrqst
2170! integer :: isnd, ircv
2171! integer,dimension(npe_neighbors,nparticles_per_cpu_hi) :: payload_index
2172! send_n_particles_to_ipe(:) = 0
2173! receive_n_particles_from_ipe(:) = 0
2174! destroy_n_particles_mype = 0
2175!
2176! ! check if and where to send each particle, destroy if necessary
2177! do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2178!
2179! ! first check if the particle should be destroyed because t>tmax in a fixed snapshot
2180! if ( .not.time_advance .and. particle(ipart)%self%time .gt. tmax_particles ) then
2181! destroy_n_particles_mype = destroy_n_particles_mype + 1
2182! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2183! cycle
2184! end if
2185!
2186! ! Then check whether to destroy due to user-defined condition
2187! if (associated(usr_destroy_particle)) then
2188! if (usr_destroy_particle(particle(ipart))) then
2189! destroy_n_particles_mype = destroy_n_particles_mype + 1
2190! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2191! cycle
2192! end if
2193! end if
2194!
2195! ! is my particle still in the same igrid?
2196! if (.not.particle_in_igrid(ipart,particle(ipart)%igrid)) then
2197! call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2198!
2199! ! destroy particle if out of domain (signalled by return value of -1)
2200! if (igrid_particle == -1 )then
2201! call apply_periodB(particle(ipart)%self,igrid_particle,ipe_particle,BC_applied)
2202! if (.not. BC_applied .or. igrid_particle == -1) then
2203! destroy_n_particles_mype = destroy_n_particles_mype + 1
2204! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2205! cycle
2206! end if
2207! end if
2208!
2209! ! particle still present
2210! particle(ipart)%igrid = igrid_particle
2211! particle(ipart)%ipe = ipe_particle
2212!
2213! ! if we have more than one core, is it on another cpu?
2214! if (npe .gt. 1 .and. particle(ipart)%ipe .ne. mype) then
2215! send_n_particles_to_ipe(ipe_particle) = &
2216! send_n_particles_to_ipe(ipe_particle) + 1
2217! particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2218! end if ! ipe_particle
2219!
2220! end if ! particle_in_grid
2221!
2222! end do ! ipart
2223!
2224! call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
2225!
2226! ! get out when only one core:
2227! if (npe == 1) return
2228!
2229! ! communicate amount of particles to be sent/received
2230! allocate( sndrqst(1:npe_neighbors), rcvrqst(1:npe_neighbors) )
2231! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2232! isnd = 0; ircv = 0;
2233! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2234! tag_send = mype * npe + ipe
2235! tag_receive = ipe * npe + mype
2236! isnd = isnd + 1; ircv = ircv + 1;
2237! call MPI_ISEND(send_n_particles_to_ipe(ipe),1,MPI_INTEGER, &
2238! ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2239! call MPI_IRECV(receive_n_particles_from_ipe(ipe),1,MPI_INTEGER, &
2240! ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2241! end do
2242! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2243! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2244! deallocate( sndrqst, rcvrqst )
2245!
2246! ! Communicate index of the particles to be sent/received
2247! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2248! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2249! isnd = 0; ircv = 0;
2250! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2251! if (send_n_particles_to_ipe(ipe) > 0) then
2252! tag_send = mype * npe + ipe
2253! isnd = isnd + 1
2254! call MPI_ISEND(particle_index_to_be_sent_to_ipe(:,ipe),send_n_particles_to_ipe(ipe),MPI_INTEGER, &
2255! ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2256! end if
2257! if (receive_n_particles_from_ipe(ipe) > 0) then
2258! tag_receive = ipe * npe + mype
2259! ircv = ircv + 1
2260! call MPI_IRECV(particle_index_to_be_received_from_ipe(:,ipe),receive_n_particles_from_ipe(ipe),MPI_INTEGER, &
2261! ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2262! end if
2263! end do
2264! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2265! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2266! deallocate( sndrqst, rcvrqst )
2267!
2268! ! Send and receive the data of the particles
2269! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2270! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2271! isnd = 0; ircv = 0;
2272! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2273!
2274! ! should i send some particles to ipe?
2275! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2276!
2277! do ipart = 1, send_n_particles_to_ipe(ipe)
2278! tag_send = mype * npe + ipe + ipart
2279! isnd = isnd+1
2280! call MPI_ISEND(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self, &
2281! 1,type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2282! end do ! ipart
2283! end if ! send .gt. 0
2284!
2285! ! should i receive some particles from ipe?
2286! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2287!
2288! do ipart = 1, receive_n_particles_from_ipe(ipe)
2289! tag_receive = ipe * npe + mype + ipart
2290! ircv = ircv+1
2291! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2292! allocate(particle(index)%self)
2293! call MPI_IRECV(particle(index)%self,1,type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2294! end do ! ipart
2295!
2296! end if ! receive .gt. 0
2297! end do ! ipe
2298! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2299! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2300! deallocate( sndrqst, rcvrqst )
2301!
2302! ! Send and receive the payloads
2303! ! NON-BLOCKING: one communication for each particle. Needed for whatever reason
2304! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2305! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2306! isnd = 0; ircv = 0;
2307! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2308!
2309! ! should i send some particles to ipe?
2310! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2311!
2312! do ipart = 1, send_n_particles_to_ipe(ipe)
2313! tag_send = mype * npe + ipe + ipart
2314! isnd = isnd+1
2315! call MPI_ISEND(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload), &
2316! npayload,MPI_DOUBLE_PRECISION,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2317! end do ! ipart
2318! end if ! send .gt. 0
2319!
2320! ! should i receive some particles from ipe?
2321! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2322! do ipart = 1, receive_n_particles_from_ipe(ipe)
2323! tag_receive = ipe * npe + mype + ipart
2324! ircv = ircv+1
2325! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2326! allocate(particle(index)%payload(npayload))
2327! call MPI_IRECV(particle(index)%payload(1:npayload),npayload, &
2328! MPI_DOUBLE_PRECISION,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2329! end do ! ipart
2330! end if ! receive .gt. 0
2331! end do ! ipe
2332! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2333! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2334! deallocate( sndrqst, rcvrqst )
2335!
2336! ! Deallocate sent particles
2337! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2338! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2339! do ipart = 1, send_n_particles_to_ipe(ipe)
2340! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2341! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2342! call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2343! end do ! ipart
2344! end if ! send .gt. 0
2345! end do
2346!
2347! ! Relocate received particles
2348! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2349! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2350! do ipart = 1, receive_n_particles_from_ipe(ipe)
2351! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2352! call push_particle_into_particles_on_mype(index)
2353! ! since we don't send the igrid, need to re-locate it
2354! call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2355! particle(index)%igrid = igrid_particle
2356! particle(index)%ipe = ipe_particle
2357! end do ! ipart
2358! end if ! receive .gt. 0
2359! end do ! ipe
2360!
2361! end subroutine comm_particles_old
2362!
2363! subroutine comm_particles_global_old()
2364! use mod_global_parameters
2365!
2366! integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2367! integer :: index
2368! integer :: tag_send, tag_receive, send_buff, rcv_buff
2369! integer :: status(MPI_STATUS_SIZE)
2370! integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2371! integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2372! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2373! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2374! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2375! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2376! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
2377! integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
2378! integer :: destroy_n_particles_mype
2379! logical :: BC_applied
2380!
2381! send_n_particles_to_ipe(:) = 0
2382! receive_n_particles_from_ipe(:) = 0
2383! destroy_n_particles_mype = 0
2384!
2385! ! check if and where to send each particle, relocate all of them (in case grid has changed)
2386! do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2387!
2388! call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2389!
2390! particle(ipart)%igrid = igrid_particle
2391! particle(ipart)%ipe = ipe_particle
2392!
2393! ! if we have more than one core, is it on another cpu?
2394! if (particle(ipart)%ipe .ne. mype) then
2395! send_n_particles_to_ipe(ipe_particle) = &
2396! send_n_particles_to_ipe(ipe_particle) + 1
2397! particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2398! end if ! ipe_particle
2399!
2400! end do ! ipart
2401!
2402! ! get out when only one core:
2403! if (npe == 1) return
2404!
2405! ! communicate amount of particles to be sent/received
2406! do ipe=0,npe-1;if (ipe .eq. mype) cycle;
2407! tag_send = mype * npe + ipe
2408! tag_receive = ipe * npe + mype
2409! call MPI_SEND(send_n_particles_to_ipe(ipe),1,MPI_INTEGER,ipe,tag_send,icomm,ierrmpi)
2410! call MPI_RECV(receive_n_particles_from_ipe(ipe),1,MPI_INTEGER,ipe,tag_receive,icomm,status,ierrmpi)
2411! end do
2412!
2413! ! send and receive the data of the particles
2414! do ipe=0,npe-1;if (ipe .eq. mype) cycle;
2415! tag_send = mype * npe + ipe
2416! tag_receive = ipe * npe + mype
2417!
2418! ! should i send some particles to ipe?
2419! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2420!
2421! ! create the send buffer
2422! do ipart = 1, send_n_particles_to_ipe(ipe)
2423! send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
2424! send_payload(1:npayload,ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
2425! end do ! ipart
2426! call MPI_SEND(send_particles,send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,ierrmpi)
2427! call MPI_SEND(send_payload,npayload*send_n_particles_to_ipe(ipe),MPI_DOUBLE_PRECISION,ipe,tag_send,icomm,ierrmpi)
2428! do ipart = 1, send_n_particles_to_ipe(ipe)
2429! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2430! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2431! call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2432! end do ! ipart
2433!
2434! end if ! send .gt. 0
2435!
2436! ! should i receive some particles from ipe?
2437! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2438!
2439! call MPI_RECV(receive_particles,receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,status,ierrmpi)
2440! call MPI_RECV(receive_payload,npayload*receive_n_particles_from_ipe(ipe),MPI_DOUBLE_PRECISION,ipe,tag_receive,icomm,status,ierrmpi)
2441!
2442! do ipart = 1, receive_n_particles_from_ipe(ipe)
2443!
2444! index = receive_particles(ipart)%index
2445! allocate(particle(index)%self)
2446! particle(index)%self = receive_particles(ipart)
2447! allocate(particle(index)%payload(npayload))
2448! particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart)
2449! call push_particle_into_particles_on_mype(index)
2450!
2451! ! since we don't send the igrid, need to re-locate it
2452! call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2453! particle(index)%igrid = igrid_particle
2454! particle(index)%ipe = ipe_particle
2455!
2456! end do ! ipart
2457!
2458! end if ! receive .gt. 0
2459! end do ! ipe
2460!
2461! end subroutine comm_particles_global_old
2462
2463 subroutine apply_periodb(particle,igrid_particle,ipe_particle,BC_applied)
2464 use mod_global_parameters
2465
2466 type(particle_t), intent(inout) :: particle
2467 integer, intent(inout) :: igrid_particle, ipe_particle
2468 logical,intent(out) :: BC_applied
2469 integer :: idim
2470
2471 bc_applied = .false.
2472 ! get out if we don't have any periodic BC:
2473 if (.not. any(periodb(1:ndim))) return
2474
2475 ! go through dimensions and try re-inject the particle at the other side
2476 do idim=1,ndim
2477
2478 if (.not. periodb(idim)) cycle
2479
2480 select case(idim)
2481 {case (^d)
2482 if (particle%x(^d) .lt. xprobmin^d) then
2483 particle%x(^d) = particle%x(^d) + (xprobmax^d - xprobmin^d)
2484 bc_applied = .true.
2485 end if
2486 if (particle%x(^d) .ge. xprobmax^d) then
2487 particle%x(^d) = particle%x(^d) - (xprobmax^d - xprobmin^d)
2488 bc_applied = .true.
2489 end if
2490 \}
2491 end select
2492
2493 end do
2494
2495 call find_particle_ipe(particle%x,igrid_particle,ipe_particle)
2496
2497 end subroutine apply_periodb
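The periodic re-injection rule applied above, reduced to one dimension in a standalone sketch (not from the AMRVAC sources; the program name and the domain [0, 10) are made up): a particle leaving through x >= xprobmax re-enters shifted back by the domain length, and vice versa.

! Standalone sketch: 1D periodic wrap as in apply_periodb.
program check_periodic_wrap
  implicit none
  integer, parameter  :: dp = kind(1.0d0)
  real(dp), parameter :: xprobmin = 0.0_dp, xprobmax = 10.0_dp
  real(dp) :: x

  x = 10.3_dp
  if (x <  xprobmin) x = x + (xprobmax - xprobmin)
  if (x >= xprobmax) x = x - (xprobmax - xprobmin)
  print *, 'wrapped x =', x          ! 0.3
end program check_periodic_wrap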
2498
2499 !> clean up destroyed particles on all cores
2500 subroutine destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed)
2501 use mod_global_parameters
2502
2503 integer, intent(in) :: destroy_n_particles_mype
2504 integer, dimension(1:destroy_n_particles_mype), intent(in) :: particle_index_to_be_destroyed
2505 type(particle_t), dimension(destroy_n_particles_mype):: destroy_particles_mype
2506 double precision, dimension(npayload,destroy_n_particles_mype):: destroy_payload_mype
2507 integer :: iipart,ipart,destroy_n_particles
2508
2509 destroy_n_particles = 0
2510
2511 ! append the particle to list of destroyed particles
2512 do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
2513 destroy_particles_mype(iipart) = particle(ipart)%self
2514 destroy_payload_mype(1:npayload,iipart) = particle(ipart)%payload(1:npayload)
2515 end do
2516
2517 call output_ensemble(destroy_n_particles_mype,destroy_particles_mype,destroy_payload_mype,'destroy')
2518
2519 if (npe > 1) then
2520 call mpi_allreduce(destroy_n_particles_mype,destroy_n_particles,1,mpi_integer,mpi_sum,icomm,ierrmpi)
2521 else
2522 destroy_n_particles = destroy_n_particles_mype
2523 end if
2524
2525 nparticles = nparticles - destroy_n_particles
2526
2527 do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
2528 particle(ipart)%igrid = -1
2529 particle(ipart)%ipe = -1
2530 if(time_advance) then
2531 write(*,*) particle(ipart)%self%time, ': particle',ipart,' has left at it ',it_particles,' on pe', mype
2532 write(*,*) particle(ipart)%self%time, ': particles remaining:',nparticles, '(total)', nparticles_on_mype-1, 'on pe', mype
2533 endif
2534 deallocate(particle(ipart)%self)
2535 deallocate(particle(ipart)%payload)
2536 call pull_particle_from_particles_on_mype(ipart)
2537 end do
2538
2539 end subroutine destroy_particles
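A minimal standalone MPI program (not from the AMRVAC sources; the program name and all counts are made up) showing the bookkeeping step above: local destruction counts are summed over all ranks with MPI_ALLREDUCE and subtracted from the global particle counter, so every rank agrees on the new total.

! Standalone sketch: summing per-rank destruction counts.
program check_destroy_count
  use mpi
  implicit none
  integer :: mype, npe, ierrmpi
  integer :: destroy_n_local, destroy_n_total, nparticles

  call MPI_INIT(ierrmpi)
  call MPI_COMM_RANK(MPI_COMM_WORLD, mype, ierrmpi)
  call MPI_COMM_SIZE(MPI_COMM_WORLD, npe, ierrmpi)

  nparticles      = 1000            ! made-up global particle count
  destroy_n_local = mod(mype, 3)    ! made-up local number of destroyed particles

  call MPI_ALLREDUCE(destroy_n_local, destroy_n_total, 1, MPI_INTEGER, &
                     MPI_SUM, MPI_COMM_WORLD, ierrmpi)
  nparticles = nparticles - destroy_n_total

  if (mype == 0) print *, 'destroyed:', destroy_n_total, ' remaining:', nparticles
  call MPI_FINALIZE(ierrmpi)
end program check_destroy_count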
2540
2541 subroutine push_particle_into_particles_on_mype(ipart)
2542 implicit none
2543
2544 integer, intent(in) :: ipart
2545
2546 nparticles_on_mype = nparticles_on_mype + 1
2547 particles_on_mype(nparticles_on_mype) = ipart
2548
2549 end subroutine push_particle_into_particles_on_mype
2550
2551 subroutine pull_particle_from_particles_on_mype(ipart)
2552 implicit none
2553
2554 integer, intent(in) :: ipart
2555 integer :: i
2556
2557 do i=1,nparticles_on_mype
2558 if (particles_on_mype(i) == ipart) then
2559 particles_on_mype(i) = particles_on_mype(nparticles_on_mype)
2560 exit
2561 end if
2562 end do
2563
2564 nparticles_on_mype = nparticles_on_mype - 1
2565
2566 end subroutine pull_particle_from_particles_on_mype
2567
2568end module mod_particle_base