MPI-AMRVAC 3.1
The MPI - Adaptive Mesh Refinement - Versatile Advection Code (development version)
mod_particle_base.t
1!> Module with shared functionality for all the particle movers
3 use mod_global_parameters, only: name_len, std_len
5 use mod_random
7 use mod_comm_lib, only: mpistop
8
9 !> Normalization factor for velocity in the integrator
10 double precision :: integrator_velocity_factor(3)
11 !> Time limit of particles
12 double precision :: tmax_particles
13 !> Minimum time of all particles
14 double precision :: min_particle_time
15 !> Time interval to save particles
16 double precision :: dtsave_particles
17 !> If positive, a constant time step for the particles
18 double precision :: const_dt_particles
19 !> Particle CFL safety factor
20 double precision :: particles_cfl
21 !> Time to write next particle output
22 double precision :: t_next_output
23 !> Resistivity
24 double precision :: particles_eta, particles_etah
25 double precision :: dtheta
26 !> Particle downsampling factor in CSV output
27 integer :: downsample_particles
28 !> Maximum number of particles
29 integer :: nparticleshi
30 !> Maximum number of particles in one processor
31 integer :: nparticles_per_cpu_hi
32 !> Number of default payload variables for a particle
33 integer :: ndefpayload
34 !> Number of user-defined payload variables for a particle
35 integer :: nusrpayload
36 !> Number of total payload variables for a particle
37 integer :: npayload
38 !> Number of variables for grid field
39 integer :: ngridvars
40 !> Number of particles
41 integer :: num_particles
42 !> Identity number and total number of particles
43 integer :: nparticles
44 !> Iteration number of particles
45 integer :: it_particles
46 !> Output count for ensembles of destroyed particles
47 integer :: n_output_destroyed
48
49 ! these two save the list of neighboring cpus:
50 integer, allocatable :: ipe_neighbor(:)
51 integer :: npe_neighbors
52
53 integer :: type_particle
54 !> set the current igrid for the particle integrator:
55 integer :: igrid_working
56 !> set the current ipart for the particle integrator:
57 integer :: ipart_working
58
59 integer, parameter :: unitparticles=15
60
61 !> Array of identity numbers of particles in current processor
62 integer, dimension(:), allocatable :: particles_on_mype
63 !> Array of identity numbers of active particles in current processor
64 integer, dimension(:), allocatable :: particles_active_on_mype
65 !> Number of particles in current processor
66 integer :: nparticles_on_mype
67 !> Number of active particles in current processor
68 integer :: nparticles_active_on_mype
69 !> Integrator to be used for particles
70 integer :: integrator
71
72 !> Variable index for magnetic field
73 integer, allocatable :: bp(:)
74 !> Variable index for electric field
75 integer, allocatable :: ep(:)
76 !> Variable index for fluid velocity
77 integer, allocatable :: vp(:)
78 !> Variable index for current
79 integer, allocatable :: jp(:)
80 !> Variable index for density
81 integer :: rhop
82 !> Use a relativistic particle mover?
83 logical :: relativistic
84 !> Whether to write individual particle output (followed particles)
85 logical :: write_individual
86 !> Whether to write ensemble output
87 logical :: write_ensemble
88 !> Whether to write particle snapshots
89 logical :: write_snapshot
90 logical :: losses
91 !> String describing the particle interpolation type
92 character(len=name_len) :: interp_type_particles = ""
93 !> String describing the particle physics type
94 character(len=name_len) :: physics_type_particles = ""
95 !> String describing the particle integrator type
96 character(len=name_len) :: integrator_type_particles = ""
97 !> Header string used in CSV files
98 character(len=400) :: csv_header
99 !> Format string used in CSV files
100 character(len=60) :: csv_format
101
102 type particle_ptr
103 type(particle_t), pointer :: self
104 !> extra information carried by the particle
105 double precision, allocatable :: payload(:)
106 integer :: igrid, ipe
107 end type particle_ptr
108
109 type particle_t
110 !> follow the history of the particle
111 logical :: follow
112 !> identity number
113 integer :: index
114 !> charge
115 double precision :: q
116 !> mass
117 double precision :: m
118 !> time
119 double precision :: time
120 !> time step
121 double precision :: dt
122 !> coordinates
123 double precision :: x(3)
124 !> velocity, momentum, or special ones
125 double precision :: u(3)
126 end type particle_t
127
128 ! Array containing all particles
129 type(particle_ptr), dimension(:), allocatable :: particle
130
131 ! The pseudo-random number generator
132 type(rng_t) :: rng
133
134 ! Pointers for user-defined stuff
135 procedure(sub_fill_gridvars), pointer :: particles_fill_gridvars => null()
136 procedure(sub_define_additional_gridvars), pointer :: particles_define_additional_gridvars => null()
137 procedure(sub_fill_additional_gridvars), pointer :: particles_fill_additional_gridvars => null()
138 procedure(sub_integrate), pointer :: particles_integrate => null()
139 procedure(fun_destroy), pointer :: usr_destroy_particle => null()
140
141 abstract interface
142
143 subroutine sub_fill_gridvars
144 end subroutine sub_fill_gridvars
145
146 subroutine sub_define_additional_gridvars(ngridvars)
147 integer, intent(inout) :: ngridvars
148 end subroutine sub_define_additional_gridvars
149
150 subroutine sub_fill_additional_gridvars
151 end subroutine sub_fill_additional_gridvars
152
153 subroutine sub_integrate(end_time)
154 double precision, intent(in) :: end_time
155 end subroutine sub_integrate
156
157 function fun_destroy(myparticle)
158 import particle_ptr
159 logical :: fun_destroy
160 type(particle_ptr), intent(in) :: myparticle
161 end function fun_destroy
162
163 end interface
164
165contains
166
167 !> Finalize the particles module
168 subroutine particles_finish()
170
172
173 ! Clean up particle type
174 call mpi_type_free(type_particle,ierrmpi)
175
176 ! Clean up variables
177 deallocate(particle)
178 deallocate(ipe_neighbor)
179 deallocate(particles_on_mype)
180 deallocate(particles_active_on_mype)
181 deallocate(gridvars)
182
183 end subroutine particles_finish
184
185 !> Read this module's parameters from a file
186 subroutine particles_params_read(files)
188 character(len=*), intent(in) :: files(:)
189 integer :: n
190
191 namelist /particles_list/ physics_type_particles, nparticleshi, nparticles_per_cpu_hi, &
198
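 ! Example &particles_list block in a .par file (only a subset of the
 ! available parameters is shown; names as declared above):
 !   &particles_list
 !     physics_type_particles = 'advect'
 !     num_particles = 1000
 !     dtsave_particles = 1.0d-2
 !   /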
199 do n = 1, size(files)
200 open(unitpar, file=trim(files(n)), status="old")
201 read(unitpar, particles_list, end=111)
202111 close(unitpar)
203 end do
204
205 end subroutine particles_params_read
206
207 !> Give initial values to parameters
208 subroutine particle_base_init()
210 integer :: n
211 integer, parameter :: i8 = selected_int_kind(18)
212 integer(i8) :: seed(2)
213 integer :: idir
214 character(len=20) :: strdata
215
216 physics_type_particles = 'advect'
217 nparticleshi = 10000000
218 nparticles_per_cpu_hi = 1000000
219 num_particles = 1000
220 ndefpayload = 1
221 nusrpayload = 0
222 tmax_particles = bigdouble
223 dtsave_particles = bigdouble
224 const_dt_particles = -bigdouble
225 particles_cfl = 0.5
226 write_individual = .true.
227 write_ensemble = .true.
228 write_snapshot = .true.
230 relativistic = .false.
231 particles_eta = -1.d0
232 particles_etah = -1.d0
233 t_next_output = 0.0d0
234 dtheta = 2.0d0*dpi / 60.0d0
235 losses = .false.
236 nparticles = 0
237 it_particles = 0
243 interp_type_particles = 'default'
244
245 call particles_params_read(par_files)
246
247 ! If sampling, ndefpayload = nw:
248 if (physics_type_particles == 'sample') ndefpayload = nw
249 ! Total number of payloads
250 npayload = ndefpayload + nusrpayload
251
252 ! initialise the random number generator
253 seed = [310952_i8, 8948923749821_i8]
254 call rng%set_seed(seed)
255
256 allocate(particle(1:nparticleshi))
257 allocate(ipe_neighbor(0:npe-1))
258 allocate(particles_on_mype(nparticles_per_cpu_hi))
259 allocate(particles_active_on_mype(nparticles_per_cpu_hi))
260
261 particles_on_mype(:) = 0
262 particles_active_on_mype(:) = 0
263
264 ! Generate header for CSV files
265 csv_header = ' time, dt, x1, x2, x3, u1, u2, u3,'
266 ! If sampling, name the payloads like the fluid quantities
267 if (physics_type_particles == 'sample') then
268 do n = 1, nw
269 write(strdata,"(a,a,a)") ' ', trim(prim_wnames(n)), ','
270 csv_header = trim(csv_header) // trim(strdata)
271 end do
272 else ! Otherwise, payloads are called pl01, pl02, ...
273 do n = 1, ndefpayload
274 write(strdata,"(a,i2.2,a)") ' pl', n, ','
275 csv_header = trim(csv_header) // trim(strdata)
276 end do
277 end if
278 ! User-defined payloads are called usrpl01, usrpl02, ...
279 if (nusrpayload > 0) then
280 do n = 1, nusrpayload
281 write(strdata,"(a,i2.2,a)") ' usrpl', n, ','
282 csv_header = trim(csv_header) // trim(strdata)
283 end do
284 end if
285 csv_header = trim(csv_header) // ' ipe, iteration, index'
286
287 ! Generate format string for CSV files
288 write(csv_format, '(A,I0,A,A)') '(', 8 + npayload, '(es14.6,", ")', &
289 'i8.7,", ",i11.10,", ",i8.7)'
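 ! For example, with npayload = 1 this produces the format string
 !   (9(es14.6,", ")i8.7,", ",i11.10,", ",i8.7)
 ! i.e. time, dt, x(1:3), u(1:3) and one payload as floats, followed by
 ! ipe, iteration and index as integers.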
290
291 end subroutine particle_base_init
292
293 !> Initialize communicators for particles
294 subroutine init_particles_com()
296
297 integer :: oldtypes(0:7), offsets(0:7), blocklengths(0:7)
298
299 oldtypes(0) = mpi_logical
300 oldtypes(1) = mpi_integer
301 oldtypes(2:7) = mpi_double_precision
302
303 blocklengths(0:5)=1
304 blocklengths(6)=3
305 blocklengths(7)=3
306
307 offsets(0) = 0
308 offsets(1) = size_logical * blocklengths(0)
309 offsets(2) = offsets(1) + size_int * blocklengths(1)
310 offsets(3) = offsets(2) + size_double * blocklengths(2)
311 offsets(4) = offsets(3) + size_double * blocklengths(3)
312 offsets(5) = offsets(4) + size_double * blocklengths(4)
313 offsets(6) = offsets(5) + size_double * blocklengths(5)
314 offsets(7) = offsets(6) + size_double * blocklengths(6)
315
316 call mpi_type_struct(8,blocklengths,offsets,oldtypes,type_particle,ierrmpi)
317 call mpi_type_commit(type_particle,ierrmpi)
318
319 end subroutine init_particles_com
320
321 subroutine get_maxwellian_velocity(v, velocity)
322 double precision, intent(out) :: v(3)
323 double precision, intent(in) :: velocity
324 double precision :: vtmp(3), vnorm
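 ! Draw three independent normal components with standard deviation set by
 ! 'velocity'; the resulting speed |vtmp| is Maxwellian, and rng%sphere
 ! assigns it an isotropic random direction.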
325
326 vtmp(1) = velocity * rng%normal()
327 vtmp(2) = velocity * rng%normal()
328 vtmp(3) = velocity * rng%normal()
329 vnorm = norm2(vtmp)
330 v = rng%sphere(vnorm)
331 end subroutine get_maxwellian_velocity
332
333 subroutine get_uniform_pos(x)
335 double precision, intent(out) :: x(3)
336
337 call rng%unif_01_vec(x)
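 ! x now holds uniform deviates in [0,1); map them onto the domain in each
 ! of the ndim dimensions and zero any remaining components.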
338
339 {^d&x(^d) = xprobmin^d + x(^d) * (xprobmax^d - xprobmin^d)\}
340 x(ndim+1:) = 0.0d0
341 end subroutine get_uniform_pos
342
343 !> Initialize grid variables for particles
344 subroutine init_gridvars()
346 use mod_timing
347
348 integer :: igrid, iigrid
349
350 tpartc_grid_0=mpi_wtime()
351
352 do iigrid=1,igridstail; igrid=igrids(iigrid);
353 allocate(gridvars(igrid)%w(ixg^t,1:ngridvars),gridvars(igrid)%wold(ixg^t,1:ngridvars))
354 gridvars(igrid)%w = 0.0d0
355 gridvars(igrid)%wold = 0.0d0
356 end do
357
358 call particles_fill_gridvars()
359 if (associated(particles_define_additional_gridvars)) then
360 call particles_fill_additional_gridvars()
361 end if
362
363 tpartc_grid = tpartc_grid + (mpi_wtime()-tpartc_grid_0)
364
365 end subroutine init_gridvars
366
367 !> Deallocate grid variables for particles
368 subroutine finish_gridvars()
370
371 integer :: iigrid, igrid
372
373 do iigrid=1,igridstail; igrid=igrids(iigrid);
374 deallocate(gridvars(igrid)%w,gridvars(igrid)%wold)
375 end do
376
377 end subroutine finish_gridvars
378
379 !> This routine fills the particle grid variables with the default for mhd, i.e. only E and B
383
384 double precision :: E(ixG^T, ndir)
385 double precision :: B(ixG^T, ndir)
386 integer :: igrid, iigrid
387
388 do iigrid=1,igridstail; igrid=igrids(iigrid);
389 if (associated(usr_particle_fields)) then ! FILL ONLY E AND B
390 call usr_particle_fields(ps(igrid)%w, ps(igrid)%x, e, b)
391 gridvars(igrid)%w(ixg^t,ep(:)) = e
392 gridvars(igrid)%w(ixg^t,bp(:)) = b
393 else
394 call fields_from_mhd(igrid, ps(igrid)%w, gridvars(igrid)%w)
395 end if
396 end do
397
398 end subroutine fill_gridvars_default
399
400 !> Determine fields from MHD variables
401 subroutine fields_from_mhd(igrid, w_mhd, w_part)
403 use mod_geometry
404 integer, intent(in) :: igrid
405 double precision, intent(in) :: w_mhd(ixG^T,nw)
406 double precision, intent(inout) :: w_part(ixG^T,ngridvars)
407
408 double precision :: current(ixG^T,7-2*ndir:3)
409 double precision :: w(ixG^T,1:nw)
410 integer :: idirmin, idir
411
412 ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
413
414 w(ixg^t,1:nw) = w_mhd(ixg^t,1:nw)
415 w_part(ixg^t,bp(1):bp(3)) = 0.0d0
416 w_part(ixg^t,ep(1):ep(3)) = 0.0d0
417
418 call phys_to_primitive(ixg^ll,ixg^ll,w,ps(igrid)%x)
419
420 ! fill with density:
421 w_part(ixg^t,rhop) = w(ixg^t,iw_rho)
422
423 ! fill with velocity:
424 w_part(ixg^t,vp(:)) = w(ixg^t,iw_mom(:))
425
426 ! fill with magnetic field:
427 if (b0field) then
428 do idir = 1, ndir
429 w_part(ixg^t,bp(idir))=w(ixg^t,iw_mag(idir))+ps(igrid)%B0(ixg^t,idir,0)
430 end do
431 else
432 w_part(ixg^t,bp(:)) = w(ixg^t,iw_mag(:))
433 end if
434
435 ! fill with current
436 current = zero
437 call particle_get_current(w,ixg^ll,ixg^llim^d^lsub1,idirmin,current)
438 w_part(ixg^t,jp(:)) = current(ixg^t,:)
439
440 ! fill with electric field
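 ! Resistive Ohm's law: E = -v x B + particles_eta*J; when particles_etah > 0
 ! the Hall term particles_etah/rho*(J x B) is added further below.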
441 select case (coordinate)
443 w_part(ixg^t,ep(1)) = w_part(ixg^t,bp(2)) * &
444 w(ixg^t,iw_mom(3)) - w_part(ixg^t,bp(3)) * &
445 w(ixg^t,iw_mom(2)) + particles_eta * current(ixg^t,1)
446 w_part(ixg^t,ep(2)) = w_part(ixg^t,bp(3)) * &
447 w(ixg^t,iw_mom(1)) - w_part(ixg^t,bp(1)) * &
448 w(ixg^t,iw_mom(3)) + particles_eta * current(ixg^t,2)
449 w_part(ixg^t,ep(3)) = w_part(ixg^t,bp(1)) * &
450 w(ixg^t,iw_mom(2)) - w_part(ixg^t,bp(2)) * &
451 w(ixg^t,iw_mom(1)) + particles_eta * current(ixg^t,3)
452 case (cylindrical)
453 w_part(ixg^t,ep(r_)) = w_part(ixg^t,bp(phi_)) * &
454 w(ixg^t,iw_mom(z_)) - w_part(ixg^t,bp(z_)) * &
455 w(ixg^t,iw_mom(phi_)) + particles_eta * current(ixg^t,r_)
456 w_part(ixg^t,ep(phi_)) = w_part(ixg^t,bp(z_)) * &
457 w(ixg^t,iw_mom(r_)) - w_part(ixg^t,bp(r_)) * &
458 w(ixg^t,iw_mom(z_)) + particles_eta * current(ixg^t,phi_)
459 w_part(ixg^t,ep(z_)) = w_part(ixg^t,bp(r_)) * &
460 w(ixg^t,iw_mom(phi_)) - w_part(ixg^t,bp(phi_)) * &
461 w(ixg^t,iw_mom(r_)) + particles_eta * current(ixg^t,z_)
462 end select
463
464 ! Hall term
465 if (particles_etah > zero) then
466 select case (coordinate)
468 w_part(ixg^t,ep(1)) = w_part(ixg^t,ep(1)) + particles_etah/w(ixg^t,iw_rho) * &
469 (current(ixg^t,2) * w_part(ixg^t,bp(3)) - current(ixg^t,3) * w_part(ixg^t,bp(2)))
470 w_part(ixg^t,ep(2)) = w_part(ixg^t,ep(2)) + particles_etah/w(ixg^t,iw_rho) * &
471 (-current(ixg^t,1) * w_part(ixg^t,bp(3)) + current(ixg^t,3) * w_part(ixg^t,bp(1)))
472 w_part(ixg^t,ep(3)) = w_part(ixg^t,ep(3)) + particles_etah/w(ixg^t,iw_rho) * &
473 (current(ixg^t,1) * w_part(ixg^t,bp(2)) - current(ixg^t,2) * w_part(ixg^t,bp(1)))
474 case (cylindrical)
475 w_part(ixg^t,ep(r_)) = w_part(ixg^t,ep(r_)) + particles_etah/w(ixg^t,iw_rho) * &
476 (current(ixg^t,phi_) * w_part(ixg^t,bp(z_)) - current(ixg^t,z_) * w_part(ixg^t,bp(phi_)))
477 w_part(ixg^t,ep(phi_)) = w_part(ixg^t,ep(phi_)) + particles_etah/w(ixg^t,iw_rho) * &
478 (-current(ixg^t,r_) * w_part(ixg^t,bp(z_)) + current(ixg^t,z_) * w_part(ixg^t,bp(r_)))
479 w_part(ixg^t,ep(z_)) = w_part(ixg^t,ep(z_)) + particles_etah/w(ixg^t,iw_rho) * &
480 (current(ixg^t,r_) * w_part(ixg^t,bp(phi_)) - current(ixg^t,phi_) * w_part(ixg^t,bp(r_)))
481 end select
482 end if
483
484 end subroutine fields_from_mhd
485
486 !> Calculate idirmin and the idirmin:3 components of the common current array
487 !> make sure that dxlevel(^D) is set correctly.
488 subroutine particle_get_current(w,ixI^L,ixO^L,idirmin,current)
490 use mod_geometry
491
492 integer :: ixO^L, idirmin, ixI^L
493 double precision :: w(ixI^S,1:nw)
494 ! For ndir=2 only 3rd component of J can exist, ndir=1 is impossible for MHD
495 double precision :: current(ixI^S,7-2*ndir:3),bvec(ixI^S,1:ndir)
496 integer :: idir
497 integer :: idirmin0
498
499 idirmin0 = 7-2*ndir
500
501 if (b0field) then
502 do idir = 1, ndir
503 bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))+block%B0(ixi^s,idir,0)
504 end do
505 else
506 do idir = 1, ndir
507 bvec(ixi^s,idir)=w(ixi^s,iw_mag(idir))
508 end do
509 end if
510
511 call curlvector(bvec,ixi^l,ixo^l,current,idirmin,idirmin0,ndir)
512
513 end subroutine particle_get_current
514
515 !> update grid variables for particles
516 subroutine update_gridvars()
518 use mod_timing
519
520 integer :: igrid, iigrid
521
522 tpartc_grid_0=mpi_wtime()
523
524 do iigrid=1,igridstail; igrid=igrids(iigrid);
525 gridvars(igrid)%wold=gridvars(igrid)%w
526 end do
527
528 call particles_fill_gridvars()
529 if (associated(particles_define_additional_gridvars)) then
530 call particles_fill_additional_gridvars()
531 end if
532
533 tpartc_grid = tpartc_grid + (mpi_wtime()-tpartc_grid_0)
534
535 end subroutine update_gridvars
536
537 !> Let particles evolve in time. The routine also handles grid variables and
538 !> output.
539 subroutine handle_particles()
541 use mod_timing
542
543 integer :: steps_taken, nparticles_left
544
545 tpartc0 = mpi_wtime()
546
548
549 if (time_advance) then
551 call update_gridvars()
552 else
554 end if
555 tpartc_com0=mpi_wtime()
557 tpartc_com=tpartc_com + (mpi_wtime()-tpartc_com0)
558
559
560 ! main integration loop
561 do
562 if (tmax_particles >= t_next_output) then
563 call advance_particles(t_next_output, steps_taken)
564 tpartc_io_0 = mpi_wtime()
565 if (mype .eq. 0 .and. (.not. time_advance)) print*, "Writing particle output at time",t_next_output
566 call write_particle_output()
568 timeio_tot = timeio_tot+(mpi_wtime()-tpartc_io_0)
569 tpartc_io = tpartc_io+(mpi_wtime()-tpartc_io_0)
570
571 ! If we are using a constant time step, this prevents multiple outputs
572 ! at the same time
575 else
576 call advance_particles(tmax_particles, steps_taken)
577! call write_particle_output()
578 exit
579 end if
580
581 call mpi_allreduce(nparticles_on_mype, nparticles_left, 1, mpi_integer, &
582 mpi_sum, icomm, ierrmpi)
583 if (nparticles_left == 0 .and. convert) call mpistop('No particles left')
584
585 end do
586
587 tpartc = tpartc + (mpi_wtime() - tpartc0)
588
589 end subroutine handle_particles
590
591 !> Routine handles the particle evolution
592 subroutine advance_particles(end_time, steps_taken)
593 use mod_timing
595 double precision, intent(in) :: end_time !< Advance at most up to this time
596 integer, intent(out) :: steps_taken
597 integer :: n_active
598
599 steps_taken = 0
600
601 do
602 call select_active_particles(end_time)
603
604 ! Determine total number of active particles
605 call mpi_allreduce(nparticles_active_on_mype, n_active, 1, mpi_integer, &
606 mpi_sum, icomm, ierrmpi)
607
608 if (n_active == 0) exit
609
610 tpartc_int_0=mpi_wtime()
611 call particles_integrate(end_time)
612 steps_taken = steps_taken + 1
613 tpartc_int=tpartc_int+(mpi_wtime()-tpartc_int_0)
614
615 tpartc_com0=mpi_wtime()
616 call comm_particles()
617 tpartc_com=tpartc_com + (mpi_wtime()-tpartc_com0)
618
620 end do
621
622 end subroutine advance_particles
623
624 pure subroutine limit_dt_endtime(t_left, dt_p)
625 double precision, intent(in) :: t_left
626 double precision, intent(inout) :: dt_p
627 double precision :: n_steps
628 double precision, parameter :: eps = 1d-10
629
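 ! Example: if t_left = 1.2d0*dt_p, the remaining time is split into two
 ! equal steps of 0.6d0*dt_p instead of a full step followed by a tiny
 ! 0.2d0*dt_p step.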
630 n_steps = t_left / dt_p
631
632 if (n_steps < 1+eps) then
633 ! Last step
634 dt_p = t_left
635 else if (n_steps < 2-eps) then
636 ! Divide time left over two steps, to prevent one tiny time step
637 dt_p = 0.5d0 * t_left
638 end if
639
640 end subroutine limit_dt_endtime
641
642 ! Get a generic vector quantity stored on the grid at the particle position
643 ! NOTE: DON'T GET E OR B IN THIS WAY, USE FUNCTIONS BELOW!
644 subroutine get_vec(ix,igrid,x,tloc,vec)
647
648 integer,intent(in) :: ix(ndir) !< Indices in gridvars
649 integer,intent(in) :: igrid
650 double precision,dimension(3), intent(in) :: x
651 double precision, intent(in) :: tloc
652 double precision,dimension(ndir), intent(out) :: vec
653 double precision,dimension(ndir) :: vec1, vec2
654 double precision :: td, bb
655 integer :: ic^D,idir
656
657 if (associated(usr_particle_analytic)) then
658 call usr_particle_analytic(ix, x, tloc, vec)
659 else
660 do idir=1,ndir
661 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,ix(idir)), &
662 ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
663 end do
664
665 if (time_advance) then
666 do idir=1,ndir
667 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,ix(idir)), &
668 ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
669 end do
670
671 ! Interpolate in time
672 td = (tloc - global_time) / dt
673 vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td
674 end if
675
676 ! Fabio June 2024: Renormalize vector by its grid-evaluated norm
677 if (interp_type_particles=='renormalized') then
678 if (time_advance) then
679 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
680 call interpolate_var(igrid,ixg^ll,ixm^ll, &
681 sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1)*td &
682 +sum(gridvars(igrid)%wold(ixg^t,ix(:))**2,dim=ndim+1)*(1.d0-td), &
683 ps(igrid)%x(ixg^t,1:ndim),x,bb)
684 end if
685 else
686 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
687 call interpolate_var(igrid,ixg^ll,ixm^ll, &
688 sum(gridvars(igrid)%w(ixg^t,ix(:))**2,dim=ndim+1), &
689 ps(igrid)%x(ixg^t,1:ndim),x,bb)
690 end if
691 vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
692 end if
693 end if
694 end if
695
696 end subroutine get_vec
697
698 subroutine get_bfield(igrid,x,tloc,b)
701
702 integer,intent(in) :: igrid
703 double precision,dimension(3), intent(in) :: x
704 double precision, intent(in) :: tloc
705 double precision,dimension(ndir), intent(out) :: b
706 double precision,dimension(ndir) :: vec1, vec2, vec
707 double precision :: td, bb
708 integer :: ic^D,idir
709
710 if (associated(usr_particle_analytic)) then
711 call usr_particle_analytic(bp, x, tloc, vec)
712 else if (associated(usr_particle_fields)) then
713 call get_vec(bp, igrid, x, tloc, vec)
714 else
715 do idir=1,ndir
716 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,bp(idir)), &
717 ps(igrid)%x(ixg^t,1:ndim),x,vec(idir))
718 end do
719
720 if (time_advance) then
721 do idir=1,ndir
722 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,bp(idir)), &
723 ps(igrid)%x(ixg^t,1:ndim),x,vec2(idir))
724 end do
725
726 ! Interpolate in time
727 td = (tloc - global_time) / dt
728 vec(:) = vec2(:) * (1.0d0 - td) + vec(:) * td
729 end if
730
731 ! Fabio June 2024: Renormalize vector by its grid-evaluated norm
732 if (interp_type_particles=='renormalized') then
733 if (time_advance) then
734 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
735 call interpolate_var(igrid,ixg^ll,ixm^ll, &
736 sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1)*td &
737 +sum(gridvars(igrid)%wold(ixg^t,bp(:))**2,dim=ndim+1)*(1.d0-td), &
738 ps(igrid)%x(ixg^t,1:ndim),x,bb)
739 end if
740 else
741 if (sqrt(sum(vec(:)**2)) .gt. 0.d0) then
742 call interpolate_var(igrid,ixg^ll,ixm^ll, &
743 sum(gridvars(igrid)%w(ixg^t,bp(:))**2,dim=ndim+1), &
744 ps(igrid)%x(ixg^t,1:ndim),x,bb)
745 end if
746 vec = vec/sqrt(sum(vec(:)**2))*sqrt(bb)
747 end if
748 end if
749
750 end if
751
752 b(:) = vec(:)
753
754 end subroutine get_bfield
755
756 ! Shorthand for getting specifically the E field at the particle position
757 subroutine get_efield(igrid,x,tloc,e)
760 use mod_geometry
761
762 integer,intent(in) :: igrid
763 double precision,dimension(3), intent(in) :: x
764 double precision, intent(in) :: tloc
765 double precision,dimension(ndir), intent(out) :: e
766 double precision,dimension(ndir) :: vec1, vec2, vec
767 double precision,dimension(ndir) :: vfluid, b
768 double precision,dimension(ndir) :: current
769 double precision :: rho = 1.d0, rho2 = 1.d0
770 double precision :: td
771 integer :: ic^D,idir
772
773 current(:) = 0.d0
774
775 if (associated(usr_particle_analytic)) then
776 call usr_particle_analytic(ep, x, tloc, vec)
777 else if (associated(usr_particle_fields)) then
778 call get_vec(ep, igrid, x, tloc, vec)
779 else
780 call get_bfield(igrid, x, tloc, b)
781 call get_vec(vp, igrid, x, tloc, vfluid)
782 if (particles_eta > zero) then
783 call get_vec(jp, igrid, x, tloc, current)
784 end if
785 if (particles_etah > zero) then
786 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%w(ixg^t,rhop),ps(igrid)%x(ixg^t,1:ndim),x,rho)
787 if (time_advance) then
788 call interpolate_var(igrid,ixg^ll,ixm^ll,gridvars(igrid)%wold(ixg^t,rhop),ps(igrid)%x(ixg^t,1:ndim),x,rho2)
789 td = (tloc - global_time) / dt
790 rho = rho2 * (1.0d0 - td) + rho * td
791 end if
792 end if
793
794 ! Build E
795 select case (coordinate)
797 vec(1) = -vfluid(2)*b(3)+vfluid(3)*b(2) + particles_eta*current(1) + particles_etah/rho*(current(2)*b(3) - current(3)*b(2))
798 vec(2) = vfluid(1)*b(3)-vfluid(3)*b(1) + particles_eta*current(2) + particles_etah/rho*(-current(1)*b(3) + current(3)*b(1))
799 vec(3) = -vfluid(1)*b(2)+vfluid(2)*b(1) + particles_eta*current(3) + particles_etah/rho*(current(1)*b(2) - current(2)*b(1))
800 case (cylindrical)
801 vec(r_) = -vfluid(phi_)*b(z_)+vfluid(z_)*b(phi_) + particles_eta*current(r_) + particles_etah/rho*(current(phi_)*b(z_) - current(z_)*b(phi_))
802 vec(phi_) = vfluid(r_)*b(z_)-vfluid(z_)*b(r_) + particles_eta*current(phi_) + particles_etah/rho*(-current(r_)*b(z_) + current(z_)*b(r_))
803 vec(z_) = -vfluid(r_)*b(phi_)+vfluid(phi_)*b(r_) + particles_eta*current(z_) + particles_etah/rho*(current(r_)*b(phi_) - current(phi_)*b(r_))
804 end select
805 end if
806
807 e(:) = vec(:)
808
809 end subroutine get_efield
810
811 !> Get Lorentz factor from relativistic momentum
812 pure subroutine get_lfac(u,lfac)
814 double precision,dimension(3), intent(in) :: u
815 double precision, intent(out) :: lfac
816
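 ! Here u is interpreted as momentum per unit mass (gamma*v), so
 ! gamma = sqrt(1 + |u|^2/c_norm^2).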
817 if (relativistic) then
818 lfac = sqrt(1.0d0 + sum(u(:)**2)/c_norm**2)
819 else
820 lfac = 1.0d0
821 end if
822
823 end subroutine get_lfac
824
825 !> Get Lorentz factor from velocity
826 pure subroutine get_lfac_from_velocity(v,lfac)
828 double precision,dimension(3), intent(in) :: v
829 double precision, intent(out) :: lfac
830
831 if (relativistic) then
832 lfac = 1.0d0 / sqrt(1.0d0 - sum(v(:)**2)/c_norm**2)
833 else
834 lfac = 1.0d0
835 end if
836
837 end subroutine get_lfac_from_velocity
838
839 subroutine cross(a,b,c)
841 use mod_geometry
842 double precision, dimension(ndir), intent(in) :: a,b
843 double precision, dimension(ndir), intent(out) :: c
844
845 ! ndir needs to be three for this to work!!!
846 if(ndir==3) then
847 select case(coordinate)
849 c(1) = a(2)*b(3) - a(3)*b(2)
850 c(2) = a(3)*b(1) - a(1)*b(3)
851 c(3) = a(1)*b(2) - a(2)*b(1)
852 case (cylindrical)
853 c(r_) = a(phi_)*b(z_) - a(z_)*b(phi_)
854 c(phi_) = a(z_)*b(r_) - a(r_)*b(z_)
855 c(z_) = a(r_)*b(phi_) - a(phi_)*b(r_)
856 case default
857 call mpistop('geometry not implemented in cross(a,b,c)')
858 end select
859 else
860 call mpistop("cross in particles needs to be run with three components!")
861 end if
862
863 end subroutine cross
864
865 subroutine interpolate_var(igrid,ixI^L,ixO^L,gf,x,xloc,gfloc)
867
868 integer, intent(in) :: igrid,ixI^L, ixO^L
869 double precision, intent(in) :: gf(ixI^S)
870 double precision, intent(in) :: x(ixI^S,1:ndim)
871 double precision, intent(in) :: xloc(1:3)
872 double precision, intent(out) :: gfloc
873 double precision :: xd^D, myq, dx1
874 {^iftwod
875 double precision :: c00, c10
876 }
877 {^ifthreed
878 double precision :: c0, c1, c00, c10, c01, c11
879 }
880 integer :: ic^D, ic1^D, ic2^D, idir
881
882 {
883 if(stretch_type(^d)==stretch_uni) then
884 ! uniform stretch from xprobmin
885 ic^d = ceiling(dlog((xloc(^d)-xprobmin^d)*(qstretch(ps(igrid)%level,^d)-one)/&
886 dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
887 (node(pig^d_,igrid)-1)*block_nx^d+nghostcells
888 else if(stretch_type(^d)==stretch_symm) then
889 ! symmetric stretch about 0.5*(xprobmin+xprobmax)
890 if(xloc(^d)<xprobmin^d+xstretch^d) then
891 ! stretch to left from xprobmin+xstretch
892 ic^d = block_nx^d-(int(dlog((xprobmin^d+xstretch^d-xloc(^d))*(qstretch(ps(igrid)%level,^d)-one)/&
893 dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
894 (nstretchedblocks(ps(igrid)%level,^d)/2-node(pig^d_,igrid))*block_nx^d)+nghostcells
895 else if(xloc(^d)>xprobmax^d-xstretch^d) then
896 ! stretch to right from xprobmax-xstretch
897 ic^d = ceiling(dlog((xloc(^d)-xprobmax^d+xstretch^d)*(qstretch(ps(igrid)%level,^d)-one)/&
898 dxfirst(ps(igrid)%level,^d)+one)/dlog(qstretch(ps(igrid)%level,^d)))-&
899 ((node(pig^d_,igrid)-1+nstretchedblocks(ps(igrid)%level,^d)/2)*block_nx^d-&
900 domain_nx^d*2**(ps(igrid)%level-1))+nghostcells
901 else
902 ! possible non-stretched central part
903 ic^d = int((xloc(^d)-rnode(rpxmin^d_,igrid))/rnode(rpdx^d_,igrid)) + 1 + nghostcells
904 end if
905 else
906 ! no stretch
907 ic^d = int((xloc(^d)-rnode(rpxmin^d_,igrid))/rnode(rpdx^d_,igrid)) + 1 + nghostcells
908 end if
909 \}
910
911 ! linear interpolation:
912 {
913 if (x({ic^dd},^d) .lt. xloc(^d)) then
914 ic1^d = ic^d
915 else
916 ic1^d = ic^d -1
917 end if
918 ic2^d = ic1^d + 1
919 \}
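 ! ic1^D and ic2^D now bracket xloc; the xd^D computed below are the
 ! normalized distances in [0,1] used as weights for the (bi/tri)linear
 ! interpolation.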
920
921 {^d&
922 if(ic1^d.lt.ixglo^d+1 .or. ic2^d.gt.ixghi^d-1) then
923 print *, 'direction: ',^d
924 print *, 'position: ',xloc(1:ndim)
925 print *, 'indices:', ic1^d,ic2^d
926 call mpistop('Trying to interpolate from out of grid!')
927 end if
928 \}
929
930 {^ifoned
931 xd1 = (xloc(1)-x(ic11,1)) / (x(ic21,1) - x(ic11,1))
932 gfloc = gf(ic11) * (1.0d0 - xd1) + gf(ic21) * xd1
933 }
934 {^iftwod
935 xd1 = (xloc(1)-x(ic11,ic12,1)) / (x(ic21,ic12,1) - x(ic11,ic12,1))
936 xd2 = (xloc(2)-x(ic11,ic12,2)) / (x(ic11,ic22,2) - x(ic11,ic12,2))
937 c00 = gf(ic11,ic12) * (1.0d0 - xd1) + gf(ic21,ic12) * xd1
938 c10 = gf(ic11,ic22) * (1.0d0 - xd1) + gf(ic21,ic22) * xd1
939 gfloc = c00 * (1.0d0 - xd2) + c10 * xd2
940 }
941 {^ifthreed
942 xd1 = (xloc(1)-x(ic11,ic12,ic13,1)) / (x(ic21,ic12,ic13,1) - x(ic11,ic12,ic13,1))
943 xd2 = (xloc(2)-x(ic11,ic12,ic13,2)) / (x(ic11,ic22,ic13,2) - x(ic11,ic12,ic13,2))
944 xd3 = (xloc(3)-x(ic11,ic12,ic13,3)) / (x(ic11,ic12,ic23,3) - x(ic11,ic12,ic13,3))
945
946 c00 = gf(ic11,ic12,ic13) * (1.0d0 - xd1) + gf(ic21,ic12,ic13) * xd1
947 c10 = gf(ic11,ic22,ic13) * (1.0d0 - xd1) + gf(ic21,ic22,ic13) * xd1
948 c01 = gf(ic11,ic12,ic23) * (1.0d0 - xd1) + gf(ic21,ic12,ic23) * xd1
949 c11 = gf(ic11,ic22,ic23) * (1.0d0 - xd1) + gf(ic21,ic22,ic23) * xd1
950
951 c0 = c00 * (1.0d0 - xd2) + c10 * xd2
952 c1 = c01 * (1.0d0 - xd2) + c11 * xd2
953
954 gfloc = c0 * (1.0d0 - xd3) + c1 * xd3
955 }
956
957 end subroutine interpolate_var
958
959 subroutine time_spent_on_particles()
960 use mod_timing
962
963 double precision :: tpartc_avg, tpartc_int_avg, tpartc_io_avg, tpartc_com_avg, tpartc_grid_avg
964
965 call mpi_reduce(tpartc,tpartc_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
966 call mpi_reduce(tpartc_int,tpartc_int_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
967 call mpi_reduce(tpartc_io,tpartc_io_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
968 call mpi_reduce(tpartc_com,tpartc_com_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
969 call mpi_reduce(tpartc_grid,tpartc_grid_avg,1,mpi_double_precision,mpi_sum,0,icomm,ierrmpi)
970
971 if (mype ==0) then
972 tpartc_avg = tpartc_avg/dble(npe)
973 tpartc_int_avg = tpartc_int_avg/dble(npe)
974 tpartc_io_avg = tpartc_io_avg/dble(npe)
975 tpartc_com_avg = tpartc_com_avg/dble(npe)
976 tpartc_grid_avg = tpartc_grid_avg/dble(npe)
977 write(*,'(a,f12.3,a)')' Particle handling took : ',tpartc,' sec'
978 write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc/(timeloop+smalldouble),' %'
979 write(*,'(a,f12.3,a)')' Particle IO took : ',tpartc_io_avg,' sec'
980 write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_io_avg/(timeloop+smalldouble),' %'
981 write(*,'(a,f12.3,a)')' Particle COM took : ',tpartc_com_avg,' sec'
982 write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_com_avg/(timeloop+smalldouble),' %'
983 write(*,'(a,f12.3,a)')' Particle integration took : ',tpartc_int_avg,' sec'
984 write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_int_avg/(timeloop+smalldouble),' %'
985 write(*,'(a,f12.3,a)')' Particle init grid took : ',tpartc_grid_avg,' sec'
986 write(*,'(a,f12.2,a)')' Percentage: ',100.0d0*tpartc_grid_avg/(timeloop+smalldouble),' %'
987 end if
988
989 end subroutine time_spent_on_particles
990
991 subroutine read_particles_snapshot(file_exists)
993
994 logical,intent(out) :: file_exists
995 character(len=std_len) :: filename
996 integer :: mynpayload, mynparticles, pos
997
998 ! some initialisations:
1000 mynparticles = 0
1001 nparticles = 0
1002 it_particles = 0
1003
1004 ! open the snapshot file on the headnode
1005 file_exists=.false.
1006 if (mype == 0) then
1007! write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotini,'.dat'
1008 ! Strip restart_from_filename of the ending
1009 pos = scan(restart_from_file, '.dat', back=.true.)
1010 write(filename,"(a,a,i4.4,a)") trim(restart_from_file(1:pos-8)),'_particles',snapshotini,'.dat'
1011 INQUIRE(file=filename, exist=file_exists)
1012 if (.not. file_exists) then
1013 write(*,*) 'WARNING: File '//trim(filename)//' with particle data does not exist.'
1014 write(*,*) 'Initialising particles from user or default routines'
1015 else
1016 open(unit=unitparticles,file=filename,form='unformatted',action='read',status='unknown',access='stream')
1017 read(unitparticles) nparticles,it_particles,mynpayload
1018 if (mynpayload .ne. npayload) &
1019 call mpistop('npayload in restart file does not match npayload in mod_particles')
1020 end if
1021 end if
1022
1023 call mpi_bcast(file_exists,1,mpi_logical,0,icomm,ierrmpi)
1024 if (.not. file_exists) return
1025
1026 call mpi_bcast(nparticles,1,mpi_integer,0,icomm,ierrmpi)
1027 call mpi_bcast(it_particles,1,mpi_integer,0,icomm,ierrmpi)
1028
1029 do while (mynparticles .lt. nparticles)
1030 if (mype == 0) then
1031 do while (nparticles_on_mype .lt. nparticles_per_cpu_hi &
1032 .and. mynparticles .lt. nparticles)
1033 call read_from_snapshot
1034 mynparticles = mynparticles + 1
1035 end do
1036 end if ! mype==0
1037 call mpi_bcast(mynparticles,1,mpi_integer,0,icomm,ierrmpi)
1039 end do
1040
1041 if (mype == 0) close(unit=unitparticles)
1042
1043 end subroutine read_particles_snapshot
1044
1045 subroutine read_from_snapshot()
1046
1047 integer :: index,igrid_particle,ipe_particle
1048
1049 read(unitparticles) index
1050 allocate(particle(index)%self)
1051 allocate(particle(index)%payload(1:npayload))
1052 particle(index)%self%index = index
1053
1054 read(unitparticles) particle(index)%self%follow
1055 read(unitparticles) particle(index)%self%q
1056 read(unitparticles) particle(index)%self%m
1057 read(unitparticles) particle(index)%self%time
1058 read(unitparticles) particle(index)%self%dt
1059 read(unitparticles) particle(index)%self%x
1060 read(unitparticles) particle(index)%self%u
1061 read(unitparticles) particle(index)%payload(1:npayload)
1062
1063! if (particle(index)%self%follow) print*, 'follow index:', index
1064
1065 call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
1066 particle(index)%igrid = igrid_particle
1067 particle(index)%ipe = ipe_particle
1068
1070
1071 end subroutine read_from_snapshot
1072
1073 subroutine write_particles_snapshot()
1075
1076 character(len=std_len) :: filename
1077! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1078! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
1079! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1080! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
1081 type(particle_t), allocatable, dimension(:) :: send_particles
1082 type(particle_t), allocatable, dimension(:) :: receive_particles
1083 double precision, allocatable, dimension(:,:) :: send_payload
1084 double precision, allocatable, dimension(:,:) :: receive_payload
1085 integer :: status(MPI_STATUS_SIZE)
1086 integer,dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1087 integer :: ipe, ipart, iipart, send_n_particles_for_output
1088 logical,save :: file_exists=.false.
1089 integer :: snapshotnumber
1090
1091 if (time_advance) then
1092 snapshotnumber = snapshotnext
1093 else
1094 snapshotnumber = nint(t_next_output/dtsave_particles)
1095 end if
1096
1097 receive_n_particles_for_output_from_ipe(:) = 0
1098
1099 ! open the snapshot file on the headnode
1100 if (mype .eq. 0) then
1101 write(filename,"(a,a,i4.4,a)") trim(base_filename),'_particles',snapshotnumber,'.dat'
1102 INQUIRE(file=filename, exist=file_exists)
1103 if (.not. file_exists) then
1104 open(unit=unitparticles,file=filename,form='unformatted',status='new',access='stream')
1105 else
1106 open(unit=unitparticles,file=filename,form='unformatted',status='replace',access='stream')
1107 end if
1108 write(unitparticles) nparticles,it_particles,npayload
1109 end if
1110 if (npe==1) then
1111 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1112 call append_to_snapshot(particle(ipart)%self,particle(ipart)%payload)
1113 end do
1114 return
1115 end if
1116
1117 if (mype .ne. 0) then
1118 call mpi_send(nparticles_on_mype,1,mpi_integer,0,mype,icomm,ierrmpi)
1119 ! fill the send_buffer
1120 send_n_particles_for_output = nparticles_on_mype
1121 allocate(send_particles(1:send_n_particles_for_output))
1122 allocate(send_payload(1:npayload,1:send_n_particles_for_output))
1123 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1124 send_particles(iipart) = particle(ipart)%self
1125 send_payload(1:npayload,iipart) = particle(ipart)%payload(1:npayload)
1126 end do
1127 else
1128 ! get number of particles on other ipes
1129 do ipe=1,npe-1
1130 call mpi_recv(receive_n_particles_for_output_from_ipe(ipe),1,mpi_integer,ipe,ipe,icomm,status,ierrmpi)
1131 end do
1132 end if
1133
1134 if (mype .ne. 0) then
1135 call mpi_send(send_particles,send_n_particles_for_output,type_particle,0,mype,icomm,ierrmpi)
1136 call mpi_send(send_payload,npayload*send_n_particles_for_output,mpi_double_precision,0,mype,icomm,ierrmpi)
1137 deallocate(send_particles)
1138 deallocate(send_payload)
1139 end if
1140
1141 if (mype==0) then
1142 ! first output own particles (already in receive buffer)
1143 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1144 call append_to_snapshot(particle(ipart)%self,particle(ipart)%payload)
1145 end do
1146 ! now output the particles sent from the other ipes
1147 do ipe=1,npe-1
1148 allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
1149 allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
1150 call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1151 call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1152 do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1153 call append_to_snapshot(receive_particles(ipart),receive_payload(1:npayload,ipart))
1154 end do ! ipart
1155 deallocate(receive_particles)
1156 deallocate(receive_payload)
1157 end do ! ipe
1158 close(unit=unitparticles)
1159 end if ! mype == 0
1160
1161 end subroutine write_particles_snapshot
1162
1163 subroutine append_to_snapshot(myparticle,mypayload)
1164
1165 type(particle_t),intent(in) :: myparticle
1166 double precision :: mypayload(1:npayload)
1167
1168 write(unitparticles) myparticle%index
1169 write(unitparticles) myparticle%follow
1170 write(unitparticles) myparticle%q
1171 write(unitparticles) myparticle%m
1172 write(unitparticles) myparticle%time
1173 write(unitparticles) myparticle%dt
1174 write(unitparticles) myparticle%x
1175 write(unitparticles) myparticle%u
1176 write(unitparticles) mypayload(1:npayload)
1177
1178 end subroutine append_to_snapshot
1179
1180 subroutine init_particles_output()
1182
1183 integer :: iipart, ipart, icomp
1184 character(len=std_len) :: filename
1185 character(len=1024) :: line, strdata
1186
1187 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1188 if (particle(ipart)%self%follow) then
1189 filename = make_particle_filename(particle(ipart)%self%time,particle(ipart)%self%index,'individual')
1190 open(unit=unitparticles,file=filename,status='replace')
1191 line=''
1192 do icomp=1, npayload
1193 write(strdata,"(a,i2.2,a)") 'payload',icomp,','
1194 line = trim(line)//trim(strdata)
1195 end do
1196 write(unitparticles,"(a,a,a)") 'time,dt,x1,x2,x3,u1,u2,u3,',trim(line),'ipe,iteration,index'
1197 close(unit=unitparticles)
1198 end if
1199 end do
1200
1201 end subroutine init_particles_output
1202
1203 subroutine select_active_particles(end_time)
1205 double precision, intent(in) :: end_time
1206
1207 double precision :: t_min_mype
1208 integer :: ipart, iipart
1209 logical :: activate
1210
1211 t_min_mype = bigdouble
1212 nparticles_active_on_mype = 0
1213
1214 do iipart = 1, nparticles_on_mype
1215 ipart = particles_on_mype(iipart);
1216 activate = (particle(ipart)%self%time < end_time)
1217 t_min_mype = min(t_min_mype, particle(ipart)%self%time)
1218
1219 if (activate) then
1220 nparticles_active_on_mype = nparticles_active_on_mype + 1
1221 particles_active_on_mype(nparticles_active_on_mype) = ipart
1222 end if
1223 end do
1224
1225 call mpi_allreduce(t_min_mype, min_particle_time, 1, mpi_double_precision, &
1226 mpi_min, icomm, ierrmpi)
1227
1228 end subroutine select_active_particles
1229
1230 subroutine locate_particle(index,igrid_particle,ipe_particle)
1231 ! given the particle's unique index, tell me on which cpu and igrid it is
1232 ! returns -1,-1 if particle was not found
1234
1235 integer, intent(in) :: index
1236 integer, intent(out) :: igrid_particle, ipe_particle
1237 integer :: iipart,ipart,ipe_has_particle,ipe
1238 integer,dimension(0:1) :: buff
1239 logical :: has_particle(0:npe-1)
1240
1241 has_particle(:) = .false.
1242 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1243 if (particle(ipart)%self%index == index) then
1244 has_particle(mype) = .true.
1245 exit
1246 end if
1247 end do
1248
1249 if (has_particle(mype)) then
1250 buff(0) = particle(ipart)%igrid
1251 buff(1) = mype
1252 end if
1253
1254 if (npe>0) call mpi_allgather(has_particle(mype),1,mpi_logical,has_particle,1,mpi_logical,icomm,ierrmpi)
1255
1256 ipe_has_particle = -1
1257 do ipe=0,npe-1
1258 if (has_particle(ipe) .eqv. .true.) then
1259 ipe_has_particle = ipe
1260 exit
1261 end if
1262 end do
1263
1264 if (ipe_has_particle .ne. -1) then
1265 if (npe>0) call mpi_bcast(buff,2,mpi_integer,ipe_has_particle,icomm,ierrmpi)
1266 igrid_particle=buff(0)
1267 ipe_particle = buff(1)
1268 else
1269 igrid_particle=-1
1270 ipe_particle = -1
1271 end if
1272
1273 end subroutine locate_particle
1274
1275 subroutine find_particle_ipe(x,igrid_particle,ipe_particle)
1277 use mod_slice, only: get_igslice
1279
1280 double precision, intent(in) :: x(3)
1281 integer, intent(out) :: igrid_particle, ipe_particle
1282 integer :: ig(ndir,nlevelshi), ig_lvl(nlevelshi)
1283 integer :: idim, ic(ndim)
1284 type(tree_node_ptr) :: branch
1285
1286 ! first check if the particle is in the domain
1287 if (.not. particle_in_domain(x)) then
1288 igrid_particle = -1
1289 ipe_particle = -1
1290 return
1291 end if
1292
1293 ! get the index on each level
1294 do idim = 1, ndim
1295 call get_igslice(idim,x(idim), ig_lvl)
1296 ig(idim,:) = ig_lvl
1297 end do
1298
1299 ! traverse the tree until leaf is found
1300 branch=tree_root({ig(^d,1)})
1301 do while (.not.branch%node%leaf)
1302 {ic(^d)=ig(^d,branch%node%level+1) - 2 * branch%node%ig^d +2\}
1303 branch%node => branch%node%child({ic(^d)})%node
1304 end do
1305
1306 igrid_particle = branch%node%igrid
1307 ipe_particle = branch%node%ipe
1308
1309 end subroutine find_particle_ipe
1310
1311 !> Check if particle is inside computational domain
1312 logical function particle_in_domain(x)
1314 double precision, dimension(ndim), intent(in) :: x
1315
1316 particle_in_domain = all(x >= [ xprobmin^d ]) .and. &
1317 all(x < [ xprobmax^d ]) ! Jannis: as before < instead of <= here, necessary?
1318 end function particle_in_domain
1319
1320 !> Quick check if particle is still in igrid
1321 logical function particle_in_igrid(ipart, igrid)
1323 integer, intent(in) :: igrid, ipart
1324 double precision :: x(ndim), grid_rmin(ndim), grid_rmax(ndim)
1325
1326 ! First check if the igrid is still there
1327 if (.not. allocated(ps(igrid)%w)) then
1328 particle_in_igrid = .false.
1329 else
1330 grid_rmin = [ {rnode(rpxmin^d_,igrid)} ]
1331 grid_rmax = [ {rnode(rpxmax^d_,igrid)} ]
1332 x = particle(ipart)%self%x(1:ndim)
1333 particle_in_igrid = all(x >= grid_rmin) .and. all(x < grid_rmax)
1334 end if
1335 end function particle_in_igrid
1336
1337 subroutine set_neighbor_ipe()
1339
1340 integer :: igrid, iigrid,ipe
1341 integer :: my_neighbor_type, i^D
1342 logical :: ipe_is_neighbor(0:npe-1)
1343
1344 ipe_is_neighbor(:) = .false.
1345 do iigrid=1,igridstail; igrid=igrids(iigrid);
1346
1347 {do i^db=-1,1\}
1348 if (i^d==0|.and.) then
1349 cycle
1350 end if
1351 my_neighbor_type=neighbor_type(i^d,igrid)
1352
1353 select case (my_neighbor_type)
1354 case (neighbor_boundary) ! boundary
1355 ! do nothing
1356 case (neighbor_coarse) ! fine-coarse
1357 call ipe_fc(i^d,igrid,ipe_is_neighbor)
1358 case (neighbor_sibling) ! same level
1359 call ipe_srl(i^d,igrid,ipe_is_neighbor)
1360 case (neighbor_fine) ! coarse-fine
1361 call ipe_cf(i^d,igrid,ipe_is_neighbor)
1362 end select
1363
1364 {end do\}
1365
1366 end do
1367
1368 ! remove self as neighbor
1369 ipe_is_neighbor(mype) = .false.
1370
1371 ! Now make the list of neighbors
1372 npe_neighbors = 0
1373 do ipe=0,npe-1
1374 if (ipe_is_neighbor(ipe)) then
1375 npe_neighbors = npe_neighbors + 1
1376 ipe_neighbor(npe_neighbors) = ipe
1377 end if
1378 end do ! ipe
1379
1380 end subroutine set_neighbor_ipe
1381
1382 subroutine ipe_fc(i^D,igrid,ipe_is_neighbor)
1384 integer, intent(in) :: i^D, igrid
1385 logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1386
1387 ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1388
1389 end subroutine ipe_fc
1390
1391 subroutine ipe_srl(i^D,igrid,ipe_is_neighbor)
1393 integer, intent(in) :: i^D, igrid
1394 logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1395
1396 ipe_is_neighbor(neighbor(2,i^d,igrid)) = .true.
1397
1398 end subroutine ipe_srl
1399
1400 subroutine ipe_cf(i^D,igrid,ipe_is_neighbor)
1402 integer, intent(in) :: i^D, igrid
1403 logical, intent(inout) :: ipe_is_neighbor(0:npe-1)
1404 integer :: ic^D, inc^D
1405
1406 {do ic^db=1+int((1-i^db)/2),2-int((1+i^db)/2)
1407 inc^db=2*i^db+ic^db
1408 \}
1409 ipe_is_neighbor( neighbor_child(2,inc^d,igrid) ) = .true.
1410
1411 {end do\}
1412
1413 end subroutine ipe_cf
1414
1415 subroutine write_particle_output()
1417
1418 character(len=std_len) :: filename
1419! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
1420! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
1421 type(particle_t), allocatable, dimension(:) :: send_particles
1422 double precision, allocatable, dimension(:,:) :: send_payload
1423 double precision :: tout
1424 integer :: send_n_particles_for_output, cc
1425 integer :: nout
1426 integer :: ipart,iipart
1427
1428 if (write_individual) then
1429 call output_individual()
1430 end if
1431
1432 if (write_ensemble) then
1433 send_n_particles_for_output = 0
1434
1435 do iipart=1,nparticles_on_mype
1436 ipart=particles_on_mype(iipart);
1437
1438 ! have to send particle to rank zero for output
1439 if (mod(particle(ipart)%self%index,downsample_particles) .eq. 0) then
1440 send_n_particles_for_output = send_n_particles_for_output + 1
1441 end if
1442 end do
1443
1444 allocate(send_particles(1:send_n_particles_for_output))
1445 allocate(send_payload(npayload,1:send_n_particles_for_output))
1446 cc = 1
1447 do iipart=1,nparticles_on_mype
1448 ipart=particles_on_mype(iipart);
1449
1450 ! have to send particle to rank zero for output
1451 if (mod(particle(ipart)%self%index,downsample_particles) .eq. 0) then
1452 send_particles(cc) = particle(ipart)%self
1453 send_payload(1:npayload,cc) = particle(ipart)%payload(1:npayload)
1454 cc=cc+1
1455 end if
1456 end do
1457
1458 call output_ensemble(send_n_particles_for_output,send_particles, &
1459 send_payload,'ensemble')
1460 deallocate(send_particles)
1461 deallocate(send_payload)
1462 end if
1463
1464 end subroutine write_particle_output
1465
1466 character(len=128) function make_particle_filename(tout,index,type)
1468
1469 character(len=*), intent(in) :: type
1470 double precision, intent(in) :: tout
1471 integer, intent(in) :: index
1472 integer :: nout, mysnapshot
1473
1474 select case(type)
1475 case ('ensemble')
1476 nout = nint(tout / dtsave_particles)
1477 write(make_particle_filename,"(a,a,i6.6,a)") trim(base_filename),'_ensemble',nout,'.csv'
1478 case ('destroy')
1479 write(make_particle_filename,"(a,a)") trim(base_filename),'_destroyed.csv'
1480 case ('individual')
1481 write(make_particle_filename,"(a,a,i6.6,a)") trim(base_filename),'_particle',index,'.csv'
1482 end select
1483
1484 end function make_particle_filename
1485
1486 subroutine output_ensemble(send_n_particles_for_output,send_particles,send_payload,typefile)
1488
1489 integer, intent(in) :: send_n_particles_for_output
1490 type(particle_t), dimension(send_n_particles_for_output), intent(in) :: send_particles
1491 double precision, dimension(npayload,send_n_particles_for_output), intent(in) :: send_payload
1492 character(len=*), intent(in) :: typefile
1493 character(len=std_len) :: filename
1494 type(particle_t), allocatable, dimension(:) :: receive_particles
1495 double precision, allocatable, dimension(:,:) :: receive_payload
1496 integer :: status(MPI_STATUS_SIZE)
1497 integer,dimension(0:npe-1) :: receive_n_particles_for_output_from_ipe
1498 integer :: ipe, ipart, nout
1499 logical :: file_exists
1500
1501 receive_n_particles_for_output_from_ipe(:) = 0
1502
1503 call mpi_allgather(send_n_particles_for_output, 1, mpi_integer, &
1504 receive_n_particles_for_output_from_ipe, 1, mpi_integer, icomm, ierrmpi)
1505
1506 ! If there are no particles to be written, skip the output
1507 if (sum(receive_n_particles_for_output_from_ipe(:)) == 0) return
1508
1509 if (mype > 0) then
1510 call mpi_send(send_particles,send_n_particles_for_output,type_particle,0,mype,icomm,ierrmpi)
1511 call mpi_send(send_payload,npayload*send_n_particles_for_output,mpi_double_precision,0,mype,icomm,ierrmpi)
1512 else
1513 ! Create file and write header
1514 if(typefile=='destroy') then ! Destroyed file
1515 write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1516 trim(typefile) // '.csv'
1517 n_output_destroyed = n_output_destroyed + 1
1518 inquire(file=filename, exist=file_exists)
1519
1520 if (.not. file_exists) then
1521 open(unit=unitparticles, file=filename)
1522 write(unitparticles,"(a)") trim(csv_header)
1523 else
1524 open(unit=unitparticles, file=filename, access='append')
1525 end if
1526
1527 else ! Ensemble file
1528 write(filename,"(a,a,i6.6,a)") trim(base_filename) // '_', &
1529 trim(typefile) // '_', nint(t_next_output/dtsave_particles),'.csv'
1530 open(unit=unitparticles,file=filename)
1531 write(unitparticles,"(a)") trim(csv_header)
1532 end if
1533
1534 ! Write own particles
1535 do ipart=1,send_n_particles_for_output
1536 call output_particle(send_particles(ipart),send_payload(1:npayload,ipart),0,unitparticles)
1537 end do
1538
1539 ! Write particles from other tasks
1540 do ipe=1,npe-1
1541 allocate(receive_particles(1:receive_n_particles_for_output_from_ipe(ipe)))
1542 allocate(receive_payload(1:npayload,1:receive_n_particles_for_output_from_ipe(ipe)))
1543 call mpi_recv(receive_particles,receive_n_particles_for_output_from_ipe(ipe),type_particle,ipe,ipe,icomm,status,ierrmpi)
1544 call mpi_recv(receive_payload,npayload*receive_n_particles_for_output_from_ipe(ipe),mpi_double_precision,ipe,ipe,icomm,status,ierrmpi)
1545 do ipart=1,receive_n_particles_for_output_from_ipe(ipe)
1546 call output_particle(receive_particles(ipart),receive_payload(1:npayload,ipart),ipe,unitparticles)
1547 end do ! ipart
1548 deallocate(receive_particles)
1549 deallocate(receive_payload)
1550 end do ! ipe
1551
1552 close(unitparticles)
1553 end if
1554
1555 end subroutine output_ensemble
1556
1557 subroutine output_individual()
1559 character(len=std_len) :: filename
1560 integer :: ipart,iipart,ipe
1561 logical :: file_exists
1562
1563 do iipart=1,nparticles_on_mype
1564 ipart=particles_on_mype(iipart)
1565
1566 ! should the particle be dumped?
1567 if (particle(ipart)%self%follow) then
1568 write(filename,"(a,a,i6.6,a)") trim(base_filename), '_particle_', &
1569 particle(ipart)%self%index, '.csv'
1570 inquire(file=filename, exist=file_exists)
1571
1572 ! Create empty file and write header on first iteration, or when the
1573 ! file does not exist yet
1574 if (it_particles == 0 .or. .not. file_exists) then
1575 open(unit=unitparticles, file=filename)
1576 write(unitparticles,"(a)") trim(csv_header)
1577 else
1578 open(unit=unitparticles, file=filename, access='append')
1579 end if
1580
1581 call output_particle(particle(ipart)%self,&
1582 particle(ipart)%payload(1:npayload),mype,unitparticles)
1583
1584 close(unitparticles)
1585 end if
1586 end do
1587
1588 end subroutine output_individual
1589
1590 subroutine output_particle(myparticle,payload,ipe,file_unit)
1592 use mod_geometry
1593
1594 type(particle_t),intent(in) :: myparticle
1595 double precision, intent(in) :: payload(npayload)
1596 integer, intent(in) :: ipe
1597 integer, intent(in) :: file_unit
1598 double precision :: x(3), u(3)
1599 integer :: icomp
1600
1601 ! convert and normalize the position
1602 if (nocartesian) then
1603 select case(coordinate)
1605 x = myparticle%x*length_convert_factor
1606 case(cylindrical)
1607 x(r_) = myparticle%x(r_)*length_convert_factor
1608 x(z_) = myparticle%x(z_)*length_convert_factor
1609 x(phi_) = myparticle%x(phi_)
1610 case(spherical)
1611 x(:) = myparticle%x(:)
1612 x(1) = x(1)*length_convert_factor
1613 end select
1614 else
1615 call partcoord_to_cartesian(myparticle%x,x)
1616 x(:) = x(:)*length_convert_factor
1617 end if
1618
1619 u = myparticle%u(1:3) * integrator_velocity_factor
1620
1621 write(file_unit, csv_format) myparticle%time, myparticle%dt, x(1:3), &
1622 u, payload(1:npayload), ipe, it_particles, &
1623 myparticle%index
1624
1625 end subroutine output_particle
1626
1627 !> Convert position to Cartesian coordinates
1628 subroutine partcoord_to_cartesian(xp,xpcart)
1629 ! conversion of particle coordinate from cylindrical/spherical to cartesian coordinates done here
1631 use mod_geometry
1632
1633 double precision, intent(in) :: xp(1:3)
1634 double precision, intent(out) :: xpcart(1:3)
1635
1636 select case (coordinate)
1638 xpcart(1:3) = xp(1:3)
1639 case (cylindrical)
1640 xpcart(1) = xp(r_)*cos(xp(phi_))
1641 xpcart(2) = xp(r_)*sin(xp(phi_))
1642 xpcart(3) = xp(z_)
1643 case (spherical)
1644 xpcart(1) = xp(1)*sin(xp(2))*cos(xp(3))
1645 {^iftwod
1646 xpcart(2) = xp(1)*cos(xp(2))
1647 xpcart(3) = xp(1)*sin(xp(2))*sin(xp(3))}
1648 {^ifthreed
1649 xpcart(2) = xp(1)*sin(xp(2))*sin(xp(3))
1650 xpcart(3) = xp(1)*cos(xp(2))}
1651 case default
1652 write(*,*) "No converter for coordinate=",coordinate
1653 end select
1654
1655 end subroutine partcoord_to_cartesian
1656
1657 !> Convert to noncartesian coordinates
1658 subroutine partcoord_from_cartesian(xp,xpcart)
1659 ! conversion of particle coordinate from Cartesian to cylindrical/spherical coordinates done here
1661 use mod_geometry
1662
1663 double precision, intent(in) :: xpcart(1:3)
1664 double precision, intent(out) :: xp(1:3)
1665 double precision :: xx, yy, zz
1666 double precision :: rr, tth, pphi
1667
1668 select case (coordinate)
1670 xp(1:3)=xpcart(1:3)
1671 case (cylindrical)
1672 xp(r_) = sqrt(xpcart(1)**2 + xpcart(2)**2)
1673 xp(phi_) = atan2(xpcart(2),xpcart(1))
1674 if (xp(phi_) .lt. 0.d0) xp(phi_) = 2.0d0*dpi + xp(phi_)
1675 xp(z_) = xpcart(3)
1676 case (spherical)
1677 xx = xpcart(1)
1678 {^iftwod
1679 yy = xpcart(3)
1680 zz = xpcart(2)}
1681 {^ifthreed
1682 yy = xpcart(2)
1683 zz = xpcart(3)}
1684 rr = sqrt(xx**2 + yy**2 + zz**2)
1685 tth = acos(zz/rr)
1686 if (yy > 0.d0) then
1687 pphi = acos(xx/sqrt(xx**2+yy**2))
1688 else
1689 pphi = 2.d0*dpi - acos(xx/sqrt(xx**2+yy**2))
1690 endif
1691 xp(1:3) = (/ rr, tth, pphi /)
1692 case default
1693 write(*,*) "No converter for coordinate=",coordinate
1694 end select
1695
1696 end subroutine partcoord_from_cartesian
1697
1698 !> Convert vector to Cartesian coordinates
1699 subroutine partvec_to_cartesian(xp,up,upcart)
1700 ! conversion of a generic vector from cylindrical/spherical to cartesian coordinates done here
1702 use mod_geometry
1703
1704 double precision, intent(in) :: xp(1:3),up(1:3)
1705 double precision, intent(out) :: upcart(1:3)
1706
1707 select case (coordinate)
1709 upcart(1:3)=up(1:3)
1710 case (cylindrical)
1711 upcart(1) = cos(xp(phi_)) * up(r_) - sin(xp(phi_)) * up(phi_)
1712 upcart(2) = cos(xp(phi_)) * up(phi_) + sin(xp(phi_)) * up(r_)
1713 upcart(3) = up(z_)
1714 case (spherical)
1715 upcart(1) = up(1)*sin(xp(2))*cos(xp(3)) + up(2)*cos(xp(2))*cos(xp(3)) - up(3)*sin(xp(3))
1716 {^iftwod
1717 upcart(2) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))
1718 upcart(3) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))}
1719 {^ifthreed
1720 upcart(2) = up(1)*sin(xp(2))*sin(xp(3)) + up(2)*cos(xp(2))*sin(xp(3)) + up(3)*cos(xp(3))
1721 upcart(3) = up(1)*cos(xp(2)) - up(2)*sin(xp(2))}
1722 case default
1723 write(*,*) "No converter for coordinate=",coordinate
1724 end select
1725
1726 end subroutine partvec_to_cartesian
1727
1728 !> Convert vector from Cartesian coordinates
1729 subroutine partvec_from_cartesian(xp,up,upcart)
1730 ! conversion of a generic vector to cylindrical/spherical from cartesian coordinates done here
1732 use mod_geometry
1733
1734 double precision, intent(in) :: xp(1:3),upcart(1:3)
1735 double precision, intent(out) :: up(1:3)
1736
1737 select case (coordinate)
1739 up(1:3) = upcart(1:3)
1740 case (cylindrical)
1741 up(r_) = cos(xp(phi_)) * upcart(1) + sin(xp(phi_)) * upcart(2)
1742 up(phi_) = -sin(xp(phi_)) * upcart(1) + cos(xp(phi_)) * upcart(2)
1743 up(z_) = upcart(3)
1744 case (spherical)
1745 {^iftwod
1746 up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(3)*sin(xp(2))*sin(xp(3)) + upcart(2)*cos(xp(2))
1747 up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(3)*cos(xp(2))*sin(xp(3)) - upcart(2)*sin(xp(2))
1748 up(3) = -upcart(1)*sin(xp(3)) + upcart(3)*cos(xp(3))}
1749 {^ifthreed
1750 up(1) = upcart(1)*sin(xp(2))*cos(xp(3)) + upcart(2)*sin(xp(2))*sin(xp(3)) + upcart(3)*cos(xp(2))
1751 up(2) = upcart(1)*cos(xp(2))*cos(xp(3)) + upcart(2)*cos(xp(2))*sin(xp(3)) - upcart(3)*sin(xp(2))
1752 up(3) = -upcart(1)*sin(xp(3)) + upcart(2)*cos(xp(3))}
1753 case default
1754 write(*,*) "No converter for coordinate=",coordinate
1755 end select
1756
1757 end subroutine partvec_from_cartesian
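Illustration only: partvec_from_cartesian applies the transpose (inverse) of the rotation used in partvec_to_cartesian, so converting a vector forth and back recovers the original components. A self-contained sketch of the cylindrical case:

program cyl_vector_roundtrip_check
  implicit none
  double precision :: phi, up(3), upcart(3), upback(3)
  phi = 0.3d0
  up  = [ 0.2d0, -0.5d0, 1.0d0 ]                     ! (v_r, v_phi, v_z)
  ! forward map (cylindrical branch of partvec_to_cartesian)
  upcart = [ cos(phi)*up(1) - sin(phi)*up(2), &
             sin(phi)*up(1) + cos(phi)*up(2), up(3) ]
  ! inverse map (cylindrical branch of partvec_from_cartesian)
  upback = [  cos(phi)*upcart(1) + sin(phi)*upcart(2), &
             -sin(phi)*upcart(1) + cos(phi)*upcart(2), upcart(3) ]
  print *, maxval(abs(upback - up))                  ! zero up to round-off
end program cyl_vector_roundtrip_check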
1758
1759 !> Fix particle position when crossing the 0,2pi boundary in noncartesian coordinates
1760 subroutine fix_phi_crossing(xp,igrid)
1761 use mod_global_parameters
1762 use mod_geometry
1763
1764 integer, intent(in) :: igrid
1765 double precision, intent(inout) :: xp(1:3)
1766 double precision :: xpmod(1:3)
1767 integer :: phiind
1768
1769 select case (coordinate)
1770 case (cartesian,cartesian_stretched,cartesian_expansion)
1771 ! Do nothing
1772 return
1773 case (cylindrical)
1774 phiind = phi_
1775 case (spherical)
1776 phiind = 3
1777 end select
1778
1779 xpmod = xp
1780
1781 ! Fix phi-boundary crossing
1782 ! Case 1: particle has crossed from ~2pi to ~0
1783 xpmod(phiind) = xp(phiind) + 2.d0*dpi
1784 if ((.not. point_in_igrid_ghostc(xp,igrid,0)) &
1785 .and. (point_in_igrid_ghostc(xpmod,igrid,0))) then
1786 xp(phiind) = xpmod(phiind)
1787 return
1788 end if
1789 ! Case 2: particle has crossed from ~0 to ~2pi
1790 xpmod(phiind) = xp(phiind) - 2.d0*dpi
1791 if ((.not. point_in_igrid_ghostc(xp,igrid,0)) &
1792 .and. (point_in_igrid_ghostc(xpmod,igrid,0))) then
1793 xp(phiind) = xpmod(phiind)
1794 return
1795 end if
1796
1797 end subroutine fix_phi_crossing
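Illustration only: a simplified, self-contained sketch of the wrap logic above, with a plain interval test standing in for point_in_igrid_ghostc and hypothetical block bounds. A particle whose raw phi has jumped across the 2*pi cut is shifted by +2*pi so it again falls inside the (ghost-cell-extended) phi range of the block it belongs to.

program phi_wrap_sketch
  implicit none
  double precision, parameter :: twopi = 8.0d0*atan(1.0d0)
  double precision :: phi, phimin, phimax
  ! hypothetical ghost-cell-extended phi range of a block touching phi = 2*pi
  phimin = twopi - 0.5d0
  phimax = twopi + 0.1d0
  phi = 0.01d0        ! raw coordinate after the particle crossed the 2*pi cut
  if (.not. (phi >= phimin .and. phi < phimax) .and. &
      (phi + twopi >= phimin .and. phi + twopi < phimax)) then
    phi = phi + twopi ! case 1 above: shift back into the block's phi range
  end if
  print *, phi        ! ~2*pi + 0.01, consistent with the block it sits in
end program phi_wrap_sketch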
1798
1799 !> Quick check if particle coordinate is inside igrid
1800 !> (ghost cells included, except the last ngh)
1801 logical function point_in_igrid_ghostc(x, igrid, ngh)
1802 use mod_global_parameters
1803 use mod_geometry
1804 integer, intent(in) :: igrid, ngh
1805 double precision, intent(in) :: x(ndim)
1806 double precision :: grid_rmin(ndim), grid_rmax(ndim)
1807
1808 ! TODO: this does not work with stretched coordinates!
1809 ! TODO: but phi should not be stretched in principle...
1810 grid_rmin = [ {rnode(rpxmin^d_,igrid)-rnode(rpdx^d_,igrid)*(dble(nghostcells-ngh)-0.5d0)} ]
1811 grid_rmax = [ {rnode(rpxmax^d_,igrid)+rnode(rpdx^d_,igrid)*(dble(nghostcells-ngh)-0.5d0)} ]
1812 point_in_igrid_ghostc = all(x >= grid_rmin) .and. all(x < grid_rmax)
1813 end function point_in_igrid_ghostc
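Illustration only: a worked numeric sketch of the bound used above, with hypothetical block values. For a physical range [0,1] with dx = 0.1, nghostcells = 2 and ngh = 0, the accepted interval extends by dx*(2 - 0 - 0.5) = 0.15 on each side.

program ghost_bound_sketch
  implicit none
  double precision :: xmin, xmax, dx, lo, hi, x
  integer :: nghostcells, ngh
  xmin = 0.0d0; xmax = 1.0d0; dx = 0.1d0    ! hypothetical block
  nghostcells = 2; ngh = 0
  lo = xmin - dx*(dble(nghostcells - ngh) - 0.5d0)   ! = -0.15
  hi = xmax + dx*(dble(nghostcells - ngh) - 0.5d0)   ! =  1.15
  x  = -0.1d0
  print *, (x >= lo .and. x < hi)           ! T: inside the ghost-extended block
end program ghost_bound_sketch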
1814
1815 subroutine comm_particles()
1816
1817 use mod_global_parameters
1818
1819 integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
1820 integer :: index
1821 integer :: tag_send, tag_receive, send_buff, rcv_buff
1822 integer :: status(MPI_STATUS_SIZE)
1823 integer, dimension(0:npe-1) :: send_n_particles_to_ipe
1824 integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
1825 type(particle_t), allocatable, dimension(:,:) :: send_particles
1826 type(particle_t), allocatable, dimension(:,:) :: receive_particles
1827 double precision, allocatable, dimension(:,:,:) :: send_payload
1828 double precision, allocatable, dimension(:,:,:) :: receive_payload
1829 integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
1830 integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
1831 integer :: destroy_n_particles_mype
1832 integer, allocatable, dimension(:) :: sndrqst, rcvrqst
1833 integer, allocatable, dimension(:) :: sndrqst_payload, rcvrqst_payload
1834 integer :: isnd, ircv
1835 integer, parameter :: maxneighbors=56 ! maximum case: coarse-fine in 3D
1836 logical :: BC_applied
1837 !-----------------------------------------------------------------------------
1838
1839 send_n_particles_to_ipe(:) = 0
1840 receive_n_particles_from_ipe(:) = 0
1841 destroy_n_particles_mype = 0
1842
1843 allocate( particle_index_to_be_sent_to_ipe(nparticles_per_cpu_hi,0:npe-1) )
1844 allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
1845 allocate( sndrqst_payload(1:npe-1), rcvrqst_payload(1:npe-1) )
1846 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1847 sndrqst_payload = mpi_request_null; rcvrqst_payload = mpi_request_null;
1848
1849 ! check if and where to send each particle, destroy if necessary
1850 ! !$OMP PARALLEL DO PRIVATE(ipart)
1851 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
1852
1853 ! first check if the particle should be destroyed (user defined criterion)
1854 if (associated(usr_destroy_particle)) then
1855 if (usr_destroy_particle(particle(ipart))) then
1856 ! !$OMP CRITICAL(destroy)
1857 destroy_n_particles_mype = destroy_n_particles_mype + 1
1858 particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1859 ! !$OMP END CRITICAL(destroy)
1860 cycle
1861 end if
1862 end if
1863
1864 ! is my particle still in the same igrid?
1865 if (.not.particle_in_igrid(ipart,particle(ipart)%igrid)) then
1866 call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
1867
1868 ! destroy particle if out of domain (signalled by return value of -1)
1869 if (igrid_particle == -1 )then
1870 call apply_periodb(particle(ipart)%self,igrid_particle,ipe_particle,bc_applied)
1871 if (.not. bc_applied .or. igrid_particle == -1) then
1872 ! !$OMP CRITICAL(destroy2)
1873 destroy_n_particles_mype = destroy_n_particles_mype + 1
1874 particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
1875 ! !$OMP END CRITICAL(destroy2)
1876 cycle
1877 end if
1878 end if
1879
1880 ! particle still present
1881 particle(ipart)%igrid = igrid_particle
1882 particle(ipart)%ipe = ipe_particle
1883
1884 ! if we have more than one core, is it on another cpu?
1885 if (npe .gt. 1 .and. particle(ipart)%ipe .ne. mype) then
1886 ! !$OMP CRITICAL(send)
1887 send_n_particles_to_ipe(ipe_particle) = &
1888 send_n_particles_to_ipe(ipe_particle) + 1
1889 if (send_n_particles_to_ipe(ipe_particle) .gt. nparticles_per_cpu_hi) &
1890 call mpistop('too many particles, increase nparticles_per_cpu_hi')
1891 particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
1892 ! !$OMP END CRITICAL(send)
1893 end if ! ipe_particle
1894
1895 end if ! particle_in_grid
1896
1897 end do ! ipart
1898 ! !$OMP END PARALLEL DO
1899
1900 call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
1901
1902 ! get out when only one core:
1903 if (npe == 1) return
1904
1905 ! communicate amount of particles to be sent/received
1906 isnd = 0; ircv = 0;
1907 do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1908 tag_send = mype * npe + ipe
1909 tag_receive = ipe * npe + mype
1910 isnd = isnd + 1; ircv = ircv + 1
1911 call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1912 call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1913 end do
1914
1915 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1916 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1917 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
1918
1919 ! allocate the send and receive buffers,
1920 ! If this is still not good enough, consider optimizing using lists of lists:
1921 allocate(send_particles(maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
1922 allocate(receive_particles(maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
1923 allocate(send_payload(npayload,maxval(send_n_particles_to_ipe),1:min(maxneighbors,npe-1)))
1924 allocate(receive_payload(npayload,maxval(receive_n_particles_from_ipe),1:min(maxneighbors,npe-1)))
1925
1926 isnd = 0; ircv = 0;
1927 ! send and receive the data of the particles
1928 do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1929 tag_send = mype * npe + ipe
1930 tag_receive = ipe * npe + mype
1931
1932 ! should i send some particles to ipe?
1933 if (send_n_particles_to_ipe(ipe) .gt. 0) then
1934
1935 ! create the send buffer
1936 do ipart = 1, send_n_particles_to_ipe(ipe)
1937 send_particles(ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
1938 send_payload(1:npayload,ipart,iipe) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
1939 end do ! ipart
1940 isnd = isnd + 1
1941 call mpi_isend(send_particles(:,iipe),send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
1942 call mpi_isend(send_payload(:,:,iipe),npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,sndrqst_payload(isnd),ierrmpi)
1943
1944 end if ! send .gt. 0
1945
1946 ! should i receive some particles from ipe?
1947 if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1948
1949 ircv = ircv + 1
1950 call mpi_irecv(receive_particles(:,iipe),receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
1951 call mpi_irecv(receive_payload(:,:,iipe),npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,rcvrqst_payload(ircv),ierrmpi)
1952
1953 end if ! receive .gt. 0
1954 end do ! ipe
1955
1956 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
1957 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
1958 call mpi_waitall(isnd,sndrqst_payload,mpi_statuses_ignore,ierrmpi)
1959 call mpi_waitall(ircv,rcvrqst_payload,mpi_statuses_ignore,ierrmpi)
1960
1961 do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1962 do ipart = 1, send_n_particles_to_ipe(ipe)
1963 deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
1964 deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
1965 call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
1966 end do ! ipart
1967 end do
1968
1969 do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
1970 if (receive_n_particles_from_ipe(ipe) .gt. 0) then
1971 do ipart = 1, receive_n_particles_from_ipe(ipe)
1972
1973 index = receive_particles(ipart,iipe)%index
1974 call push_particle_into_particles_on_mype(index)
1975 !if (.not. allocated(particle(index)%self)) &
1976 allocate(particle(index)%self)
1977 particle(index)%self = receive_particles(ipart,iipe)
1978 !if (.not. allocated(particle(index)%payload)) &
1979 allocate(particle(index)%payload(npayload))
1980 particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart,iipe)
1981
1982 ! since we don't send the igrid, need to re-locate it
1983 call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
1984 particle(index)%igrid = igrid_particle
1985 particle(index)%ipe = ipe_particle
1986
1987 end do ! ipart
1988
1989 end if ! receive .gt. 0
1990 end do ! ipe
1991
1992 end subroutine comm_particles
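Illustration only: comm_particles proceeds in two phases. It first exchanges particle counts with neighbouring ranks through nonblocking MPI_ISEND/MPI_IRECV so that receive buffers can be sized, and only then exchanges the particle structures and payload arrays. A self-contained sketch of the count phase; for simplicity it exchanges with all ranks rather than only the ipe_neighbor list, while mirroring the tag scheme used above.

program count_handshake_sketch
  use mpi
  implicit none
  integer :: ierr, npe, mype, ipe, isnd, ircv
  integer, allocatable :: nsend(:), nrecv(:), sndrqst(:), rcvrqst(:)

  call mpi_init(ierr)
  call mpi_comm_size(mpi_comm_world, npe, ierr)
  call mpi_comm_rank(mpi_comm_world, mype, ierr)

  allocate(nsend(0:npe-1), nrecv(0:npe-1), sndrqst(npe), rcvrqst(npe))
  nsend = 1          ! pretend we send one item to every other rank
  nrecv = 0
  sndrqst = mpi_request_null; rcvrqst = mpi_request_null
  isnd = 0; ircv = 0

  do ipe = 0, npe-1
    if (ipe == mype) cycle
    isnd = isnd + 1; ircv = ircv + 1
    call mpi_isend(nsend(ipe), 1, mpi_integer, ipe, mype*npe+ipe, &
                   mpi_comm_world, sndrqst(isnd), ierr)
    call mpi_irecv(nrecv(ipe), 1, mpi_integer, ipe, ipe*npe+mype, &
                   mpi_comm_world, rcvrqst(ircv), ierr)
  end do
  call mpi_waitall(isnd, sndrqst, mpi_statuses_ignore, ierr)
  call mpi_waitall(ircv, rcvrqst, mpi_statuses_ignore, ierr)

  ! nrecv(ipe) now holds how many items rank ipe will send us, so the data
  ! buffers can be allocated before posting the actual particle receives.
  call mpi_finalize(ierr)
end program count_handshake_sketch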
1993 !=============================================================================
1994 subroutine comm_particles_global()
1995
1996 ! does not destroy particles like comm_particles
1997
1998 use mod_global_parameters
1999
2000 integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2001 integer :: index
2002 integer :: tag_send, tag_receive, send_buff, rcv_buff
2003 integer :: status(MPI_STATUS_SIZE)
2004 integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2005 integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2006! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2007! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2008! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2009! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2010 type(particle_t), allocatable, dimension(:) :: send_particles
2011 type(particle_t), allocatable, dimension(:) :: receive_particles
2012 double precision, allocatable, dimension(:,:) :: send_payload
2013 double precision, allocatable, dimension(:,:) :: receive_payload
2014 integer, allocatable, dimension(:,:) :: particle_index_to_be_sent_to_ipe
2015 integer, allocatable, dimension(:) :: sndrqst, rcvrqst
2016 integer :: isnd, ircv
2017 logical :: BC_applied
2018 !-----------------------------------------------------------------------------
2019 send_n_particles_to_ipe(:) = 0
2020 receive_n_particles_from_ipe(:) = 0
2021
2022 allocate( particle_index_to_be_sent_to_ipe(nparticles_per_cpu_hi,0:npe-1) )
2023 allocate( sndrqst(1:npe-1), rcvrqst(1:npe-1) )
2024 sndrqst = mpi_request_null; rcvrqst = mpi_request_null;
2025
2026 ! check if and where to send each particle, relocate all of them (in case grid has changed)
2027 ! !$OMP PARALLEL DO PRIVATE(ipart)
2028 do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2029
2030 call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2031
2032 particle(ipart)%igrid = igrid_particle
2033 particle(ipart)%ipe = ipe_particle
2034
2035 ! if we have more than one core, is it on another cpu?
2036 if (particle(ipart)%ipe .ne. mype) then
2037 ! !$OMP CRITICAL(send)
2038 send_n_particles_to_ipe(ipe_particle) = &
2039 send_n_particles_to_ipe(ipe_particle) + 1
2040 if (send_n_particles_to_ipe(ipe_particle) .gt. nparticles_per_cpu_hi) &
2041 call mpistop('G: too many particles, increase nparticles_per_cpu_hi')
2042 particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2043 ! !$OMP END CRITICAL(send)
2044 end if ! ipe_particle
2045
2046 end do ! ipart
2047 ! !$OMP END PARALLEL DO
2048
2049 ! get out when only one core:
2050 if (npe == 1) then
2051 deallocate(sndrqst, rcvrqst)
2052 deallocate(particle_index_to_be_sent_to_ipe)
2053 return
2054 end if
2055
2056 ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2057 ! communicate amount of particles to be sent/received
2058 ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2059 ! opedit: I think this was the main bottleneck.
2060 ! 31.12.2017: made it nonblocking (easy, least invasive)
2061 ! If this continues to be problematic, better to
2062 ! send particles with block in coarsen/refine/loadbalance. (best).
2063
2064 isnd = 0; ircv = 0;
2065 do ipe=0,npe-1; if (ipe .eq. mype) cycle;
2066
2067 tag_send = mype * npe + ipe; tag_receive = ipe * npe + mype;
2068 isnd = isnd + 1; ircv = ircv + 1;
2069 call mpi_isend(send_n_particles_to_ipe(ipe),1,mpi_integer, &
2070 ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2071 call mpi_irecv(receive_n_particles_from_ipe(ipe),1,mpi_integer, &
2072 ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2073 end do
2074
2075 call mpi_waitall(isnd,sndrqst,mpi_statuses_ignore,ierrmpi)
2076 call mpi_waitall(ircv,rcvrqst,mpi_statuses_ignore,ierrmpi)
2077 deallocate(sndrqst, rcvrqst)
2078
2079
2080 ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2081 ! send and receive the data of the particles
2082 ! !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
2083
2084 do ipe=0,npe-1
2085 if (ipe .eq. mype) cycle;
2086 tag_send = mype * npe + ipe
2087 tag_receive = ipe * npe + mype
2088
2089 ! should i send some particles to ipe?
2090 if (send_n_particles_to_ipe(ipe) .gt. 0) then
2091
2092 ! create the send buffer
2093 allocate(send_particles(1:send_n_particles_to_ipe(ipe)))
2094 allocate(send_payload(1:npayload,1:send_n_particles_to_ipe(ipe)))
2095 do ipart = 1, send_n_particles_to_ipe(ipe)
2096 send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
2097 send_payload(1:npayload,ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
2098 end do ! ipart
2099
2100 call mpi_send(send_particles,send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,ierrmpi)
2101 call mpi_send(send_payload,npayload*send_n_particles_to_ipe(ipe),mpi_double_precision,ipe,tag_send,icomm,ierrmpi)
2102 do ipart = 1, send_n_particles_to_ipe(ipe)
2103 deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2104 deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2105 call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2106 end do ! ipart
2107 deallocate(send_particles)
2108 deallocate(send_payload)
2109
2110 end if ! send .gt. 0
2111
2112 ! should i receive some particles from ipe?
2113 if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2114 allocate(receive_particles(1:receive_n_particles_from_ipe(ipe)))
2115 allocate(receive_payload(1:npayload,1:receive_n_particles_from_ipe(ipe)))
2116
2117 call mpi_recv(receive_particles,receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,status,ierrmpi)
2118 call mpi_recv(receive_payload,npayload*receive_n_particles_from_ipe(ipe),mpi_double_precision,ipe,tag_receive,icomm,status,ierrmpi)
2119 do ipart = 1, receive_n_particles_from_ipe(ipe)
2120
2121 index = receive_particles(ipart)%index
2122 !if (.not. allocated(particle(index)%self)) &
2123 allocate(particle(index)%self)
2124 particle(index)%self = receive_particles(ipart)
2125 !if (.not. allocated(particle(index)%payload)) &
2126 allocate(particle(index)%payload(npayload))
2127 particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart)
2128 call push_particle_into_particles_on_mype(index)
2129
2130 ! since we don't send the igrid, need to re-locate it
2131 call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2132 particle(index)%igrid = igrid_particle
2133 particle(index)%ipe = ipe_particle
2134
2135 end do ! ipart
2136 deallocate(receive_particles)
2137 deallocate(receive_payload)
2138
2139 end if ! receive .gt. 0
2140 end do ! ipe
2141 deallocate(particle_index_to_be_sent_to_ipe)
2142
2143 end subroutine comm_particles_global
2144
2145! subroutine comm_particles_old()
2146! use mod_global_parameters
2147!
2148! integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2149! integer :: index
2150! integer :: tag_send, tag_receive, tag_send_p, tag_receive_p, send_buff, rcv_buff
2151! integer :: status(MPI_STATUS_SIZE)
2152! integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2153! integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2154! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2155! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2156! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2157! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2158! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
2159! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_received_from_ipe
2160! integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
2161! integer :: destroy_n_particles_mype
2162! logical :: BC_applied
2163! integer, allocatable, dimension(:) :: sndrqst, rcvrqst
2164! integer :: isnd, ircv
2165! integer,dimension(npe_neighbors,nparticles_per_cpu_hi) :: payload_index
2166! send_n_particles_to_ipe(:) = 0
2167! receive_n_particles_from_ipe(:) = 0
2168! destroy_n_particles_mype = 0
2169!
2170! ! check if and where to send each particle, destroy if necessary
2171! do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2172!
2173! ! first check if the particle should be destroyed because t>tmax in a fixed snapshot
2174! if ( .not.time_advance .and. particle(ipart)%self%time .gt. tmax_particles ) then
2175! destroy_n_particles_mype = destroy_n_particles_mype + 1
2176! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2177! cycle
2178! end if
2179!
2180! ! Then check whether to destroy due to a user-defined condition
2181! if (associated(usr_destroy_particle)) then
2182! if (usr_destroy_particle(particle(ipart))) then
2183! destroy_n_particles_mype = destroy_n_particles_mype + 1
2184! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2185! cycle
2186! end if
2187! end if
2188!
2189! ! is my particle still in the same igrid?
2190! if (.not.particle_in_igrid(ipart,particle(ipart)%igrid)) then
2191! call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2192!
2193! ! destroy particle if out of domain (signalled by return value of -1)
2194! if (igrid_particle == -1 )then
2195! call apply_periodB(particle(ipart)%self,igrid_particle,ipe_particle,BC_applied)
2196! if (.not. BC_applied .or. igrid_particle == -1) then
2197! destroy_n_particles_mype = destroy_n_particles_mype + 1
2198! particle_index_to_be_destroyed(destroy_n_particles_mype) = ipart
2199! cycle
2200! end if
2201! end if
2202!
2203! ! particle still present
2204! particle(ipart)%igrid = igrid_particle
2205! particle(ipart)%ipe = ipe_particle
2206!
2207! ! if we have more than one core, is it on another cpu?
2208! if (npe .gt. 1 .and. particle(ipart)%ipe .ne. mype) then
2209! send_n_particles_to_ipe(ipe_particle) = &
2210! send_n_particles_to_ipe(ipe_particle) + 1
2211! particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2212! end if ! ipe_particle
2213!
2214! end if ! particle_in_grid
2215!
2216! end do ! ipart
2217!
2218! call destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed(1:destroy_n_particles_mype))
2219!
2220! ! get out when only one core:
2221! if (npe == 1) return
2222!
2223! ! communicate amount of particles to be sent/received
2224! allocate( sndrqst(1:npe_neighbors), rcvrqst(1:npe_neighbors) )
2225! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2226! isnd = 0; ircv = 0;
2227! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2228! tag_send = mype * npe + ipe
2229! tag_receive = ipe * npe + mype
2230! isnd = isnd + 1; ircv = ircv + 1;
2231! call MPI_ISEND(send_n_particles_to_ipe(ipe),1,MPI_INTEGER, &
2232! ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2233! call MPI_IRECV(receive_n_particles_from_ipe(ipe),1,MPI_INTEGER, &
2234! ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2235! end do
2236! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2237! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2238! deallocate( sndrqst, rcvrqst )
2239!
2240! ! Communicate index of the particles to be sent/received
2241! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2242! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2243! isnd = 0; ircv = 0;
2244! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2245! if (send_n_particles_to_ipe(ipe) > 0) then
2246! tag_send = mype * npe + ipe
2247! isnd = isnd + 1
2248! call MPI_ISEND(particle_index_to_be_sent_to_ipe(:,ipe),send_n_particles_to_ipe(ipe),MPI_INTEGER, &
2249! ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2250! end if
2251! if (receive_n_particles_from_ipe(ipe) > 0) then
2252! tag_receive = ipe * npe + mype
2253! ircv = ircv + 1
2254! call MPI_IRECV(particle_index_to_be_received_from_ipe(:,ipe),receive_n_particles_from_ipe(ipe),MPI_INTEGER, &
2255! ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2256! end if
2257! end do
2258! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2259! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2260! deallocate( sndrqst, rcvrqst )
2261!
2262! ! Send and receive the data of the particles
2263! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2264! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2265! isnd = 0; ircv = 0;
2266! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2267!
2268! ! should i send some particles to ipe?
2269! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2270!
2271! do ipart = 1, send_n_particles_to_ipe(ipe)
2272! tag_send = mype * npe + ipe + ipart
2273! isnd = isnd+1
2274! call MPI_ISEND(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self, &
2275! 1,type_particle,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2276! end do ! ipart
2277! end if ! send .gt. 0
2278!
2279! ! should i receive some particles from ipe?
2280! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2281!
2282! do ipart = 1, receive_n_particles_from_ipe(ipe)
2283! tag_receive = ipe * npe + mype + ipart
2284! ircv = ircv+1
2285! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2286! allocate(particle(index)%self)
2287! call MPI_IRECV(particle(index)%self,1,type_particle,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2288! end do ! ipart
2289!
2290! end if ! receive .gt. 0
2291! end do ! ipe
2292! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2293! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2294! deallocate( sndrqst, rcvrqst )
2295!
2296! ! Send and receive the payloads
2297! ! NON-BLOCKING: one communication for each particle. Needed for whatever reason
2298! allocate( sndrqst(1:npe_neighbors*nparticles_per_cpu_hi), rcvrqst(1:npe_neighbors*nparticles_per_cpu_hi) )
2299! sndrqst = MPI_REQUEST_NULL; rcvrqst = MPI_REQUEST_NULL;
2300! isnd = 0; ircv = 0;
2301! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2302!
2303! ! should i send some particles to ipe?
2304! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2305!
2306! do ipart = 1, send_n_particles_to_ipe(ipe)
2307! tag_send = mype * npe + ipe + ipart
2308! isnd = isnd+1
2309! call MPI_ISEND(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload), &
2310! npayload,MPI_DOUBLE_PRECISION,ipe,tag_send,icomm,sndrqst(isnd),ierrmpi)
2311! end do ! ipart
2312! end if ! send .gt. 0
2313!
2314! ! should i receive some particles from ipe?
2315! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2316! do ipart = 1, receive_n_particles_from_ipe(ipe)
2317! tag_receive = ipe * npe + mype + ipart
2318! ircv = ircv+1
2319! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2320! allocate(particle(index)%payload(npayload))
2321! call MPI_IRECV(particle(index)%payload(1:npayload),npayload, &
2322! MPI_DOUBLE_PRECISION,ipe,tag_receive,icomm,rcvrqst(ircv),ierrmpi)
2323! end do ! ipart
2324! end if ! receive .gt. 0
2325! end do ! ipe
2326! call MPI_WAITALL(isnd,sndrqst,MPI_STATUSES_IGNORE,ierrmpi)
2327! call MPI_WAITALL(ircv,rcvrqst,MPI_STATUSES_IGNORE,ierrmpi)
2328! deallocate( sndrqst, rcvrqst )
2329!
2330! ! Deallocate sent particles
2331! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2332! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2333! do ipart = 1, send_n_particles_to_ipe(ipe)
2334! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2335! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2336! call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2337! end do ! ipart
2338! end if ! send .gt. 0
2339! end do
2340!
2341! ! Relocate received particles
2342! do iipe=1,npe_neighbors;ipe=ipe_neighbor(iipe);
2343! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2344! do ipart = 1, receive_n_particles_from_ipe(ipe)
2345! index = particle_index_to_be_received_from_ipe(ipart,ipe)
2346! call push_particle_into_particles_on_mype(index)
2347! ! since we don't send the igrid, need to re-locate it
2348! call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2349! particle(index)%igrid = igrid_particle
2350! particle(index)%ipe = ipe_particle
2351! end do ! ipart
2352! end if ! receive .gt. 0
2353! end do ! ipe
2354!
2355! end subroutine comm_particles_old
2356!
2357! subroutine comm_particles_global_old()
2358! use mod_global_parameters
2359!
2360! integer :: ipart, iipart, igrid_particle, ipe_particle, ipe, iipe
2361! integer :: index
2362! integer :: tag_send, tag_receive, send_buff, rcv_buff
2363! integer :: status(MPI_STATUS_SIZE)
2364! integer, dimension(0:npe-1) :: send_n_particles_to_ipe
2365! integer, dimension(0:npe-1) :: receive_n_particles_from_ipe
2366! type(particle_t), dimension(nparticles_per_cpu_hi) :: send_particles
2367! type(particle_t), dimension(nparticles_per_cpu_hi) :: receive_particles
2368! double precision, dimension(npayload,nparticles_per_cpu_hi) :: send_payload
2369! double precision, dimension(npayload,nparticles_per_cpu_hi) :: receive_payload
2370! integer, dimension(nparticles_per_cpu_hi,0:npe-1) :: particle_index_to_be_sent_to_ipe
2371! integer, dimension(nparticles_per_cpu_hi) :: particle_index_to_be_destroyed
2372! integer :: destroy_n_particles_mype
2373! logical :: BC_applied
2374!
2375! send_n_particles_to_ipe(:) = 0
2376! receive_n_particles_from_ipe(:) = 0
2377! destroy_n_particles_mype = 0
2378!
2379! ! check if and where to send each particle, relocate all of them (in case grid has changed)
2380! do iipart=1,nparticles_on_mype;ipart=particles_on_mype(iipart);
2381!
2382! call find_particle_ipe(particle(ipart)%self%x,igrid_particle,ipe_particle)
2383!
2384! particle(ipart)%igrid = igrid_particle
2385! particle(ipart)%ipe = ipe_particle
2386!
2387! ! if we have more than one core, is it on another cpu?
2388! if (particle(ipart)%ipe .ne. mype) then
2389! send_n_particles_to_ipe(ipe_particle) = &
2390! send_n_particles_to_ipe(ipe_particle) + 1
2391! particle_index_to_be_sent_to_ipe(send_n_particles_to_ipe(ipe_particle),ipe_particle) = ipart
2392! end if ! ipe_particle
2393!
2394! end do ! ipart
2395!
2396! ! get out when only one core:
2397! if (npe == 1) return
2398!
2399! ! communicate amount of particles to be sent/received
2400! do ipe=0,npe-1;if (ipe .eq. mype) cycle;
2401! tag_send = mype * npe + ipe
2402! tag_receive = ipe * npe + mype
2403! call MPI_SEND(send_n_particles_to_ipe(ipe),1,MPI_INTEGER,ipe,tag_send,icomm,ierrmpi)
2404! call MPI_RECV(receive_n_particles_from_ipe(ipe),1,MPI_INTEGER,ipe,tag_receive,icomm,status,ierrmpi)
2405! end do
2406!
2407! ! send and receive the data of the particles
2408! do ipe=0,npe-1;if (ipe .eq. mype) cycle;
2409! tag_send = mype * npe + ipe
2410! tag_receive = ipe * npe + mype
2411!
2412! ! should i send some particles to ipe?
2413! if (send_n_particles_to_ipe(ipe) .gt. 0) then
2414!
2415! ! create the send buffer
2416! do ipart = 1, send_n_particles_to_ipe(ipe)
2417! send_particles(ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self
2418! send_payload(1:npayload,ipart) = particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload(1:npayload)
2419! end do ! ipart
2420! call MPI_SEND(send_particles,send_n_particles_to_ipe(ipe),type_particle,ipe,tag_send,icomm,ierrmpi)
2421! call MPI_SEND(send_payload,npayload*send_n_particles_to_ipe(ipe),MPI_DOUBLE_PRECISION,ipe,tag_send,icomm,ierrmpi)
2422! do ipart = 1, send_n_particles_to_ipe(ipe)
2423! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%self)
2424! deallocate(particle(particle_index_to_be_sent_to_ipe(ipart,ipe))%payload)
2425! call pull_particle_from_particles_on_mype(particle_index_to_be_sent_to_ipe(ipart,ipe))
2426! end do ! ipart
2427!
2428! end if ! send .gt. 0
2429!
2430! ! should i receive some particles from ipe?
2431! if (receive_n_particles_from_ipe(ipe) .gt. 0) then
2432!
2433! call MPI_RECV(receive_particles,receive_n_particles_from_ipe(ipe),type_particle,ipe,tag_receive,icomm,status,ierrmpi)
2434! call MPI_RECV(receive_payload,npayload*receive_n_particles_from_ipe(ipe),MPI_DOUBLE_PRECISION,ipe,tag_receive,icomm,status,ierrmpi)
2435!
2436! do ipart = 1, receive_n_particles_from_ipe(ipe)
2437!
2438! index = receive_particles(ipart)%index
2439! allocate(particle(index)%self)
2440! particle(index)%self = receive_particles(ipart)
2441! allocate(particle(index)%payload(npayload))
2442! particle(index)%payload(1:npayload) = receive_payload(1:npayload,ipart)
2443! call push_particle_into_particles_on_mype(index)
2444!
2445! ! since we don't send the igrid, need to re-locate it
2446! call find_particle_ipe(particle(index)%self%x,igrid_particle,ipe_particle)
2447! particle(index)%igrid = igrid_particle
2448! particle(index)%ipe = ipe_particle
2449!
2450! end do ! ipart
2451!
2452! end if ! receive .gt. 0
2453! end do ! ipe
2454!
2455! end subroutine comm_particles_global_old
2456
2457 subroutine apply_periodb(particle,igrid_particle,ipe_particle,BC_applied)
2458 use mod_global_parameters
2459
2460 type(particle_t), intent(inout) :: particle
2461 integer, intent(inout) :: igrid_particle, ipe_particle
2462 logical,intent(out) :: BC_applied
2463 integer :: idim
2464
2465 bc_applied = .false.
2466 ! get out if we don't have any periodic BC:
2467 if (.not. any(periodb(1:ndim))) return
2468
2469 ! go through dimensions and try re-inject the particle at the other side
2470 do idim=1,ndim
2471
2472 if (.not. periodb(idim)) cycle
2473
2474 select case(idim)
2475 {case (^d)
2476 if (particle%x(^d) .lt. xprobmin^d) then
2477 particle%x(^d) = particle%x(^d) + (xprobmax^d - xprobmin^d)
2478 bc_applied = .true.
2479 end if
2480 if (particle%x(^d) .ge. xprobmax^d) then
2481 particle%x(^d) = particle%x(^d) - (xprobmax^d - xprobmin^d)
2482 bc_applied = .true.
2483 end if
2484 \}
2485 end select
2486
2487 end do
2488
2489 call find_particle_ipe(particle%x,igrid_particle,ipe_particle)
2490
2491 end subroutine apply_periodb
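Illustration only: the periodic re-injection above simply shifts the offending coordinate by the domain length in the periodic direction. A one-dimensional, self-contained sketch with hypothetical bounds:

program periodic_wrap_sketch
  implicit none
  double precision :: x, xprobmin1, xprobmax1
  xprobmin1 = 0.0d0;  xprobmax1 = 1.0d0     ! hypothetical domain bounds
  x = -0.2d0                                ! particle left through the lower side
  if (x <  xprobmin1) x = x + (xprobmax1 - xprobmin1)
  if (x >= xprobmax1) x = x - (xprobmax1 - xprobmin1)
  print *, x                                ! 0.8: re-injected at the other side
end program periodic_wrap_sketch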
2492
2493 !> clean up destroyed particles on all cores
2494 subroutine destroy_particles(destroy_n_particles_mype,particle_index_to_be_destroyed)
2495 use mod_global_parameters
2496
2497 integer, intent(in) :: destroy_n_particles_mype
2498 integer, dimension(1:destroy_n_particles_mype), intent(in) :: particle_index_to_be_destroyed
2499 type(particle_t), dimension(destroy_n_particles_mype):: destroy_particles_mype
2500 double precision, dimension(npayload,destroy_n_particles_mype):: destroy_payload_mype
2501 integer :: iipart,ipart,destroy_n_particles
2502
2503 destroy_n_particles = 0
2504
2505 ! append the particle to list of destroyed particles
2506 do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
2507 destroy_particles_mype(iipart) = particle(ipart)%self
2508 destroy_payload_mype(1:npayload,iipart) = particle(ipart)%payload(1:npayload)
2509 end do
2510
2511 call output_ensemble(destroy_n_particles_mype,destroy_particles_mype,destroy_payload_mype,'destroy')
2512
2513 if (npe > 1) then
2514 call mpi_allreduce(destroy_n_particles_mype,destroy_n_particles,1,mpi_integer,mpi_sum,icomm,ierrmpi)
2515 else
2516 destroy_n_particles = destroy_n_particles_mype
2517 end if
2518
2519 nparticles = nparticles - destroy_n_particles
2520
2521 do iipart=1,destroy_n_particles_mype;ipart=particle_index_to_be_destroyed(iipart);
2522 particle(ipart)%igrid = -1
2523 particle(ipart)%ipe = -1
2524 if(time_advance) then
2525 write(*,*) particle(ipart)%self%time, ': particle',ipart,' has left at it ',it_particles,' on pe', mype
2526 write(*,*) particle(ipart)%self%time, ': particles remaining:',nparticles, '(total)', nparticles_on_mype-1, 'on pe', mype
2527 endif
2528 deallocate(particle(ipart)%self)
2529 deallocate(particle(ipart)%payload)
2530 call pull_particle_from_particles_on_mype(ipart)
2531 end do
2532
2533 end subroutine destroy_particles
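Illustration only: the global count of destroyed particles is obtained above with an MPI_ALLREDUCE sum over all ranks. A minimal self-contained sketch of that reduction:

program allreduce_sketch
  use mpi
  implicit none
  integer :: ierr, nlocal, nglobal
  call mpi_init(ierr)
  nlocal = 1                      ! e.g. one particle destroyed on this rank
  call mpi_allreduce(nlocal, nglobal, 1, mpi_integer, mpi_sum, &
                     mpi_comm_world, ierr)
  print *, 'destroyed in total:', nglobal
  call mpi_finalize(ierr)
end program allreduce_sketch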
2534
2535 subroutine push_particle_into_particles_on_mype(ipart)
2536 implicit none
2537
2538 integer, intent(in) :: ipart
2539
2540 nparticles_on_mype = nparticles_on_mype + 1
2541 particles_on_mype(nparticles_on_mype) = ipart
2542
2543 end subroutine push_particle_into_particles_on_mype
2544
2545 subroutine pull_particle_from_particles_on_mype(ipart)
2546 implicit none
2547
2548 integer, intent(in) :: ipart
2549 integer :: i
2550
2551 do i=1,nparticles_on_mype
2552 if (particles_on_mype(i) == ipart) then
2553 particles_on_mype(i) = particles_on_mype(nparticles_on_mype)
2554 exit
2555 end if
2556 end do
2557
2558 nparticles_on_mype = nparticles_on_mype - 1
2559
2560 end subroutine pull_particle_from_particles_on_mype
2561
2562end module mod_particle_base
subroutine, public mpistop(message)
Exit MPI-AMRVAC with an error message.
Module for physical and numeric constants.
Module with basic grid data structures.
Definition mod_forest.t:2
type(tree_node_ptr), dimension(:^d &), allocatable, save tree_root
Pointers to the coarse grid.
Definition mod_forest.t:29
Module with geometry-related routines (e.g., divergence, curl)
Definition mod_geometry.t:2
integer coordinate
Definition mod_geometry.t:7
integer, parameter spherical
integer, parameter cartesian
Definition mod_geometry.t:8
integer, parameter cylindrical
integer, parameter cartesian_expansion
integer, parameter cartesian_stretched
Definition mod_geometry.t:9
subroutine curlvector(qvec, ixil, ixol, curlvec, idirmin, idirmin0, ndir0, fourthorder)
Calculate curl of a vector qvec within ixL Options to employ standard second order CD evaluations use...
This module contains definitions of global parameters and variables and some generic functions/subrou...
type(state), pointer block
Block pointer for using one block and its previous state.
logical nocartesian
IO switches for conversion.
integer ixghi
Upper index of grid block arrays.
integer, parameter stretch_uni
Unidirectional stretching from a side.
integer domain_nx
number of cells for each dimension in level-one mesh
integer, parameter unitpar
file handle for IO
double precision global_time
The global simulation time.
double precision time_max
End time for the simulation.
double precision xstretch
physical extent of stretched border in symmetric stretching
integer snapshotini
Resume from the snapshot with this index.
integer, parameter ndim
Number of spatial dimensions for grid variables.
integer, dimension(:,:), allocatable nstretchedblocks
(even) number of (symmetrically) stretched blocks per level and dimension
character(len=std_len), dimension(:), allocatable par_files
Which par files are used as input.
integer icomm
The MPI communicator.
integer mype
The rank of the current MPI task.
integer block_nx
number of cells for each dimension in grid block excluding ghostcells
double precision, dimension(:), allocatable, parameter d
double precision dt
global time step
double precision length_convert_factor
Conversion factor for length unit.
integer ndir
Number of spatial dimensions (components) for vector variables.
integer ixm
the mesh range of a physical block without ghost cells
integer ierrmpi
A global MPI error return code.
double precision, dimension(:,:), allocatable qstretch
Stretching factors and first cell size for each AMR level and dimension.
integer snapshotnext
IO: snapshot and collapsed views output numbers/labels.
integer npe
The number of MPI tasks.
logical time_advance
do time evolving
character(len=std_len) restart_from_file
If not 'unavailable', resume from snapshot with this base file name.
logical, dimension(ndim) periodb
True for dimensions with periodic boundaries.
double precision c_norm
Normalised speed of light.
logical b0field
split magnetic field as background B0 field
double precision, dimension(:,:), allocatable rnode
Corner coordinates.
integer nghostcells
Number of ghost cells surrounding a grid.
logical convert
If true and restart_from_file is given, convert snapshots to other file formats.
integer, dimension(ndim) stretch_type
What kind of stretching is used per dimension.
double precision, dimension(^nd) dxlevel
store unstretched cell size of current level
character(len=std_len) base_filename
Base file name for simulation output, which will be followed by a number.
integer, parameter stretch_symm
Symmetric stretching around the center.
integer r_
Indices for cylindrical coordinates FOR TESTS, negative value when not used:
double precision, dimension(:,:), allocatable dxfirst
integer, dimension(:,:), allocatable node
integer, parameter ixglo
Lower index of grid block arrays (always 1)
Module with shared functionality for all the particle movers.
subroutine get_maxwellian_velocity(v, velocity)
subroutine partcoord_from_cartesian(xp, xpcart)
Convert to noncartesian coordinates.
double precision, dimension(3) integrator_velocity_factor
Normalization factor for velocity in the integrator.
integer nparticles_per_cpu_hi
Maximum number of particles in one processor.
subroutine particle_get_current(w, ixil, ixol, idirmin, current)
Calculate idirmin and the idirmin:3 components of the common current array make sure that dxlevel(^D)...
integer nparticleshi
Maximum number of particles.
pure subroutine limit_dt_endtime(t_left, dt_p)
integer num_particles
Number of particles.
subroutine fix_phi_crossing(xp, igrid)
Fix particle position when crossing the 0,2pi boundary in noncartesian coordinates.
double precision const_dt_particles
If positive, a constant time step for the particles.
subroutine output_individual()
integer n_output_destroyed
Output count for ensembles of destroyed particles.
integer it_particles
Iteration number of particles.
subroutine fields_from_mhd(igrid, w_mhd, w_part)
Determine fields from MHD variables.
subroutine particles_params_read(files)
Read this module's parameters from a file.
double precision min_particle_time
Minimum time of all particles.
subroutine output_ensemble(send_n_particles_for_output, send_particles, send_payload, typefile)
subroutine get_uniform_pos(x)
logical write_individual
Whether to write individual particle output (followed particles)
subroutine write_particle_output()
subroutine init_particles_com()
Initialize communicators for particles.
logical function particle_in_igrid(ipart, igrid)
Quick check if particle is still in igrid.
integer, dimension(:), allocatable particles_on_mype
Array of identity numbers of particles in current processor.
double precision dtsave_particles
Time interval to save particles.
subroutine output_particle(myparticle, payload, ipe, file_unit)
subroutine update_gridvars()
update grid variables for particles
integer ngridvars
Number of variables for grid field.
subroutine ipe_fc(id, igrid, ipe_is_neighbor)
subroutine get_efield(igrid, x, tloc, e)
double precision particles_etah
subroutine destroy_particles(destroy_n_particles_mype, particle_index_to_be_destroyed)
clean up destroyed particles on all cores
integer nusrpayload
Number of user-defined payload variables for a particle.
subroutine select_active_particles(end_time)
subroutine write_particles_snapshot()
double precision particles_eta
Resistivity.
subroutine particles_finish()
Finalize the particles module.
logical write_snapshot
Whether to write particle snapshots.
pure subroutine get_lfac(u, lfac)
Get Lorentz factor from relativistic momentum.
subroutine find_particle_ipe(x, igrid_particle, ipe_particle)
subroutine pull_particle_from_particles_on_mype(ipart)
subroutine init_particles_output()
double precision dtheta
pure subroutine get_lfac_from_velocity(v, lfac)
Get Lorentz factor from velocity.
integer integrator
Integrator to be used for particles.
integer, dimension(:), allocatable ep
Variable index for electric field.
subroutine handle_particles()
Let particles evolve in time. The routine also handles grid variables and output.
subroutine partvec_from_cartesian(xp, up, upcart)
Convert vector from Cartesian coordinates.
subroutine fill_gridvars_default
This routine fills the particle grid variables with the default for mhd, i.e. only E and B.
character(len=400) csv_header
Header string used in CSV files.
subroutine finish_gridvars()
Deallocate grid variables for particles.
subroutine ipe_srl(id, igrid, ipe_is_neighbor)
subroutine get_bfield(igrid, x, tloc, b)
integer npayload
Number of total payload variables for a particle.
integer, parameter unitparticles
integer, dimension(:), allocatable particles_active_on_mype
Array of identity numbers of active particles in current processor.
logical relativistic
Use a relativistic particle mover?
character(len=name_len) integrator_type_particles
String describing the particle integrator type.
subroutine set_neighbor_ipe()
subroutine time_spent_on_particles()
subroutine get_vec(ix, igrid, x, tloc, vec)
logical write_ensemble
Whether to write ensemble output.
character(len=name_len) interp_type_particles
String describing the particle interpolation type.
integer downsample_particles
Particle downsampling factor in CSV output.
subroutine locate_particle(index, igrid_particle, ipe_particle)
subroutine push_particle_into_particles_on_mype(ipart)
subroutine read_from_snapshot()
integer nparticles_active_on_mype
Number of active particles in current processor.
procedure(sub_define_additional_gridvars), pointer particles_define_additional_gridvars
subroutine partcoord_to_cartesian(xp, xpcart)
Convert position to Cartesian coordinates.
integer nparticles
Identity number and total number of particles.
integer nparticles_on_mype
Number of particles in current processor.
integer, dimension(:), allocatable bp
Variable index for magnetic field.
subroutine comm_particles()
double precision tmax_particles
Time limit of particles.
type(particle_ptr), dimension(:), allocatable particle
procedure(fun_destroy), pointer usr_destroy_particle
integer, dimension(:), allocatable vp
Variable index for fluid velocity.
procedure(sub_integrate), pointer particles_integrate
subroutine cross(a, b, c)
procedure(sub_fill_gridvars), pointer particles_fill_gridvars
double precision t_next_output
Time to write next particle output.
subroutine advance_particles(end_time, steps_taken)
Routine handles the particle evolution.
subroutine partvec_to_cartesian(xp, up, upcart)
Convert vector to Cartesian coordinates.
integer ndefpayload
Number of default payload variables for a particle.
logical function point_in_igrid_ghostc(x, igrid, ngh)
Quick check if particle coordinate is inside igrid (ghost cells included, except the last ngh)
subroutine particle_base_init()
Give initial values to parameters.
integer, dimension(:), allocatable ipe_neighbor
integer igrid_working
set the current igrid for the particle integrator:
character(len=60) csv_format
Format string used in CSV files.
integer, dimension(:), allocatable jp
Variable index for current.
subroutine append_to_snapshot(myparticle, mypayload)
subroutine apply_periodb(particle, igrid_particle, ipe_particle, bc_applied)
integer ipart_working
set the current ipart for the particle integrator:
subroutine interpolate_var(igrid, ixil, ixol, gf, x, xloc, gfloc)
double precision particles_cfl
Particle CFL safety factor.
procedure(sub_fill_additional_gridvars), pointer particles_fill_additional_gridvars
subroutine ipe_cf(id, igrid, ipe_is_neighbor)
logical function particle_in_domain(x)
Check if particle is inside computational domain.
character(len=name_len) physics_type_particles
String describing the particle physics type.
subroutine init_gridvars()
Initialize grid variables for particles.
character(len=128) function make_particle_filename(tout, index, type)
subroutine comm_particles_global()
subroutine read_particles_snapshot(file_exists)
integer rhop
Variable index for density.
This module defines the procedures of a physics module. It contains function pointers for the various...
Definition mod_physics.t:4
Module for pseudo random number generation. The internal pseudo random generator is the xoroshiro128p...
Definition mod_random.t:3
Writes D-1 slice, can do so in various formats, depending on slice_type.
Definition mod_slice.t:2
subroutine get_igslice(dir, x, igslice)
Definition mod_slice.t:1109
double precision tpartc_int_0
Definition mod_timing.t:11
double precision timeio_tot
Definition mod_timing.t:8
double precision tpartc_com
Definition mod_timing.t:10
double precision tpartc
Definition mod_timing.t:10
double precision tpartc0
Definition mod_timing.t:11
double precision tpartc_grid_0
Definition mod_timing.t:11
double precision tpartc_io
Definition mod_timing.t:10
double precision timeloop
Definition mod_timing.t:9
double precision tpartc_com0
Definition mod_timing.t:11
double precision tpartc_grid
Definition mod_timing.t:10
double precision tpartc_io_0
Definition mod_timing.t:11
double precision tpartc_int
Definition mod_timing.t:10
Module with all the methods that users can customize in AMRVAC.
procedure(particle_analytic), pointer usr_particle_analytic
procedure(particle_fields), pointer usr_particle_fields
Pointer to a tree_node.
Definition mod_forest.t:6
Random number generator type, which contains the state.
Definition mod_random.t:20