MPI-AMRVAC 3.0
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_advance.t
1 !> Module containing all the time stepping schemes
2 module mod_advance
3 
4  implicit none
5  private
6 
7  !> Whether to conserve fluxes at the current sub-step
8  logical :: fix_conserve_at_step = .true.
9 
10  public :: advance
11  public :: process
12  public :: process_advanced
13 
14 contains
15 
16  !> Advance all the grids over one time step, including all sources
17  subroutine advance(iit)
19  use mod_particles, only: handle_particles
20  use mod_source, only: add_split_source
21 
22  integer, intent(in) :: iit
23 
24  integer :: iigrid, igrid, idimsplit
25 
26  ! old solution values at t_n-1 no longer needed: make copy of w(t_n)
27  !$OMP PARALLEL DO PRIVATE(igrid)
28  do iigrid=1,igridstail; igrid=igrids(iigrid);
29  pso(igrid)%w=ps(igrid)%w
30  if(stagger_grid) pso(igrid)%ws=ps(igrid)%ws
31  end do
32  !$OMP END PARALLEL DO
33 
34  {#IFDEF RAY
35  call update_rays
36  }
37 
38  ! split source addition
39  call add_split_source(prior=.true.)
40 
41  if (dimsplit) then
42  if ((iit/2)*2==iit .or. typedimsplit=='xy') then
43  ! do the sweeps in order of increasing idim,
44  do idimsplit=1,ndim
45  call advect(idimsplit,idimsplit)
46  end do
47  else
48  ! If the parity of "iit" is odd and typedimsplit=xyyx,
49  ! do sweeps backwards
50  do idimsplit=ndim,1,-1
51  call advect(idimsplit,idimsplit)
52  end do
53  end if
54  else
55  ! Add fluxes from all directions at once
56  call advect(1,ndim)
57  end if
58 
59  ! split source addition
60  call add_split_source(prior=.false.)
61 
62  if(use_particles) call handle_particles
63 
64  end subroutine advance
65 
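(Schematic note, derived from the branch above.) With dimensional splitting enabled and typedimsplit='xyyx', the parity test on iit alternates the sweep order between consecutive time steps. Writing \(\mathcal{L}_i\) for the one-dimensional update operator of the sweep in direction \(i\), two consecutive steps read

\[
  U^{n+1} = \mathcal{L}_{d}\cdots\mathcal{L}_{2}\mathcal{L}_{1}\,U^{n},
  \qquad
  U^{n+2} = \mathcal{L}_{1}\mathcal{L}_{2}\cdots\mathcal{L}_{d}\,U^{n+1},
\]

so the first-order splitting error is symmetrized over pairs of steps, while typedimsplit='xy' applies the same order \(\mathcal{L}_{d}\cdots\mathcal{L}_{1}\) every step.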
66  !> Advance all grids over one time step, but without taking dimensional
67  !> splitting or split source terms into account
68  subroutine advect(idim^LIM)
73 
74  integer, intent(in) :: idim^LIM
75  integer :: iigrid, igrid
76 
77  call init_comm_fix_conserve(idim^lim,nwflux)
78  fix_conserve_at_step = time_advance .and. levmax>levmin
79 
80  ! copy w instead of wold because of potential use of dimsplit or sourcesplit
81  !$OMP PARALLEL DO PRIVATE(igrid)
82  do iigrid=1,igridstail; igrid=igrids(iigrid);
83  ps1(igrid)%w=ps(igrid)%w
84  if(stagger_grid) ps1(igrid)%ws=ps(igrid)%ws
85  end do
86  !$OMP END PARALLEL DO
87 
88  istep = 0
89 
90  select case (t_stepper)
91  case (onestep)
92  select case (t_integrator)
93  case (forward_euler)
94  call advect1(flux_method,one,idim^lim,global_time,ps1,global_time,ps)
95 
96  case (imex_euler)
97  call advect1(flux_method,one,idim^lim,global_time,ps,global_time,ps1)
98  call global_implicit_update(one,dt,global_time+dt,ps,ps1)
99 
100  case (imex_sp)
101  call global_implicit_update(one,dt,global_time,ps,ps1)
102  !$OMP PARALLEL DO PRIVATE(igrid)
103  do iigrid=1,igridstail; igrid=igrids(iigrid);
104  ps1(igrid)%w=ps(igrid)%w
105  if(stagger_grid) ps1(igrid)%ws=ps(igrid)%ws
106  end do
107  !$OMP END PARALLEL DO
108  call advect1(flux_method,one,idim^lim,global_time,ps1,global_time,ps)
109 
110  case default
111  call mpistop("unknown onestep time_integrator in advect")
112  end select
113 
114  case (twostep)
115  select case (t_integrator)
116  case (predictor_corrector)
117  ! PC or explicit midpoint
118  ! predictor step
119  fix_conserve_at_step = .false.
120  call advect1(typepred1,half,idim^lim,global_time,ps,global_time,ps1)
121  ! corrector step
122  fix_conserve_at_step = time_advance .and. levmax>levmin
123  call advect1(flux_method,one,idim^lim,global_time+half*dt,ps1,global_time,ps)
124 
125  case (rk2_alf)
126  ! RK2 with alfa parameter, where rk_a21=alfa
127  call advect1(flux_method,rk_a21, idim^lim,global_time,ps,global_time,ps1)
128  !$OMP PARALLEL DO PRIVATE(igrid)
129  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
130  ps(igrid)%w = ps(igrid)%w+rk_b1*(ps1(igrid)%w-ps(igrid)%w)/rk_a21
131  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+(one-rk_b2)*(ps1(igrid)%ws-ps(igrid)%ws)/rk_a21
132  end do
133  !$OMP END PARALLEL DO
135 
136  case (ssprk2)
137  ! ssprk2 or Heun's method (see the standalone sketch after this subroutine)
138  call advect1(flux_method,one, idim^lim,global_time,ps,global_time,ps1)
139  !$OMP PARALLEL DO PRIVATE(igrid)
140  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
141  ps(igrid)%w = half*ps(igrid)%w+half*ps1(igrid)%w
142  if(stagger_grid) ps(igrid)%ws = half*ps(igrid)%ws+half*ps1(igrid)%ws
143  end do
144  !$OMP END PARALLEL DO
145  call advect1(flux_method,half,idim^lim,global_time+dt,ps1,global_time+half*dt,ps)
146 
147  case (imex_midpoint)
148  !$OMP PARALLEL DO PRIVATE(igrid)
149  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
150  ps2(igrid)%w = ps(igrid)%w
151  if(stagger_grid) ps2(igrid)%ws = ps(igrid)%ws
152  end do
153  !$OMP END PARALLEL DO
154  call advect1(flux_method,half, idim^lim,global_time,ps,global_time,ps1)
155  call global_implicit_update(half,dt,global_time+half*dt,ps2,ps1)
156  !$OMP PARALLEL DO PRIVATE(igrid)
157  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
158  ps(igrid)%w = ps(igrid)%w+2.0d0*(ps2(igrid)%w-ps1(igrid)%w)
159  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+2.0d0*(ps2(igrid)%ws-ps1(igrid)%ws)
160  end do
161  !$OMP END PARALLEL DO
162  call advect1(flux_method,one, idim^lim,global_time+half*dt,ps2,global_time,ps)
163 
164  case (imex_trapezoidal)
165  call advect1(flux_method,one, idim^lim,global_time,ps,global_time,ps1)
166  !$OMP PARALLEL DO PRIVATE(igrid)
167  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
168  ps2(igrid)%w = half*(ps(igrid)%w+ps1(igrid)%w)
169  if(stagger_grid) ps2(igrid)%ws = half*(ps(igrid)%ws+ps1(igrid)%ws)
170  end do
171  !$OMP END PARALLEL DO
173  !$OMP PARALLEL DO PRIVATE(igrid)
174  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
175  ps1(igrid)%w = ps1(igrid)%w+half*dt*ps(igrid)%w
176  if(stagger_grid) ps1(igrid)%ws = ps1(igrid)%ws+half*dt*ps(igrid)%ws
177  end do
178  !$OMP END PARALLEL DO
179  !$OMP PARALLEL DO PRIVATE(igrid)
180  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
181  ps(igrid)%w = ps2(igrid)%w+half*dt*ps(igrid)%w
182  if(stagger_grid) ps(igrid)%ws = ps2(igrid)%ws+half*dt*ps(igrid)%ws
183  end do
184  !$OMP END PARALLEL DO
185  call getbc(global_time+dt,dt,ps1,iwstart,nwgc,phys_req_diagonal)
186  call global_implicit_update(half,dt,global_time+dt,ps2,ps1)
187  !$OMP PARALLEL DO PRIVATE(igrid)
188  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
189  ps(igrid)%w = ps(igrid)%w+ps2(igrid)%w-ps1(igrid)%w
190  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws+ps2(igrid)%ws-ps1(igrid)%ws
191  end do
192  !$OMP END PARALLEL DO
193  call advect1(flux_method,half, idim^lim,global_time+dt,ps2,global_time+half*dt,ps)
194 
195  case (imex_222)
196  ! One-parameter family of schemes (parameter is imex222_lambda) from
197  ! Pareschi & Russo 2005, which is L-stable (for the default lambda) and
198  ! asymptotically SSP.
199  ! See doi.org/10.1007/s10915-004-4636-4 (table II)
200  ! See doi.org/10.1016/j.apnum.2016.10.018 for interesting values of lambda
201 
202  ! Preallocate ps2 as y^n for the implicit update
203  !$OMP PARALLEL DO PRIVATE(igrid)
204  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
205  ps2(igrid)%w = ps(igrid)%w
206  if(stagger_grid) ps2(igrid)%ws = ps(igrid)%ws
207  end do
208  !$OMP END PARALLEL DO
209  ! Solve xi1 = y^n + lambda.dt.F_im(xi1)
211 
212  ! Set ps1 = y^n + dt.F_ex(xi1)
213  call advect1(flux_method, one, idim^lim, global_time, ps2, global_time, ps1)
214  ! Set ps2 = dt.F_im(xi1) (is at t^n)
215  ! Set ps = y^n + dt/2 . F(xi1) (is at t^n+dt/2)
216  ! Set ps1 = y^n + dt.F_ex(xi1) + (1-2.lambda).dt.F_im(xi1) and enforce BC (at t^n+dt)
217  !$OMP PARALLEL DO PRIVATE(igrid)
218  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
219  ps2(igrid)%w = (ps2(igrid)%w - ps(igrid)%w) / imex222_lambda
220  if(stagger_grid) ps2(igrid)%ws = (ps2(igrid)%ws - ps(igrid)%ws) / imex222_lambda
221  end do
222  !$OMP END PARALLEL DO
223  !$OMP PARALLEL DO PRIVATE(igrid)
224  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
225  ps(igrid)%w = half*(ps(igrid)%w + ps1(igrid)%w + ps2(igrid)%w)
226  if(stagger_grid) ps(igrid)%ws = half*(ps(igrid)%ws + ps1(igrid)%ws + ps2(igrid)%ws)
227  end do
228  !$OMP END PARALLEL DO
229  !$OMP PARALLEL DO PRIVATE(igrid)
230  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
231  ps1(igrid)%w = ps1(igrid)%w + (1.0d0 - 2.0d0*imex222_lambda)*ps2(igrid)%w
232  if(stagger_grid) ps1(igrid)%ws = ps1(igrid)%ws + (1.0d0 - 2.0d0*imex222_lambda)*ps2(igrid)%ws
233  end do
234  !$OMP END PARALLEL DO
235  call getbc(global_time+dt,dt,ps1,iwstart,nwgc,phys_req_diagonal)
236 
237  ! Preallocate ps2 as xi1 for the implicit update (is at t^n)
238  !$OMP PARALLEL DO PRIVATE(igrid)
239  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
240  ps2(igrid)%w = 2.0d0*ps2(igrid)%w - ps1(igrid)%w - imex222_lambda*ps2(igrid)%w
241  if(stagger_grid) ps2(igrid)%ws = 2.0d0*ps2(igrid)%ws - ps1(igrid)%ws - imex222_lambda*ps2(igrid)%ws
242  end do
243  !$OMP END PARALLEL DO
244  ! Solve xi2 = (ps1) + lambda.dt.F_im(xi2)
246 
247  ! Add dt/2.F_im(xi2) to ps
248  !$OMP PARALLEL DO PRIVATE(igrid)
249  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
250  ps(igrid)%w = ps(igrid)%w + (ps2(igrid)%w - ps1(igrid)%w) / (2.0d0 * imex222_lambda)
251  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + (ps2(igrid)%ws - ps1(igrid)%ws) / (2.0d0 * imex222_lambda)
252  end do
253  !$OMP END PARALLEL DO
254  ! Set ps = y^n + dt/2.(F(xi1)+F(xi2)) = y^(n+1)
255  call advect1(flux_method, half, idim^lim, global_time+dt, ps2, global_time+half*dt, ps)
256 
257  case default
258  call mpistop("unknown twostep time_integrator in advect")
259  end select
260 
261  case (threestep)
262  select case (t_integrator)
263  case (ssprk3)
264  ! this is SSPRK(3,3) Gottlieb-Shu 1998 or SSP(3,2) depending on ssprk_order (3 vs 2)
265  call advect1(flux_method,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
266  !$OMP PARALLEL DO PRIVATE(igrid)
267  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
268  ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
269  if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
270  end do
271  !$OMP END PARALLEL DO
273  !$OMP PARALLEL DO PRIVATE(igrid)
274  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
275  ps(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
276  if(stagger_grid) ps(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
277  end do
278  !$OMP END PARALLEL DO
279  call advect1(flux_method,rk_beta33, idim^lim,global_time+rk_c3*dt,ps2,global_time+(1.0d0-rk_beta33)*dt,ps)
280 
281  case (rk3_bt)
282  ! this is a general threestep RK according to its Butcher Table
283  call advect1(flux_method,rk3_a21, idim^lim,global_time,ps,global_time,ps1)
284  !$OMP PARALLEL DO PRIVATE(igrid)
285  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
286  ps3(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/rk3_a21
287  if(stagger_grid) ps3(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/rk3_a21
288  end do
289  !$OMP END PARALLEL DO
290  !$OMP PARALLEL DO PRIVATE(igrid)
291  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
292  ps2(igrid)%w=ps(igrid)%w+rk3_a31*ps3(igrid)%w
293  if(stagger_grid) ps2(igrid)%ws=ps(igrid)%ws+rk3_a31*ps3(igrid)%ws
294  end do
295  !$OMP END PARALLEL DO
297  !$OMP PARALLEL DO PRIVATE(igrid)
298  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
299  ps(igrid)%w=ps(igrid)%w+rk3_b1*ps3(igrid)%w &
300  +rk3_b2*(ps2(igrid)%w-(ps(igrid)%w+rk3_a31*ps3(igrid)%w))/rk3_a32
301  if(stagger_grid)then
302  ps(igrid)%ws=ps(igrid)%ws+rk3_b1*ps3(igrid)%ws &
303  +rk3_b2*(ps2(igrid)%ws-(ps(igrid)%ws+rk3_a31*ps3(igrid)%ws))/rk3_a32
304  endif
305  end do
306  !$OMP END PARALLEL DO
307  call advect1(flux_method,rk3_b3, idim^lim,global_time+rk3_c3*dt,ps2,global_time+(1.0d0-rk3_b3)*dt,ps)
308 
309  case (imex_ars3)
310  ! this is IMEX scheme ARS3
311  call advect1(flux_method,ars_gamma, idim^lim,global_time,ps,global_time,ps1)
312  !$OMP PARALLEL DO PRIVATE(igrid)
313  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
314  ps4(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/ars_gamma
315  if(stagger_grid) ps4(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/ars_gamma
316  end do
317  !$OMP END PARALLEL DO
319  !$OMP PARALLEL DO PRIVATE(igrid)
320  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
321  ps1(igrid)%w=(ps2(igrid)%w-ps1(igrid)%w)/ars_gamma
322  if(stagger_grid) ps1(igrid)%ws=(ps2(igrid)%ws-ps1(igrid)%ws)/ars_gamma
323  end do
324  !$OMP END PARALLEL DO
325  !$OMP PARALLEL DO PRIVATE(igrid)
326  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
327  ps3(igrid)%w=ps(igrid)%w+(ars_gamma-1.0d0)*ps4(igrid)%w+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%w
328  if(stagger_grid) then
329  ps3(igrid)%ws=ps(igrid)%ws+(ars_gamma-1.0d0)*ps4(igrid)%ws+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%ws
330  endif
331  end do
332  !$OMP END PARALLEL DO
333  call advect1(flux_method,2.0d0*(1.0d0-ars_gamma), idim^lim,global_time+ars_gamma*dt,ps2,global_time+(ars_gamma-1.0d0)*dt,ps3)
334  !$OMP PARALLEL DO PRIVATE(igrid)
335  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
336  ps2(igrid)%w=ps1(igrid)%w+(ps3(igrid)%w-(ps(igrid)%w+ &
337  (ars_gamma-1.0d0)*ps4(igrid)%w+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%w))/(2.0d0*(1.0d0-ars_gamma))
338  if(stagger_grid) then
339  ps2(igrid)%ws=ps1(igrid)%ws+(ps3(igrid)%ws-(ps(igrid)%ws+ &
340  (ars_gamma-1.0d0)*ps4(igrid)%ws+(1.0d0-2.0d0*ars_gamma)*ps1(igrid)%ws))/(2.0d0*(1.0d0-ars_gamma))
341  endif
342  end do
343  !$OMP END PARALLEL DO
345  !$OMP PARALLEL DO PRIVATE(igrid)
346  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
347  ps(igrid)%w=ps(igrid)%w+half*ps2(igrid)%w &
348  +half*(ps4(igrid)%w-ps3(igrid)%w)/ars_gamma
349  if(stagger_grid) then
350  ps(igrid)%ws=ps(igrid)%ws+half*ps2(igrid)%ws &
351  +half*(ps4(igrid)%ws-ps3(igrid)%ws)/ars_gamma
352  endif
353  end do
354  !$OMP END PARALLEL DO
355  call advect1(flux_method,half, idim^lim,global_time+(1.0d0-ars_gamma)*dt,ps4,global_time+half*dt,ps)
356 
357  case (imex_232)
358  ! this is IMEX_ARK(2,3,2) or IMEX_SSP(2,3,2)
359  call advect1(flux_method,imex_a21, idim^lim,global_time,ps,global_time,ps1)
360  !$OMP PARALLEL DO PRIVATE(igrid)
361  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
362  ps4(igrid)%w=(ps1(igrid)%w-ps(igrid)%w)/imex_a21
363  ps3(igrid)%w=ps(igrid)%w
364  if(stagger_grid) then
365  ps4(igrid)%ws=(ps1(igrid)%ws-ps(igrid)%ws)/imex_a21
366  ps3(igrid)%ws=ps(igrid)%ws
367  endif
368  end do
369  !$OMP END PARALLEL DO
371  !$OMP PARALLEL DO PRIVATE(igrid)
372  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
373  ps1(igrid)%w=ps1(igrid)%w+imex_ha21*dt*ps3(igrid)%w
374  if(stagger_grid) ps1(igrid)%ws=ps1(igrid)%ws+imex_ha21*dt*ps3(igrid)%ws
375  end do
376  !$OMP END PARALLEL DO
377  call getbc(global_time+imex_a21*dt,dt,ps1,iwstart,nwgc,phys_req_diagonal)
379  !$OMP PARALLEL DO PRIVATE(igrid)
380  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
381  ps(igrid)%w=ps(igrid)%w+imex_a31*ps4(igrid)%w &
382  +imex_b1*dt*ps3(igrid)%w+imex_b2*(ps2(igrid)%w-ps1(igrid)%w)/imex_ha22
383  if(stagger_grid) then
384  ps(igrid)%ws=ps(igrid)%ws+imex_a31*ps4(igrid)%ws &
385  +imex_b1*dt*ps3(igrid)%ws+imex_b2*(ps2(igrid)%ws-ps1(igrid)%ws)/imex_ha22
386  endif
387  end do
388  !$OMP END PARALLEL DO
389  !$OMP PARALLEL DO PRIVATE(igrid)
390  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
391  ps3(igrid)%w=ps1(igrid)%w-imex_a21*ps4(igrid)%w &
392  -imex_ha21*dt*ps3(igrid)%w+imex_b1*dt*ps3(igrid)%w
393  if(stagger_grid) then
394  ps3(igrid)%ws=ps1(igrid)%ws-imex_a21*ps4(igrid)%ws &
395  -imex_ha21*dt*ps3(igrid)%ws+imex_b1*dt*ps3(igrid)%ws
396  endif
397  end do
398  !$OMP END PARALLEL DO
400  !$OMP PARALLEL DO PRIVATE(igrid)
401  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
402  ps2(igrid)%w=(ps(igrid)%w-ps3(igrid)%w-imex_a31*ps4(igrid)%w)/imex_a32 &
403  +(1.0d0-imex_b2/imex_a32)*(ps2(igrid)%w-ps1(igrid)%w)/imex_ha22
404  if(stagger_grid) then
405  ps2(igrid)%ws=(ps(igrid)%ws-ps3(igrid)%ws-imex_a31*ps4(igrid)%ws)/imex_a32 &
406  +(1.0d0-imex_b2/imex_a32)*(ps2(igrid)%ws-ps1(igrid)%ws)/imex_ha22
407  endif
408  end do
409  !$OMP END PARALLEL DO
410  !$OMP PARALLEL DO PRIVATE(igrid)
411  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
412  ps1(igrid)%w=ps3(igrid)%w+imex_b1*ps4(igrid)%w+imex_b2*ps2(igrid)%w
413  if(stagger_grid) then
414  ps1(igrid)%ws=ps3(igrid)%ws+imex_b1*ps4(igrid)%ws+imex_b2*ps2(igrid)%ws
415  endif
416  end do
417  !$OMP END PARALLEL DO
419  !$OMP PARALLEL DO PRIVATE(igrid)
420  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
421  ps(igrid)%w=ps1(igrid)%w+ps2(igrid)%w-ps(igrid)%w
422  if(stagger_grid) then
423  ps(igrid)%ws=ps1(igrid)%ws+ps2(igrid)%ws-ps(igrid)%ws
424  endif
425  end do
426  !$OMP END PARALLEL DO
427  call advect1(flux_method,imex_b3, idim^lim,global_time+imex_c3*dt,ps2,global_time+(1.0d0-imex_b3)*dt,ps)
428 
429  case (imex_cb3a)
430  ! Third order IMEX scheme with low-storage implementation (4 registers).
431  ! From Cavaglieri&Bewley 2015, see doi.org/10.1016/j.jcp.2015.01.031
432  ! (scheme called "IMEXRKCB3a" there). Uses 3 explicit and 2 implicit stages.
433  ! Parameters are in imex_bj, imex_cj (same for implicit/explicit),
434  ! imex_aij (implicit tableau) and imex_haij (explicit tableau).
435  call advect1(flux_method, imex_ha21, idim^lim, global_time, ps, global_time, ps1)
437  !$OMP PARALLEL DO PRIVATE(igrid)
438  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
439  ps3(igrid)%w = ps(igrid)%w + imex_a32/imex_a22 * (ps2(igrid)%w - ps1(igrid)%w)
440  ps(igrid)%w = ps(igrid)%w + imex_b2 /imex_a22 * (ps2(igrid)%w - ps1(igrid)%w)
441  ps1(igrid)%w = ps3(igrid)%w
442  if(stagger_grid) ps3(igrid)%ws = ps(igrid)%ws + imex_a32/imex_a22 * (ps2(igrid)%ws - ps1(igrid)%ws)
443  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + imex_b2 /imex_a22 * (ps2(igrid)%ws - ps1(igrid)%ws)
444  if(stagger_grid) ps1(igrid)%ws = ps3(igrid)%ws
445  end do
446  !$OMP END PARALLEL DO
447  call advect1(flux_method, imex_ha32, idim^lim, global_time+imex_c2*dt, ps2, global_time, ps3)
448  !$OMP PARALLEL DO PRIVATE(igrid)
449  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
450  ps(igrid)%w = ps(igrid)%w + imex_b2 /imex_ha32 * (ps3(igrid)%w - ps1(igrid)%w)
451  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + imex_b2 /imex_ha32 * (ps3(igrid)%ws - ps1(igrid)%ws)
452  end do
454  !$OMP PARALLEL DO PRIVATE(igrid)
455  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
456  ps(igrid)%w = ps(igrid)%w + imex_b3 /imex_a33 * (ps1(igrid)%w - ps3(igrid)%w)
457  if(stagger_grid) ps(igrid)%ws = ps(igrid)%ws + imex_b3 /imex_a33 * (ps1(igrid)%ws - ps3(igrid)%ws)
458  end do
459  !$OMP END PARALLEL DO
461 
462  case default
463  call mpistop("unknown threestep time_integrator in advect")
464  end select
465 
466  case (fourstep)
467  select case (t_integrator)
468  case (ssprk4)
469  ! SSPRK(4,3) or SSP(4,2) depending on ssprk_order (3 vs 2)
470  ! ssprk43: Strong stability preserving 4 stage RK 3rd order by Ruuth and Spiteri
471  ! Ruuth & Spiteri, J. Sci. Comput., 17 (2002), p. 211-220
472  ! supposed to be stable up to CFL=2.
473  ! ssp42: stable up to CFL=3
474  call advect1(flux_method,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
475  !$OMP PARALLEL DO PRIVATE(igrid)
476  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
477  ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
478  if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
479  end do
480  !$OMP END PARALLEL DO
482  !$OMP PARALLEL DO PRIVATE(igrid)
483  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
484  ps1(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
485  if(stagger_grid) ps1(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
486  end do
487  !$OMP END PARALLEL DO
489  !$OMP PARALLEL DO PRIVATE(igrid)
490  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
491  ps(igrid)%w=rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w
492  if(stagger_grid) ps(igrid)%ws=rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws
493  end do
494  !$OMP END PARALLEL DO
495  call advect1(flux_method,rk_beta44, idim^lim,global_time+rk_c4*dt,ps1,global_time+(1.0d0-rk_beta44)*dt,ps)
496 
497  case (rk4)
498  ! the standard RK(4,4) method
499  !$OMP PARALLEL DO PRIVATE(igrid)
500  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
501  ps2(igrid)%w=ps(igrid)%w
502  ps3(igrid)%w=ps(igrid)%w
503  if(stagger_grid) then
504  ps2(igrid)%ws=ps(igrid)%ws
505  ps3(igrid)%ws=ps(igrid)%ws
506  endif
507  end do
508  !$OMP END PARALLEL DO
509  call advect1(flux_method,half, idim^lim,global_time,ps,global_time,ps1)
510  call advect1(flux_method,half, idim^lim,global_time+half*dt,ps1,global_time,ps2)
511  call advect1(flux_method,1.0d0, idim^lim,global_time+half*dt,ps2,global_time,ps3)
512  !$OMP PARALLEL DO PRIVATE(igrid)
513  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
514  ps(igrid)%w=(1.0d0/3.0d0)*(-ps(igrid)%w+ps1(igrid)%w+2.0d0*ps2(igrid)%w+ps3(igrid)%w)
515  if(stagger_grid) ps(igrid)%ws=(1.0d0/3.0d0) &
516  *(-ps(igrid)%ws+ps1(igrid)%ws+2.0d0*ps2(igrid)%ws+ps3(igrid)%ws)
517  end do
518  !$OMP END PARALLEL DO
519  call advect1(flux_method,1.0d0/6.0d0, idim^lim,global_time+dt,ps3,global_time+dt*5.0d0/6.0d0,ps)
520 
521  case default
522  call mpistop("unknown fourstep time_integrator in advect")
523  end select
524 
525  case (fivestep)
526  select case (t_integrator)
527  case (ssprk5)
528  ! SSPRK(5,4) by Ruuth and Spiteri
529  !bcexch = .false.
530  call advect1(flux_method,rk_beta11, idim^lim,global_time,ps,global_time,ps1)
531  !$OMP PARALLEL DO PRIVATE(igrid)
532  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
533  ps2(igrid)%w=rk_alfa21*ps(igrid)%w+rk_alfa22*ps1(igrid)%w
534  if(stagger_grid) ps2(igrid)%ws=rk_alfa21*ps(igrid)%ws+rk_alfa22*ps1(igrid)%ws
535  end do
536  !$OMP END PARALLEL DO
538  !$OMP PARALLEL DO PRIVATE(igrid)
539  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
540  ps1(igrid)%w=rk_alfa31*ps(igrid)%w+rk_alfa33*ps2(igrid)%w
541  if(stagger_grid) ps1(igrid)%ws=rk_alfa31*ps(igrid)%ws+rk_alfa33*ps2(igrid)%ws
542  end do
543  !$OMP END PARALLEL DO
545  !$OMP PARALLEL DO PRIVATE(igrid)
546  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
547  ps3(igrid)%w=rk_alfa53*ps2(igrid)%w+rk_alfa54*ps1(igrid)%w
548  if(stagger_grid) ps3(igrid)%ws=rk_alfa53*ps2(igrid)%ws+rk_alfa54*ps1(igrid)%ws
549  end do
550  !$OMP END PARALLEL DO
551  !$OMP PARALLEL DO PRIVATE(igrid)
552  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
553  ps2(igrid)%w=rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w
554  if(stagger_grid) ps2(igrid)%ws=rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws
555  end do
556  !$OMP END PARALLEL DO
558  !$OMP PARALLEL DO PRIVATE(igrid)
559  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
560  ps(igrid)%w=ps3(igrid)%w+rk_alfa55*ps2(igrid)%w &
561  +(rk_beta54/rk_beta44)*(ps2(igrid)%w-(rk_alfa41*ps(igrid)%w+rk_alfa44*ps1(igrid)%w))
562  if(stagger_grid) then
563  ps(igrid)%ws=ps3(igrid)%ws+rk_alfa55*ps2(igrid)%ws &
564  +(rk_beta54/rk_beta44)*(ps2(igrid)%ws-(rk_alfa41*ps(igrid)%ws+rk_alfa44*ps1(igrid)%ws))
565  endif
566  end do
567  !$OMP END PARALLEL DO
568  !bcexch = .true.
569  call advect1(flux_method,rk_beta55, idim^lim,global_time+rk_c5*dt,ps2,global_time+(1.0d0-rk_beta55)*dt,ps)
570 
571  case default
572  call mpistop("unknown fivestep time_integrator in advect")
573  end select
574 
575  case default
576  call mpistop("unknown time_stepper in advect")
577  end select
578 
579  end subroutine advect
580 
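As a concrete illustration of the register pattern used throughout advect, the standalone sketch below applies the same Heun/SSPRK2 update as the twostep/ssprk2 branch (where ps and ps1 play the roles of y and y1) to the scalar ODE dy/dt = -y. It is illustrative only: the names y, y1 and f are not AMRVAC identifiers, and no AMRVAC data structures are involved.

! Standalone illustration (not AMRVAC code): the two-register Heun/SSPRK2
! update of the twostep/ssprk2 branch, applied to the scalar ODE dy/dt = -y.
program heun_sketch
  implicit none
  integer :: n
  double precision :: y, y1, t, dt

  y  = 1.0d0
  t  = 0.0d0
  dt = 1.0d-2
  do n = 1, 100
     y1 = y + dt*f(y)              ! first advect1: factor one, ps -> ps1
     y  = 0.5d0*y + 0.5d0*y1       ! grid loop: ps = half*ps + half*ps1
     y  = y + 0.5d0*dt*f(y1)       ! second advect1: factor half, ps1 -> ps
     t  = t + dt
  end do
  print *, 'Heun result:', y, '  exact:', exp(-t)

contains

  pure double precision function f(y)
    double precision, intent(in) :: y
    f = -y
  end function f

end program heun_sketch

The combination y = 0.5*y + 0.5*y1 followed by a half-step flux evaluation reproduces the classical Heun update y^{n+1} = y^n + dt/2*(f(y^n) + f(y^n + dt*f(y^n))) without a third register, which is why only ps and ps1 are touched in that branch.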
581  !> Implicit global update step within IMEX schemes, advance psa=psb+dtfactor*qdt*F_im(psa)
582  subroutine global_implicit_update(dtfactor,qdt,qtC,psa,psb)
586 
587  type(state), target :: psa(max_blocks) !< Compute implicit part from this state and update it
588  type(state), target :: psb(max_blocks) !< Will be unchanged, as on entry
589  double precision, intent(in) :: qdt !< overall time step dt
590  double precision, intent(in) :: qtC !< Both states psa and psb at this time level
591  double precision, intent(in) :: dtfactor !< Advance psa=psb+dtfactor*qdt*F_im(psa)
592 
593  integer :: iigrid, igrid
594 
595  !> First copy all variables from psb to psa; this is necessary to account for
596  ! quantities in w that have no implicit source term
597  do iigrid=1,igridstail; igrid=igrids(iigrid);
598  psa(igrid)%w = psb(igrid)%w
599  end do
600 
601  if (associated(phys_implicit_update)) then
602  call phys_implicit_update(dtfactor,qdt,qtc,psa,psb)
603  end if
604 
605  ! enforce boundary conditions for psa
606  call getbc(qtc,0.d0,psa,iwstart,nwgc,phys_req_diagonal)
607 
608  end subroutine global_implicit_update
609 
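In schematic form, each call to global_implicit_update solves the stage equation

\[
  \psi_a \;=\; \psi_b \;+\; \mathrm{dtfactor}\cdot q\mathrm{dt}\,F_{\mathrm{im}}(\psi_a),
\]

i.e. a backward-Euler-type update with effective step dtfactor*qdt. For a linear implicit operator \(F_{\mathrm{im}}(\psi)=A\psi\) this amounts to solving \((I-\mathrm{dtfactor}\cdot q\mathrm{dt}\,A)\,\psi_a=\psi_b\). How (and how accurately) the equation is solved is entirely up to the physics module's phys_implicit_update; this routine only provides the initial copy and the ghost-cell update afterwards.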
610  !> Evaluate Implicit part in place, i.e. psa==>F_im(psa)
611  subroutine evaluate_implicit(qtC,psa)
614 
615  type(state), target :: psa(max_blocks) !< Compute implicit part from this state and update it
616  double precision, intent(in) :: qtC !< psa at this time level
617 
618  if (associated(phys_evaluate_implicit)) then
619  call phys_evaluate_implicit(qtc,psa)
620  end if
621 
622  end subroutine evaluate_implicit
623 
624  !> Integrate all grids by one partial step
625  subroutine advect1(method,dtfactor,idim^LIM,qtC,psa,qt,psb)
628  use mod_fix_conserve
629  use mod_physics
630 
631  integer, intent(in) :: idim^LIM
632  type(state), target :: psa(max_blocks) !< Compute fluxes based on this state
633  type(state), target :: psb(max_blocks) !< Update solution on this state
634  double precision, intent(in) :: dtfactor !< Advance over dtfactor * dt
635  double precision, intent(in) :: qtC
636  double precision, intent(in) :: qt
637  integer, intent(in) :: method(nlevelshi)
638 
639  double precision :: qdt
640  integer :: iigrid, igrid
641 
642  istep = istep+1
643 
644  if(associated(phys_special_advance)) then
645  call phys_special_advance(qtc,psa)
646  end if
647 
648  qdt=dtfactor*dt
649  ! opedit: Just advance the active grids:
650  !$OMP PARALLEL DO PRIVATE(igrid)
651  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
652  block=>ps(igrid)
653  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
654 
655  call process1_grid(method(block%level),igrid,qdt,ixg^ll,idim^lim,qtc,&
656  psa(igrid),qt,psb(igrid),pso(igrid))
657  end do
658  !$OMP END PARALLEL DO
659 
660  ! opedit: Send flux for all grids, expects sends for all
661  ! nsend_fc(^D), set in connectivity.t.
662 
663  if (fix_conserve_global .and. fix_conserve_at_step) then
664  call recvflux(idim^lim)
665  call sendflux(idim^lim)
666  call fix_conserve(psb,idim^lim,1,nwflux)
667  if(stagger_grid) then
668  call fix_edges(psb,idim^lim)
669  ! fill the cell-center values from the updated staggered variables
670  !$OMP PARALLEL DO PRIVATE(igrid)
671  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
672  call phys_face_to_center(ixm^ll,psb(igrid))
673  end do
674  !$OMP END PARALLEL DO
675  end if
676  if(phys_solve_eaux) then
677  ! synchronize internal energy for AMR mesh
678  !$OMP PARALLEL DO PRIVATE(igrid)
679  do iigrid=1,igridstail_active; igrid=igrids_active(iigrid);
680  call phys_energy_synchro(ixg^ll,ixm^ll,psb(igrid)%w,psb(igrid)%x)
681  end do
682  !$OMP END PARALLEL DO
683  end if
684  end if
685 
686  ! For all grids: fill ghost cells
687  call getbc(qt+qdt,qdt,psb,iwstart,nwgc,phys_req_diagonal)
688 
689  end subroutine advect1
690 
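Read schematically, one call to advect1 performs the explicit partial update

\[
  \psi_b \;\leftarrow\; \psi_b \;+\; \mathrm{dtfactor}\cdot\Delta t\; F(\psi_a, qt_C),
\]

with fluxes evaluated from psa at time qtC and the result accumulated into psb, followed (when active) by the flux/edge fix at refinement boundaries and a ghost-cell update of psb. All multi-stage schemes in advect are built from this single operation plus the per-grid linear combinations of the ps* registers.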
691  !> Prepare to advance a single grid over one partial time step
692  subroutine process1_grid(method,igrid,qdt,ixI^L,idim^LIM,qtC,sCT,qt,s,sold)
694  use mod_fix_conserve
695 
696  integer, intent(in) :: method
697  integer, intent(in) :: igrid, ixI^L, idim^LIM
698  double precision, intent(in) :: qdt, qtC, qt
699  type(state), target :: sCT, s, sold
700 
701  ! cell face flux
702  double precision :: fC(ixI^S,1:nwflux,1:ndim)
703  ! cell edge flux
704  double precision :: fE(ixI^S,7-2*ndim:3)
705 
706  call advect1_grid(method,qdt,ixi^l,idim^lim,qtc,sct,qt,s,sold,fc,fe,&
707  rnode(rpdx1_:rnodehi,igrid),ps(igrid)%x)
708 
709  ! opedit: Obviously, flux is stored only for active grids,
710  ! but we know in fix_conserve whether there is a passive neighbor
712  ! via neighbor_active(i^D,igrid), and thus skip the correction for those.
713  ! This violates strict conservation when the active/passive interface
714  ! coincides with a coarse/fine interface.
715  if (fix_conserve_global .and. fix_conserve_at_step) then
716  call store_flux(igrid,fc,idim^lim,nwflux)
717  if(stagger_grid) call store_edge(igrid,ixi^l,fe,idim^lim)
718  end if
719 
720  end subroutine process1_grid
721 
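The face fluxes stored here are what fix_conserve later uses at coarse/fine interfaces: schematically, the coarse-side flux through a shared face is replaced by an area- (and sub-step-) weighted sum of the fine-side fluxes,

\[
  F^{\mathrm{coarse}}_{\mathrm{face}} \;\leftarrow\; \sum_{k\,\in\,\mathrm{fine\ subfaces}} w_k\,F^{\mathrm{fine}}_k,
\]

so the coarse and fine updates exchange exactly the same amount of each conserved variable across that face. As noted in the comment above, the correction is skipped when the neighbor is passive, which is where strict conservation can be lost.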
722  !> Advance a single grid over one partial time step
723  subroutine advect1_grid(method,qdt,ixI^L,idim^LIM,qtC,sCT,qt,s,sold,fC,fE,dxs,x)
724 
725  ! integrate one grid by one partial step
728  use mod_tvd
729  use mod_source, only: addsource2
731 
732  integer, intent(in) :: method
733  integer, intent(in) :: ixI^L, idim^LIM
734  double precision, intent(in) :: qdt, qtC, qt, dxs(ndim), x(ixI^S,1:ndim)
735  type(state), target :: sCT, s, sold
736  double precision :: fC(ixI^S,1:nwflux,1:ndim)
737  double precision :: fE(ixI^S,7-2*ndim:3)
738 
739  integer :: ixO^L
740 
741  ixo^l=ixi^l^lsubnghostcells;
742  select case (method)
744  call finite_volume(method,qdt,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,sold,fc,fe,dxs,x)
745  case (fs_cd,fs_cd4)
746  call centdiff(method,qdt,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
747  case (fs_hancock)
748  call hancock(qdt,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,dxs,x)
749  case (fs_fd)
750  call fd(qdt,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
751  case (fs_tvd)
752  call centdiff(fs_cd,qdt,ixi^l,ixo^l,idim^lim,qtc,sct,qt,s,fc,fe,dxs,x)
753  call tvdlimit(method,qdt,ixi^l,ixo^l,idim^lim,sct,qt+qdt,s,fc,dxs,x)
754  case (fs_source)
755  call addsource2(qdt*dble(idimmax-idimmin+1)/dble(ndim),&
756  ixi^l,ixo^l,1,nw,qtc,sct%w,qt,s%w,x,.false.)
757  case (fs_nul)
758  ! There is nothing to do
759  case default
760  call mpistop("unknown flux scheme in advect1_grid")
761  end select
762 
763  end subroutine advect1_grid
764 
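A note on the fs_source branch above: the source term is added with the scaled step qdt*(idimmax-idimmin+1)/ndim. With dimensional splitting each call handles a single direction (idimmax = idimmin), so every sweep contributes qdt/ndim of the source and the ndim sweeps together add exactly

\[
  \sum_{\mathrm{sweeps}} \frac{q\mathrm{dt}}{n_{\mathrm{dim}}}\,S \;=\; q\mathrm{dt}\,S,
\]

while an unsplit call (idimmin = 1, idimmax = ndim) adds the full qdt*S in one go.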
765  !> process is a user entry in time loop, before output and advance
766  !> allows one to modify the solution, add extra variables, etc.
767  !> Warning: CFL dt already determined (and is not recomputed)!
768  subroutine process(iit,qt)
772  use mod_physics, only: phys_req_diagonal
773  ! .. scalars ..
774  integer,intent(in) :: iit
775  double precision, intent(in):: qt
776 
777  integer:: iigrid, igrid
778 
779  if (associated(usr_process_global)) then
780  call usr_process_global(iit,qt)
781  end if
782 
783  if (associated(usr_process_grid)) then
784  !$OMP PARALLEL DO PRIVATE(igrid)
785  do iigrid=1,igridstail; igrid=igrids(iigrid);
786  ! next few lines ensure correct usage of routines like divvector etc
787  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
788  block=>ps(igrid)
789  call usr_process_grid(igrid,node(plevel_,igrid),ixg^ll,ixm^ll, &
790  qt,ps(igrid)%w,ps(igrid)%x)
791  end do
792  !$OMP END PARALLEL DO
793  call getbc(qt,dt,ps,iwstart,nwgc,phys_req_diagonal)
794  end if
795  end subroutine process
796 
797  !> process_advanced is user entry in time loop, just after advance
798  !> allows one to modify the solution, add extra variables, etc.
799  !> added for handling two-way coupled PIC-MHD
800  !> Warning: w is now at global_time^(n+1), while global_time and the iteration counter still correspond to t^n, it^n
801  subroutine process_advanced(iit,qt)
806  use mod_physics, only: phys_req_diagonal
807  ! .. scalars ..
808  integer,intent(in) :: iit
809  double precision, intent(in):: qt
810 
811  integer:: iigrid, igrid
812 
813  if (associated(usr_process_adv_global)) then
814  call usr_process_adv_global(iit,qt)
815  end if
816 
817  if (associated(usr_process_adv_grid)) then
818  !$OMP PARALLEL DO PRIVATE(igrid)
819  do iigrid=1,igridstail; igrid=igrids(iigrid);
820  ! next few lines ensure correct usage of routines like divvector etc
821  ^d&dxlevel(^d)=rnode(rpdx^d_,igrid);
822  block=>ps(igrid)
823 
824  call usr_process_adv_grid(igrid,node(plevel_,igrid),ixg^ll,ixm^ll, &
825  qt,ps(igrid)%w,ps(igrid)%x)
826  end do
827  !$OMP END PARALLEL DO
828  call getbc(qt,dt,ps,iwstart,nwgc,phys_req_diagonal)
829  end if
830  end subroutine process_advanced
831 
832 end module mod_advance