MPI-AMRVAC  3.0
The MPI - Adaptive Mesh Refinement - Versatile Advection Code
mod_coarsen_refine.t
1 !> Module to coarsen and refine grids for AMR
2 module mod_coarsen_refine
3  implicit none
4  private
5  !> MPI recv send variables for AMR
6  integer :: itag, irecv, isend
7  integer, dimension(:), allocatable :: recvrequest, sendrequest
8  integer, dimension(:,:), allocatable :: recvstatus, sendstatus
9  !> MPI recv send variables for staggered-variable AMR
10  integer :: itag_stg
11  integer, dimension(:), allocatable :: recvrequest_stg, sendrequest_stg
12  integer, dimension(:,:), allocatable :: recvstatus_stg, sendstatus_stg
13 
14  ! Public subroutines
15  public :: amr_coarsen_refine
16 
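 ! Typical usage (sketch): elsewhere in the code the AMR error estimator fills the
 ! refine/coarsen flag arrays of mod_forest, after which a single
 !   call amr_coarsen_refine
 ! rebuilds the block hierarchy, the tree metadata and the load balance.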
17 contains
18 
19  !> coarsen and refine blocks to update AMR grid
20  subroutine amr_coarsen_refine
21  use mod_forest
25  use mod_amr_fct
28  {^nooned
29  use mod_multigrid_coupling
30  }
31 
32  integer :: iigrid, igrid, ipe, igridco, ipeco, level, ic^d
33  integer, dimension(2^D&) :: igridfi, ipefi
34  integer :: n_coarsen, n_refine
35  type(tree_node_ptr) :: tree, sibling
36  logical :: active
37  integer, external :: getnode
38 
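 ! First enforce proper nesting: the flags are adjusted so that, after the update,
 ! neighbouring blocks never differ by more than one refinement level.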
39  call proper_nesting
40 
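 ! For staggered (face-allocated) variables, store and communicate the existing
 ! fine-level face values first, so that refinement/coarsening stays consistent
 ! across resolution jumps and the discrete divergence (e.g. of B) is conserved.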
41  if(stagger_grid) then
42  call store_faces
43  call comm_faces
44  end if
45 
46  n_coarsen = count(coarsen(:, :))
47  n_refine = count(refine(:, :))
48 
49  ! to save memory: first coarsen then refine
50  irecv=0
51  isend=0
52  allocate(recvstatus(mpi_status_size,max_blocks),recvrequest(max_blocks), &
53  sendstatus(mpi_status_size,max_blocks),sendrequest(max_blocks))
54  recvrequest=mpi_request_null
55  sendrequest=mpi_request_null
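 ! Requests are preset to MPI_REQUEST_NULL so that entries never attached to an
 ! actual isend/irecv are harmless in the MPI_WAITALL calls below.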
56 
57  if(stagger_grid) then
58  allocate(recvstatus_stg(mpi_status_size,max_blocks*^nd),recvrequest_stg(max_blocks*^nd), &
59  sendstatus_stg(mpi_status_size,max_blocks*^nd),sendrequest_stg(max_blocks*^nd))
60  recvrequest_stg=mpi_request_null
61  sendrequest_stg=mpi_request_null
62  end if
63 
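 ! Pass 1: coarsening. For every block flagged for coarsening, gather its 2^D
 ! siblings from the tree, reserve a grid number for the new coarse block on the
 ! processor of the first sibling, and merge the siblings into it. Data of
 ! siblings living on other processors arrives via the nonblocking receives
 ! posted inside coarsen_grid_siblings.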
64  do ipe=0,npe-1
65  do igrid=1,max_blocks
66  if (coarsen(igrid,ipe)) then
67  if (.not.associated(igrid_to_node(igrid,ipe)%node)) cycle
68 
69  tree%node => igrid_to_node(igrid,ipe)%node%parent%node
70  {do ic^db=1,2\}
71  sibling%node => tree%node%child(ic^d)%node
72  ipefi(ic^d)=sibling%node%ipe
73  igridfi(ic^d)=sibling%node%igrid
74  {end do\}
75 
76  ipeco=ipefi(1^d&)
77  igridco=getnode(ipeco)
78 
79  call coarsen_tree_leaf(igridco,ipeco,igridfi,ipefi,active)
80 
81  call coarsen_grid_siblings(igridco,ipeco,igridfi,ipefi,active)
82 
83  ! local coarsening done
84  {do ic^db=1,2\}
85  if (ipefi(ic^d)==ipeco) then
86  call putnode(igridfi(ic^d),ipefi(ic^d))
87  coarsen(igridfi(ic^d),ipefi(ic^d))=.false.
88  end if
89  {end do\}
90  end if
91  end do
92  end do
93 
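 ! Complete all outstanding nonblocking transfers of coarsened block data before
 ! releasing the request/status arrays.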
94  if (irecv>0) then
95  call mpi_waitall(irecv,recvrequest,recvstatus,ierrmpi)
96  if(stagger_grid) call mpi_waitall(irecv,recvrequest_stg,recvstatus_stg,ierrmpi)
97  end if
98  if (isend>0) then
99  call mpi_waitall(isend,sendrequest,sendstatus,ierrmpi)
100  if(stagger_grid) call mpi_waitall(isend,sendrequest_stg,sendstatus_stg,ierrmpi)
101  end if
102 
103  deallocate(recvstatus,recvrequest,sendstatus,sendrequest)
104  if(stagger_grid) deallocate(recvstatus_stg,recvrequest_stg,sendstatus_stg,sendrequest_stg)
105 
106  ! non-local coarsening done
107  do ipe=0,npe-1
108  do igrid=1,max_blocks
109  if (coarsen(igrid,ipe)) then
110  !if (ipe==mype) call dealloc_node(igrid) ! do not deallocate node
111  ! memory, to prevent fragmentation of system memory caused by
112  ! frequent allocation and deallocation
113 
114  ! put the node (igrid number) into unused.
115  call putnode(igrid,ipe)
116  coarsen(igrid,ipe)=.false.
117  end if
118  end do
119  end do
120 
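 ! Pass 2: refinement. Each flagged block obtains 2^D fresh grid numbers on its
 ! own processor; the tree is updated on every processor, while the actual
 ! prolongation of data is done only by the owner (ipe==mype) in refine_grids.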
121  do ipe=0,npe-1
122  do igrid=1,max_blocks
123  if (refine(igrid,ipe)) then
124 
125  {do ic^db=1,2\}
126  igridfi(ic^d)=getnode(ipe)
127  ipefi(ic^d)=ipe
128  {end do\}
129 
130  call refine_tree_leaf(igridfi,ipefi,igrid,ipe,active)
131 
132  if (ipe==mype) call refine_grids(igridfi,ipefi,igrid,ipe,active)
133 
134  ! refinement done
135  call putnode(igrid,ipe)
136  refine(igrid,ipe)=.false.
137  end if
138  end do
139  end do
140 
141  ! A crash occurs in a later MPI_WAITALL when the initial condition takes too
142  ! long to fill the new blocks, with both the gfortran and Intel Fortran compilers.
143  ! This barrier cures the problem.
144  ! TODO: find the reason
145  if(.not.time_advance) call mpi_barrier(icomm,ierrmpi)
146 
147  if(stagger_grid) call end_comm_faces
148 
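 ! Rebuild the global grid metadata: active level range, Morton space-filling-curve
 ! ordering, load balance, the igrids/connectivity lists and the set of active grids.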
149  call get_level_range
150 
151  ! Update sfc array: igrid and ipe info in space filling curve
152  call amr_morton_order()
153 
154  call load_balance
155 
156  ! Rebuild tree connectivity
157  call getigrids
158  call build_connectivity
159 
160  ! Update the list of active grids
161  call selectgrids
162  ! grid structure now complete again.
163 
164  ! Since we only filled mesh values and advance assumes filled
165  ! ghost cells, do the boundary filling for the new levels
166  if (time_advance) then
167  call getbc(global_time+dt,0.d0,ps,iwstart,nwgc)
168  else
169  call getbc(global_time,0.d0,ps,iwstart,nwgc)
170  end if
171 
172  {^nooned
173  if (use_multigrid) call mg_update_refinement(n_coarsen, n_refine)
174  }
175 
176  if (associated(usr_after_refine)) then
177  call usr_after_refine(n_coarsen, n_refine)
178  end if
179 
180  end subroutine amr_coarsen_refine
181 
182  !> For all grids on all processors, do a check on refinement flags. Make
183  !> sure that neighbors will not differ more than one level of refinement.
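 !> This is done in two sweeps: refinement flags are propagated to coarser
 !> neighbors of flagged blocks (from the finest level downwards), and coarsening
 !> is only kept when all 2^D siblings are flagged and no finer neighbor would
 !> violate the one-level rule; otherwise the siblings are unflagged again.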
184  subroutine proper_nesting
185  use mod_forest
186  use mod_global_parameters
187 
188  logical, dimension(:,:), allocatable :: refine2
189  integer :: iigrid, igrid, level, ic^D, inp^D, i^D, my_neighbor_type,ipe
190  logical :: coarsening, pole(ndim), sendbuf(max_blocks)
191  type(tree_node_ptr) :: tree, p_neighbor, my_parent, sibling, my_neighbor, &
192  neighborchild
193 
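 ! Synchronize the refinement flags over all processors. With buffer cells around
 ! flagged regions a block may be flagged by any processor, so a logical OR
 ! reduction over the full array is needed; without buffering each processor only
 ! flags its own blocks and an allgather of the local column suffices.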
194  if (nbufferx^d/=0|.or.) then
195  allocate(refine2(max_blocks,npe))
196  call mpi_allreduce(refine,refine2,max_blocks*npe,mpi_logical,mpi_lor, &
197  icomm,ierrmpi)
198  refine=refine2
199  else
200  sendbuf(:)=refine(:,mype)
201  call mpi_allgather(sendbuf,max_blocks,mpi_logical,refine,max_blocks, &
202  mpi_logical,icomm,ierrmpi)
203  end if
204 
205  do level=min(levmax,refine_max_level-1),levmin+1,-1
206  tree%node => level_head(level)%node
207  do
208  if (.not.associated(tree%node)) exit
209 
210  if (refine(tree%node%igrid,tree%node%ipe)) then
211  ic^d=1+modulo(tree%node%ig^d-1,2);
212  {do inp^db=ic^db-2,ic^db-1\}
213  if (inp^d==0|.and.) cycle
214  p_neighbor%node => tree%node%parent%node
215  {if (inp^d/=0) then
216  p_neighbor%node => p_neighbor%node%neighbor(ic^d,^d)%node
217  if (.not.associated(p_neighbor%node)) cycle
218  end if\}
219  if (p_neighbor%node%leaf) then
220  refine(p_neighbor%node%igrid,p_neighbor%node%ipe)=.true.
221  end if
222  {end do\}
223  end if
224 
225  tree%node => tree%node%next%node
226  end do
227  end do
228 
229  ! On each processor locally, check if grids set for coarsening are already
230  ! set for refinement.
231 
232  do iigrid=1,igridstail; igrid=igrids(iigrid);
233  if (refine(igrid,mype).and.coarsen(igrid,mype)) coarsen(igrid,mype)=.false.
234  end do
235 
236  ! For all grids on all processors, gather the coarsening flags
237  sendbuf(:)=coarsen(:,mype)
238  call mpi_allgather(sendbuf,max_blocks,mpi_logical,coarsen,max_blocks, &
239  mpi_logical,icomm,ierrmpi)
240 
241  do level=levmax,max(2,levmin),-1
242  tree%node => level_head(level)%node
243  do
244  if (.not.associated(tree%node)) exit
245 
246  if (coarsen(tree%node%igrid,tree%node%ipe)) then
247  coarsening=.true.
248  my_parent%node => tree%node%parent%node
249 
250  ! are all siblings flagged for coarsening?
251  check1: {do ic^db=1,2\}
252  sibling%node => my_parent%node%child(ic^d)%node
253  if (sibling%node%leaf) then
254  if (coarsen(sibling%node%igrid,sibling%node%ipe)) cycle
255  end if
256  call unflag_coarsen_siblings
257  exit check1
258  {end do\} check1
259 
260  ! Make sure that neighbors will not differ more than one level of
261  ! refinement, otherwise unflag all siblings
262  if (coarsening) then
263  check2: {do ic^db=1,2\}
264  sibling%node => my_parent%node%child(ic^d)%node
265  {do i^db=ic^db-2,ic^db-1\}
266  if (i^d==0|.and.) cycle
267  call find_neighbor(my_neighbor,my_neighbor_type, &
268  sibling,i^d,pole)
269  select case (my_neighbor_type)
270  case (neighbor_sibling)
271  if (refine(my_neighbor%node%igrid, &
272  my_neighbor%node%ipe)) then
273  call unflag_coarsen_siblings
274  exit check2
275  else
276  cycle
277  end if
278  case (neighbor_fine)
279  neighborchild%node=>my_neighbor%node%child(1^d&)%node
280  if (neighborchild%node%leaf) then
281  if (coarsen(neighborchild%node%igrid, &
282  neighborchild%node%ipe)) then
283  cycle
284  end if
285  end if
286  call unflag_coarsen_siblings
287  exit check2
288  end select
289  {end do\}
290  {end do\} check2
291  end if
292 
293  end if
294 
295  tree%node => tree%node%next%node
296  end do
297  end do
298 
299  contains
300 
301  subroutine unflag_coarsen_siblings
302 
303  integer :: ic^D
304  type(tree_node_ptr) :: sibling
305 
306  {do ic^db=1,2\}
307  sibling%node => my_parent%node%child(ic^d)%node
308  if (sibling%node%leaf) then
309  coarsen(sibling%node%igrid,sibling%node%ipe)=.false.
310  end if
311  {end do\}
312  coarsening=.false.
313 
314  end subroutine unflag_coarsen_siblings
315 
316  end subroutine proper_nesting
317 
318  !> coarsen sibling blocks into one block
319  subroutine coarsen_grid_siblings(igrid,ipe,child_igrid,child_ipe,active)
320  use mod_global_parameters
321 
322  integer, intent(in) :: igrid, ipe
323  integer, dimension(2^D&), intent(in) :: child_igrid, child_ipe
324  logical, intent(in) :: active
325 
326  integer :: igridFi, ipeFi, ixCo^L, ixCoG^L, ixCoM^L, ic^D, idir
327 
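 ! Merge the 2^D children into the coarse block igrid on processor ipe. When a
 ! child and the parent live on different processors, the coarsened data is
 ! exchanged with nonblocking sends/receives; the requests are completed by the
 ! MPI_WAITALL calls in amr_coarsen_refine.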
328  if (ipe==mype) call alloc_node(igrid)
329 
330  ! New passive block: fill it from the initial condition instead of coarsening:
331  if (.not. active) then
332  if (ipe == mype) then
333  call initial_condition(igrid)
334  {do ic^db=1,2\}
335  igridfi=child_igrid(ic^d)
336  ipefi=child_ipe(ic^d)
337  !if (ipeFi==mype) then
338  ! ! remove solution space of child
339  ! call dealloc_node(igridFi)
340  !end if
341  {end do\}
342  end if
343  return
344  end if
345 
346  {do ic^db=1,2\}
347  igridfi=child_igrid(ic^d)
348  ipefi=child_ipe(ic^d)
349 
350  if (ipefi==mype) then
351  ^d&dxlevel(^d)=rnode(rpdx^d_,igridfi);
352  if (ipe==mype) then
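 ! Parent lives here as well: coarsen the child directly into its
 ! quadrant/octant ixCo^L of the parent mesh ps(igrid).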
353  ixcomin^d=ixmlo^d+(ic^d-1)*(ixmhi^d-ixmlo^d+1)/2;
354  ixcomax^d=ixmhi^d+(ic^d-2)*(ixmhi^d-ixmlo^d+1)/2;
355 
356  call coarsen_grid(ps(igridfi),ixg^ll,ixm^ll,ps(igrid),ixg^ll,ixco^l)
357  ! remove solution space of child
358  !call dealloc_node(igridFi)
359  else
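 ! Parent lives on another processor: coarsen into the local buffer psc(igridfi)
 ! and ship it with a nonblocking send; the parent's owner posts the matching
 ! receive below, using the same (ipeFi,igridFi)-based tag.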
360  ixcogmin^d=1;
361  ixcogmax^d=ixghi^d/2+nghostcells;
362  ixcom^l=ixcog^l^lsubnghostcells;
363  call coarsen_grid(ps(igridfi),ixg^ll,ixm^ll,psc(igridfi), &
364  ixcog^l,ixcom^l)
365 
366  !itag=ipeFi*max_blocks+igridFi
367  itag=ipefi+igridfi
368  isend=isend+1
369  call mpi_isend(psc(igridfi)%w,1,type_coarse_block,ipe,itag, &
370  icomm,sendrequest(isend),ierrmpi)
371  if(stagger_grid) then
372  do idir=1,ndim
373  !itag_stg=(npe+ipeFi+1)*max_blocks+igridFi*(ndir-1+idir)
374  itag_stg=(npe+ipefi+1)+igridfi*(ndir-1+idir)
375  call mpi_isend(psc(igridfi)%ws,1,type_coarse_block_stg(idir,ic^d),ipe,itag_stg, &
376  icomm,sendrequest_stg(isend),ierrmpi)
377  end do
378  end if
379  end if
380  else
381  if (ipe==mype) then
382  !itag=ipeFi*max_blocks+igridFi
383  itag=ipefi+igridfi
384  irecv=irecv+1
385  call mpi_irecv(ps(igrid)%w,1,type_sub_block(ic^d),ipefi,itag, &
386  icomm,recvrequest(irecv),ierrmpi)
387  if(stagger_grid) then
388  do idir=1,ndim
389  !itag_stg=(npe+ipeFi+1)*max_blocks+igridFi*(ndir-1+idir)
390  itag_stg=(npe+ipefi+1)+igridfi*(ndir-1+idir)
391  call mpi_irecv(ps(igrid)%ws,1,type_sub_block_stg(idir,ic^d),ipefi,itag_stg, &
392  icomm,recvrequest_stg(irecv),ierrmpi)
393  end do
394  end if
395  end if
396  end if
397  {end do\}
398 
399  end subroutine coarsen_grid_siblings
400 
401 end module mod_coarsen_refine