2D AMRCLAW
advanc.f File Reference


Functions/Subroutines

subroutine advanc (level, nvar, dtlevnew, vtime, naux)
 Integrate all grids at the input level by one step of its delta(t).
 
subroutine prepgrids (listgrids, num, level)
 
subroutine par_advanc (mptr, mitot, mjtot, nvar, naux, dtnew)
 Integrate grid mptr.
 

Function/Subroutine Documentation

◆ advanc()

subroutine advanc (level, nvar, dtlevnew, logical vtime, naux)

Integrate all grids at the input level by one step of its delta(t).

This includes:

  • setting the ghost cells
  • advancing the solution on the grid
  • adjusting fluxes for flux conservation step later

Definition at line 11 of file advanc.f.

References amr_module::alloc, bound(), amr_module::cfl_level, amr_module::cflmax, amr_module::hxposs, amr_module::hyposs, amr_module::listofgrids, amr_module::liststart, amr_module::lstart, amr_module::mxnest, amr_module::ndihi, amr_module::ndilo, amr_module::ndjhi, amr_module::ndjlo, amr_module::nghost, amr_module::node, amr_module::null, amr_module::numgrids, par_advanc(), amr_module::possk, amr_module::rinfinity, amr_module::rnode, saveqc(), amr_module::store1, amr_module::storeaux, amr_module::timebound, amr_module::timeboundcpu, amr_module::timemult, amr_module::timestepgrid, amr_module::timestepgridcpu, amr_module::tvoll, and amr_module::tvollcpu.

Referenced by setgrd(), and tick().
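
The sketch below shows, in broad strokes, how a driver such as tick() uses this entry point to advance one level. It is illustrative only: it assumes amr_module is compiled and linked, the wrapper name example_advance_level is hypothetical, and the real driver (not shown) manages possk(level), the current delta(t) for the level.

      subroutine example_advance_level(level, nvar, naux)
c     hypothetical wrapper: advance every grid at 'level' once
      use amr_module
      implicit double precision (a-h,o-z)
      logical vtime
c     vtime flags whether variable time steps are in use
      vtime = .true.
c     take one step of size possk(level) on all grids at this level;
c     dtlevnew returns the smallest stable dt found on any grid
      call advanc(level, nvar, dtlevnew, vtime, naux)
c     a variable-step driver would use dtlevnew (and cflmax) to
c     choose possk(level) for the next step, as tick() does
      return
      end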

11 c
12  use amr_module
13  implicit double precision (a-h,o-z)
14 
15 
16  logical vtime
17  integer omp_get_thread_num, omp_get_max_threads
18  integer mythread/0/, maxthreads/1/
19  integer listgrids(numgrids(level))
20  integer clock_start, clock_finish, clock_rate
21  integer clock_startstepgrid,clock_startbound,clock_finishbound
22  real(kind=8) cpu_start, cpu_finish
23  real(kind=8) cpu_startbound, cpu_finishbound
24  real(kind=8) cpu_startstepgrid, cpu_finishstepgrid
25 
26 c maxgr is maximum number of grids many things are
27 c dimensioned at, so this is overall. only 1d array
28 c though so should suffice. problem is
29 c not being able to dimension at maxthreads
30 
31 
32 c
33 c ::::::::::::::; ADVANC :::::::::::::::::::::::::::::::::::::::::::
34 c integrate all grids at the input 'level' by one step of its delta(t)
35 c this includes: setting the ghost cells
36 c advancing the solution on the grid
37 c adjusting fluxes for flux conservation step later
38 c :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
39 c
40 c get start time for more detailed timing by level
41  call system_clock(clock_start,clock_rate)
42  call cpu_time(cpu_start)
43 
44 
45  hx = hxposs(level)
46  hy = hyposs(level)
47  delt = possk(level)
48 c this is linear alg.
49 c call prepgrids(listgrids,numgrids(level),level)
50 c
51 
52  call system_clock(clock_startbound,clock_rate)
53  call cpu_time(cpu_startbound)
54 
55 
56 c maxthreads initialized to 1 above in case no openmp
57 !$ maxthreads = omp_get_max_threads()
58 
59 c We want to do this regardless of the threading type
60 !$OMP PARALLEL DO PRIVATE(j,locnew, locaux, mptr,nx,ny,mitot,
61 !$OMP& mjtot,time,levSt),
62 !$OMP& SHARED(level, nvar, naux, alloc, intrat, delt,
63 !$OMP& listOfGrids,listStart,nghost,
64 !$OMP& node,rnode,numgrids,listgrids),
65 !$OMP& SCHEDULE (dynamic,1)
66 !$OMP& DEFAULT(none)
67  do j = 1, numgrids(level)
68  !mptr = listgrids(j)
69  levst = liststart(level)
70  mptr = listofgrids(levst+j-1)
71  !write(*,*)"old ",listgrids(j)," new",mptr
72  nx = node(ndihi,mptr) - node(ndilo,mptr) + 1
73  ny = node(ndjhi,mptr) - node(ndjlo,mptr) + 1
74  mitot = nx + 2*nghost
75  mjtot = ny + 2*nghost
76  locnew = node(store1,mptr)
77  locaux = node(storeaux,mptr)
78  time = rnode(timemult,mptr)
79 c
80  call bound(time,nvar,nghost,alloc(locnew),mitot,mjtot,mptr,
81  1 alloc(locaux),naux)
82 
83  end do
84 !$OMP END PARALLEL DO
85  call system_clock(clock_finishbound,clock_rate)
86  call cpu_time(cpu_finishbound)
87  timebound = timebound + clock_finishbound - clock_startbound
88  timeboundcpu=timeboundcpu+cpu_finishbound-cpu_startbound
89 
90 c
91 c save coarse level values if there is a finer level for wave fixup
92  if (level+1 .le. mxnest) then
93  if (lstart(level+1) .ne. null) then
94  call saveqc(level+1,nvar,naux)
95  endif
96  endif
97 c
98  dtlevnew = rinfinity
99  cfl_level = 0.d0 !# to keep track of max cfl seen on each level
100 
101 c
102  call system_clock(clock_startstepgrid,clock_rate)
103  call cpu_time(cpu_startstepgrid)
104 
105 
106 !$OMP PARALLEL DO PRIVATE(j,mptr,nx,ny,mitot,mjtot)
107 !$OMP& PRIVATE(mythread,dtnew)
108 !$OMP& SHARED(rvol,rvoll,level,nvar,mxnest,alloc,intrat)
109 !$OMP& SHARED(nghost,intratx,intraty,hx,hy,naux,listsp)
110 !$OMP& SHARED(node,rnode,dtlevnew,numgrids,listgrids)
111 !$OMP& SHARED(listOfGrids,listStart,levSt)
112 !$OMP& SCHEDULE (DYNAMIC,1)
113 !$OMP& DEFAULT(none)
114  do j = 1, numgrids(level)
115  !mptr = listgrids(j)
116  levst = liststart(level)
117  mptr = listofgrids(levst+j-1)
118  nx = node(ndihi,mptr) - node(ndilo,mptr) + 1
119  ny = node(ndjhi,mptr) - node(ndjlo,mptr) + 1
120  mitot = nx + 2*nghost
121  mjtot = ny + 2*nghost
122 c
123  call par_advanc(mptr,mitot,mjtot,nvar,naux,dtnew)
124 !$OMP CRITICAL (newdt)
125  dtlevnew = dmin1(dtlevnew,dtnew)
126 !$OMP END CRITICAL (newdt)
127 
128  end do
129 !$OMP END PARALLEL DO
130 c
131  call system_clock(clock_finish,clock_rate)
132  call cpu_time(cpu_finish)
133  tvoll(level) = tvoll(level) + clock_finish - clock_start
134  tvollcpu(level) = tvollcpu(level) + cpu_finish - cpu_start
135  timestepgrid = timestepgrid +clock_finish-clock_startstepgrid
136  timestepgridcpu=timestepgridcpu+cpu_finish-cpu_startstepgrid
137 
138  cflmax = dmax1(cflmax, cfl_level)
139 
140 c
141  return

◆ par_advanc()

subroutine par_advanc (mptr, mitot, mjtot, nvar, naux, dtnew)

Integrate grid mptr.

Grids are done in parallel.

Definition at line 171 of file advanc.f.

References amr_module::alloc, amr_module::cfluxptr, amr_module::cornxlo, amr_module::cornylo, amr_module::dimensional_split, amr_module::ffluxptr, fluxad(), fluxsv(), amr_module::hxposs, amr_module::hyposs, amr_module::intratx, amr_module::intraty, amr_module::listsp, amr_module::mxnest, amr_module::ndihi, amr_module::ndilo, amr_module::ndjhi, amr_module::ndjlo, amr_module::nestlevel, amr_module::nghost, amr_module::node, gauges_module::num_gauges, amr_module::possk, qad(), amr_module::rnode, amr_module::rvol, amr_module::rvoll, stepgrid(), stepgrid_dimsplit(), amr_module::store1, amr_module::store2, amr_module::storeaux, amr_module::timemult, and gauges_module::update_gauges().

Referenced by advanc().
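
Note that mitot and mjtot must include the ghost-cell padding: the flux arrays fp, fm, gp, gm declared below are automatic (stack) arrays sized by them. A minimal call sketch, mirroring the loop body in advanc() above:

c     interior extent of grid mptr from its node descriptor
      nx    = node(ndihi,mptr) - node(ndilo,mptr) + 1
      ny    = node(ndjhi,mptr) - node(ndjlo,mptr) + 1
c     pad with nghost ghost cells on each side
      mitot = nx + 2*nghost
      mjtot = ny + 2*nghost
      call par_advanc(mptr, mitot, mjtot, nvar, naux, dtnew)
c     the caller folds dtnew into its running minimum of stable
c     time steps (see the 'newdt' critical section in advanc)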

171 c
172  use amr_module
173  use gauges_module, only: update_gauges, num_gauges
174  implicit double precision (a-h,o-z)
175 
176 
177  integer omp_get_thread_num, omp_get_max_threads
178  integer mythread/0/, maxthreads/1/
179 
180  double precision fp(nvar,mitot,mjtot),fm(nvar,mitot,mjtot)
181  double precision gp(nvar,mitot,mjtot),gm(nvar,mitot,mjtot)
182 
183 
184 c
185 c :::::::::::::: PAR_ADVANC :::::::::::::::::::::::::::::::::::::::::::
186 c integrate this grid. grids are done in parallel.
187 c extra subr. used to allow for stack based allocation of
188 c flux arrays. They are only needed temporarily. If used alloc
189 c array for them it has too long a lendim, makes too big
190 c a checkpoint file, and is a big critical section.
191 c :::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
192 c
193  level = node(nestlevel,mptr)
194  hx = hxposs(level)
195  hy = hyposs(level)
196  delt = possk(level)
197  nx = node(ndihi,mptr) - node(ndilo,mptr) + 1
198  ny = node(ndjhi,mptr) - node(ndjlo,mptr) + 1
199  time = rnode(timemult,mptr)
200 
201 !$ mythread = omp_get_thread_num()
202 
203  locold = node(store2, mptr)
204  locnew = node(store1, mptr)
205 
206 c
207 c copy old soln. values into next time step's soln. values
208 c since integrator will overwrite it. only for grids not at
209 c the finest level. finest level grids do not maintain copies
210 c of old and new time solution values.
211 c
212  if (level .lt. mxnest) then
213  ntot = mitot * mjtot * nvar
214 cdir$ ivdep
215  do 10 i = 1, ntot
216  10 alloc(locold + i - 1) = alloc(locnew + i - 1)
217  endif
218 c
219  xlow = rnode(cornxlo,mptr) - nghost*hx
220  ylow = rnode(cornylo,mptr) - nghost*hy
221 
222 !$OMP CRITICAL(rv)
223  rvol = rvol + nx * ny
224  rvoll(level) = rvoll(level) + nx * ny
225 !$OMP END CRITICAL(rv)
226 
227 
228  locaux = node(storeaux,mptr)
229 c
230  if (node(ffluxptr,mptr) .ne. 0) then
231  lenbc = 2*(nx/intratx(level-1)+ny/intraty(level-1))
232  locsvf = node(ffluxptr,mptr)
233  locsvq = locsvf + nvar*lenbc
234  locx1d = locsvq + nvar*lenbc
235  call qad(alloc(locnew),mitot,mjtot,nvar,
236  1 alloc(locsvf),alloc(locsvq),lenbc,
237  2 intratx(level-1),intraty(level-1),hx,hy,
238  3 naux,alloc(locaux),alloc(locx1d),delt,mptr)
239  endif
240 
241 c # See if the grid about to be advanced has gauge data to output.
242 c # This corresponds to previous time step, but output done
243 c # now to make linear interpolation easier, since grid
244 c # now has boundary conditions filled in.
245 
246 c should change the way print_gauges does io - right now is critical section
247 c no more, each gauge has own array.
248 
249  if (num_gauges > 0) then
250  call update_gauges(alloc(locnew:locnew+nvar*mitot*mjtot),
251  . alloc(locaux:locaux+nvar*mitot*mjtot),
252  . xlow,ylow,nvar,mitot,mjtot,naux,mptr)
253  endif
254 
255 c
256  if (dimensional_split .eq. 0) then
257 c # Unsplit method
258  call stepgrid(alloc(locnew),fm,fp,gm,gp,
259  2 mitot,mjtot,nghost,
260  3 delt,dtnew,hx,hy,nvar,
261  4 xlow,ylow,time,mptr,naux,alloc(locaux))
262  else if (dimensional_split .eq. 1) then
263 c # Godunov splitting
264  call stepgrid_dimsplit(alloc(locnew),fm,fp,gm,gp,
265  2 mitot,mjtot,nghost,
266  3 delt,dtnew,hx,hy,nvar,
267  4 xlow,ylow,time,mptr,naux,alloc(locaux))
268  else
269 c # should never get here due to check in amr2
270  write(6,*) '*** Strang splitting not supported'
271  stop
272  endif
273 
274  if (node(cfluxptr,mptr) .ne. 0)
275  2 call fluxsv(mptr,fm,fp,gm,gp,
276  3 alloc(node(cfluxptr,mptr)),mitot,mjtot,
277  4 nvar,listsp(level),delt,hx,hy)
278  if (node(ffluxptr,mptr) .ne. 0) then
279  lenbc = 2*(nx/intratx(level-1)+ny/intraty(level-1))
280  locsvf = node(ffluxptr,mptr)
281  call fluxad(fm,fp,gm,gp,
282  2 alloc(locsvf),mptr,mitot,mjtot,nvar,
283  4 lenbc,intratx(level-1),intraty(level-1),
284  5 nghost,delt,hx,hy)
285  endif
286 c
287 c write(outunit,969) mythread,delt, dtnew
288 c969 format(" thread ",i4," updated by ",e15.7, " new dt ",e15.7)
289  rnode(timemult,mptr) = rnode(timemult,mptr)+delt
290 c
291  return

◆ prepgrids()

subroutine prepgrids (integer, dimension(num) listgrids, num, level)

Definition at line 147 of file advanc.f.

References amr_module::levelptr, amr_module::lstart, and amr_module::node.

Referenced by spest2().
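
The routine flattens the linked list of grids at a level (rooted at lstart(level) and chained through node(levelptr, mptr)) into a plain array that is convenient to index from a parallel loop. A usage sketch, matching the commented-out call in advanc():

      integer listgrids(numgrids(level))
c     fill listgrids(1:numgrids(level)) with the grid indices at
c     this level, in linked-list order
      call prepgrids(listgrids, numgrids(level), level)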

147 
148  use amr_module
149  implicit double precision (a-h,o-z)
150  integer listgrids(num)
151 
152  mptr = lstart(level)
153  do j = 1, num
154  listgrids(j) = mptr
155  mptr = node(levelptr, mptr)
156  end do
157 
158  if (mptr .ne. 0) then
159  write(*,*)" Error in routine setting up grid array "
160  stop
161  endif
162 
163  return