1 subroutine itrdrv (y, ac, 2 & uold, x, 3 & iBC, BC, 4 & iper, ilwork, shp, 5 & shgl, shpb, shglb, 6 & ifath, velbar, nsons ) 7c 8c---------------------------------------------------------------------- 9c 10c This iterative driver is the semi-discrete, predictor multi-corrector 11c algorithm. It contains the Hulbert Generalized Alpha method which 12c is 2nd order accurate for Rho_inf from 0 to 1. The method can be 13c made first-order accurate by setting Rho_inf=-1. It uses CGP and 14c GMRES iterative solvers. 15c 16c working arrays: 17c y (nshg,ndof) : Y variables 18c x (nshg,nsd) : node coordinates 19c iBC (nshg) : BC codes 20c BC (nshg,ndofBC) : BC constraint parameters 21c iper (nshg) : periodicity table 22c 23c 24c Zdenek Johan, Winter 1991. (Fortran 90) 25c Alberto Figueroa, Winter 2004. CMM-FSI 26c Irene Vignon, Fall 2004. Impedance BC 27c---------------------------------------------------------------------- 28c 29 use pvsQbi !gives us splag (the spmass at the end of this run 30 use specialBC !gives us itvn 31 use timedata !allows collection of time series 32 use convolImpFlow !for Imp bc 33 use convolRCRFlow !for RCR bc 34 use turbsa ! 
used to access d2wall 35 use wallData 36 use fncorpmod 37 use solvedata 38 use iso_c_binding 39 40c use readarrays !reads in uold and acold 41 42 include "common.h" 43 include "mpif.h" 44 include "auxmpi.h" 45#ifdef HAVE_SVLS 46 include "svLS.h" 47#endif 48#if !defined(HAVE_SVLS) && !defined(HAVE_LESLIB) 49#error "You must enable a linear solver during cmake setup" 50#endif 51 52c 53 54 55 real*8 y(nshg,ndof), ac(nshg,ndof), 56 & yold(nshg,ndof), acold(nshg,ndof), 57 & u(nshg,nsd), uold(nshg,nsd), 58 & x(numnp,nsd), solinc(nshg,ndof), 59 & BC(nshg,ndofBC), tf(nshg,ndof), 60 & GradV(nshg,nsdsq) 61 62c 63 real*8 res(nshg,ndof) 64c 65 real*8 shp(MAXTOP,maxsh,MAXQPT), 66 & shgl(MAXTOP,nsd,maxsh,MAXQPT), 67 & shpb(MAXTOP,maxsh,MAXQPT), 68 & shglb(MAXTOP,nsd,maxsh,MAXQPT) 69c 70 integer rowp(nshg,nnz), colm(nshg+1), 71 & iBC(nshg), 72 & ilwork(nlwork), 73 & iper(nshg), ifuncs(6) 74 75 real*8 vbc_prof(nshg,3) 76 77 integer stopjob 78 character*10 cname2 79 character*5 cname 80c 81c stuff for dynamic model s.w.avg and wall model 82c 83 dimension ifath(numnp), velbar(nfath,ndof), nsons(nfath) 84 85 dimension wallubar(2),walltot(2) 86c 87 real*8 almit, alfit, gamit 88c 89 character*20 fname1,fmt1 90 character*20 fname2,fmt2 91 character*60 fnamepold, fvarts 92 character*4 fname4c ! 4 characters 93 integer iarray(50) ! integers for headers 94 integer isgn(ndof), isgng(ndof) 95 96 real*8, allocatable, dimension(:,:) :: rerr 97 real*8, allocatable, dimension(:,:) :: ybar, strain, vorticity 98 real*8, allocatable, dimension(:,:) :: wallssVec, wallssVecbar 99 100 real*8 tcorecp(2), tcorecpscal(2) 101 102 real*8, allocatable, dimension(:,:,:) :: yphbar 103 real*8 CFLworst(numel) 104 105 integer :: iv_rankpernode, iv_totnodes, iv_totcores 106 integer :: iv_node, iv_core, iv_thread 107!-------------------------------------------------------------------- 108! Setting up svLS 109#ifdef HAVE_SVLS 110 INTEGER svLS_nFaces 111 TYPE(svLS_lhsType) svLS_lhs 112 TYPE(svLS_lsType) svLS_ls 113! 
repeat for scalar solves (up to 4 at this time which is consistent with rest of PHASTA) 114 TYPE(svLS_lhsType) svLS_lhs_S(4) 115 TYPE(svLS_lsType) svLS_ls_S(4) 116#endif 117 call initmpistat() ! see bottom of code to see just how easy it is 118 119 call initmemstat() 120 121!-------------------------------------------------------------------- 122! Setting up svLS Moved down for better org 123 124c 125c only master should be verbose 126c 127 128 if(numpe.gt.0 .and. myrank.ne.master)iverbose=0 129c 130 131 lskeep=lstep 132 133 call initTimeSeries() 134c 135c.... open history and aerodynamic forces files 136c 137 if (myrank .eq. master) then 138 open (unit=ihist, file=fhist, status='unknown') 139 open (unit=iforce, file=fforce, status='unknown') 140 open (unit=76, file="fort.76", status='unknown') 141 if(numImpSrfs.gt.0 .or. numRCRSrfs.gt.0) then 142 fnamepold = 'pold' 143 fnamepold = trim(fnamepold)//trim(cname2(lstep)) 144 fnamepold = trim(fnamepold)//'.dat' 145 fnamepold = trim(fnamepold) 146 open (unit=8177, file=fnamepold, status='unknown') 147 endif 148 endif 149c 150c.... initialize 151c 152 ifuncs(:) = 0 ! func. evaluation counter 153 istep = 0 154 yold = y 155 acold = ac 156 157!!!!!!!!!!!!!!!!!!! 158!Init output fields 159!!!!!!!!!!!!!!!!!! 160 numerr=10+isurf 161 allocate(rerr(nshg,numerr)) 162 rerr = zero 163 164 if(ierrcalc.eq.1 .or. ioybar.eq.1) then ! we need ybar for error too 165 if (ivort == 1) then 166 irank2ybar=17 167 allocate(ybar(nshg,irank2ybar)) ! more space for vorticity if requested 168 else 169 irank2ybar=13 170 allocate(ybar(nshg,irank2ybar)) 171 endif 172 ybar = zero ! Initialize ybar to zero, which is essential 173 endif 174 175 if(ivort == 1) then 176 allocate(strain(nshg,6)) 177 allocate(vorticity(nshg,5)) 178 endif 179 180 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 181 allocate(wallssVec(nshg,3)) 182 if (ioybar .eq. 1) then 183 allocate(wallssVecbar(nshg,3)) 184 wallssVecbar = zero ! 
Initialization important if mean wss computed 185 endif 186 endif 187 188! both nstepsincycle and nphasesincycle needs to be set 189 if(nstepsincycle.eq.0) nphasesincycle = 0 190 if(nphasesincycle.ne.0) then 191! & allocate(yphbar(nshg,5,nphasesincycle)) 192 if (ivort == 1) then 193 irank2yphbar=15 194 allocate(yphbar(nshg,irank2yphbar,nphasesincycle)) ! more space for vorticity 195 else 196 irank2yphbar=11 197 allocate(yphbar(nshg,irank2yphbar,nphasesincycle)) 198 endif 199 yphbar = zero 200 endif 201 202!!!!!!!!!!!!!!!!!!! 203!END Init output fields 204!!!!!!!!!!!!!!!!!! 205 206 vbc_prof(:,1:3) = BC(:,3:5) 207 if(iramp.eq.1) then 208 call BCprofileInit(vbc_prof,x) 209 endif 210 211c 212c.... ---------------> initialize Equation Solver <--------------- 213c 214 call initEQS(iBC, rowp, colm,svLS_nFaces, 215 2 svLS_LHS,svLS_ls, 216 3 svLS_LHS_S,svLS_ls_S) 217c 218c... prepare lumped mass if needed 219c 220 if((flmpr.ne.0).or.(flmpl.ne.0)) call genlmass(x, shp,shgl) 221c 222c.... -----------------> End of initialization <----------------- 223c 224c.....open the necessary files to gather time series 225c 226 lstep0 = lstep+1 227 nsteprcr = nstep(1)+lstep 228c 229c.... loop through the time sequences 230c 231 232 233 do 3000 itsq = 1, ntseq 234 itseq = itsq 235 236c 237c.... 
set up the time integration parameters 238c 239 nstp = nstep(itseq) 240 nitr = niter(itseq) 241 LCtime = loctim(itseq) 242 dtol(:)= deltol(itseq,:) 243 244 call itrSetup ( y, acold ) 245 246c 247c...initialize the coefficients for the impedance convolution, 248c which are functions of alphaf so need to do it after itrSetup 249 if(numImpSrfs.gt.zero) then 250 call calcImpConvCoef (numImpSrfs, ntimeptpT) 251 endif 252c 253c...initialize the initial condition P(0)-RQ(0)-Pd(0) for RCR BC 254c need ndsurf so should be after initNABI 255 if(numRCRSrfs.gt.zero) then 256 call calcRCRic(y,nsrflistRCR,numRCRSrfs) 257 endif 258c 259c find the last solve of the flow in the step sequence so that we will 260c know when we are at/near end of step 261c 262c ilast=0 263 nitr=0 ! count number of flow solves in a step (# of iterations) 264 do i=1,seqsize 265 if(stepseq(i).eq.0) nitr=nitr+1 266 enddo 267 268 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 269 tcorecp(:) = zero ! used in solfar.f (solflow) 270 tcorecpscal(:) = zero ! used in solfar.f (solflow) 271 if(myrank.eq.0) then 272 tcorecp1 = TMRC() 273 endif 274 275c 276c.... loop through the time steps 277c 278 istop=0 279 rmub=datmat(1,2,1) 280 if(rmutarget.gt.0) then 281 rmue=rmutarget 282 else 283 rmue=datmat(1,2,1) ! keep constant 284 endif 285 286 if(iramp.eq.1) then 287 call BCprofileScale(vbc_prof,BC,yold) ! fix the yold values to the reset BC 288 isclr=1 ! fix scalar 289 do isclr=1,nsclr 290 call itrBCSclr(yold,ac,iBC,BC,iper,ilwork) 291 enddo 292 endif 293 294 do 2000 istp = 1, nstp 295 if(iramp.eq.1) 296 & call BCprofileScale(vbc_prof,BC,yold) 297 298 call rerun_check(stopjob) 299 if(myrank.eq.master) write(*,*) 300 & 'stopjob,lstep,istep', stopjob,lstep,istep 301 if(stopjob.eq.lstep) then 302 stopjob=-2 ! this is the code to finish 303 if ((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) then 304 if(myrank.eq.master) write(*,*) 305 & 'line 473 says last step written so exit' 306 goto 2002 ! 
the step was written last step so just exit 307 else 308 if(myrank.eq.master) 309 & write(*,*) 'line 473 says last step not written' 310 istep=nstp ! have to do this so that solution will be written 311 goto 2001 312 endif 313 endif 314 315c.... if we have time varying boundary conditions update the values of BC. 316c these will be for time step n+1 so use lstep+1 317c 318 if(itvn.gt.0) call BCint((lstep+1)*Delt(1), shp, shgl, 319 & shpb, shglb, x, BC, iBC) 320 321c 322c ... calculate the pressure contribution that depends on the history for the Imp. BC 323c 324 if(numImpSrfs.gt.0) then 325 call pHist(poldImp,QHistImp,ImpConvCoef, 326 & ntimeptpT,numImpSrfs) 327 endif 328c 329c ... calc the pressure contribution that depends on the history for the RCR BC 330c 331 if(numRCRSrfs.gt.0) then 332 call CalcHopRCR (Delt(itseq), lstep, numRCRSrfs) 333 call CalcRCRConvCoef(lstep,numRCRSrfs) 334 call pHist(poldRCR,QHistRCR,RCRConvCoef,nsteprcr, 335 & numRCRSrfs) 336 endif 337 338 if(iLES.gt.0) then !complicated stuff has moved to 339 !routine below 340 call lesmodels(yold, acold, shgl, shp, 341 & iper, ilwork, rowp, colm, 342 & nsons, ifath, x, 343 & iBC, BC) 344 345 346 endif 347 348c.... set traction BCs for modeled walls 349c 350 if (itwmod.ne.0) then 351 call asbwmod(yold, acold, x, BC, iBC, 352 & iper, ilwork, ifath, velbar) 353 endif 354 355c 356c.... Determine whether the vorticity field needs to be computed for this time step or not 357c 358 call seticomputevort 359c 360c.... -----------------------> predictor phase <----------------------- 361c 362 call itrPredict(yold, y, acold, ac , uold, u, iBC) 363 call itrBC (y, ac, iBC, BC, iper,ilwork) 364 365 if(nsolt.eq.1) then 366 isclr=0 367 call itrBCSclr (y, ac, iBC, BC, iper, ilwork) 368 endif 369 do isclr=1,nsclr 370 call itrBCSclr (y, ac, iBC, BC, iper, ilwork) 371 enddo 372 iter=0 373 ilss=0 ! 
this is a switch thrown on first solve of LS redistance 374 do istepc=1,seqsize 375 icode=stepseq(istepc) 376 if(mod(icode,5).eq.0) then ! this is a solve 377 isolve=icode/10 378 if(icode.eq.0) then ! flow solve (encoded as 0) 379c 380 iter = iter+1 381 ifuncs(1) = ifuncs(1) + 1 382c 383 Force(1) = zero 384 Force(2) = zero 385 Force(3) = zero 386 HFlux = zero 387 lhs = 1 - min(1,mod(ifuncs(1)-1,LHSupd(1))) 388 389 call SolFlow(y, ac, u, 390 & yold, acold, uold, 391 & x, iBC, 392 & BC, res, 393 & iper, 394 & ilwork, shp, shgl, 395 & shpb, shglb, rowp, 396 & colm, 397 & solinc, rerr, tcorecp, 398 & GradV, sumtime 399#ifdef HAVE_SVLS 400 & ,svLS_lhs, svLS_ls, svLS_nFaces) 401#else 402 & ) 403#endif 404 405 else ! scalar type solve 406 if (icode.eq.5) then ! Solve for Temperature 407 ! (encoded as (nsclr+1)*10) 408 isclr=0 409 ifuncs(2) = ifuncs(2) + 1 410 j=1 411 else ! solve a scalar (encoded at isclr*10) 412 isclr=isolve 413 ifuncs(isclr+2) = ifuncs(isclr+2) + 1 414 j=isclr+nsolt 415 if((iLSet.eq.2).and.(ilss.eq.0) 416 & .and.(isclr.eq.2)) then 417 ilss=1 ! throw switch (once per step) 418 y(:,7)=y(:,6) ! redistance field initialized 419 ac(:,7) = zero 420 call itrBCSclr ( y, ac, iBC, BC, iper, 421 & ilwork) 422c 423c....store the flow alpha, gamma parameter values and assigm them the 424c....Backward Euler parameters to solve the second levelset scalar 425c 426 alfit=alfi 427 gamit=gami 428 almit=almi 429 Deltt=Delt(1) 430 Dtglt=Dtgl 431 alfi = 1 432 gami = 1 433 almi = 1 434c Delt(1)= Deltt ! Give a pseudo time step 435 Dtgl = one / Delt(1) 436 endif ! level set eq. 2 437 endif ! 
deciding between temperature and scalar 438 439 lhs = 1 - min(1,mod(ifuncs(isclr+2)-1, 440 & LHSupd(isclr+2))) 441 442 call SolSclr(y, ac, u, 443 & yold, acold, uold, 444 & x, iBC, 445 & BC, 446 & iper, 447 & ilwork, shp, shgl, 448 & shpb, shglb, rowp, 449 & colm, 450 & solinc(1,isclr+5), tcorecpscal 451#ifdef HAVE_SVLS 452 & ,svLS_lhs_S(isclr), svLS_ls_S(isclr), svls_nfaces) 453#else 454 & ) 455#endif 456 457 458 endif ! end of scalar type solve 459 460 else ! this is an update (mod did not equal zero) 461 iupdate=icode/10 ! what to update 462 if(icode.eq.1) then !update flow 463 call itrCorrect ( y, ac, u, solinc, iBC) 464 call itrBC (y, ac, iBC, BC, iper, ilwork) 465 else ! update scalar 466 isclr=iupdate !unless 467 if(icode.eq.6) isclr=0 468 if(iRANS.lt.-100)then ! RANS 469 call itrCorrectSclrPos(y,ac,solinc(1,isclr+5)) 470 else 471 call itrCorrectSclr (y, ac, solinc(1,isclr+5)) 472 endif 473 if (ilset.eq.2 .and. isclr.eq.2) then 474 if (ivconstraint .eq. 1) then 475 call itrBCSclr ( y, ac, iBC, BC, iper, 476 & ilwork) 477c 478c ... applying the volume constraint on second level set scalar 479c 480 call solvecon (y, x, iBC, BC, 481 & iper, ilwork, shp, shgl) 482c 483 endif ! end of volume constraint calculations 484 endif ! end of redistance calculations 485c 486 call itrBCSclr ( y, ac, iBC, BC, iper, 487 & ilwork) 488 endif ! end of flow or scalar update 489 endif ! end of switch between solve or update 490 enddo ! loop over sequence in step 491c 492c 493c.... obtain the time average statistics 494c 495 if (ioform .eq. 
2) then 496 497 call stsGetStats( y, yold, ac, acold, 498 & u, uold, x, 499 & shp, shgl, shpb, shglb, 500 & iBC, BC, iper, ilwork, 501 & rowp, colm, lhsK, lhsP ) 502 endif 503 504c 505c Find the solution at the end of the timestep and move it to old 506c 507c 508c ...First to reassign the parameters for the original time integrator scheme 509c 510 if((iLSet.eq.2).and.(ilss.eq.1)) then 511 alfi =alfit 512 gami =gamit 513 almi =almit 514 Delt(1)=Deltt 515 Dtgl =Dtglt 516 endif 517 call itrUpdate( yold, acold, uold, y, ac, u) 518 call itrBC (yold, acold, iBC, BC, iper,ilwork) 519 520 istep = istep + 1 521 lstep = lstep + 1 522c 523c .. Print memory consumption on BGQ 524c 525 call printmeminfo("itrdrv"//char(0)) 526 527c 528c .. Compute vorticity 529c 530 if ( icomputevort == 1) 531 & call computeVort( vorticity, GradV,strain) 532c 533c.... update and the aerodynamic forces 534c 535 call forces ( yold, ilwork ) 536 537c 538c .. write out the instantaneous solution 539c 5402001 continue ! we could get here by 2001 label if user requested stop 541 if (((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) .or. 542 & istep.eq.nstep(itseq)) then 543 544!so that we can see progress in force file close it so that it flushes 545!and then reopen in append mode 546 547 close(iforce) 548 open (unit=iforce, file=fforce, position='append') 549 550! Call to restar() will open restart file in write mode (and not append mode) 551! that is needed as other fields are written in append mode 552 553 call restar ('out ', yold ,ac) 554 555 if(ivort == 1) then 556 call write_field(myrank,'a','vorticity',9,vorticity, 557 & 'd',nshg,5,lstep) 558 endif 559 560 call printmeminfo("itrdrv after checkpoint"//char(0)) 561 else if(stopjob.eq.-2) then 562 if(myrank.eq.master) then 563 write(*,*) 'line 755 says no write before stopping' 564 write(*,*) 'istep,nstep,irs',istep,nstep(itseq),irs 565 endif 566 endif !just the instantaneous stuff for videos 567c 568c.... 
compute the consistent boundary flux 569c 570 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 571 call Bflux ( yold, acold, uold, x, 572 & shp, shgl, shpb, 573 & shglb, ilwork, iBC, 574 & BC, iper, wallssVec) 575 endif 576 577 if(stopjob.eq.-2) goto 2003 578 579 580c 581c ... update the flow history for the impedance convolution, filter it and write it out 582c 583 if(numImpSrfs.gt.zero) then 584 call UpdHistConv(y,nsrflistImp,numImpSrfs) !uses Delt(1) 585 endif 586 587c 588c ... update the flow history for the RCR convolution 589c 590 if(numRCRSrfs.gt.zero) then 591 call UpdHistConv(y,nsrflistRCR,numRCRSrfs) !uses lstep 592 endif 593 594 595c... dump TIME SERIES 596 597 if (exts) then 598 ! Note: freq is only defined if exts is true, 599 ! i.e. if xyzts.dat is present in the #-procs_case 600 if ( mod(lstep-1,freq).eq.0) call dumpTimeSeries() 601 endif 602 603 if((irscale.ge.0).or.(itwmod.gt.0)) 604 & call getvel (yold, ilwork, iBC, 605 & nsons, ifath, velbar) 606 607 if((irscale.ge.0).and.(myrank.eq.master)) then 608 call genscale(yold, x, iper, 609 & iBC, ifath, velbar, 610 & nsons) 611 endif 612c 613c.... print out results. 614c 615 ntoutv=max(ntout,100) ! velb is not needed so often 616 if ((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) then 617 if( (mod(lstep, ntoutv) .eq. 0) .and. 618 & ((irscale.ge.0).or.(itwmod.gt.0) .or. 619 & ((nsonmax.eq.1).and.(iLES.gt.0)))) 620 & call rwvelb ('out ', velbar ,ifail) 621 endif 622c 623c.... end of the NSTEP and NTSEQ loops 624c 625 626 627c 628c.... -------------------> error calculation <----------------- 629c 630 if(ierrcalc.eq.1 .or. ioybar.eq.1) 631 & call collectErrorYbar(ybar,yold,wallssVec,wallssVecBar, 632 & vorticity,yphbar,rerr,irank2ybar,irank2yphbar) 633 2003 continue ! we get here if stopjob equals lstep and this jumped over 634! the statistics computation because we have no new data to average in 635! rather we are just trying to output the last state that was not already 636! written 637c 638c.... 
----------------------> Complete Restart Processing <---------------------- 639c 640! for now it is the same frequency but need to change this 641! soon.... but don't forget to change the field counter in 642! new_interface.cc 643! 644 if (((irs .ge. 1) .and. (mod(lstep, ntout) .eq. 0)) .or. 645 & istep.eq.nstep(itseq)) then 646 647 lesId = numeqns(1) 648 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 649 if(myrank.eq.0) then 650 tcormr1 = TMRC() 651 endif 652 if((nsolflow.eq.1).and.(ipresPrjFlag.eq.1)) then 653#ifdef HAVE_LESLIB 654 call saveLesRestart( lesId, aperm , nshg, myrank, lstep, 655 & nPermDims ) 656 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 657 if(myrank.eq.0) then 658 tcormr2 = TMRC() 659 write(6,*) 'call saveLesRestart for projection and'// 660 & 'pressure projection vectors', tcormr2-tcormr1 661 endif 662#endif 663 endif 664 665 if(ierrcalc.eq.1) then 666c 667c.....smooth the error indicators 668c 669 do i=1,ierrsmooth 670 call errsmooth( rerr, x, iper, ilwork, shp, shgl, iBC ) 671 end do 672 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 673 if(myrank.eq.0) then 674 tcormr1 = TMRC() 675 endif 676 call write_error(myrank, lstep, nshg, 10, rerr ) 677 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 678 if(myrank.eq.0) then 679 tcormr2 = TMRC() 680 write(6,*) 'Time to write the error fields to the disks', 681 & tcormr2-tcormr1 682 endif 683 endif ! ierrcalc 684 685 if(ioybar.eq.1) then 686 if(ivort == 1) then 687 call write_field(myrank,'a','ybar',4, 688 & ybar,'d',nshg,17,lstep) 689 else 690 call write_field(myrank,'a','ybar',4, 691 & ybar,'d',nshg,13,lstep) 692 endif 693 694 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 695 call write_field(myrank,'a','wssbar',6, 696 & wallssVecBar,'d',nshg,3,lstep) 697 endif 698 699 if(nphasesincycle .gt. 
0) then 700 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 701 if(myrank.eq.0) then 702 tcormr1 = TMRC() 703 endif 704 do iphase=1,nphasesincycle 705 if(ivort == 1) then 706 call write_phavg2(myrank,'a','phase_average',13,iphase, 707 & nphasesincycle,yphbar(:,:,iphase),'d',nshg,15,lstep) 708 else 709 call write_phavg2(myrank,'a','phase_average',13,iphase, 710 & nphasesincycle,yphbar(:,:,iphase),'d',nshg,11,lstep) 711 endif 712 end do 713 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 714 if(myrank.eq.0) then 715 tcormr2 = TMRC() 716 write(6,*) 'write all phase avg to the disks = ', 717 & tcormr2-tcormr1 718 endif 719 endif !nphasesincyle 720 endif !ioybar 721 722 if(iRANS.lt.0) then 723 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 724 if(myrank.eq.0) then 725 tcormr1 = TMRC() 726 endif 727 call write_field(myrank,'a','dwal',4,d2wall,'d', 728 & nshg,1,lstep) 729 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 730 if(myrank.eq.0) then 731 tcormr2 = TMRC() 732 write(6,*) 'Time to write dwal to the disks = ', 733 & tcormr2-tcormr1 734 endif 735 endif !iRANS 736 737 endif ! write out complete restart state 738 !next 2 lines are two ways to end early 739 if(stopjob.eq.-2) goto 2002 740 if(istop.eq.1000) goto 2002 ! stop when delta small (see rstatic) 741 2000 continue 742 2002 continue 743 744! done with time stepping so deallocate fields already written 745! 746 747 if(ioybar.eq.1) then 748 deallocate(ybar) 749 if(abs(itwmod).ne.1 .and. iowflux.eq.1) then 750 deallocate(wallssVecbar) 751 endif 752 if(nphasesincycle .gt. 0) then 753 deallocate(yphbar) 754 endif !nphasesincyle 755 endif !ioybar 756 if(ivort == 1) then 757 deallocate(strain,vorticity) 758 endif 759 if(abs(itwmod).ne.1 .and. 
iowflux.eq.1) then 760 deallocate(wallssVec) 761 endif 762 if(iRANS.lt.0) then 763 deallocate(d2wall) 764 endif 765 766 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 767 if(myrank.eq.0) then 768 tcorecp2 = TMRC() 769 write(6,*) 'T(core) cpu = ',tcorecp2-tcorecp1 770 write(6,*) '(Elm. form.',tcorecp(1), 771 & ',Lin. alg. sol.',tcorecp(2),')' 772 write(6,*) '(Elm. form. Scal.',tcorecpscal(1), 773 & ',Lin. alg. sol. Scal.',tcorecpscal(2),')' 774 write(6,*) '' 775 776 endif 777 778 call print_system_stats(tcorecp, tcorecpscal) 779 call print_mesh_stats() 780 call print_mpi_stats() 781 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 782! return 783c call MPI_Finalize() 784c call MPI_ABORT(MPI_COMM_WORLD, ierr) 785 786 call destroyWallData 787 call destroyfncorp 788 789 3000 continue 790 791 792c 793c.... close history and aerodynamic forces files 794c 795 if (myrank .eq. master) then 796! close (ihist) 797 close (iforce) 798 close(76) 799 if(numImpSrfs.gt.0 .or. numRCRSrfs.gt.0) then 800 close (8177) 801 endif 802 endif 803c 804c.... close varts file for probes 805c 806 call finalizeTimeSeries() 807 808 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 809 if(myrank.eq.0) then 810 write(*,*) 'itrdrv - done with aerodynamic forces' 811 endif 812 813 do isrf = 0,MAXSURF 814 if ( nsrflist(isrf).ne.0 .and. 815 & myrank.eq.irankfilesforce(isrf)) then 816 iunit=60+isrf 817 close(iunit) 818 endif 819 enddo 820 821 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 822 if(myrank.eq.0) then 823 write(*,*) 'itrdrv - done with MAXSURF' 824 endif 825 826 827 5 format(1X,F15.10,3X,F15.10,3X,F15.10,3X,F15.10) 828 444 format(6(2x,e14.7)) 829c 830c.... 
end 831c 832 if(nsolflow.eq.1) then 833 call dsdF 834 endif 835 if((nsclr+nsolt).gt.0) then 836 call dsdS 837 endif 838 839 if(iabc==1) deallocate(acs) 840 841 if (numpe > 1) call MPI_BARRIER(MPI_COMM_WORLD, ierr) 842 if(myrank.eq.0) then 843 write(*,*) 'itrdrv - done - BACK TO process.f' 844 endif 845 846 return 847 end 848 849 subroutine lesmodels(y, ac, shgl, shp, 850 & iper, ilwork, rowp, colm, 851 & nsons, ifath, x, 852 & iBC, BC) 853 854 include "common.h" 855 856 real*8 y(nshg,ndof), ac(nshg,ndof), 857 & x(numnp,nsd), 858 & BC(nshg,ndofBC) 859 real*8 shp(MAXTOP,maxsh,MAXQPT), 860 & shgl(MAXTOP,nsd,maxsh,MAXQPT) 861 862c 863 integer rowp(nshg,nnz), colm(nshg+1), 864 & iBC(nshg), 865 & ilwork(nlwork), 866 & iper(nshg) 867 dimension ifath(numnp), nsons(nfath) 868 869 real*8, allocatable, dimension(:) :: fwr2,fwr3,fwr4 870 real*8, allocatable, dimension(:) :: stabdis,cdelsq1 871 real*8, allocatable, dimension(:,:) :: xavegt, xavegt2,xavegt3 872 873 if( (iLES.gt.1) ) then ! Allocate Stuff for advanced LES models 874 allocate (fwr2(nshg)) 875 allocate (fwr3(nshg)) 876 allocate (fwr4(nshg)) 877 allocate (xavegt(nfath,12)) 878 allocate (xavegt2(nfath,12)) 879 allocate (xavegt3(nfath,12)) 880 allocate (stabdis(nfath)) 881 endif 882 883c.... get dynamic model coefficient 884c 885 ilesmod=iLES/10 886c 887c digit bit set filter rule, 10 bit set model 888c 889 if (ilesmod.eq.0) then ! 0 < iLES< 10 => dyn. model calculated 890 ! at nodes based on discrete filtering 891 892 893 if(isubmod.eq.2) then 894 call SUPGdis(y, ac, shgl, shp, 895 & iper, ilwork, 896 & nsons, ifath, x, 897 & iBC, BC, stabdis, xavegt3) 898 endif 899 900 if( ((isubmod.eq.0).or.(isubmod.eq.2)))then ! If no 901 ! sub-model 902 ! or SUPG 903 ! model wanted 904 905 if(i2filt.eq.0)then ! If simple filter 906 907 if(modlstats .eq. 0) then ! If no model stats wanted 908 call getdmc (y, shgl, shp, 909 & iper, ilwork, nsons, 910 & ifath, x) 911 else ! 
else get model stats 912 call stdfdmc (y, shgl, shp, 913 & iper, ilwork, nsons, 914 & ifath, x) 915 endif ! end of stats if statement 916 917 else ! else if twice filtering 918 919 call widefdmc(y, shgl, shp, 920 & iper, ilwork, nsons, 921 & ifath, x) 922 923 924 endif ! end of simple filter if statement 925 926 endif ! end of SUPG or no sub-model if statement 927 928 929 if( (isubmod.eq.1) ) then ! If DFWR sub-model wanted 930 call cdelBHsq (y, shgl, shp, 931 & iper, ilwork, nsons, 932 & ifath, x, cdelsq1) 933 call FiltRat (y, shgl, shp, 934 & iper, ilwork, nsons, 935 & ifath, x, cdelsq1, 936 & fwr4, fwr3) 937 938 939 if (i2filt.eq.0) then ! If simple filter wanted 940 call DFWRsfdmc(y, shgl, shp, 941 & iper, ilwork, nsons, 942 & ifath, x, fwr2, fwr3) 943 else ! else if twice filtering wanted 944 call DFWRwfdmc(y, shgl, shp, 945 & iper, ilwork, nsons, 946 & ifath, x, fwr4, fwr4) 947 endif ! end of simple filter if statement 948 949 endif ! end of DFWR sub-model if statement 950 951 if( (isubmod.eq.2) )then ! If SUPG sub-model wanted 952 call dmcSUPG (y, ac, shgl, 953 & shp, iper, ilwork, 954 & nsons, ifath, x, 955 & iBC, BC, rowp, colm, 956 & xavegt2, stabdis) 957 endif 958 959 if(idis.eq.1)then ! If SUPG/Model dissipation wanted 960 call ediss (y, ac, shgl, 961 & shp, iper, ilwork, 962 & nsons, ifath, x, 963 & iBC, BC, xavegt) 964 endif 965 966 endif ! end of ilesmod 967 968 if (ilesmod .eq. 1) then ! 10 < iLES < 20 => dynamic-mixed 969 ! at nodes based on discrete filtering 970 call bardmc (y, shgl, shp, 971 & iper, ilwork, 972 & nsons, ifath, x) 973 endif 974 975 if (ilesmod .eq. 2) then ! 20 < iLES < 30 => dynamic at quad 976 ! pts based on lumped projection filt. 977 978 if(isubmod.eq.0)then 979 call projdmc (y, shgl, shp, 980 & iper, ilwork, x) 981 else 982 call cpjdmcnoi (y, shgl, shp, 983 & iper, ilwork, x, 984 & rowp, colm, 985 & iBC, BC) 986 endif 987 988 endif 989 990 if( (iLES.gt.1) ) then ! 
Deallocate Stuff for advanced LES models 991 deallocate (fwr2) 992 deallocate (fwr3) 993 deallocate (fwr4) 994 deallocate (xavegt) 995 deallocate (xavegt2) 996 deallocate (xavegt3) 997 deallocate (stabdis) 998 endif 999 return 1000 end 1001 1002c 1003c...initialize the coefficients for the impedance convolution 1004c 1005 subroutine CalcImpConvCoef (numISrfs, numTpoints) 1006 1007 use convolImpFlow !uses flow history and impedance for convolution 1008 1009 include "common.h" !for alfi 1010 1011 integer numISrfs, numTpoints 1012 1013 allocate (ConvCoef(numTpoints+2,3)) !same time discret. for all imp. BC 1014 do j=1,numTpoints+2 1015 ConvCoef(j,:)=0.5/numTpoints !dt/2 divided by period T=N*dt 1016 ConvCoef(j,1)=ConvCoef(j,1)*(1.0-alfi)*(1.0-alfi) 1017 ConvCoef(j,2)=ConvCoef(j,2)*(1.0+2*alfi*(1.0-alfi)) 1018 ConvCoef(j,3)=ConvCoef(j,3)*alfi*alfi 1019 enddo 1020 ConvCoef(1,2)=zero 1021 ConvCoef(1,3)=zero 1022 ConvCoef(2,3)=zero 1023 ConvCoef(numTpoints+1,1)=zero 1024 ConvCoef(numTpoints+2,2)=zero 1025 ConvCoef(numTpoints+2,1)=zero 1026c 1027c...calculate the coefficients for the impedance convolution 1028c 1029 allocate (ImpConvCoef(numTpoints+2,numISrfs)) 1030 1031c..coefficients below assume Q linear in time step, Z constant 1032c do j=3,numTpoints 1033c ImpConvCoef(j,:) = ValueListImp(j-1,:)*ConvCoef(j,3) 1034c & + ValueListImp(j,:)*ConvCoef(j,2) 1035c & + ValueListImp(j+1,:)*ConvCoef(j,1) 1036c enddo 1037c ImpConvCoef(1,:) = ValueListImp(2,:)*ConvCoef(1,1) 1038c ImpConvCoef(2,:) = ValueListImp(2,:)*ConvCoef(2,2) 1039c & + ValueListImp(3,:)*ConvCoef(2,1) 1040c ImpConvCoef(numTpoints+1,:) = 1041c & ValueListImp(numTpoints,:)*ConvCoef(numTpoints+1,3) 1042c & + ValueListImp(numTpoints+1,:)*ConvCoef(numTpoints+1,2) 1043c ImpConvCoef(numTpoints+2,:) = 1044c & ValueListImp(numTpoints+1,:)*ConvCoef(numTpoints+2,3) 1045 1046c..try easiest convolution Q and Z constant per time step 1047 do j=3,numTpoints+1 1048 ImpConvCoef(j,:) = ValueListImp(j-1,:)/numTpoints 1049 enddo 
1050 ImpConvCoef(1,:) =zero 1051 ImpConvCoef(2,:) =zero 1052 ImpConvCoef(numTpoints+2,:) = 1053 & ValueListImp(numTpoints+1,:)/numTpoints 1054c compensate for yalpha passed not y in Elmgmr() 1055 ImpConvCoef(numTpoints+1,:)= ImpConvCoef(numTpoints+1,:) 1056 & - ImpConvCoef(numTpoints+2,:)*(1.0-alfi)/alfi 1057 ImpConvCoef(numTpoints+2,:)= ImpConvCoef(numTpoints+2,:)/alfi 1058 return 1059 end 1060 1061c 1062c ... update the flow rate history for the impedance convolution, filter it and write it out 1063c 1064 subroutine UpdHistConv(y,nsrfIdList,numSrfs) 1065 1066 use convolImpFlow !brings ntimeptpT, QHistImp, QHistTry, QHistTryF, numImpSrfs 1067 use convolRCRFlow !brings QHistRCR, numRCRSrfs 1068 1069 include "common.h" 1070 1071 integer nsrfIdList(0:MAXSURF), numSrfs 1072 character*20 fname1 1073 character*10 cname2 1074 character*5 cname 1075 real*8 y(nshg,3) !velocity at time n+1 1076 real*8 NewQ(0:MAXSURF) !temporary unknown for the flow rate 1077 !that needs to be added to the flow history 1078 1079 call GetFlowQ(NewQ,y,nsrfIdList,numSrfs) !new flow at time n+1 1080c 1081c... for imp BC: shift QHist, add new constribution, filter and write out 1082c 1083 if(numImpSrfs.gt.zero) then 1084 do j=1, ntimeptpT 1085 QHistImp(j,1:numSrfs)=QHistImp(j+1,1:numSrfs) 1086 enddo 1087 QHistImp(ntimeptpT+1,1:numSrfs) = NewQ(1:numSrfs) 1088 1089c 1090c....filter the flow rate history 1091c 1092 cutfreq = 10 !hardcoded cutting frequency of the filter 1093 do j=1, ntimeptpT 1094 QHistTry(j,:)=QHistImp(j+1,:) 1095 enddo 1096 call Filter(QHistTryF,QHistTry,ntimeptpT,Delt(1),cutfreq) 1097c.... if no filter applied then uncomment next three lines 1098c do j=1, ntimeptpT 1099c QHistTryF(j,:)=QHistTry(j,:) 1100c enddo 1101 1102c QHistImp(1,:)=zero ! why do we do this? for beta(1,:) = zero it does not really matters 1103 do j=1, ntimeptpT 1104 QHistImp(j+1,:)=QHistTryF(j,:) 1105 enddo 1106c 1107c.... write out the new history of flow rates to Qhistor.dat 1108c 1109 if (((irs .ge. 
1) .and. ((mod(lstep, ntout) .eq. 0) .or. 1110 & (istep .eq. nstep(1)))) .and. 1111 & (myrank .eq. master)) then 1112 open(unit=816, file='Qhistor.dat',status='replace') 1113 write(816,*) ntimeptpT 1114 do j=1,ntimeptpT+1 1115 write(816,*) (QHistImp(j,n),n=1, numSrfs) 1116 enddo 1117 close(816) 1118c... write out a copy with step number to be able to restart 1119 fname1 = 'Qhistor' 1120 fname1 = trim(fname1)//trim(cname2(lstep))//'.dat' 1121 open(unit=8166,file=trim(fname1),status='unknown') 1122 write(8166,*) ntimeptpT 1123 do j=1,ntimeptpT+1 1124 write(8166,*) (QHistImp(j,n),n=1, numSrfs) 1125 enddo 1126 close(8166) 1127 endif 1128 endif 1129 1130c 1131c... for RCR bc just add the new contribution 1132c 1133 if(numRCRSrfs.gt.zero) then 1134 QHistRCR(lstep+1,1:numSrfs) = NewQ(1:numSrfs) 1135c 1136c.... write out the new history of flow rates to Qhistor.dat 1137c 1138 if ((irs .ge. 1) .and. (myrank .eq. master)) then 1139 if(istep.eq.1) then 1140 open(unit=816,file='Qhistor.dat',status='unknown') 1141 else 1142 open(unit=816,file='Qhistor.dat',position='append') 1143 endif 1144 if(istep.eq.1) then 1145 do j=1,lstep 1146 write(816,*) j, (QHistRCR(j,n),n=1,numSrfs) ! read from file of previous run 1147 enddo 1148 endif 1149 write(816,*) lstep+1, (QHistRCR(lstep+1,n),n=1, numSrfs) 1150 close(816) 1151c... write out a copy with step number to be able to restart 1152 if (((irs .ge. 1) .and. ((mod(lstep, ntout) .eq. 0) .or. 1153 & (istep .eq. nstep(1)))) .and. 1154 & (myrank .eq. 
master)) then 1155 fname1 = 'Qhistor' 1156 fname1 = trim(fname1)//trim(cname2(lstep))//'.dat' 1157 open(unit=8166,file=trim(fname1),status='unknown') 1158 write(8166,*) lstep+1 1159 do j=1,lstep+1 1160 write(8166,*) (QHistRCR(j,n),n=1, numSrfs) 1161 enddo 1162 close(8166) 1163 endif 1164 endif 1165 endif 1166 1167 return 1168 end 1169 1170c 1171c...calculate the time varying coefficients for the RCR convolution 1172c 1173 subroutine CalcRCRConvCoef (stepn, numSrfs) 1174 1175 use convolRCRFlow !brings in ValueListRCR, dtRCR 1176 1177 include "common.h" !brings alfi 1178 1179 integer numSrfs, stepn 1180 1181 RCRConvCoef = zero 1182 if (stepn .eq. 0) then 1183 RCRConvCoef(1,:) = ValueListRCR(1,:)*(1.0-alfi) + 1184 & ValueListRCR(3,:)*(-alfi + 1.0 + 1/dtRCR(:) 1185 & - exp(-alfi*dtRCR(:))*(1 + 1/dtRCR(:))) 1186 RCRConvCoef(2,:) = ValueListRCR(1,:)*alfi 1187 & + ValueListRCR(3,:) 1188 & *(alfi - 1/dtRCR(:) + exp(-alfi*dtRCR(:))/dtRCR(:)) 1189 endif 1190 if (stepn .ge. 1) then 1191 RCRConvCoef(1,:) =-ValueListRCR(3,:)*exp(-dtRCR(:)*(stepn+alfi)) 1192 & *(1 + (1 - exp(dtRCR(:)))/dtRCR(:)) 1193 RCRConvCoef(stepn+1,:) = ValueListRCR(1,:)*(1-alfi) 1194 & - ValueListRCR(3,:)*(alfi - 1 - 1/dtRCR(:) 1195 & + exp(-alfi*dtRCR(:))/dtRCR(:)*(2 - exp(-dtRCR(:)))) 1196 RCRConvCoef(stepn+2,:) = ValueListRCR(1,:)*alfi 1197 & + ValueListRCR(3,:) 1198 & *(alfi - 1/dtRCR(:) + exp(-alfi*dtRCR(:))/dtRCR(:)) 1199 endif 1200 if (stepn .ge. 
2) then 1201 do j=2,stepn 1202 RCRConvCoef(j,:) = ValueListRCR(3,:)/dtRCR(:)* 1203 & exp(-dtRCR(:)*(stepn + alfi + 2 - j))* 1204 & (1 - exp(dtRCR(:)))**2 1205 enddo 1206 endif 1207 1208c compensate for yalpha passed not y in Elmgmr() 1209 RCRConvCoef(stepn+1,:)= RCRConvCoef(stepn+1,:) 1210 & - RCRConvCoef(stepn+2,:)*(1.0-alfi)/alfi 1211 RCRConvCoef(stepn+2,:)= RCRConvCoef(stepn+2,:)/alfi 1212 1213 return 1214 end 1215 1216c 1217c...calculate the time dependent H operator for the RCR convolution 1218c 1219 subroutine CalcHopRCR (timestepRCR, stepn, numSrfs) 1220 1221 use convolRCRFlow !brings in HopRCR, dtRCR 1222 1223 include "common.h" 1224 1225 integer numSrfs, stepn 1226 real*8 PdistCur(0:MAXSURF), timestepRCR 1227 1228 HopRCR=zero 1229 call RCRint(timestepRCR*(stepn + alfi),PdistCur) 1230 HopRCR(1:numSrfs) = RCRic(1:numSrfs) 1231 & *exp(-dtRCR(1:numSrfs)*(stepn + alfi)) + PdistCur(1:numSrfs) 1232 return 1233 end 1234c 1235c ... initialize the influence of the initial conditions for the RCR BC 1236c 1237 subroutine calcRCRic(y,srfIdList,numSrfs) 1238 1239 use convolRCRFlow !brings RCRic, ValueListRCR, ValuePdist 1240 1241 include "common.h" 1242 1243 integer srfIdList(0:MAXSURF), numSrfs, irankCoupled 1244 real*8 y(nshg,4) !need velocity and pressure 1245 real*8 Qini(0:MAXSURF) !initial flow rate 1246 real*8 PdistIni(0:MAXSURF) !initial distal pressure 1247 real*8 Pini(0:MAXSURF),CoupleArea(0:MAXSURF) ! initial pressure 1248 real*8 VelOnly(nshg,3), POnly(nshg) 1249 1250 allocate (RCRic(0:MAXSURF)) 1251 1252 if(lstep.eq.0) then 1253 VelOnly(:,1:3)=y(:,1:3) 1254 call GetFlowQ(Qini,VelOnly,srfIdList,numSrfs) !get initial flow 1255 QHistRCR(1,1:numSrfs)=Qini(1:numSrfs) !initialize QHistRCR 1256 POnly(:)=y(:,4) ! pressure 1257 call integrScalar(Pini,POnly,srfIdList,numSrfs) !get initial pressure integral 1258 POnly(:)=one ! 
one to get area 1259 call integrScalar(CoupleArea,POnly,srfIdList,numSrfs) !get surf area 1260 Pini(1:numSrfs) = Pini(1:numSrfs)/CoupleArea(1:numSrfs) 1261 else 1262 Qini(1:numSrfs)=QHistRCR(1,1:numSrfs) 1263 Pini(1:numSrfs)=zero ! hack 1264 endif 1265 call RCRint(istep,PdistIni) !get initial distal P (use istep) 1266 RCRic(1:numSrfs) = Pini(1:numSrfs) 1267 & - ValueListRCR(1,:)*Qini(1:numSrfs)-PdistIni(1:numSrfs) 1268 return 1269 end 1270 1271c.........function that integrates a scalar over a boundary 1272 subroutine integrScalar(scalInt,scal,srfIdList,numSrfs) 1273 1274 use pvsQbi !brings ndsurf, NASC 1275 1276 include "common.h" 1277 include "mpif.h" 1278 1279 integer srfIdList(0:MAXSURF), numSrfs, irankCoupled, i, k 1280 real*8 scal(nshg), scalInt(0:MAXSURF), scalIntProc(0:MAXSURF) 1281 1282 scalIntProc = zero 1283 do i = 1,nshg 1284 if(numSrfs.gt.zero) then 1285 do k = 1,numSrfs 1286 irankCoupled = 0 1287 if (srfIdList(k).eq.ndsurf(i)) then 1288 irankCoupled=k 1289 scalIntProc(irankCoupled) = scalIntProc(irankCoupled) 1290 & + NASC(i)*scal(i) 1291 exit 1292 endif 1293 enddo 1294 endif 1295 enddo 1296c 1297c at this point, each scalint has its "nodes" contributions to the scalar 1298c accumulated into scalIntProc. Note, because NASC is on processor this 1299c will NOT be the scalar for the surface yet 1300c 1301c.... 
c.... reduce the integrated scalar for each surface, push on scalInt
c
      npars=MAXSURF+1
      call MPI_ALLREDUCE (scalIntProc, scalInt(:), npars,
     &        MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)

      return
      end
c
c.... print a "Time to write <key> <mode> = <t>" message on stdout
c
      subroutine writeTimingMessage(key,iomode,timing)
      use iso_c_binding
      use phstr
      implicit none

      character(len=*) :: key        ! label of the field written
      integer :: iomode              ! -1 selects stream, else disk
      real*8 :: timing               ! elapsed time
      character(len=1024) :: timing_msg
      character(len=*), parameter ::
     &   streamModeString = c_char_"stream"//c_null_char,
     &   fileModeString = c_char_"disk"//c_null_char

      timing_msg = c_char_"Time to write "//c_null_char
      call phstr_appendStr(timing_msg,key)
      if ( iomode .eq. -1 ) then
        call phstr_appendStr(timing_msg, streamModeString)
      else
        call phstr_appendStr(timing_msg, fileModeString)
      endif
      call phstr_appendStr(timing_msg, c_char_' = '//c_null_char)
      call phstr_appendDbl(timing_msg, timing)
      write(6,*) trim(timing_msg)
      return
      end subroutine
c
c.... zero all MPI instrumentation counters and timers (common.h)
c
      subroutine initmpistat()
      include "common.h"

      impistat = 0
      impistat2 = 0
      iISend = 0
      iISendScal = 0
      iIRecv = 0
      iIRecvScal = 0
      iWaitAll = 0
      iWaitAllScal = 0
      iAllR = 0
      iAllRScal = 0
      rISend = zero
      rISendScal = zero
      rIRecv = zero
      rIRecvScal = zero
      rWaitAll = zero
      rWaitAllScal = zero
      rAllR = zero
      rAllRScal = zero
      rCommu = zero
      rCommuScal = zero
      return
      end subroutine
c
c.... read probe coordinates from xyzts.dat, assign each probe to an
c.... owning rank and open the per-probe varts output files
c
      subroutine initTimeSeries()
      use timedata !allows collection of time series
      include "common.h"
      character*60 fvarts
      character*10 cname2

      inquire(file='xyzts.dat',exist=exts)
      if(exts) then
        open(unit=626,file='xyzts.dat',status='old')
        read(626,*) ntspts, freq, tolpt, iterat, varcod
        call sTD   ! sets data structures

        do jj=1,ntspts  ! read coordinates where solution is desired
          read(626,*) ptts(jj,1),ptts(jj,2),ptts(jj,3)
        enddo
        close(626)

        statptts(:,:) = 0
        parptts(:,:) = zero
        varts(:,:) = zero

        iv_rankpernode = iv_rankpercore*iv_corepernode
        iv_totnodes = numpe/iv_rankpernode
        iv_totcores = iv_corepernode*iv_totnodes
        if (myrank .eq. 0) then
          write(*,*) 'Info for probes:'
          write(*,*) ' Ranks per core:',iv_rankpercore
          write(*,*) ' Cores per node:',iv_corepernode
          write(*,*) ' Ranks per node:',iv_rankpernode
          write(*,*) ' Total number of nodes:',iv_totnodes
          write(*,*) ' Total number of cores',iv_totcores
        endif

        do jj=1,ntspts
          ! compute the rank which will take care of probe jj
          jjm1 = jj-1
          iv_node = (iv_totnodes-1)-mod(jjm1,iv_totnodes)
          iv_core = (iv_corepernode-1) - mod((jjm1 -
     &        mod(jjm1,iv_totnodes))/iv_totnodes,iv_corepernode)
          iv_thread = (iv_rankpercore-1) - mod((jjm1-
     &        (mod(jjm1,iv_totcores)))/iv_totcores,iv_rankpercore)
          iv_rank(jj) = iv_node*iv_rankpernode
     &                + iv_core*iv_rankpercore
     &                + iv_thread

          if(myrank == 0) then
            write(*,*) ' Probe', jj, 'handled by rank',
     &                 iv_rank(jj), ' on node', iv_node
          endif

          ! clamp out-of-range assignments, just in case
          if(iv_rank(jj) .lt.0 .or. iv_rank(jj) .ge. numpe) then
            write(*,*) 'WARNING: iv_rank(',jj,') is ', iv_rank(jj),
     &                 ' and reset to numpe-1'
            iv_rank(jj) = numpe-1
          endif

          ! open the varts file for the probes owned by this rank
          if(myrank == iv_rank(jj)) then
            fvarts='varts/varts'
            fvarts=trim(fvarts)//trim(cname2(jj))
            fvarts=trim(fvarts)//trim(cname2(lstep))
            fvarts=trim(fvarts)//'.dat'
            fvarts=trim(fvarts)
            open(unit=1000+jj, file=fvarts, status='unknown')
          endif
        enddo

      endif
c
      return
      end subroutine
c
c.... close the per-probe varts files and free time-series storage
c
      subroutine finalizeTimeSeries()
      use timedata !allows collection of time series
      include "common.h"
      if(exts) then
        do jj=1,ntspts
          if (myrank == iv_rank(jj)) then
            close(1000+jj)
          endif
        enddo
        call dTD  ! deallocates time series arrays
      endif
      return
      end subroutine
c
c.... set up the linear-equation solvers (svLS and/or leslib) for the
c.... flow system and for each scalar transport equation
c
      subroutine initEQS(iBC, rowp, colm,svLS_nFaces,
     2                   svLS_LHS,svLS_ls,
     3                   svLS_LHS_S,svLS_ls_S)

      use solvedata
      use fncorpmod
      include "common.h"
#ifdef HAVE_SVLS
      include "svLS.h"
      include "mpif.h"
      include "auxmpi.h"

      TYPE(svLS_lhsType) svLS_lhs
      TYPE(svLS_lsType) svLS_ls
      TYPE(svLS_commuType) communicator
      TYPE(svLS_lsType) svLS_ls_S(4)
      TYPE(svLS_lhsType) svLS_lhs_S(4)
      TYPE(svLS_commuType) communicator_S(4)
      INTEGER svLS_nFaces, gnNo, nNo, faIn, facenNo
#endif
      integer, allocatable :: gNodes(:)
      real*8, allocatable :: sV(:,:)
      character*1024 servername
      integer rowp(nshg,nnz), colm(nshg+1),
     &        iBC(nshg)
#ifdef HAVE_LESLIB
      integer eqnType
      call SolverLicenseServer(servername)
#endif
c
c.... assign parameter values
c
      do i = 1, 100
        numeqns(i) = i
      enddo
c
c.... determine how many scalar equations we are going to solve;
c.... a map over stepseq() could shrink the lhs allocation, but an
c.... earlier attempt at that (pre-1.6.1) was abandoned
c
      nsolt=mod(impl(1),2)          ! 1 if solving temperature
      nsclrsol=nsolt+nsclr          ! total number of scalars solved
      nsolflow=mod(impl(1),100)/10  ! 1 if solving flow
c
c.... Now, call lesNew routine to initialize memory space
c
      call genadj(colm, rowp, icnt )  ! preprocess the adjacency list

      nnz_tot=icnt  ! exactly the number of non-zero blocks on this proc

c.... ----- flow system setup -----
      if (nsolflow.eq.1) then
        lesId   = numeqns(1)
        eqnType = 1
        nDofs   = 4
c....   svLS and leslib are mutually exclusive (enforced by input_fform)
        IF (svLSFlag .EQ. 1) THEN
#ifdef HAVE_SVLS
          call aSDf
          IF(nPrjs.eq.0) THEN
            svLSType=2  !GMRES when no ACUSIM projection vectors requested
          ELSE
            svLSType=3  !NS solver
          ENDIF
c....     reltol is the outer-loop stop criterion; relTolIn are
c....     (eps_GM, eps_CG); Kspace and maxIters reuse the ACUSIM setup
          eps_outer=40.0*epstol(1)  !following papers' suggestion for now
          CALL svLS_LS_CREATE(svLS_ls, svLSType, dimKry=Kspace,
     2       relTol=eps_outer, relTolIn=(/epstol(1),prestol/),
     3       maxItr=maxIters,
     4       maxItrIn=(/maxIters,maxIters/))

          CALL svLS_COMMU_CREATE(communicator, MPI_COMM_WORLD)
          nNo=nshg
          gnNo=nshgt
          IF (ipvsq .GE. 2) THEN
#if((VER_CORONARY == 1)&&(VER_CLOSEDLOOP == 1))
            svLS_nFaces = 1 + numResistSrfs + numNeumannSrfs
     2                  + numImpSrfs + numRCRSrfs + numCORSrfs
#elif((VER_CORONARY == 1)&&(VER_CLOSEDLOOP == 0))
            svLS_nFaces = 1 + numResistSrfs
     2                  + numImpSrfs + numRCRSrfs + numCORSrfs
#elif((VER_CORONARY == 0)&&(VER_CLOSEDLOOP == 1))
            svLS_nFaces = 1 + numResistSrfs + numNeumannSrfs
     2                  + numImpSrfs + numRCRSrfs
#else
            svLS_nFaces = 1 + numResistSrfs
     2                  + numImpSrfs + numRCRSrfs
#endif
          ELSE
            svLS_nFaces = 1  !looks like 1 means 0 for array size issues
          END IF

          CALL svLS_LHS_CREATE(svLS_lhs, communicator, gnNo, nNo,
     2       nnz_tot, ltg, colm, rowp, svLS_nFaces)

c....     register the Dirichlet face: bits 3..5 of iBC flag the
c....     constrained velocity components
          faIn = 1
          facenNo = 0
          DO i=1, nshg
            IF (IBITS(iBC(i),3,3) .NE. 0)  facenNo = facenNo + 1
          END DO
          ALLOCATE(gNodes(facenNo), sV(nsd,facenNo))
          sV = 0D0
          j = 0
          DO i=1, nshg
            IF (IBITS(iBC(i),3,3) .NE. 0) THEN
              j = j + 1
              gNodes(j) = i
              IF (.NOT.BTEST(iBC(i),3)) sV(1,j) = 1D0
              IF (.NOT.BTEST(iBC(i),4)) sV(2,j) = 1D0
              IF (.NOT.BTEST(iBC(i),5)) sV(3,j) = 1D0
            END IF
          END DO
          CALL svLS_BC_CREATE(svLS_lhs, faIn, facenNo,
     2       nsd, BC_TYPE_Dir, gNodes, sV)
          DEALLOCATE(gNodes)
          DEALLOCATE(sV)
#else
          if(myrank.eq.0) write(*,*) 'your input requests svLS but your cmake did not build for it'
          call error('itrdrv ','nosVLS',svLSFlag)
#endif
        ENDIF  !end of svLS init (inside ifdef so we can trap above else)

        if(leslib.eq.1) then
#ifdef HAVE_LESLIB
          call myfLesNew( lesId, 41994,
     &               eqnType,
     &               nDofs, minIters, maxIters,
     &               Kspace, iprjFlag, nPrjs,
     &               ipresPrjFlag, nPresPrjs, epstol(1),
     &               prestol, iverbose, statsflow,
     &               nPermDims, nTmpDims, servername )
          call aSDf
          call readLesRestart( lesId, aperm, nshg, myrank, lstep,
     &                         nPermDims )
#else
          if(myrank.eq.0) write(*,*) 'your input requests leslib but your cmake did not build for it'
          call error('itrdrv ','nolslb',leslib)
#endif
        endif  !leslib=1

      else  ! not solving flow at all, so zero the solver dims
        nPermDims = 0
        nTmpDims = 0
      endif

c.... ----- scalar systems setup -----
      if(nsclrsol.gt.0) then
        do isolsc=1,nsclrsol  ! unique data for each scalar solved
          lesId = numeqns(isolsc+1)
          eqnType = 2
          nDofs = 1
          isclpresPrjflag = 0
          nPresPrjs = 0
          isclprjFlag = 1
          indx=isolsc+2-nsolt  ! keeps epstol(2) for temperature,
                               ! followed by the other scalars
#ifdef HAVE_SVLS
          IF (svLSFlag .EQ. 1) THEN
            svLSType=2  !GMRES, the only option for scalars
            CALL svLS_LS_CREATE(svLS_ls_S(isolsc), svLSType,
     2         dimKry=Kspace,
     3         relTol=epstol(indx),
     4         maxItr=maxIters
     5         )

            CALL svLS_COMMU_CREATE(communicator_S(isolsc),
     2          MPI_COMM_WORLD)

            svLS_nFaces = 1  !not sure about this; should try zero

            CALL svLS_LHS_CREATE(svLS_lhs_S(isolsc),
     2         communicator_S(isolsc), gnNo, nNo,
     3         nnz_tot, ltg, colm, rowp, svLS_nFaces)

c....       Dirichlet nodes for this scalar live in iBC bit 5+isolsc
            faIn = 1
            facenNo = 0
            ib=5+isolsc
            DO i=1, nshg
              IF (btest(iBC(i),ib)) facenNo = facenNo + 1
            END DO
            ALLOCATE(gNodes(facenNo), sV(1,facenNo))
            sV = 0D0
            j = 0
            DO i=1, nshg
              IF (btest(iBC(i),ib)) THEN
                j = j + 1
                gNodes(j) = i
              END IF
            END DO

            CALL svLS_BC_CREATE(svLS_lhs_S(isolsc), faIn, facenNo,
     2         1, BC_TYPE_Dir, gNodes, sV(1,:))
            DEALLOCATE(gNodes)
            DEALLOCATE(sV)

          ENDIF  !svLS handling scalar solve
#endif

#ifdef HAVE_LESLIB
          if (leslib.eq.1) then
            call myfLesNew( lesId, 41994,
     &               eqnType,
     &               nDofs, minIters, maxIters,
     &               Kspace, isclprjFlag, nPrjs,
     &               isclpresPrjFlag, nPresPrjs, epstol(indx),
     &               prestol, iverbose, statssclr,
     &               nPermDimsS, nTmpDimsS, servername )
          endif
#endif
        enddo  !loop over scalars (not validated for multiple svLS solves)
        call aSDs(nsclrsol)
      else  !no scalar solves at all, so zero the unused dims
        nPermDimsS = 0
        nTmpDimsS  = 0
      endif
      return
      end subroutine
c
c.... decide whether vorticity must be computed this step
c
      subroutine seticomputevort
      include "common.h"
      icomputevort = 0
      if (ivort == 1) then  ! Print vorticity = True in solver.inp
! Vorticity is needed only when
! 1) an intermediate checkpoint is written, or
! 2) the last time step writes the final checkpoint, or
! 3) ybar statistics are accumulated every time step.
! BEWARE: we need here lstep+1 and istep+1 because the lstep and
! istep gets incremented after the flowsolve, further below
        if (((irs .ge. 1) .and. (mod(lstep+1, ntout) .eq. 0)) .or.
     &     istep+1.eq.nstep(itseq) .or. ioybar == 1) then
          icomputevort = 1
        endif
      endif
      return
      end subroutine
c
c.... compute vorticity components, its magnitude, the strain-rate
c.... tensor and the Q criterion from the velocity gradient GradV
c
      subroutine computeVort( vorticity, GradV,strain)
      include "common.h"

      real*8 gradV(nshg,nsdsq), strain(nshg,6), vorticity(nshg,5)

c.... vorticity components and magnitude
      vorticity(:,1) = GradV(:,8)-GradV(:,6)    !omega_x
      vorticity(:,2) = GradV(:,3)-GradV(:,7)    !omega_y
      vorticity(:,3) = GradV(:,4)-GradV(:,2)    !omega_z
      vorticity(:,4) = sqrt( vorticity(:,1)*vorticity(:,1)
     &                     + vorticity(:,2)*vorticity(:,2)
     &                     + vorticity(:,3)*vorticity(:,3) )
c.... symmetric part of the velocity gradient
      strain(:,1) = GradV(:,1)                  !S11
      strain(:,2) = 0.5*(GradV(:,2)+GradV(:,4)) !S12
      strain(:,3) = 0.5*(GradV(:,3)+GradV(:,7)) !S13
      strain(:,4) = GradV(:,5)                  !S22
      strain(:,5) = 0.5*(GradV(:,6)+GradV(:,8)) !S23
      strain(:,6) = GradV(:,9)                  !S33

c.... Q criterion: 0.25*(|omega|^2 - 2*S:S)
      vorticity(:,5) = 0.25*( vorticity(:,4)*vorticity(:,4)
     &               - 2.0*( strain(:,1)*strain(:,1)
     &                  + 2* strain(:,2)*strain(:,2)
     &                  + 2* strain(:,3)*strain(:,3)
     &                  +    strain(:,4)*strain(:,4)
     &                  + 2* strain(:,5)*strain(:,5)
     &                  +    strain(:,6)*strain(:,6)))

      return
      end subroutine
c
c.... reduce the probe values across ranks and append them to the
c.... per-probe varts files (owning rank only)
c
      subroutine dumpTimeSeries()
      use timedata !allows collection of time series
      include "common.h"
      include "mpif.h"
      character*60 fvarts
      character*10 cname2

      if (numpe > 1) then
        do jj = 1, ntspts
          vartssoln((jj-1)*ndof+1:jj*ndof)=varts(jj,:)
          ivarts=zero
        enddo
c....   mark which entries carry a contribution so the sum can be
c....   divided by the number of contributing ranks afterwards
        do k=1,ndof*ntspts
          if(vartssoln(k).ne.zero) ivarts(k)=1
        enddo

        call MPI_BARRIER(MPI_COMM_WORLD, ierr)
        call MPI_ALLREDUCE(vartssoln, vartssolng,
     &        ndof*ntspts,
     &        MPI_DOUBLE_PRECISION, MPI_SUM,
     &        MPI_COMM_WORLD, ierr)

        call MPI_BARRIER(MPI_COMM_WORLD, ierr)
        call MPI_ALLREDUCE(ivarts, ivartsg, ndof*ntspts,
     &        MPI_INTEGER, MPI_SUM,
     &        MPI_COMM_WORLD, ierr)

        do jj = 1, ntspts
          if(myrank .eq. iv_rank(jj)) then
            ! only the owning rank updates its probes; varts stays a
            ! vector because one rank may own several probes
            indxvarts = (jj-1)*ndof
            do k=1,ndof
              ! FIXED comment: nonzero means at least one rank
              ! contributed a value for this component
              if(ivartsg(indxvarts+k).ne.0) then
                varts(jj,k)=vartssolng(indxvarts+k)/
     &                      ivartsg(indxvarts+k)
              endif
            enddo
          endif !only if myrank eq iv_rank(jj)
        enddo
      endif !only if numpe > 1

      do jj = 1, ntspts
        if(myrank .eq. iv_rank(jj)) then
          ifile = 1000+jj
          write(ifile,555) lstep, (varts(jj,k),k=1,ndof) !check ndof
          if (((irs .ge. 1) .and.
     &        (mod(lstep, ntout) .eq. 0))) then
            ! cycle the file at restart dumps so output is flushed
            close(ifile)
            fvarts='varts/varts'
            fvarts=trim(fvarts)//trim(cname2(jj))
            fvarts=trim(fvarts)//trim(cname2(lskeep))
            fvarts=trim(fvarts)//'.dat'
            fvarts=trim(fvarts)
            open(unit=ifile, file=fvarts,
     &           position='append')
          endif !only when dumping restart
        endif
      enddo

      varts(:,:) = zero ! reset the array for next step

 555  format(i6,6(2x,E20.12e2)) !assuming ndof = 6 here

      return
      end subroutine
c
c.... accumulate running and phase averages (ybar/yphbar) and the
c.... squared-deviation accumulators (rerr)
c
      subroutine collectErrorYbar(ybar,yold,wallssVec,wallssVecBar,
     &           vorticity,yphbar,rerr,irank2ybar,irank2yphbar)
      include "common.h"
c.... BUG FIX: ybar was declared with extent irank2yphbar although its
c.... size argument is irank2ybar (up to 17 columns are accessed below,
c.... which is out of the declared bounds when irank2yphbar < irank2ybar)
      real*8 ybar(nshg,irank2ybar),yold(nshg,ndof),vorticity(nshg,5)
      real*8 yphbar(nshg,irank2yphbar,nphasesincycle)
      real*8 wallssvec(nshg,3),wallssVecBar(nshg,3), rerr(nshg,numerr)
c
c.... ybar column map (averaging assumes identical time step sizes):
c....   1:3  average velocity components     4  average pressure
c....   5    average speed                   6:8 avg. of squared vel.
c....   9    avg. of squared pressure       10:12 cross terms uv,uw,vw
c....   13   average eddy viscosity         14:16 avg. vorticity comps
c....   17   average vorticity magnitude    (istep = time step count)
c
      icollectybar = 0
      if(nphasesincycle.eq.0 .or.
     &   istep.gt.ncycles_startphaseavg*nstepsincycle) then
        icollectybar = 1
        if((istep-1).eq.ncycles_startphaseavg*nstepsincycle)
     &    istepsinybar = 0  ! init. to zero in first cycle in avg.
      endif

      if(icollectybar.eq.1) then
        istepsinybar = istepsinybar+1
        tfact=one/istepsinybar
c.... ybar holds the averaged ((u,v,w),p) fields, the speed average
c.... sqrt(u^2+v^2+w^2)
c.... and avg. of sq.
c.... terms including u^2, v^2, w^2, p^2 and the cross terms uv,uw,vw
        ybar(:,1) = tfact*yold(:,1) + (one-tfact)*ybar(:,1)
        ybar(:,2) = tfact*yold(:,2) + (one-tfact)*ybar(:,2)
        ybar(:,3) = tfact*yold(:,3) + (one-tfact)*ybar(:,3)
        ybar(:,4) = tfact*yold(:,4) + (one-tfact)*ybar(:,4)
        ybar(:,5) = tfact*sqrt(yold(:,1)**2+yold(:,2)**2+
     &              yold(:,3)**2) + (one-tfact)*ybar(:,5)
        ybar(:,6) = tfact*yold(:,1)**2 +
     &              (one-tfact)*ybar(:,6)
        ybar(:,7) = tfact*yold(:,2)**2 +
     &              (one-tfact)*ybar(:,7)
        ybar(:,8) = tfact*yold(:,3)**2 +
     &              (one-tfact)*ybar(:,8)
        ybar(:,9) = tfact*yold(:,4)**2 +
     &              (one-tfact)*ybar(:,9)
        ybar(:,10) = tfact*yold(:,1)*yold(:,2) +   !uv
     &              (one-tfact)*ybar(:,10)
        ybar(:,11) = tfact*yold(:,1)*yold(:,3) +   !uw
     &              (one-tfact)*ybar(:,11)
        ybar(:,12) = tfact*yold(:,2)*yold(:,3) +   !vw
     &              (one-tfact)*ybar(:,12)
        if(nsclr.gt.0)                             !nut
     &  ybar(:,13) = tfact*yold(:,6) + (one-tfact)*ybar(:,13)

        if(ivort == 1) then !vorticity
          ybar(:,14) = tfact*vorticity(:,1) +
     &                 (one-tfact)*ybar(:,14)
          ybar(:,15) = tfact*vorticity(:,2) +
     &                 (one-tfact)*ybar(:,15)
          ybar(:,16) = tfact*vorticity(:,3) +
     &                 (one-tfact)*ybar(:,16)
          ybar(:,17) = tfact*vorticity(:,4) +
     &                 (one-tfact)*ybar(:,17)
        endif

c....   running average of the wall shear stress vector
        if(abs(itwmod).ne.1 .and. iowflux.eq.1) then
          wallssVecBar(:,1) = tfact*wallssVec(:,1)
     &                      +(one-tfact)*wallssVecBar(:,1)
          wallssVecBar(:,2) = tfact*wallssVec(:,2)
     &                      +(one-tfact)*wallssVecBar(:,2)
          wallssVecBar(:,3) = tfact*wallssVec(:,3)
     &                      +(one-tfact)*wallssVecBar(:,3)
        endif
      endif !icollectybar.eq.1
c
c.... compute phase average
c
      if(nphasesincycle.ne.0 .and.
     &   istep.gt.ncycles_startphaseavg*nstepsincycle) then

c....   cycle starts at ncycles_startphaseavg*nstepsincycle+1
        if((istep-1).eq.ncycles_startphaseavg*nstepsincycle)
     &    icyclesinavg = 0  ! init. to zero in first cycle in avg.

        nstepsbtwphase = nstepsincycle/nphasesincycle ! integer value
        if(mod(istep-1,nstepsincycle).eq.0) then
          iphase = 1  ! init. to one in beginning of every cycle
          icyclesinavg = icyclesinavg + 1
        endif

        icollectphase = 0
        istepincycle = mod(istep,nstepsincycle)
        if(istepincycle.eq.0) istepincycle=nstepsincycle
        if(istepincycle.eq.iphase*nstepsbtwphase) then
          icollectphase = 1
          iphase = iphase+1  ! use 'iphase-1' below
        endif

        if(icollectphase.eq.1) then
          tfactphase = one/icyclesinavg

          if(myrank.eq.master) then
            write(*,*) 'nsamples in phase ',iphase-1,': ',
     &                 icyclesinavg
          endif

          yphbar(:,1,iphase-1) = tfactphase*yold(:,1) +
     &         (one-tfactphase)*yphbar(:,1,iphase-1)
          yphbar(:,2,iphase-1) = tfactphase*yold(:,2) +
     &         (one-tfactphase)*yphbar(:,2,iphase-1)
          yphbar(:,3,iphase-1) = tfactphase*yold(:,3) +
     &         (one-tfactphase)*yphbar(:,3,iphase-1)
          yphbar(:,4,iphase-1) = tfactphase*yold(:,4) +
     &         (one-tfactphase)*yphbar(:,4,iphase-1)
          yphbar(:,5,iphase-1) = tfactphase*sqrt(yold(:,1)**2
     &         +yold(:,2)**2+yold(:,3)**2) +
     &         (one-tfactphase)*yphbar(:,5,iphase-1)
          yphbar(:,6,iphase-1) =
     &         tfactphase*yold(:,1)*yold(:,1)
     &         +(one-tfactphase)*yphbar(:,6,iphase-1)
          yphbar(:,7,iphase-1) =
     &         tfactphase*yold(:,1)*yold(:,2)
     &         +(one-tfactphase)*yphbar(:,7,iphase-1)
          yphbar(:,8,iphase-1) =
     &         tfactphase*yold(:,1)*yold(:,3)
     &         +(one-tfactphase)*yphbar(:,8,iphase-1)
          yphbar(:,9,iphase-1) =
     &         tfactphase*yold(:,2)*yold(:,2)
     &         +(one-tfactphase)*yphbar(:,9,iphase-1)
          yphbar(:,10,iphase-1) =
     &         tfactphase*yold(:,2)*yold(:,3)
     &         +(one-tfactphase)*yphbar(:,10,iphase-1)
          yphbar(:,11,iphase-1) =
     &         tfactphase*yold(:,3)*yold(:,3)
     &         +(one-tfactphase)*yphbar(:,11,iphase-1)

          if(ivort == 1) then
            yphbar(:,12,iphase-1) =
     &           tfactphase*vorticity(:,1)
     &           +(one-tfactphase)*yphbar(:,12,iphase-1)
            yphbar(:,13,iphase-1) =
     &           tfactphase*vorticity(:,2)
     &           +(one-tfactphase)*yphbar(:,13,iphase-1)
            yphbar(:,14,iphase-1) =
     &           tfactphase*vorticity(:,3)
     &           +(one-tfactphase)*yphbar(:,14,iphase-1)
            yphbar(:,15,iphase-1) =
     &           tfactphase*vorticity(:,4)
     &           +(one-tfactphase)*yphbar(:,15,iphase-1)
          endif
        endif !compute phase average
      endif !phase averaging active
c
c.... accumulate squared deviations for the RMS error estimate
c
      if(icollectybar.eq.1) then
        rerr(:, 7)=rerr(:, 7)+(yold(:,1)-ybar(:,1))**2
        rerr(:, 8)=rerr(:, 8)+(yold(:,2)-ybar(:,2))**2
        rerr(:, 9)=rerr(:, 9)+(yold(:,3)-ybar(:,3))**2
        rerr(:,10)=rerr(:,10)+(yold(:,4)-ybar(:,4))**2
      endif
      return
      end subroutine