      subroutine forces (y , ilwork)
c
c----------------------------------------------------------------------
c
c This subroutine calculates and updates the aerodynamic forces
c
c What the visible code does, in order:
c   1. (disabled) disturbance-kinetic-energy reporting to unit 76
c      (distcalc block, for T-S instability studies)
c   2. MPI-sums the force / heat-flux / entropy accumulators so every
c      rank holds the global values
c   3. MPI-sums the per-surface flux array flxID, calls sforce() for
c      per-surface output, and writes one force record to unit iforce
c   4. (matflg(5,1).ne.0) relaxes the body force datmat(1,5,1) so the
c      bulk mass flux tracks the target velocity "vel"
c   5. (pzero.eq.1) subtracts the mean of y(:,4) (pressure) to stop
c      pressure drift when no Dirichlet BC pins it
c
c input:
c   y(nshg,ndof) : solution vector; column 4 is used as pressure
c   ilwork       : MPI communication trace array (not used directly
c                  in this routine)
c
c NOTE(review): Force, HFlux, entrop, flxID, Atots, numpe, myrank,
c master, lstep, iforce, matflg, datmat, Dtgl, vel, ro, xlngth,
c zlngth, pzero, numnp, nshg, ndof, isrfIM, MAXSURF are presumed to
c come from common.h / auxmpi.h - confirm against those includes.
c----------------------------------------------------------------------
c
      include "common.h"
      include "mpif.h"
      include "auxmpi.h"
c
      integer ilwork(nlwork)

      real*8  y(nshg,ndof)
      real*8  Forin(5), Forout(5),spmasss
      real*8  ftots(3,0:MAXSURF),ftot(3)

!MR CHANGE
      integer :: iv_rankpernode, iv_totnodes, iv_totcores
      integer :: iv_node, iv_core, iv_thread
!MR CHANGE

c
c  START OF DISTURBANCE BLOCK
c
c     distcalc is hard-wired off here.  When enabled, the disturbance
c     kinetic energy dke is summed onto the master rank and appended
c     to unit 76 once per step (only on the last iteration of the
c     step, iter.eq.nitr), then zeroed for the next step.
c
      dimension diste(1), distesum(1)
      distcalc=0 ! we would want to activate this for T-S instability studies
      if(distcalc.eq.1) then
         if(iter.eq.nitr) then ! we have completed the last iteration
            if (numpe > 1) then
               call MPI_REDUCE (dke,dkesum,1,MPI_DOUBLE_PRECISION,
     &                          MPI_SUM, master, MPI_COMM_WORLD,ierr)
            endif
c           serial run: dke already holds the global value
            if (numpe.eq.1)
     &           write (76,1000) lstep+1, dke,dke

            if ((myrank .eq. master).and.(numpe > 1)) then
               write (76,1000) lstep+1, dkesum,dkesum
c
               call flush(76)
c
            endif
            dke=0.0 ! we must zero it back out for next step

         endif
      endif
C
C END OF DISTURBANCE BLOCK
C
c
c.... --------------------->  Aerodynamic Forces  <----------------------
c
c.... output the forces and the heat flux
c
c     Pack the five per-rank accumulators into one buffer so a single
c     ALLREDUCE yields the global sums on every rank.
c
      if (numpe > 1) then
         Forin = (/ Force(1), Force(2), Force(3), HFlux,
     &              entrop /)
         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
         call MPI_ALLREDUCE (Forin(1), Forout(1),5,
     &        MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)
         Force = Forout(1:3)
         HFlux = Forout(4)
         entrop= Forout(5)
      endif
c
c     Globalize the surface-flux table: row 1 of flxID is area, row 2
c     mass flux (both taken at surface isrfIM), rows 3-5 are the three
c     force components for every surface id 0..MAXSURF.
c
      if (numpe > 1) then
         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
         call MPI_ALLREDUCE (flxID(1,isrfIM), Atots,1,
     &        MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)
         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
         call MPI_ALLREDUCE (flxID(2,isrfIM), spmasss,1,
     &        MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)
         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
         call MPI_ALLREDUCE (flxID(3,:), Ftots(1,:),MAXSURF+1,
     &        MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)
         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
         call MPI_ALLREDUCE (flxID(4,:), Ftots(2,:),MAXSURF+1,
     &        MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)
         call MPI_BARRIER(MPI_COMM_WORLD, ierr)
         call MPI_ALLREDUCE (flxID(5,:), Ftots(3,:),MAXSURF+1,
     &        MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)
      else
         Ftots=flxID(3:5,:)
         Atots=flxID(1,isrfIM)
         spmasss=flxID(2,isrfIM)
      endif
      call sforce()
c
c     Total force = sum of the per-surface contributions.
c
      ftot(1)=sum(Ftots(1,0:MAXSURF))
      ftot(2)=sum(Ftots(2,0:MAXSURF))
      ftot(3)=sum(Ftots(3,0:MAXSURF))

      if (myrank .eq. master) then
         write (iforce,1000) lstep, (Force(i), i=1,nsd),
     &                       HFlux,spmasss,(ftot(i),i=1,nsd)
         call flush(iforce)
      endif
c
c     Body-force feedback: nudge datmat(1,5,1) so the bulk velocity
c     spmasss/(ro*Atots) approaches "vel".
c     NOTE(review): spmasss.ne.0 is an exact real comparison - relies
c     on the flux being exactly zero when unset; confirm intended.
c
      if((spmasss.ne.0) .and. (matflg(5,1).ne.0))then ! we are doing
                                                      ! force adjustment
         tmp=Dtgl*0.025 ! .025= 1/2/tmp1 when tmp1=20

c        Select the streamwise force component / channel length from
c        the body-force direction flag.
c        NOTE(review): case (2) sets neither fwall nor vlngth, so a
c        direction-2 body force would use stale/undefined values in
c        the fnew expression below - confirm whether this case is
c        ever reached.
         select case ( matflg(5,1) )
         case ( 1 )             ! standard linear body force
            fwall=Force(1)
            vlngth=xlngth
         case ( 2 )
         case ( 3 )
            fwall=Force(3)
            vlngth=zlngth
         end select

         write(222,*) spmasss
         fnew=(vel-spmasss/(ro*Atots))*tmp +
     &        fwall/(vlngth*Atots)
c        rank 0 logs the old/new body force to unit 880
         if(myrank.eq.0) then
            write(880,*) datmat(1,5,1),fnew
            call flush(880)
         endif
         datmat(1,5,1)=fnew !* (one - tmp2) + datmat(1,5,1) * tmp2
      endif

      if (pzero .eq. 1) then
c
c also adjust the pressure here so that the mean stays at zero (prevent drift)
c
c This is only valid if there are NO Dirichlet Boundary conditions on p!!!
c
c (method below counts partition boundary nodes twice.  When we get ntopsh
c  into the method it will be easy to fix this.  Currently it would require
c  some work and it is probably not worth it
c
c switched to avg of the on-processor averages to get around hierarchic
c modes problem
c
c        pave=sum(y(1:numnp,1))
c        call MPI_ALLREDUCE (pave, pavet, 1,
c     &        MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)
c        pavet=pavet/nshgt
         pave=sum(y(1:numnp,4))
         if (numpe .gt. 1) then
c           node count is reduced as a real so one DOUBLE ALLREDUCE
c           suffices for both the sum and the count
            xnpts=numnp
            call MPI_BARRIER(MPI_COMM_WORLD, ierr)
            call MPI_ALLREDUCE (pave, pavet, 1,
     &           MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)
            call MPI_BARRIER(MPI_COMM_WORLD, ierr)
            call MPI_ALLREDUCE (xnpts, xnptst, 1,
     &           MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)
            pavet=pavet/xnptst
         else
            pavet = pave/numnp
         endif
c
c        shift pressure so its (approximate) global mean is zero
         y(1:numnp,4)=y(1:numnp,4)-pavet
         pave=sum(y(1:numnp,4))
c        if(myrank.eq.0) then
c           write(90+myrank,*) pavet,pave
c           call flush(900)
c        endif
c
      endif

      return
c
! 1000 format(1p,i6,8e13.5)
 1000 format(1p,i6,8e20.12)
c
      end


c----------------------------------------------------------------------
c sforce: reduce the per-surface flux table flxID across MPI ranks
c and append one record per tracked surface to that surface's file.
c
c First call only (icalled guard, value saved across calls):
c   - map each surface id isrf to an "owner" rank irankfilesforce(isrf)
c     spread over nodes/cores/threads so file I/O is distributed
c     instead of serialized on master (MR CHANGE block)
c   - each owner opens unit 60+isrf on file
c     'forces_s'//<surface id>//<step tag>
c Every call:
c   - for each surface flagged in nsrflist, ALLREDUCE the 10-entry
c     flux vector and have the owner rank append it to the file
c
c NOTE(review): iv_rankpercore, iv_corepernode, irankfilesforce,
c nsrflist, flxID, lskeep, numpe, myrank, lstep are presumed to come
c from common.h; cname/cname2 are external character functions that
c build the file-name pieces - confirm.
c NOTE(review): units 60..60+MAXSURF are hard-coded; confirm they do
c not collide with other units used elsewhere (e.g. 76, 222, 880).
c----------------------------------------------------------------------
      subroutine sforce()

      include "common.h"
      include "mpif.h"

c     SFlux holds one surface's column of flxID:
c     area, mass flux, force 1..3, scalars (per inline comment below)
      real*8 SFlux(10),SFluxg(10)
      character*20 fname1
      character*5  cname
      character*10 cname2

c     one-time-initialization flag, persists across calls
      integer icalled
      data icalled /0/
      save icalled

      if(icalled.eq.0) then
         icalled = 1

!MR CHANGE
         iv_rankpernode = iv_rankpercore*iv_corepernode
         iv_totnodes = numpe/iv_rankpernode
         iv_totcores = iv_corepernode*iv_totnodes
         irankfilesforce(:) = -1
!MR CHANGE

         do isrf = 0,MAXSURF

!MR CHANGE
            ! Compute the adequate rank which will take care of isrf
            iv_node = (iv_totnodes-1)-mod(isrf,iv_totnodes)
            iv_core = (iv_corepernode-1) - mod((isrf -
     &           mod(isrf,iv_totnodes))/iv_totnodes,iv_corepernode)
            iv_thread = (iv_rankpercore-1) - mod((isrf-
     &           (mod(isrf,iv_totcores)))/iv_totcores,iv_rankpercore)
            irankfilesforce(isrf) = iv_node*iv_rankpernode
     &                            + iv_core*iv_rankpercore
     &                            + iv_thread

            if(myrank == 0 .and. isrf.le.iv_rankpernode) then
               write(*,*) ' sforce ', isrf, 'if any handled by rank',
     &                    irankfilesforce(isrf), ' on node', iv_node
            endif

            ! Verification just in case
c           clamp any out-of-range owner to the last rank
            if(irankfilesforce(isrf) .lt.0 .or.
     &         irankfilesforce(isrf) .ge. numpe) then
               write(*,*) 'WARNING: irankfilesforce(',isrf,') is ',
     &                    irankfilesforce(isrf),
     &                    ' and reset to numpe-1'
               irankfilesforce(isrf) = numpe-1
            endif
!MR CHANGE

!            if ( nsrflist(isrf).ne.0 .and. myrank.eq.master) then
            if ( nsrflist(isrf).ne.0 .and.
     &           myrank.eq.irankfilesforce(isrf)) then
               iunit=60+isrf
               fname1 = 'forces_s'//trim(cname(isrf))// cname2(lskeep+1)
               open(unit=iunit,file=trim(fname1),status='unknown')
            endif
         enddo
      endif
c
c     Per-step work: globalize and log each flagged surface's fluxes.
c
      do isrf = 0,MAXSURF
         if ( nsrflist(isrf).ne.0 ) then
            SFlux(:) = flxID(:,isrf) ! Area, Mass Flux, Force 1, Force 2, Force 3, Scalars
            if ( numpe > 1 ) then
               call MPI_BARRIER(MPI_COMM_WORLD, ierr)
               call MPI_ALLREDUCE (SFlux, SFluxg,10,
     .              MPI_DOUBLE_PRECISION,MPI_SUM, MPI_COMM_WORLD,ierr)
               SFlux = SFluxg
            endif
!            if(myrank.eq.master) then
            if(myrank.eq.irankfilesforce(isrf)) then
               iunit=60+isrf
!               write(iunit,"(i7,1p10e14.5)") lstep, SFlux
               write(iunit,"(i7,1p10e20.12)") lstep, SFlux
               call flush(iunit)
!     done in itrdrv.f
!               if(istep.eq.nstep(1)) then !end step then close file
!                  close(iunit)
!               endif
            endif
         endif
      enddo

      return
      end