        subroutine genBC (iBC, BC, x, ilwork, iper)
c
c----------------------------------------------------------------------
c This routine generates the essential prescribed boundary conditions.
c
c input:
c  iBC    (nshg)        : boundary condition code (bit-coded switches)
c  nBC    (nshg)        : boundary condition mapping array (module data)
c  x      (numnp,nsd)   : nodal coordinates
c  ilwork (nlwork)      : inter-processor communication tasks
c  iper   (nshg)        : periodic-master map for each node
c
c output:
c  BC    (nshg,ndofBC) : The constraint data for prescribed BC
c
c
c Note: genBC1 reduces the input data for the velocity. In the
c       case of varying velocity direction in the generation, the
c       results may not be correct. (since a linearity assumption is
c       made in the generation).
c
c
c Farzin Shakib, Spring 1986.
c Zdenek Johan,  Winter 1991.  (Fortran 90)
c----------------------------------------------------------------------
c
        use slpw
        use readarrays       ! used to access BCinp, nBC
        use specialBC        ! filling acs here
        include "common.h"
c
        dimension iBC(nshg),   nsurf(nshg),
     &            BC(nshg,ndofBC),
     &            x(numnp,nsd), ilwork(nlwork),
     &            iper(nshg)
c
c BCinp for each point has:
c D T P c11 c12 c13 M1 c21 c22 c23 M2 theta S1 S2 S3...
c 1 2 3 4   5   6   7  8   9   10  11 12    13 14 15...
c Remember, ndof=nsd+2+nsclr
c
c Arrays in the following 1 line are now dimensioned in readnblk
c        dimension BCinp(numpbc,ndof+7)
c
        dimension BCtmp(nshg,ndof+7)
c
c  ndof+7= 3(thermos) + (nsd-1)*(nsd+1) + nscalars + 1 (theta)
c                          #vect *(vec dir +mag)
c
c.... --------------------------->  Input  <---------------------------
c
c.... convert boundary condition data
c
c scatter the per-BC-point input table BCinp to the full nodal array
c BCtmp via the node-to-BC map nBC; nodes with no BC entry stay zero
        BCtmp = zero
c
        if(numpbc.ne.0) then
           do i = 1, ndof+7
              where (nBC(:) .ne. 0) BCtmp(:,i) = BCinp(nBC(:),i)
           enddo
        endif
        deallocate(BCinp)

c
c column 12 holds the axisymmetry angle theta; any nonzero entry means
c axisymmetric BCs are present, so store cos/sin for the rotation ops
        if(any(BCtmp(:,12).ne.0)) then
           iabc=1
           allocate (acs(nshg,2))
           where (btest(iBC,10))
              acs(:,1) = cos(BCtmp(:,12))
              acs(:,2) = sin(BCtmp(:,12))
           endwhere
        endif
c
c.... ---------------------->  Wall Normals  <--------------------------
c     (calculate the normal and adjust BCinp to the true normal as needed)
c
!needed either way        if(navier.eq.1)then
        call genwnm (iBC, BCtmp, x, ilwork, iper, nsurf)
!needed either way        endif
c  determine the first point off the wall for each wall node
!needed either way        if(navier.eq.1)then
        call genotwn (x,BCtmp, iBC, nsurf)
!needed either way        endif
c.... ------------------------>  Conversion  <-------------------------
c
c.... convert the input boundary conditions to condensed version
c
        BC = zero
c
        if(myrank.eq.0) write(*,*) 'Navier is set to ', navier
        if(navier.eq.1)then ! zero navier means Euler simulation
           call genBC1 (BCtmp,  iBC,  BC)
        else !enabling for IC code now
c        elseif(matflg(1,1).eq.0)then ! compressible code
c save pristine copies of BCtmp/iBC/BC so slpwBC can compare the
c as-input constraints against the genBC1-condensed ones per node
           allocate(BCtmpg(nshg,ndof+7))
           allocate(BCg(nshg,ndofBC))
           allocate(iBCg(nshg))
           BCtmpg=BCtmp
           iBCg=iBC
           call genBC1 (BCtmp,  iBC,  BC)
c... genBC1 convert BCtmp to BC
           BCg=BC
           icdg=icd
c... find slip wall
           call findslpw(x,ilwork,iper,iBC)
c... apply slip wall condition to wall nodes
           do i=1,nslpwnd
              nn=idxslpw(i)
              call slpwBC(mpslpw(nn,1),mpslpw(nn,2),iBCg(nn),
     &                    BCg(nn,:), BCtmpg(nn,:),
     &                    iBC(nn),   BC(nn,:),
     &                    wlnorm(nn,:,:) )
           enddo
c restore icd clobbered by the second genBC1 pass
           icd=icdg
           deallocate(idxslpw)
           deallocate(BCg)
           deallocate(iBCg)
           deallocate(BCtmpg)
           deallocate(mpslpw)
           deallocate(wlnorm)
!        else
!           if(myrank.eq.0) write(*,*) 'Incompressible code not able to do inviscid at this time'
        endif


c
c.... --------------------------->  Echo  <----------------------------
c
c.... echo the input data
c
        if (necho .lt. 3) then
           nn = 0
           do n = 1, nshg
              if (nBC(n) .ne. 0) then
                 nn = nn + 1
c reprint the column header every 50 echoed nodes
                 if(mod(nn,50).eq.1)
     &              write(iecho,1000)ititle,(j,j=1,ndofBC)
                 write (iecho,1100) n, (BC(n,i),i=1,ndofBC)
              endif
           enddo
        endif
c
c.... return
c
        return
c
 1000   format(a80,//,
     &  ' P r e s c r i b e d B o u n d a r y C o n d i t i o n s',//,
     &  ' Node ',/,
     &  ' Number ',5x,6('BC',i1,:,10x))
 1100   format(1p,2x,i5,3x,6(e12.5,1x))
c
        end




        subroutine genwnm (iBC, BCtmp, x, ilwork, iper, nsurf)
c----------------------------------------------------------------------
c This routine generates the normal to a wall
c
c input:
c  iBC   (nshg)        : boundary condition code
c  nBC   (nshg)        : boundary condition mapping array
c
c output:
c  BCtmp (nshg,ndof+6) : The constraint data for prescribed BC
c  nsurf (nshg)        : number of modeled wall surfaces touching
c                        each node (0 = not a modeled wall node)
c  wnrm  (nshg,3)      : (module turbSA) outward-into-fluid unit wall
c                        normal for modeled wall nodes
c
c----------------------------------------------------------------------
c
        use turbSA
        use pointer_data  ! used for mienb, mibcb
        include "common.h"
        include "mpif.h"
c
        character*20 fname1,  fmt1
        character*5  cname
        dimension iBC(nshg),   iper(nshg),
     &            x(numnp,nsd), ilwork(nlwork)
c
        dimension BCtmpSAV(nshg,ndof+7)
        dimension BCtmp(nshg,ndof+7), fBC(nshg,ndofBC),
     &            e1(3), e2(3),
     &            elnrm(3), asum(numnp)
c
        integer sid, nsidg
        integer nsurf(nshg), ivec(nsd)
        logical :: firstvisit(nshg)
        real*8 BCvecs(2,nsd)
        integer, allocatable :: ienb(:)
        dimension wnmdb(nshg,nsd)
c
c wnrm is dimensioned nshg but the math is only done for straight sided
c elements at this point so wnrm will not be calculated for the hierarchic
c modes. Note that the wall model creates a p.w. linear representation
c only at this time.
c
        allocate ( wnrm(nshg,3) )
c
c.... ---------------------->  Wall Normals  <--------------------------
c     (calculate the normal and adjust BCinp to the true normal as needed)
c
c
        asum = zero
        wnrm = zero

c
c.... Save a copy of BCtmp so that after we calculate the normals we
c     can recover the comp3 information.
c
        BCtmpSAV=BCtmp
c
c Count out the number of surface ID's on-processor, and map them
        call gensidcount(nsidg)

c
        if(nsidg.gt.0) then  ! if there are any surfID's
           nsurf(:) = 0
           do k = 1, nsidg   ! loop over Surface ID's
              sid = sidmapg(k)
              firstvisit(:)=.true.
              wnrm(:,1:3)=zero
              do iblk=1, nelblb  ! loop over boundary element blocks
                 npro = lcblkb(1,iblk+1)-lcblkb(1,iblk)
                 nenbl = lcblkb(6,iblk)
                 nshl = lcblkb(9,iblk)
                 allocate( ienb(nshl) )
                 do i = 1, npro  ! loop over boundary elements
                    iBCB1=miBCB(iblk)%p(i,1)
                    iBCB2=miBCB(iblk)%p(i,2)
                    ienb(1:nshl)=mienb(iblk)%p(i,1:nshl)
c don't bother with elements that aren't on modeled surfaces
c if ( not (wall set and traction set) )
                    if (.not.(btest(iBCB1,2).and.btest(iBCB1,4)))
     &                 cycle
c don't bother with elements that don't lie on the current surface
                    if (iBCB2.ne.sid) cycle
c
c.... calculate this element's area-weighted normal vector
c     (cross product of two edge vectors of the boundary face)
c
                    e1 = x(ienb(2),:)-x(ienb(1),:)
                    e2 = x(ienb(3),:)-x(ienb(1),:)
                    elnrm(1) = e1(2)*e2(3)-e1(3)*e2(2)
                    elnrm(2) = e1(3)*e2(1)-e1(1)*e2(3)
                    elnrm(3) = e1(1)*e2(2)-e1(2)*e2(1)
c Tetrahedral elements have negative volumes in phastaI, so
c the normals calculated from the boundary faces must be inverted
c to point into the fluid
                    if(nenbl.eq.3) elnrm(:)=-elnrm(:)
c
c.... add area-weighted normals to the nodal tallies for this surface
c
                    do j = 1, nenbl  ! loop over elt boundary nodes
                       nn=ienb(j)    ! global node number
                       if(firstvisit(nn)) then
                          firstvisit(nn)=.false.
                          nsurf(nn)=nsurf(nn)+1
c zero the slot of BCtmp that this (first or second) surface will fill
                          if(nsurf(nn).eq.1) BCtmp(nn,4:6)=zero
                          if(nsurf(nn).eq.2) BCtmp(nn,8:10)=zero
                       endif
                       wnrm(nn,:)=wnrm(nn,:)+elnrm
                    enddo  ! loop over elt boundary nodes
                 enddo  ! end loop over boundary elements in block
                 deallocate(ienb)
              enddo  ! end loop over boundary element blocks
c Now we have all of this surface's contributions to wall normals
c for all nodes, along with an indication of how many surfaces
c each node has encountered so far. Contributions from other processors
c should now be accumulated for this surface
c
c axisymm BC's need BC (and we have not built it yet) so we need to create
c the entries it needs.
c
              if(iabc==1) then
c NOTE(review): fBC is filled here but never read later in this
c routine -- presumably a leftover from an older rotabc interface;
c confirm before removing
                 where (btest(iBC,10))
                    fBC(:,1) = cos(BCtmp(:,12))
                    fBC(:,2) = sin(BCtmp(:,12))
                 endwhere

c before the commu we need to rotate the residual vector for axisymmetric
c boundary conditions (so that off processor periodicity is a dof add instead
c of a dof combination). Take care of all nodes now so periodicity, like
c commu is a simple dof add.
                 call rotabc(wnrm, iBC, 'in ')
              endif

              if (numpe.gt.1) then
c.... add areas and norms contributed from other processors
                 call commu (wnrm, ilwork, 3, 'in ')
              endif
c.... account for periodicity and axisymmetry
              call bc3per(iBC,wnrm, iper,ilwork, 3)
c at this point the master has all the information (slaves are zeroed and
c waiting to be told what the master has...lets do that).
c
c.... local periodic (and axisymmetric) boundary conditions (no communications)
              wnmdb=wnrm
              do i = 1,numnp  ! only use the vertices in the normal calc
                 wnrm(i,:) = wnrm(iper(i),:)
              enddo
              wnmdb=wnrm
              if (numpe.gt.1) then
c.... tell other processors the resulting (and now complete) sums
                 call commu (wnrm, ilwork, 3, 'out')
              endif
c Axisymmetric slaves need to be rotated back
              if(iabc==1) then  !are there any axisym bc's
                 call rotabc(wnrm, iBC, 'out')
              endif
c Now all nodes have all the normal contributions for this surface,
c including those from off-processor and periodic nodes.
c We distribute these summed vectors to the proper slots in BCtmp,
c according to how many surfaces each node has seen so far
              do nn = 1, nshg
                 if(nsurf(nn).eq.1)
     &              BCtmp(nn,4:6)=BCtmp(nn,4:6)+wnrm(nn,:)
                 if(nsurf(nn).eq.2)
     &              BCtmp(nn,8:10)=BCtmp(nn,8:10)+wnrm(nn,:)
                 if(nsurf(nn).gt.2)
     &              BCtmp(nn,4:6)=BCtmp(nn,4:6)+wnrm(nn,:)
              enddo
c That's all for this surface; move on to the next
           enddo  ! end loop over surface ID's
           deallocate( sidmapg )

c
c.... complete the averaging, adjust iBC, adjust BCtmp
c
           wnrm(:,1:3)=zero
           do nn = 1, numnp  ! loop over all nodes
c leave nodes without wall-modeled surfaces unchanged
              if(nsurf(nn).eq.0) cycle
c
c mark this as a node with comp3
c
              ikp=0
              if(ibits(iBC(nn),3,3).eq.7 ) ikp=1
c              if(ibits(ibc(nn),3,3).eq.7 .and. BCtmp(nn,7).eq.zero) cycle
c find out which velocity BC's were set by the user, and cancel them
c (bits 3,4,5 of iBC are the x,y,z velocity-component switches)
              ixset=ibits(iBC(nn),3,1)
              iyset=ibits(iBC(nn),4,1)
              izset=ibits(iBC(nn),5,1)
              iBC(nn)=iBC(nn)-ixset*8-iyset*16-izset*32
c
c save this stripped iBC for later un-fixing
c
              iBCSAV=iBC(nn)
c
              if(abs(itwmod).eq.1) then  ! slip velocity wall-model
c If we're using the slip-velocity wall-model, then the velocity
c boundary condition for this node will depend on how many modeled
c surfaces share it...
                 if(nsurf(nn).eq.1) then  ! 1 modeled wall
c If this node is part of only one modeled wall, then the component
c of the velocity normal to the surface is set to zero. In this case
c only the first vector of BCtmp received normal contributions
                    iBC(nn)=iBC(nn)+8 ! assume normal is x-dominated first
                    wnrm(nn,:)=BCtmp(nn,4:6)
                    if(abs(wnrm(nn,3)).gt.abs(wnrm(nn,2)))then ! z beats y
                      if(abs(wnrm(nn,3)).gt.abs(wnrm(nn,1)))then ! z beats x
                         iBC(nn)=iBC(nn)+24
                      endif  ! z beats x
                    endif    ! z beats y
                    if(abs(wnrm(nn,2)).ge.abs(wnrm(nn,3)))then ! y beats z
                      if(abs(wnrm(nn,2)).gt.abs(wnrm(nn,1)))then ! y beats x
                         iBC(nn)=iBC(nn)+8
                      endif  ! y beats x
                    endif    ! y beats z   !(adjusted iBC)
                    BCtmp(nn,7)=zero
                 else if(nsurf(nn).eq.2) then
c If this node is shared by two modeled walls, then each wall
c provides a normal vector along which the velocity must be zero.
c This leaves only one "free" direction, along which the flow is nonzero.
c The two normal vectors define a plane. Any pair of non-parallel
c vectors in this plane can also be specified, leaving the same free
c direction. Area-weighted average normal vectors for the two surfaces
c sharing this node have been stored in BCtmp(4:6) and BCtmp(8:10)
c We normalize the first of these, and then orthogonalize the second
c against the first. After then normalizing the second, we choose which
c cartesian direction dominates each of the vectors, and adjust iBC
c and BCtmp accordingly
                    e1=BCtmp(nn,4:6)
                    wmag=e1(1)*e1(1)+e1(2)*e1(2)+e1(3)*e1(3)
                    wmag=1./sqrt(wmag)
                    e1=e1*wmag  ! first vector is normalized
c
                    e2=BCtmp(nn,8:10)
c Gram-Schmidt: remove the e1 component from e2
                    wmag=e2(1)*e1(1)+e2(2)*e1(2)+e2(3)*e1(3) ! wmag=e2.n1
                    e2=e2-wmag*e1  ! second vector is orthogonalized
c
                    wmag=e2(1)*e2(1)+e2(2)*e2(2)+e2(3)*e2(3)
                    wmag=1./sqrt(wmag)
                    e2=e2*wmag  ! second vector is normalized
c
                    ! idom tells us which component is currently dominant
                    ! ivec(n) tells us which vector is dominated by comp n
                    ivec(:)=0  ! initialize
                    idom=1     ! assume x-comp dominates e1
                    bigcomp=abs(e1(1))
                    ivec(idom)=1
                    do i = 2, nsd
                       if(abs(e1(i)).gt.bigcomp) then
                          ivec(idom)=0
                          bigcomp=abs(e1(i))
                          idom=i
                          ivec(i)=1
                       endif
                    enddo
c seed the e2 search with a component not already claimed by e1
                    if(idom.ne.1) then
                       idom=1  ! assume x-comp dominates e2
                    else
                       idom=3  ! assume z-comp dominates e2
                    endif
                    bigcomp=abs(e2(idom))

                    ivec(idom)=2
                    do i = 1, nsd
                       if(abs(e2(i)).gt.bigcomp) then
                          if(ivec(i).eq.1) cycle
                          ivec(idom)=0
                          bigcomp=abs(e2(i))
                          idom=i
                          ivec(i)=2
                       endif
                    enddo
                    ! now we know which components dominate each vector
                    ixset=0  !
                    iyset=0  ! initialize
                    izset=0  !
                    if(ivec(1).ne.0) ixset=1
                    if(ivec(2).ne.0) iyset=1
                    if(ivec(3).ne.0) izset=1
                    ncomp=ixset+iyset+izset
c exactly two components must be constrained for a two-wall node
                    if(ncomp.ne.2)
     &                 write(*,*) 'WARNING: TROUBLE IN GENBC'
                    BCvecs(1,1:3)=e1(:)
                    BCvecs(2,1:3)=e2(:)
                    if((ixset.eq.1).and.(iyset.eq.1)) then
                       BCtmp(nn,4:6)=BCvecs(ivec(1),1:3)
                       BCtmp(nn,8:10)=BCvecs(ivec(2),1:3)
                    else if((ixset.eq.1).and.(izset.eq.1)) then
                       BCtmp(nn,4:6)=BCvecs(ivec(1),1:3)
                       BCtmp(nn,8:10)=BCvecs(ivec(3),1:3)
                    else if((iyset.eq.1).and.(izset.eq.1)) then
                       BCtmp(nn,4:6)=BCvecs(ivec(2),1:3)
                       BCtmp(nn,8:10)=BCvecs(ivec(3),1:3)
                    endif
                    iBC(nn)=iBC(nn)+ixset*8+iyset*16+izset*32
                    BCtmp(nn,7)=zero
                    BCtmp(nn,11)=zero
                    wnrm(nn,:)=BCtmp(nn,4:6)+BCtmp(nn,8:10)
                 else if(nsurf(nn).gt.2) then
c If this node is shared by more than two modeled walls, then
c it is a corner node and it will be treated as having no slip
c The normals for all surfaces beyond the first two were
c contributed to the first vector of BCtmp
                    wnrm(nn,:)=BCtmp(nn,4:6)+BCtmp(nn,8:10)
                    iBC(nn)=iBC(nn)+56
                    BCtmp(nn,7)=zero
                 endif  ! curved surface
              else if(abs(itwmod).eq.2) then ! effective viscosity wall-model
c Otherwise, we're using the effective-viscosity wall-model and the
c nodes on modeled surfaces are treated as no-slip
                 iBC(nn)=iBC(nn)+56  ! set 3-comp
                 if(itwmod.eq.-2) BCtmp(nn,7)=zero  ! no slip
                 if(nsurf(nn).eq.1)
     &              wnrm(nn,:)=BCtmp(nn,4:6)
                 if(nsurf(nn).ge.2)
     &              wnrm(nn,:)=BCtmp(nn,4:6)+BCtmp(nn,8:10)
              endif
c Now normalize the wall normal for this node
              if(itwmod.ne.0) then
                 wmag=sqrt(wnrm(nn,1)*wnrm(nn,1)
     &               +wnrm(nn,2)*wnrm(nn,2)+wnrm(nn,3)*wnrm(nn,3))
                 wnrm(nn,:)=wnrm(nn,:)/wmag
              endif
c
c.... put back the comp3 info for bctmp
c
              if(ikp.eq.1) then
                 iBC(nn)=iBCSAV+56  ! put it back to a comp3
                 BCtmp(nn,:)=BCtmpSAV(nn,:)  ! ditto
              endif
           enddo  ! loop over all nodes
        endif  ! end "if there are any surfID's"
c
c If you are using the turbulence wall with axisymmetry you
c need to modify the axisymmetry angle to account for the discrete
c normals at the wall being different than the exact normals
c
c find the my normal, my partners normal and correct the angle
c$$$
c$$$        do i=1,numnp
c$$$           wmag = wnrm(i,1) * wnrm(i,1)
c$$$     &          + wnrm(i,2) * wnrm(i,2)
c$$$     &          + wnrm(i,3) * wnrm(i,3)
c$$$c
c$$$c only "fix" wall nodes...other nodes still have a zero in wnrm
c$$$c
c$$$           if((btest(iBC(i),12)).and.(wmag.ne.zero)) then
c$$$              BCtmp(i,1)=acos( wnrm(i,1) * wnrm(iper(i),1)
c$$$     &                       + wnrm(i,2) * wnrm(iper(i),2)
c$$$     &                       + wnrm(i,3) * wnrm(iper(i),3) )
c$$$           endif
c$$$        enddo
c
c.... return
c
        return
c
        end


        subroutine gensidcount(nsidg)
c---------------------------------------------------------------------
c
c This routine counts up the total number of surface ID's across
c all processors and makes a list of them
c
c Inputs:
c   iBCB       natural boundary condition switches and surfIDs
c
c Outputs:
c   nsidg      number of surface ID's globally (including all procs)
c   sidmapg    global list of surface ID's, lowest to highest
c
c---------------------------------------------------------------------
        use pointer_data  ! access to miBCB
        use turbSA        ! access to sidmapg
        include "common.h"
        include "mpif.h"
c
        integer newflag, i, j
        integer nsidl        ! number of surface IDs on-proc
        integer nsidt        ! sum of all nsidl's
        integer nsidg        ! number of surface IDs globally
        integer nsid(numpe)  ! array of nsidl's
        integer idisp(numpe) ! needed by mpi: see note below
        type llnod           ! structure for one node in a linked list
           integer :: value
           type (llnod), pointer :: next
        end type llnod
        type (llnod), pointer :: sidlist ! points to first elt of linked list
        type (llnod), pointer :: sidelt  ! points to generic elt of linked list
        type (llnod), pointer :: nextelt ! points to generic elt of linked list
        integer, allocatable :: sidmapl(:) ! list of surfID's on-proc
        integer, allocatable :: temp(:)    ! temp space
c        integer iBCB(numelb,ndiBCB)  ! column 1: naturalBC switches
                                      ! column 2: surface ID's
c Since we don't know a priori how many surface ID's there are,
c on-processor or globally, we will store the ID's as an open-ended
c link list while we determine the total number of distinct ID's
        allocate (sidlist)   ! allocate the first element of the sid
                             ! linked list and point sidlist to it
        nsidl=0              ! initialize # of sids on this processor
        nsidg=0
        nullify(sidlist % next)  ! next does not exist yet
        do iblk=1, nelblb    ! loop over boundary element blocks
           npro = lcblkb(1,iblk+1)-lcblkb(1,iblk)
           do i = 1, npro    ! loop over boundary elements (belts)
              iBCB2=miBCB(iblk)%p(i,2)
c NOTE(review): iBCB2 is an integer compared against the real
c constant 'zero' -- relies on implicit conversion; confirm intent
              if(iBCB2.ne.zero) then ! if a sid is given for this belt
                 if(nsidl.eq.0) then ! if this is the first sid we've seen
                    nsidl=1          ! increment our count and
                    sidlist % value = iBCB2 ! record its value
                    nullify(sidlist % next) ! next does not exist yet
                 else                ! if this isn't the first sid
                    newflag = 1      ! assume this is a new sid
                    sidelt => sidlist ! starting with the first sid
                    do j = 1, nsidl  ! check the assumption
                       if((sidelt % value).eq.iBCB2) then
                          newflag=0  ! ...
                       endif
                       if(j.ne.nsidl) then ! ...
                          sidelt => sidelt % next
                       endif ! ...
                    enddo
c sidelt is now left at the tail of the list, ready for appending
                    if(newflag.eq.1) then ! if it really is new to us
                       nsidl = nsidl + 1  ! increment our count
                       allocate (sidelt % next) ! tack a new element to the end
                       sidelt => sidelt % next  ! point to the new element
                       sidelt % value = iBCB2   ! record the new sid
                       nullify(sidelt % next)   ! next does not exist yet
                    endif ! (really is a new sid)
                 endif    ! (first sid)
              endif       ! (belt has a sid)
           enddo          ! (loop over belts)
        enddo             ! (loop over boundary element blocks)
c Copy the data from the linked list to a more easily-used array form
        if(nsidl.gt.0) then
           allocate( sidmapl(nsidl) )
           sidelt => sidlist  ! starting with the first sid
           do j = 1, nsidl
              sidmapl(j)=sidelt%value
              if(j.ne.nsidl) sidelt => sidelt%next
           enddo
        else
           allocate( sidmapl(1)) ! some compilers/MPI don't like to send unallocated arrays
        endif
! Deallocate the link list
! http://stackoverflow.com/questions/9184675/how-does-fortran-deallocate-linked-lists
        sidelt => sidlist
        nextelt => sidelt % next
        do
           deallocate(sidelt)
           if( .not. associated(nextelt) ) exit
           sidelt => nextelt
           nextelt => sidelt % next
        enddo

c Gather the number of surface ID's on each processor
        if (numpe.gt.1) then ! multiple processors
c write the number of surfID's on the jth processor to slot j of nsid
           call MPI_ALLGATHER(nsidl,1,MPI_INTEGER,nsid,1,
     &          MPI_INTEGER,MPI_COMM_WORLD,ierr)
c count up the total number of surfID's among all processes
           nsidt=0
           do j=1,numpe
              nsidt=nsidt+nsid(j)
           enddo
        else                 ! single processor
c the local information is the global information for single-processor
           nsid=nsidl
           nsidt=nsidl
        endif                ! if-else for multiple processors
        if(nsidt.gt.0) then
c
c Make all-processor surfID collage
c
c there will be some duplicate surface ID's when we gather, so we
c will use a temporary array
           allocate (temp(nsidt))
           if (numpe.gt.1) then ! multiple processors
c we will gather surfID's from local on-proc sets to a global set
c we will stack each processor's surfID list atop that of the previous
c processor. If the list for processor i is called sidmapi, then our
c global coordinate list sidmap will look like this:
c     ---------------------------------------------------------------
c     | sidmap1 | sidmap2 | sidmap3 | ... |
c     ---------------------------------------------------------------
c      <---nsid(1)---> <-----nsid(2)-----> <-nsid(3)->
c      <------------------------nsidt-----------------------...---->
c To accomplish this with MPI, we use MPI_ALLGATHERV, summarized as:
cMPI_ALLGATHERV(sendbuf,sendcount,sendtype,recvbuf,recvcount,disp,recvtype,comm)
c[ IN sendbuf] starting address of send buffer (choice)
c[ IN sendcount] number of elements in send buffer (integer)
c[ IN sendtype] data type of send buffer elements (handle)
c[ OUT recvbuf] address of receive buffer (choice)
c[ IN recvcount] number of elements received from each process (int array)
c[ IN disp] displacement array
c[ IN recvtype] data type of receive buffer elements (handle)
c[ IN comm] communicator (handle)
c The displacement array disp is an array of integers in which the jth
c entry indicates which slot of sidmap marks beginning of sidmapj
c So, first we will build this displacement array
              idisp(:)=0  ! starting with zero, since MPI likes C-numbering
              do j=2,numpe
                 idisp(j)=idisp(j-1)+nsid(j-1)  ! see diagram above
              enddo
c Now, we gather the data
              call MPI_ALLGATHERV(sidmapl(:),nsidl,
     &             MPI_INTEGER,temp(:),nsid,idisp,
     &             MPI_INTEGER,MPI_COMM_WORLD,ierr)
c sort surfID's, lowest to highest (simple bubble sort; the list of
c distinct surface IDs is expected to be short)
              isorted = 0
              do while (isorted.eq.0) ! while the list isn't sorted
                 isorted = 1          ! assume the list is sorted this time
                 do j = 2, nsidt      ! loop over ID's
                    if(temp(j).lt.temp(j-1)) then ! ID below predecessor
                       itmp=temp(j-1)             ! so swap the pair
                       temp(j-1)=temp(j)
                       temp(j)=itmp
                       isorted=0      ! not sorted yet
                    endif
                 enddo                ! loop over ID's
              enddo                   ! while not sorted
c determine the total number of distinct surfID's, globally
              nsidg=nsidt  ! assume there are no duplicate SurfID's
              do j = 2, nsidt
                 if(temp(j).eq.temp(j-1)) nsidg = nsidg - 1 ! correction
              enddo
c create duplicate-free surfID list
              allocate( sidmapg(nsidg) )
              sidmapg(1)=temp(1)
              nsidg = 1
              do j = 2, nsidt
                 if(temp(j).ne.temp(j-1)) then
                    nsidg = nsidg + 1
                    sidmapg(nsidg)=temp(j)
                 endif
              enddo
              deallocate( temp )
           else ! single-processor
c global data is local data in single processor case
              nsidg=nsidl
              allocate( sidmapg(nsidg) )
              sidmapg=sidmapl
              deallocate(sidmapl)
           endif ! if-else multiple processor
c
        endif ! if at least one surfid
c
        return
c
        end



        subroutine genotwn(x, BCtmp, iBC, nsurf)
c
c----------------------------------------------------------------------
c This routine determines which node to use as the first node off the
c wall in near-wall modeling traction calculations for each wall node.
c Each wall node has a normal, calculated from the wall elements to
c which that node belongs. We seek the node within the boundary
c element that lies closest to the line defined by the normal vector.
c We create normalized vectors pointing from the wall node in
c question to each of the nodes in the boundary element. The vector
c that has the largest projection onto the wall node's normal vector
c points to the node we want. Nodes that are not on a wall point to
c themselves as their own off-the-wall node.
c
c input:
c     x     (nshg,3)     :         : nodal position vectors
c     wnrm  (nshg,3)     : (mod)   : normal vectors for each node
c     iBCB5 (nshg)       : (file)  : wall flag for belt
c     ienb  (numelb,nen) : (file)  : connectivity for belts
c
c output:
c     otwn  (nshg)       : (mod)   : off-the-wall nodes for each node
c
c
c Kenneth Jansen, Summer 2000 (algorithm)
c Michael Yaworski, Summer 2000 (code)
c----------------------------------------------------------------------
c
        use pointer_data  ! used for mienb, miBCB
        use turbSA
        include "common.h"
c
        integer iel, nod, can
        real*8 vec(3), leng, dp, bigdp, lil
        real*8 x(numnp,nsd),BCtmp(nshg,ndof+7)
        integer iBC(nshg), nsurf(nshg)
        integer gbits
        integer, allocatable :: ienb(:)

        allocate( otwn(nshg) )
c
c  initialize otwn to oneself
c
        do nod = 1, nshg
           otwn(nod)=nod
        enddo
c
c  determine otwn for each wall node
c
        do iblk=1, nelblb  ! loop over boundary element blocks
           npro = lcblkb(1,iblk+1)-lcblkb(1,iblk)
           nenl = lcblkb(5,iblk)
           nenbl = lcblkb(6,iblk)
           nshl = lcblkb(9,iblk)
           allocate( ienb(nshl) )
           do i = 1, npro  ! loop over boundary elements
              iBCB1=miBCB(iblk)%p(i,1)
              iBCB2=miBCB(iblk)%p(i,2)
              ienb(1:nshl)=mienb(iblk)%p(i,1:nshl)
              if (btest(iBCB1,4)) then  ! (on the wall)
                 do nod = 1, nenbl  ! for each wall node in this belt
                    bigdp = zero    ! initialize largest dot product
c candidates are the element nodes beyond the boundary face,
c i.e. the interior nodes of this boundary element
                    do can=nenbl+1,nenl ! loop over off-the-wall candidates
                       nn=ienb(can)
                       ! don't bother with wall nodes
                       if(nsurf(nn).ne.0) cycle
                       ! don't bother with no-slip nodes
                       if(ibits(iBC(nn),3,3).eq.7 .and.
     &                    BCtmp(nn,7).eq.zero) cycle
c                      ! calculate candidate vector
                       vec(:)=x(ienb(can),:)-x(ienb(nod),:)
                       leng=sqrt(vec(1)**2+vec(2)**2+vec(3)**2)
                       vec(:)=vec(:)/leng
c                      ! calculate dot product with wnrm
                       vec(:)=vec(:)*wnrm(ienb(nod),:)
                       dp=vec(1)+vec(2)+vec(3)
c                      ! set candidate as otwn if necessary
c                      ! (wnrm points into fluid, so
c                      ! we want the most positive dp)
                       if (dp.gt.bigdp) then
                          otwn(ienb(nod)) = ienb(can)
                          bigdp=dp
                       endif
                    enddo !(loop over off-the-wall candidates)
                 enddo !(loop over wall nodes in current belt)
              endif
           enddo !(loop over elts in block)
           deallocate(ienb)
        enddo !(loop over boundary element blocks)
c fallback: any modeled-wall node still pointing at itself gets the
c globally nearest admissible off-wall node (brute-force search)
        do nn = 1, nshg
           if((otwn(nn).eq.nn).and.(nsurf(nn).ne.0)) then ! if a node on a
                                     ! modeled surface
                                     ! didn't find a node
                                     ! off the wall
              lil = 1.0e32  ! distance to current closest prospect
              do can = 1, nshg  ! loop over nodes
                 if(nsurf(can).eq.0) then  ! if this candidate is off the
                                           ! wall
                    if(ibits(iBC(can),3,3).eq.7 .and.
     &                 BCtmp(can,7).eq.zero) then ! no slip nodes not allowed
                    else  ! not a forbidden node
                       vec(:)=x(nn,:)-x(can,:)
c squared distance is sufficient for the comparison (no sqrt needed)
                       leng=vec(1)**2+vec(2)**2+vec(3)**2
                       if(leng.lt.lil) then ! this candidate is closest so far
                          lil=leng
                          otwn(nn)=can
                       endif  ! closest so far
                    endif  ! end of no slip nodes
                 endif  ! off-the-wall check
              enddo  ! loop over nodes
           endif  ! lonely wall-node check
        enddo  ! loop over nodes
c
        return
c
        end