xref: /phasta/phSolver/common/turbsa.f (revision 712d3df0b59ebebaaeaea358162c8d2c043c6e08)
c-----------------------------------------------------------------------
c
c     Spalart-Allmaras turbulence model constants
c
c-----------------------------------------------------------------------
6      module turbSA
7
8      real*8, allocatable ::  d2wall(:)
9      real*8, allocatable ::  wnrm(:,:)
10      integer, allocatable :: otwn(:)
11      real*8  saCb1, saCb2, saCw1, saCw2, saCw3, saCv1, saSigma,
12     &        saKappa, saKappaP2Inv, saCv2P3, saCw3P6, saSigmaInv
13      integer, allocatable :: sidmapg(:) ! list of all surfID's, low to high
14
15      parameter (
16     &     saCb1        = 0.1355d0,
17     &     saCb2        = 0.622d0,
18     &     saCw1        = 3.239067817d0,
19     &     saCw2        = 0.3d0,
20     &     saCw3        = 2.0d0,
21     &     saKappa      = 0.41d0,
22     &     saSigma      = 0.666666666666666667d0,
23     &     saCv1        = 7.1d0,
24     &     saKappaP2Inv = 5.94883997620464d0,
25     &     saCv1P3      = 3.579109999999999d+02,
26     &     saCw3P6      = 64.0d0,
27     &     saSigmaInv   = 1.50d0
28     &     )
29
30
31      end module
32
c-----------------------------------------------------------------------
c
c     Initialize: compute the distance to the nearest wall for
c     each vertex in the mesh.
c
c     Michael Yaworski (fall 1998)
c
c-----------------------------------------------------------------------
      subroutine initTurb( x )
c-----------------------------------------------------------------------
c  Fill the module array d2wall (one entry per mesh node) with the
c  distance from each vertex to the nearest no-slip wall node.
c
c  input:
c     x(numnp,nsd)   nodal coordinates
c
c  The field is first read from disk via read_d2wall; only when that
c  fails (ifoundd2wall == 0) is it recomputed by a brute-force search:
c  wall nodes are identified from the boundary-element blocks, their
c  coordinates are gathered across all MPI ranks, and every node is
c  tested against every wall node.
c
c  NOTE(review): numnp, nsd, numpe, myrank, master, nelblb, lcblkb,
c  miBCB, mienb, etc. come from common.h / pointer_data; undeclared
c  locals (nwalli, nwallt, markNeeded, distance, ...) rely on the
c  implicit typing established there -- presumably real*8 for
c  'distance' and integer for the counters; confirm against common.h.
c-----------------------------------------------------------------------

      use     pointer_data
      use     turbSA
      include "common.h"
      include "mpif.h"

      character(len=20) fname1,  fmt1
      real*8   x(numnp,nsd)
      integer  nwall(numpe),      idisp(numpe), npwmark(numnp)
      character(len=5)  cname
      real*8, allocatable :: xwi(:,:), xw(:,:)

!MR CHANGE
      integer :: ierr
      integer :: numparts, nfile, itmp2, id2wall, itwo, ifoundd2wall
      integer iarray(10)
      character(len=255) :: fnamer, fname2, temp2
      character(len=64) :: temp1
!MR CHANGE END

      allocate ( d2wall(numnp) )

!MR CHANGE
      if(myrank.eq.master) then
        write (*,*) 'entering initTurb'
      endif
      call MPI_BARRIER(MPI_COMM_WORLD,ierr)

      ! First we try to read the d2wall field from either the restart files or the d2wall files
      call read_d2wall(myrank,numnp,d2wall,ifoundd2wall)
!MR CHANGE END

      if(ifoundd2wall.eq.0) then   ! d2wall was not found so calculate the distance
        if(myrank.eq.master) then
          write (*,*) 'Computing the d2wall field'
        endif
c
c   hard code trip point until we are sure it is worth doing
c

c
c.... find the points that are on the wall and mark npwmap with a 1 if they are
c
c  bit 4 of the first BC code of a boundary element apparently flags a
c  no-slip wall face -- confirm against the BCB encoding in common.h
         npwmark(1:numnp)=0
         do iblk = 1, nelblb    ! loop over boundary elt blocks
            npro = lcblkb(1,iblk+1) - lcblkb(1,iblk)
            nenbl  = lcblkb(6,iblk) ! no. of vertices per bdry. face
            do j = 1, npro
               if(btest(miBCB(iblk)%p(j,1),4)) then
                  do k=1,nenbl
                    npw=mienb(iblk)%p(j,k)
                    npwmark(npw)=1
                  enddo
                endif
            enddo
         enddo
c  nwalli = number of wall nodes on this rank; if this rank has none,
c  a single sentinel point (coords 1.0e32) is sent so the collective
c  MPI gathers below still have a non-empty buffer on every rank
         nwalli=sum(npwmark(1:numnp))
         markNeeded=1
         if (nwalli.eq.0) then
             markNeeded=0
             nwalli=1 !  patch for mpi's lack of imagination
         endif
         allocate (xwi(nwalli,nsd))

         if(markneeded.eq.1) then
c  compress the marked nodes' coordinates into xwi (nwalli is rebuilt
c  as the running count and ends at the same value computed above)
           nwalli=0
           do i = 1,numnp
              if(npwmark(i).eq.1)  then
                nwalli=nwalli+1
                xwi(nwalli,1:nsd)=x(i,1:nsd)
              endif
           enddo
         else
           xwi=1.0e32 ! fix for mpi's lack of imagination
         endif
c
c  Pool "number of welts" info from all processors
c
cMPI_ALLGATHER(sendbuf,sendcount,sendtype,recvbuf,recvcount,recvtype,comm)
c[ IN sendbuf] starting address of send buffer (choice)
c[ IN sendcount] number of elements in send buffer (integer)
c[ IN sendtype] data type of send buffer elements (handle)
c[ OUT recvbuf] address of receive buffer (choice)
c[ IN recvcount] number of elements received from any process (integer)
c[ IN recvtype] data type of receive buffer elements (handle)
c[ IN comm] communicator (handle)
         if (numpe.gt.1) then   ! multiple processors
c write the number of wall elts on the jth processor to slot j of nwall
            call MPI_ALLGATHER(nwalli,1,MPI_INTEGER,nwall,1,
     &          MPI_INTEGER,MPI_COMM_WORLD,ierr)
c count up the total number of wall elts among all processes
            nwallt=0
            do j=1,numpe
               nwallt=nwallt+nwall(j)
            enddo
         else                   ! single processor
c the local information is the global information for single-processor
            nwall=nwalli
            nwallt=nwalli
         endif                  ! if-else for multiple processors
c
c  Make all-processor wallnode-coord collage
c
         allocate (xw(nwallt,nsd))
         if (numpe.gt.1) then   ! multiple processors
c we will gather coordinates from local on-proc sets to a global set
c we will stack each processor's coordinate list atop that of the
c previous processor.  If the coordinate list for processor i is
c called xwi, then our global coordinate list xw will look like this:
c ---------------------------------------------------------------
c | xw1            | xw2                | xw3        |   ...    |
c ---------------------------------------------------------------
c  <---nwall(1)---> <-----nwall(2)-----> <-nwall(3)->
c  <------------------------nwallt-----------------------...---->
c To accomplish this with MPI, we use MPI_ALLGATHERV, summarized as:
cMPI_ALLGATHERV(sendbuf,sendcount,sendtype,recvbuf,recvcount,disp,recvtype,comm)
c[ IN sendbuf] starting address of send buffer (choice)
c[ IN sendcount] number of elements in send buffer (integer)
c[ IN sendtype] data type of send buffer elements (handle)
c[ OUT recvbuf] address of receive buffer (choice)
c[ IN recvcount] number of elements received from any process (integer)
c[ IN disp] displacement array
c[ IN recvtype] data type of receive buffer elements (handle)
c[ IN comm] communicator (handle)
c The displacement array disp is an array of integers in which the jth
c entry indicates which slot of xw marks beginning of xwj
c So, first we will build this displacement array
            idisp(:)=0 ! starting with zero, since MPI likes C-numbering
            do j=2,numpe
               idisp(j)=idisp(j-1)+nwall(j-1) ! see diagram above
            enddo
c gather column-by-column; NOTE(review): xwi(:,k)/xw(:,k) are strided
c array sections -- the compiler passes copy-in/copy-out temporaries,
c which is correct here but costs one buffer copy per dimension
            do k=1,nsd
               call MPI_ALLGATHERV(xwi(:,k),nwalli,
     &              MPI_DOUBLE_PRECISION,xw(:,k),nwall,idisp,
     &              MPI_DOUBLE_PRECISION,MPI_COMM_WORLD,ierr)
            enddo
         else                   ! single-processor
c global data is local data in single processor case
            xw=xwi
         endif

c
c  For each point, loop over wall nodes and calculate distance;
c  save the distance in this node's position of d2wall if it's
c  shorter than currently stored distance
c
c  d2wall holds SQUARED distances inside this loop; the sqrt below
c  converts to true distance once, after the minimum is found.
c  Sentinel wall points at 1.0e32 can never win the minimum test.
         d2wall=1.0e32
         do i=1,numnp
            do j=1, nwallt
               distance =  ( x(i,1) - xw(j,1) )**2
     &              +( x(i,2) - xw(j,2) )**2
     &              +( x(i,3) - xw(j,3) )**2
               if ( d2wall(i).gt.distance ) d2wall(i) = distance
            enddo
         enddo
         d2wall=sqrt(d2wall)
c
         deallocate(xwi)
         deallocate(xw)
      endif

      call MPI_BARRIER(MPI_COMM_WORLD,ierr)
      if(myrank.eq.master) then
        write (*,*) 'leaving initTurb'
      endif

      return
c  NOTE(review): labels 995/996 are unreachable dead code -- no I/O
c  statement in this routine references them any more; presumably
c  leftovers from open statements removed in the "MR CHANGE" rework.
995     call error ('d2wall  ','opening ', 72)
996     call error ('d2wall  ','opening ', 72)

      end subroutine
213
214
215