xref: /petsc/src/ksp/pc/impls/wb/wb.c (revision 7cf18bfc0af3bd20f57ca1ab9dbfc3d774cdee5d)
1 
2 #include <petscpcmg.h>   /*I "petscpcmg.h" I*/
3 #include <petscdmda.h>   /*I "petscdmda.h" I*/
4 #include <../src/ksp/pc/impls/mg/mgimpl.h>
5 
/* Private context for the PCEXOTIC preconditioner (a two-level PCMG with a special coarse space) */
typedef struct {
  PCExoticType type;         /* which coarse space: PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET */
  Mat          P;            /* the constructed interpolation matrix */
  PetscBool    directSolve;  /* use direct LU factorization to construct interpolation */
  KSP          ksp;          /* iterative solver used to construct the interpolation when directSolve is false */
} PC_Exotic;
12 
13 const char *PCExoticTypes[] = {"face","wirebasket","PCExoticType","PC_Exotic",0};
14 
15 
16 #undef __FUNCT__
17 #define __FUNCT__ "DMDAGetWireBasketInterpolation"
18 /*
19       DMDAGetWireBasketInterpolation - Gets the interpolation for a wirebasket based coarse space
20 
21 */
22 PetscErrorCode DMDAGetWireBasketInterpolation(DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
23 {
24   PetscErrorCode         ierr;
25   PetscInt               dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
26   PetscInt               mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[26],*globals,Ng,*IIint,*IIsurf,Nt;
27   Mat                    Xint, Xsurf,Xint_tmp;
28   IS                     isint,issurf,is,row,col;
29   ISLocalToGlobalMapping ltg;
30   MPI_Comm               comm;
31   Mat                    A,Aii,Ais,Asi,*Aholder,iAii;
32   MatFactorInfo          info;
33   PetscScalar            *xsurf,*xint;
34 #if defined(PETSC_USE_DEBUG_foo)
35   PetscScalar            tmp;
36 #endif
37   PetscTable             ht;
38 
39   PetscFunctionBegin;
40   ierr = DMDAGetInfo(da,&dim,0,0,0,&mp,&np,&pp,&dof,0,0,0,0,0);CHKERRQ(ierr);
41   if (dof != 1) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only for single field problems");
42   if (dim != 3) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only coded for 3d problems");
43   ierr = DMDAGetCorners(da,0,0,0,&m,&n,&p);CHKERRQ(ierr);
44   ierr = DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);CHKERRQ(ierr);
45   istart = istart ? -1 : 0;
46   jstart = jstart ? -1 : 0;
47   kstart = kstart ? -1 : 0;
48 
49   /*
50     the columns of P are the interpolation of each coarse grid point (one for each vertex and edge)
51     to all the local degrees of freedom (this includes the vertices, edges and faces).
52 
53     Xint are the subset of the interpolation into the interior
54 
55     Xface are the interpolation onto faces but not into the interior
56 
57     Xsurf are the interpolation onto the vertices and edges (the surfbasket)
58                                         Xint
59     Symbolically one could write P = (  Xface  ) after interchanging the rows to match the natural ordering on the domain
60                                         Xsurf
61   */
62   N     = (m - istart)*(n - jstart)*(p - kstart);
63   Nint  = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
64   Nface = 2*( (m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart) );
65   Nwire = 4*( (m-2-istart) + (n-2-jstart) + (p-2-kstart) ) + 8;
66   Nsurf = Nface + Nwire;
67   ierr = MatCreateSeqDense(MPI_COMM_SELF,Nint,26,PETSC_NULL,&Xint);CHKERRQ(ierr);
68   ierr = MatCreateSeqDense(MPI_COMM_SELF,Nsurf,26,PETSC_NULL,&Xsurf);CHKERRQ(ierr);
69   ierr = MatGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
70 
71   /*
72      Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
73      Xsurf will be all zero (thus making the coarse matrix singular).
74   */
75   if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
76   if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
77   if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");
78 
79   cnt = 0;
80   xsurf[cnt++] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + Nsurf] = 1;} xsurf[cnt++ + 2*Nsurf] = 1;
81   for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 3*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 4*Nsurf] = 1;} xsurf[cnt++ + 5*Nsurf] = 1;}
82   xsurf[cnt++ + 6*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 7*Nsurf] = 1;} xsurf[cnt++ + 8*Nsurf] = 1;
83   for (k=1;k<p-1-kstart;k++) {
84     xsurf[cnt++ + 9*Nsurf] = 1;  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 10*Nsurf] = 1;}  xsurf[cnt++ + 11*Nsurf] = 1;
85     for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 12*Nsurf] = 1; /* these are the interior nodes */ xsurf[cnt++ + 13*Nsurf] = 1;}
86     xsurf[cnt++ + 14*Nsurf] = 1;  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 15*Nsurf] = 1;} xsurf[cnt++ + 16*Nsurf] = 1;
87   }
88   xsurf[cnt++ + 17*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 18*Nsurf] = 1;} xsurf[cnt++ + 19*Nsurf] = 1;
89   for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 20*Nsurf] = 1;  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 21*Nsurf] = 1;} xsurf[cnt++ + 22*Nsurf] = 1;}
90   xsurf[cnt++ + 23*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 24*Nsurf] = 1;} xsurf[cnt++ + 25*Nsurf] = 1;
91 
92   /* interpolations only sum to 1 when using direct solver */
93 #if defined(PETSC_USE_DEBUG_foo)
94   for (i=0; i<Nsurf; i++) {
95     tmp = 0.0;
96     for (j=0; j<26; j++) {
97       tmp += xsurf[i+j*Nsurf];
98     }
99     if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %G",i,PetscAbsScalar(tmp));
100   }
101 #endif
102   ierr = MatRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
103   /* ierr = MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);*/
104 
105 
106   /*
107        I are the indices for all the needed vertices (in global numbering)
108        Iint are the indices for the interior values, I surf for the surface values
109             (This is just for the part of the global matrix obtained with MatGetSubMatrix(), it
110              is NOT the local DMDA ordering.)
111        IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
112   */
113 #define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
114   ierr = PetscMalloc3(N,PetscInt,&II,Nint,PetscInt,&Iint,Nsurf,PetscInt,&Isurf);CHKERRQ(ierr);
115   ierr = PetscMalloc2(Nint,PetscInt,&IIint,Nsurf,PetscInt,&IIsurf);CHKERRQ(ierr);
116   for (k=0; k<p-kstart; k++) {
117     for (j=0; j<n-jstart; j++) {
118       for (i=0; i<m-istart; i++) {
119         II[c++] = i + j*mwidth + k*mwidth*nwidth;
120 
121         if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
122           IIint[cint]  = i + j*mwidth + k*mwidth*nwidth;
123           Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
124         } else {
125           IIsurf[csurf]  = i + j*mwidth + k*mwidth*nwidth;
126           Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
127         }
128       }
129     }
130   }
131   if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
132   if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
133   if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
134   ierr = DMGetLocalToGlobalMapping(da,&ltg);CHKERRQ(ierr);
135   ierr = ISLocalToGlobalMappingApply(ltg,N,II,II);CHKERRQ(ierr);
136   ierr = ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);CHKERRQ(ierr);
137   ierr = ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);CHKERRQ(ierr);
138   ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
139   ierr = ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
140   ierr = ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);CHKERRQ(ierr);
141   ierr = ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);CHKERRQ(ierr);
142   ierr = PetscFree3(II,Iint,Isurf);CHKERRQ(ierr);
143 
144   ierr = MatGetSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);CHKERRQ(ierr);
145   A    = *Aholder;
146   ierr = PetscFree(Aholder);CHKERRQ(ierr);
147 
148   ierr = MatGetSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);CHKERRQ(ierr);
149   ierr = MatGetSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);CHKERRQ(ierr);
150   ierr = MatGetSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);CHKERRQ(ierr);
151 
152   /*
153      Solve for the interpolation onto the interior Xint
154   */
155   ierr = MatDuplicate(Xint,MAT_DO_NOT_COPY_VALUES,&Xint_tmp);CHKERRQ(ierr);
156   ierr = MatMatMult(Ais,Xsurf,MAT_REUSE_MATRIX,PETSC_DETERMINE,&Xint_tmp);CHKERRQ(ierr);
157   ierr = MatScale(Xint_tmp,-1.0);CHKERRQ(ierr);
158   if (exotic->directSolve) {
159     ierr = MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);CHKERRQ(ierr);
160     ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
161     ierr = MatGetOrdering(Aii,MATORDERINGND,&row,&col);CHKERRQ(ierr);
162     ierr = MatLUFactorSymbolic(iAii,Aii,row,col,&info);CHKERRQ(ierr);
163     ierr = ISDestroy(&row);CHKERRQ(ierr);
164     ierr = ISDestroy(&col);CHKERRQ(ierr);
165     ierr = MatLUFactorNumeric(iAii,Aii,&info);CHKERRQ(ierr);
166     ierr = MatMatSolve(iAii,Xint_tmp,Xint);CHKERRQ(ierr);
167     ierr = MatDestroy(&iAii);CHKERRQ(ierr);
168   } else {
169     Vec         b,x;
170     PetscScalar *xint_tmp;
171 
172     ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
173     ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,Nint,0,&x);CHKERRQ(ierr);
174     ierr = MatGetArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
175     ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,Nint,0,&b);CHKERRQ(ierr);
176     ierr = KSPSetOperators(exotic->ksp,Aii,Aii,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
177     for (i=0; i<26; i++) {
178       ierr = VecPlaceArray(x,xint+i*Nint);CHKERRQ(ierr);
179       ierr = VecPlaceArray(b,xint_tmp+i*Nint);CHKERRQ(ierr);
180       ierr = KSPSolve(exotic->ksp,b,x);CHKERRQ(ierr);
181       ierr = VecResetArray(x);CHKERRQ(ierr);
182       ierr = VecResetArray(b);CHKERRQ(ierr);
183     }
184     ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
185     ierr = MatRestoreArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
186     ierr = VecDestroy(&x);CHKERRQ(ierr);
187     ierr = VecDestroy(&b);CHKERRQ(ierr);
188   }
189   ierr = MatDestroy(&Xint_tmp);CHKERRQ(ierr);
190 
191 #if defined(PETSC_USE_DEBUG_foo)
192   ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
193   for (i=0; i<Nint; i++) {
194     tmp = 0.0;
195     for (j=0; j<26; j++) {
196       tmp += xint[i+j*Nint];
197     }
198     if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %G",i,PetscAbsScalar(tmp));
199   }
200   ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
201   /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
202 #endif
203 
204 
205   /*         total vertices             total faces                                  total edges */
206   Ntotal = (mp + 1)*(np + 1)*(pp + 1) + mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1) + mp*(np+1)*(pp+1) + np*(mp+1)*(pp+1) +  pp*(mp+1)*(np+1);
207 
208   /*
209       For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
210   */
211   cnt = 0;
212   gl[cnt++] = 0;  { gl[cnt++] = 1;} gl[cnt++] = m-istart-1;
213   { gl[cnt++] = mwidth;  { gl[cnt++] = mwidth+1;} gl[cnt++] = mwidth + m-istart-1;}
214   gl[cnt++] = mwidth*(n-jstart-1);  { gl[cnt++] = mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*(n-jstart-1) + m-istart-1;
215   {
216     gl[cnt++] = mwidth*nwidth;  { gl[cnt++] = mwidth*nwidth+1;}  gl[cnt++] = mwidth*nwidth+ m-istart-1;
217     { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
218     gl[cnt++] = mwidth*nwidth+ mwidth*(n-jstart-1);   { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1) + m-istart-1;
219   }
220   gl[cnt++] = mwidth*nwidth*(p-kstart-1); { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) +  m-istart-1;
221   { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth;   { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1)+mwidth+m-istart-1;}
222   gl[cnt++] = mwidth*nwidth*(p-kstart-1) +  mwidth*(n-jstart-1);  { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+ mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth*(n-jstart-1) + m-istart-1;
223 
224   /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
225   /* convert that to global numbering and get them on all processes */
226   ierr = ISLocalToGlobalMappingApply(ltg,26,gl,gl);CHKERRQ(ierr);
227   /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
228   ierr = PetscMalloc(26*mp*np*pp*sizeof(PetscInt),&globals);CHKERRQ(ierr);
229   ierr = MPI_Allgather(gl,26,MPIU_INT,globals,26,MPIU_INT,((PetscObject)da)->comm);CHKERRQ(ierr);
230 
231   /* Number the coarse grid points from 0 to Ntotal */
232   ierr = MatGetSize(Aglobal,&Nt,PETSC_NULL);CHKERRQ(ierr);
233   ierr = PetscTableCreate(Ntotal/3,Ng+1,&ht);CHKERRQ(ierr);
234   for (i=0; i<26*mp*np*pp; i++){
235     ierr = PetscTableAddCount(ht,globals[i]+1);CHKERRQ(ierr);
236   }
237   ierr = PetscTableGetCount(ht,&cnt);CHKERRQ(ierr);
238   if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
239   ierr = PetscFree(globals);CHKERRQ(ierr);
240   for (i=0; i<26; i++) {
241     ierr = PetscTableFind(ht,gl[i]+1,&gl[i]);CHKERRQ(ierr);
242     gl[i]--;
243   }
244   ierr = PetscTableDestroy(&ht);CHKERRQ(ierr);
245   /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
246 
247   /* construct global interpolation matrix */
248   ierr = MatGetLocalSize(Aglobal,&Ng,PETSC_NULL);CHKERRQ(ierr);
249   if (reuse == MAT_INITIAL_MATRIX) {
250     ierr = MatCreateMPIAIJ(((PetscObject)da)->comm,Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,PETSC_NULL,Nint+Nsurf,PETSC_NULL,P);CHKERRQ(ierr);
251   } else {
252     ierr = MatZeroEntries(*P);CHKERRQ(ierr);
253   }
254   ierr = MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
255   ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
256   ierr = MatSetValues(*P,Nint,IIint,26,gl,xint,INSERT_VALUES);CHKERRQ(ierr);
257   ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
258   ierr = MatGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
259   ierr = MatSetValues(*P,Nsurf,IIsurf,26,gl,xsurf,INSERT_VALUES);CHKERRQ(ierr);
260   ierr = MatRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
261   ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
262   ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
263   ierr = PetscFree2(IIint,IIsurf);CHKERRQ(ierr);
264 
265 #if defined(PETSC_USE_DEBUG_foo)
266   {
267     Vec         x,y;
268     PetscScalar *yy;
269     ierr = VecCreateMPI(((PetscObject)da)->comm,Ng,PETSC_DETERMINE,&y);CHKERRQ(ierr);
270     ierr = VecCreateMPI(((PetscObject)da)->comm,PETSC_DETERMINE,Ntotal,&x);CHKERRQ(ierr);
271     ierr = VecSet(x,1.0);CHKERRQ(ierr);
272     ierr = MatMult(*P,x,y);CHKERRQ(ierr);
273     ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
274     for (i=0; i<Ng; i++) {
275       if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %G",i,PetscAbsScalar(yy[i]));
276     }
277     ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
278     ierr = VecDestroy(x);CHKERRQ(ierr);
279     ierr = VecDestroy(y);CHKERRQ(ierr);
280   }
281 #endif
282 
283   ierr = MatDestroy(&Aii);CHKERRQ(ierr);
284   ierr = MatDestroy(&Ais);CHKERRQ(ierr);
285   ierr = MatDestroy(&Asi);CHKERRQ(ierr);
286   ierr = MatDestroy(&A);CHKERRQ(ierr);
287   ierr = ISDestroy(&is);CHKERRQ(ierr);
288   ierr = ISDestroy(&isint);CHKERRQ(ierr);
289   ierr = ISDestroy(&issurf);CHKERRQ(ierr);
290   ierr = MatDestroy(&Xint);CHKERRQ(ierr);
291   ierr = MatDestroy(&Xsurf);CHKERRQ(ierr);
292   PetscFunctionReturn(0);
293 }
294 
295 #undef __FUNCT__
296 #define __FUNCT__ "DMDAGetFaceInterpolation"
297 /*
298       DMDAGetFaceInterpolation - Gets the interpolation for a face based coarse space
299 
300 */
301 PetscErrorCode DMDAGetFaceInterpolation(DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
302 {
303   PetscErrorCode         ierr;
304   PetscInt               dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
305   PetscInt               mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[6],*globals,Ng,*IIint,*IIsurf,Nt;
306   Mat                    Xint, Xsurf,Xint_tmp;
307   IS                     isint,issurf,is,row,col;
308   ISLocalToGlobalMapping ltg;
309   MPI_Comm               comm;
310   Mat                    A,Aii,Ais,Asi,*Aholder,iAii;
311   MatFactorInfo          info;
312   PetscScalar            *xsurf,*xint;
313 #if defined(PETSC_USE_DEBUG_foo)
314   PetscScalar            tmp;
315 #endif
316   PetscTable             ht;
317 
318   PetscFunctionBegin;
319   ierr = DMDAGetInfo(da,&dim,0,0,0,&mp,&np,&pp,&dof,0,0,0,0,0);CHKERRQ(ierr);
320   if (dof != 1) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only for single field problems");
321   if (dim != 3) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only coded for 3d problems");
322   ierr = DMDAGetCorners(da,0,0,0,&m,&n,&p);CHKERRQ(ierr);
323   ierr = DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);CHKERRQ(ierr);
324   istart = istart ? -1 : 0;
325   jstart = jstart ? -1 : 0;
326   kstart = kstart ? -1 : 0;
327 
328   /*
329     the columns of P are the interpolation of each coarse grid point (one for each vertex and edge)
330     to all the local degrees of freedom (this includes the vertices, edges and faces).
331 
332     Xint are the subset of the interpolation into the interior
333 
334     Xface are the interpolation onto faces but not into the interior
335 
336     Xsurf are the interpolation onto the vertices and edges (the surfbasket)
337                                         Xint
338     Symbolically one could write P = (  Xface  ) after interchanging the rows to match the natural ordering on the domain
339                                         Xsurf
340   */
341   N     = (m - istart)*(n - jstart)*(p - kstart);
342   Nint  = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
343   Nface = 2*( (m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart) );
344   Nwire = 4*( (m-2-istart) + (n-2-jstart) + (p-2-kstart) ) + 8;
345   Nsurf = Nface + Nwire;
346   ierr = MatCreateSeqDense(MPI_COMM_SELF,Nint,6,PETSC_NULL,&Xint);CHKERRQ(ierr);
347   ierr = MatCreateSeqDense(MPI_COMM_SELF,Nsurf,6,PETSC_NULL,&Xsurf);CHKERRQ(ierr);
348   ierr = MatGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
349 
350   /*
351      Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
352      Xsurf will be all zero (thus making the coarse matrix singular).
353   */
354   if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
355   if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
356   if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");
357 
358   cnt = 0;
359   for (j=1;j<n-1-jstart;j++) {  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 0*Nsurf] = 1;} }
360    for (k=1;k<p-1-kstart;k++) {
361     for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 1*Nsurf] = 1;}
362     for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 2*Nsurf] = 1; /* these are the interior nodes */ xsurf[cnt++ + 3*Nsurf] = 1;}
363     for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 4*Nsurf] = 1;}
364   }
365   for (j=1;j<n-1-jstart;j++) {for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 5*Nsurf] = 1;} }
366 
367 #if defined(PETSC_USE_DEBUG_foo)
368   for (i=0; i<Nsurf; i++) {
369     tmp = 0.0;
370     for (j=0; j<6; j++) {
371       tmp += xsurf[i+j*Nsurf];
372     }
373     if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %G",i,PetscAbsScalar(tmp));
374   }
375 #endif
376   ierr = MatRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
377   /* ierr = MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);*/
378 
379 
380   /*
381        I are the indices for all the needed vertices (in global numbering)
382        Iint are the indices for the interior values, I surf for the surface values
383             (This is just for the part of the global matrix obtained with MatGetSubMatrix(), it
384              is NOT the local DMDA ordering.)
385        IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
386   */
387 #define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
388   ierr = PetscMalloc3(N,PetscInt,&II,Nint,PetscInt,&Iint,Nsurf,PetscInt,&Isurf);CHKERRQ(ierr);
389   ierr = PetscMalloc2(Nint,PetscInt,&IIint,Nsurf,PetscInt,&IIsurf);CHKERRQ(ierr);
390   for (k=0; k<p-kstart; k++) {
391     for (j=0; j<n-jstart; j++) {
392       for (i=0; i<m-istart; i++) {
393         II[c++] = i + j*mwidth + k*mwidth*nwidth;
394 
395         if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
396           IIint[cint]  = i + j*mwidth + k*mwidth*nwidth;
397           Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
398         } else {
399           IIsurf[csurf]  = i + j*mwidth + k*mwidth*nwidth;
400           Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
401         }
402       }
403     }
404   }
405   if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
406   if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
407   if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
408   ierr = DMGetLocalToGlobalMapping(da,&ltg);CHKERRQ(ierr);
409   ierr = ISLocalToGlobalMappingApply(ltg,N,II,II);CHKERRQ(ierr);
410   ierr = ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);CHKERRQ(ierr);
411   ierr = ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);CHKERRQ(ierr);
412   ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
413   ierr = ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
414   ierr = ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);CHKERRQ(ierr);
415   ierr = ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);CHKERRQ(ierr);
416   ierr = PetscFree3(II,Iint,Isurf);CHKERRQ(ierr);
417 
418   ierr = MatGetSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);CHKERRQ(ierr);
419   A    = *Aholder;
420   ierr = PetscFree(Aholder);CHKERRQ(ierr);
421 
422   ierr = MatGetSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);CHKERRQ(ierr);
423   ierr = MatGetSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);CHKERRQ(ierr);
424   ierr = MatGetSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);CHKERRQ(ierr);
425 
426   /*
427      Solve for the interpolation onto the interior Xint
428   */
429   ierr = MatDuplicate(Xint,MAT_DO_NOT_COPY_VALUES,&Xint_tmp);CHKERRQ(ierr);
430   ierr = MatMatMult(Ais,Xsurf,MAT_REUSE_MATRIX,PETSC_DETERMINE,&Xint_tmp);CHKERRQ(ierr);
431   ierr = MatScale(Xint_tmp,-1.0);CHKERRQ(ierr);
432 
433   if (exotic->directSolve) {
434     ierr = MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);CHKERRQ(ierr);
435     ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
436     ierr = MatGetOrdering(Aii,MATORDERINGND,&row,&col);CHKERRQ(ierr);
437     ierr = MatLUFactorSymbolic(iAii,Aii,row,col,&info);CHKERRQ(ierr);
438     ierr = ISDestroy(&row);CHKERRQ(ierr);
439     ierr = ISDestroy(&col);CHKERRQ(ierr);
440     ierr = MatLUFactorNumeric(iAii,Aii,&info);CHKERRQ(ierr);
441     ierr = MatMatSolve(iAii,Xint_tmp,Xint);CHKERRQ(ierr);
442     ierr = MatDestroy(&iAii);CHKERRQ(ierr);
443   } else {
444     Vec         b,x;
445     PetscScalar *xint_tmp;
446 
447     ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
448     ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,Nint,0,&x);CHKERRQ(ierr);
449     ierr = MatGetArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
450     ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,Nint,0,&b);CHKERRQ(ierr);
451     ierr = KSPSetOperators(exotic->ksp,Aii,Aii,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
452     for (i=0; i<6; i++) {
453       ierr = VecPlaceArray(x,xint+i*Nint);CHKERRQ(ierr);
454       ierr = VecPlaceArray(b,xint_tmp+i*Nint);CHKERRQ(ierr);
455       ierr = KSPSolve(exotic->ksp,b,x);CHKERRQ(ierr);
456       ierr = VecResetArray(x);CHKERRQ(ierr);
457       ierr = VecResetArray(b);CHKERRQ(ierr);
458     }
459     ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
460     ierr = MatRestoreArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
461     ierr = VecDestroy(&x);CHKERRQ(ierr);
462     ierr = VecDestroy(&b);CHKERRQ(ierr);
463   }
464   ierr = MatDestroy(&Xint_tmp);CHKERRQ(ierr);
465 
466 #if defined(PETSC_USE_DEBUG_foo)
467   ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
468   for (i=0; i<Nint; i++) {
469     tmp = 0.0;
470     for (j=0; j<6; j++) {
471       tmp += xint[i+j*Nint];
472     }
473     if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %G",i,PetscAbsScalar(tmp));
474   }
475   ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
476   /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
477 #endif
478 
479 
480   /*         total faces    */
481   Ntotal =  mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1);
482 
483   /*
484       For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
485   */
486   cnt = 0;
487   { gl[cnt++] = mwidth+1;}
488   {
489     { gl[cnt++] = mwidth*nwidth+1;}
490     { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
491     { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;}
492   }
493   { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;}
494 
495   /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
496   /* convert that to global numbering and get them on all processes */
497   ierr = ISLocalToGlobalMappingApply(ltg,6,gl,gl);CHKERRQ(ierr);
498   /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
499   ierr = PetscMalloc(6*mp*np*pp*sizeof(PetscInt),&globals);CHKERRQ(ierr);
500   ierr = MPI_Allgather(gl,6,MPIU_INT,globals,6,MPIU_INT,((PetscObject)da)->comm);CHKERRQ(ierr);
501 
502   /* Number the coarse grid points from 0 to Ntotal */
503   ierr = MatGetSize(Aglobal,&Nt,PETSC_NULL);CHKERRQ(ierr);
504   ierr = PetscTableCreate(Ntotal/3,Nt+1,&ht);CHKERRQ(ierr);
505   for (i=0; i<6*mp*np*pp; i++){
506     ierr = PetscTableAddCount(ht,globals[i]+1);CHKERRQ(ierr);
507   }
508   ierr = PetscTableGetCount(ht,&cnt);CHKERRQ(ierr);
509   if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
510   ierr = PetscFree(globals);CHKERRQ(ierr);
511   for (i=0; i<6; i++) {
512     ierr = PetscTableFind(ht,gl[i]+1,&gl[i]);CHKERRQ(ierr);
513     gl[i]--;
514   }
515   ierr = PetscTableDestroy(&ht);CHKERRQ(ierr);
516   /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
517 
518   /* construct global interpolation matrix */
519   ierr = MatGetLocalSize(Aglobal,&Ng,PETSC_NULL);CHKERRQ(ierr);
520   if (reuse == MAT_INITIAL_MATRIX) {
521     ierr = MatCreateMPIAIJ(((PetscObject)da)->comm,Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,PETSC_NULL,Nint,PETSC_NULL,P);CHKERRQ(ierr);
522   } else {
523     ierr = MatZeroEntries(*P);CHKERRQ(ierr);
524   }
525   ierr = MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
526   ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
527   ierr = MatSetValues(*P,Nint,IIint,6,gl,xint,INSERT_VALUES);CHKERRQ(ierr);
528   ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
529   ierr = MatGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
530   ierr = MatSetValues(*P,Nsurf,IIsurf,6,gl,xsurf,INSERT_VALUES);CHKERRQ(ierr);
531   ierr = MatRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
532   ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
533   ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
534   ierr = PetscFree2(IIint,IIsurf);CHKERRQ(ierr);
535 
536 
537 #if defined(PETSC_USE_DEBUG_foo)
538   {
539     Vec         x,y;
540     PetscScalar *yy;
541     ierr = VecCreateMPI(((PetscObject)da)->comm,Ng,PETSC_DETERMINE,&y);CHKERRQ(ierr);
542     ierr = VecCreateMPI(((PetscObject)da)->comm,PETSC_DETERMINE,Ntotal,&x);CHKERRQ(ierr);
543     ierr = VecSet(x,1.0);CHKERRQ(ierr);
544     ierr = MatMult(*P,x,y);CHKERRQ(ierr);
545     ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
546     for (i=0; i<Ng; i++) {
547       if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %G",i,PetscAbsScalar(yy[i]));
548     }
549     ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
550     ierr = VecDestroy(x);CHKERRQ(ierr);
551     ierr = VecDestroy(y);CHKERRQ(ierr);
552   }
553 #endif
554 
555   ierr = MatDestroy(&Aii);CHKERRQ(ierr);
556   ierr = MatDestroy(&Ais);CHKERRQ(ierr);
557   ierr = MatDestroy(&Asi);CHKERRQ(ierr);
558   ierr = MatDestroy(&A);CHKERRQ(ierr);
559   ierr = ISDestroy(&is);CHKERRQ(ierr);
560   ierr = ISDestroy(&isint);CHKERRQ(ierr);
561   ierr = ISDestroy(&issurf);CHKERRQ(ierr);
562   ierr = MatDestroy(&Xint);CHKERRQ(ierr);
563   ierr = MatDestroy(&Xsurf);CHKERRQ(ierr);
564   PetscFunctionReturn(0);
565 }
566 
567 
#undef __FUNCT__
#define __FUNCT__ "PCExoticSetType"
/*@
   PCExoticSetType - Sets the type of coarse grid interpolation to use

   Logically Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  type - either PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET (defaults to face)

   Notes: The face based interpolation has 1 degree of freedom per face and ignores the
     edge and vertex values completely in the coarse problem. For any seven point
     stencil the interpolation of a constant on all faces into the interior is that constant.

     The wirebasket interpolation has 1 degree of freedom per vertex, per edge and
     per face. A constant on the subdomain boundary is interpolated as that constant
     in the interior of the domain.

     The coarse grid matrix is obtained via the Galerkin computation A_c = R A R^T, hence
     if A is nonsingular A_c is also nonsingular.

     Both interpolations are suitable for only scalar problems.

   Level: intermediate


.seealso: PCEXOTIC, PCExoticType
@*/
PetscErrorCode  PCExoticSetType(PC pc,PCExoticType type)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  PetscValidLogicalCollectiveEnum(pc,type,2);
  /* dispatches to PCExoticSetType_Exotic() if the PC is of type PCEXOTIC; no-op otherwise */
  ierr = PetscTryMethod(pc,"PCExoticSetType_C",(PC,PCExoticType),(pc,type));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
607 
608 #undef __FUNCT__
609 #define __FUNCT__ "PCExoticSetType_Exotic"
610 PetscErrorCode  PCExoticSetType_Exotic(PC pc,PCExoticType type)
611 {
612   PC_MG     *mg = (PC_MG*)pc->data;
613   PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
614 
615   PetscFunctionBegin;
616   ctx->type = type;
617   PetscFunctionReturn(0);
618 }
619 
620 #undef __FUNCT__
621 #define __FUNCT__ "PCSetUp_Exotic"
622 PetscErrorCode PCSetUp_Exotic(PC pc)
623 {
624   PetscErrorCode ierr;
625   Mat            A;
626   PC_MG          *mg = (PC_MG*)pc->data;
627   PC_Exotic      *ex = (PC_Exotic*) mg->innerctx;
628   MatReuse       reuse = (ex->P) ? MAT_REUSE_MATRIX : MAT_INITIAL_MATRIX;
629 
630   PetscFunctionBegin;
631   if (!pc->dm) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONGSTATE,"Need to call PCSetDM() before using this PC");
632   ierr = PCGetOperators(pc,PETSC_NULL,&A,PETSC_NULL);CHKERRQ(ierr);
633   if (ex->type == PC_EXOTIC_FACE) {
634     ierr = DMDAGetFaceInterpolation(pc->dm,ex,A,reuse,&ex->P);CHKERRQ(ierr);
635   } else if (ex->type == PC_EXOTIC_WIREBASKET) {
636     ierr = DMDAGetWireBasketInterpolation(pc->dm,ex,A,reuse,&ex->P);CHKERRQ(ierr);
637   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_PLIB,"Unknown exotic coarse space %d",ex->type);
638   ierr = PCMGSetInterpolation(pc,1,ex->P);CHKERRQ(ierr);
639   /* if PC has attached DM we must remove it or the PCMG will use it to compute incorrect sized vectors and interpolations */
640   ierr = PCSetDM(pc,PETSC_NULL);CHKERRQ(ierr);
641   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
642   PetscFunctionReturn(0);
643 }
644 
645 #undef __FUNCT__
646 #define __FUNCT__ "PCDestroy_Exotic"
647 PetscErrorCode PCDestroy_Exotic(PC pc)
648 {
649   PetscErrorCode ierr;
650   PC_MG          *mg = (PC_MG*)pc->data;
651   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
652 
653   PetscFunctionBegin;
654   ierr = MatDestroy(&ctx->P);CHKERRQ(ierr);
655   ierr = KSPDestroy(&ctx->ksp);CHKERRQ(ierr);
656   ierr = PetscFree(ctx);CHKERRQ(ierr);
657   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
658   PetscFunctionReturn(0);
659 }
660 
661 #undef __FUNCT__
662 #define __FUNCT__ "PCView_Exotic"
663 PetscErrorCode PCView_Exotic(PC pc,PetscViewer viewer)
664 {
665   PC_MG          *mg = (PC_MG*)pc->data;
666   PetscErrorCode ierr;
667   PetscBool      iascii;
668   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
669 
670   PetscFunctionBegin;
671   ierr = PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
672   if (iascii) {
673     ierr = PetscViewerASCIIPrintf(viewer,"    Exotic type = %s\n",PCExoticTypes[ctx->type]);CHKERRQ(ierr);
674     if (ctx->directSolve) {
675       ierr = PetscViewerASCIIPrintf(viewer,"      Using direct solver to construct interpolation\n");CHKERRQ(ierr);
676     } else {
677       PetscViewer sviewer;
678       PetscMPIInt rank;
679 
680       ierr = PetscViewerASCIIPrintf(viewer,"      Using iterative solver to construct interpolation\n");CHKERRQ(ierr);
681       ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
682       ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);  /* should not need to push this twice? */
683       ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
684       ierr = MPI_Comm_rank(((PetscObject)pc)->comm,&rank);CHKERRQ(ierr);
685       if (!rank) {
686 	ierr = KSPView(ctx->ksp,sviewer);CHKERRQ(ierr);
687       }
688       ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
689       ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
690       ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
691     }
692   }
693   ierr = PCView_MG(pc,viewer);CHKERRQ(ierr);
694   PetscFunctionReturn(0);
695 }
696 
697 #undef __FUNCT__
698 #define __FUNCT__ "PCSetFromOptions_Exotic"
699 PetscErrorCode PCSetFromOptions_Exotic(PC pc)
700 {
701   PetscErrorCode ierr;
702   PetscBool      flg;
703   PC_MG          *mg = (PC_MG*)pc->data;
704   PCExoticType   mgctype;
705   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
706 
707   PetscFunctionBegin;
708   ierr = PetscOptionsHead("Exotic coarse space options");CHKERRQ(ierr);
709     ierr = PetscOptionsEnum("-pc_exotic_type","face or wirebasket","PCExoticSetType",PCExoticTypes,(PetscEnum)ctx->type,(PetscEnum*)&mgctype,&flg);CHKERRQ(ierr);
710     if (flg) {
711       ierr = PCExoticSetType(pc,mgctype);CHKERRQ(ierr);
712     }
713     ierr = PetscOptionsBool("-pc_exotic_direct_solver","use direct solver to construct interpolation","None",ctx->directSolve,&ctx->directSolve,PETSC_NULL);CHKERRQ(ierr);
714     if (!ctx->directSolve) {
715       if (!ctx->ksp) {
716         const char *prefix;
717         ierr = KSPCreate(PETSC_COMM_SELF,&ctx->ksp);CHKERRQ(ierr);
718         ierr = PetscObjectIncrementTabLevel((PetscObject)ctx->ksp,(PetscObject)pc,1);CHKERRQ(ierr);
719         ierr = PetscLogObjectParent(pc,ctx->ksp);CHKERRQ(ierr);
720         ierr = PCGetOptionsPrefix(pc,&prefix);CHKERRQ(ierr);
721         ierr = KSPSetOptionsPrefix(ctx->ksp,prefix);CHKERRQ(ierr);
722         ierr = KSPAppendOptionsPrefix(ctx->ksp,"exotic_");CHKERRQ(ierr);
723       }
724       ierr = KSPSetFromOptions(ctx->ksp);CHKERRQ(ierr);
725     }
726   ierr = PetscOptionsTail();CHKERRQ(ierr);
727   PetscFunctionReturn(0);
728 }
729 
730 
731 /*MC
732      PCEXOTIC - Two level overlapping Schwarz preconditioner with exotic (non-standard) coarse grid spaces
733 
734      This uses the PCMG infrastructure restricted to two levels and the face and wirebasket based coarse
735    grid spaces.
736 
737    Notes: By default this uses GMRES on the fine grid smoother so this should be used with KSPFGMRES or the smoother changed to not use GMRES
738 
739    References: These coarse grid spaces originate in the work of Bramble, Pasciak  and Schatz, "The Construction
740    of Preconditioners for Elliptic Problems by Substructuring IV", Mathematics of Computation, volume 53 pages 1--24, 1989.
741    They were generalized slightly in "Domain Decomposition Method for Linear Elasticity", Ph. D. thesis, Barry Smith,
742    New York University, 1990. They were then explored in great detail in Dryja, Smith, Widlund, "Schwarz Analysis
743    of Iterative Substructuring Methods for Elliptic Problems in Three Dimensions", SIAM Journal on Numerical
744    Analysis, volume 31. pages 1662-1694, 1994. These were developed in the context of iterative substructuring preconditioners.
745    They were then ingeniously applied as coarse grid spaces for overlapping Schwarz methods by Dohrmann and Widlund.
746    They refer to them as GDSW (generalized Dryja, Smith, Widlund preconditioners). See, for example,
747    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Extending theory for domain decomposition algorithms to irregular subdomains. In Ulrich Langer, Marco
748    Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
749    of the 17th International Conference on Domain Decomposition Methods in
750    Science and Engineering, held in Strobl, Austria, July 3-7, 2006, number 60 in
751    Springer-Verlag, Lecture Notes in Computational Science and Engineering, pages 255-261, 2007.
752    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. A family of energy
753    minimizing coarse spaces for overlapping Schwarz preconditioners. In Ulrich Langer,
754    Marco Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
755    of the 17th International Conference on Domain Decomposition Methods
756    in Science and Engineering, held in Strobl, Austria, July 3-7, 2006, number 60 in
757    Springer-Verlag, Lecture Notes in Computational Science and Engineering, pages 247-254, 2007
758    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Domain decomposition
759    for less regular subdomains: Overlapping Schwarz in two dimensions. SIAM J.
760    Numer. Anal., 46(4):2153-2168, 2008.
761    Clark R. Dohrmann and Olof B. Widlund. An overlapping Schwarz
762    algorithm for almost incompressible elasticity. Technical Report
763    TR2008-912, Department of Computer Science, Courant Institute
764    of Mathematical Sciences, New York University, May 2008. URL:
765    http://cs.nyu.edu/csweb/Research/TechReports/TR2008-912/TR2008-912.pdf
766 
767    Options Database: The usual PCMG options are supported, such as -mg_levels_pc_type <type> -mg_coarse_pc_type <type>
768       -pc_mg_type <type>
769 
770    Level: advanced
771 
772 .seealso:  PCMG, PCSetDM(), PCExoticType, PCExoticSetType()
773 M*/
774 
775 EXTERN_C_BEGIN
776 #undef __FUNCT__
777 #define __FUNCT__ "PCCreate_Exotic"
778 PetscErrorCode  PCCreate_Exotic(PC pc)
779 {
780   PetscErrorCode ierr;
781   PC_Exotic      *ex;
782   PC_MG          *mg;
783 
784   PetscFunctionBegin;
785   /* if type was previously mg; must manually destroy it because call to PCSetType(pc,PCMG) will not destroy it */
786   if (pc->ops->destroy) { ierr =  (*pc->ops->destroy)(pc);CHKERRQ(ierr); pc->data = 0;}
787   ierr = PetscFree(((PetscObject)pc)->type_name);CHKERRQ(ierr);
788   ((PetscObject)pc)->type_name = 0;
789 
790   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr);
791   ierr = PCMGSetLevels(pc,2,PETSC_NULL);CHKERRQ(ierr);
792   ierr = PCMGSetGalerkin(pc,PETSC_TRUE);CHKERRQ(ierr);
793   ierr = PetscNew(PC_Exotic,&ex);CHKERRQ(ierr);\
794   ex->type = PC_EXOTIC_FACE;
795   mg = (PC_MG*) pc->data;
796   mg->innerctx = ex;
797 
798 
799   pc->ops->setfromoptions = PCSetFromOptions_Exotic;
800   pc->ops->view           = PCView_Exotic;
801   pc->ops->destroy        = PCDestroy_Exotic;
802   pc->ops->setup          = PCSetUp_Exotic;
803   ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCExoticSetType_C","PCExoticSetType_Exotic",PCExoticSetType_Exotic);CHKERRQ(ierr);
804   PetscFunctionReturn(0);
805 }
806 EXTERN_C_END
807