xref: /petsc/src/ksp/pc/impls/wb/wb.c (revision 117ef88edefbfc12e7c19efe87a19a2e1b0acd4f)
1 
2 #include <petscdmda.h>   /*I "petscdmda.h" I*/
3 #include <petsc/private/pcmgimpl.h>   /*I "petscksp.h" I*/
4 #include <petscctable.h>
5 
/* Private context for the PCEXOTIC preconditioner: holds the coarse-space
   choice, the constructed interpolation, and the (optional) inner solver
   used to build the interior columns of the interpolation. */
typedef struct {
  PCExoticType type;         /* PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET */
  Mat          P;            /* the constructed interpolation matrix */
  PetscBool    directSolve;  /* use direct LU factorization to construct interpolation */
  KSP          ksp;          /* iterative solver used instead of LU when !directSolve */
} PC_Exotic;

/* String names for PCExoticType, in enum order, used by option processing and PCView */
const char *const PCExoticTypes[] = {"face","wirebasket","PCExoticType","PC_Exotic",NULL};
14 
15 
16 /*
17       DMDAGetWireBasketInterpolation - Gets the interpolation for a wirebasket based coarse space
18 
19 */
20 PetscErrorCode DMDAGetWireBasketInterpolation(PC pc,DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
21 {
22   PetscErrorCode         ierr;
23   PetscInt               dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
24   PetscInt               mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[26],*globals,Ng,*IIint,*IIsurf,Nt;
25   Mat                    Xint, Xsurf,Xint_tmp;
26   IS                     isint,issurf,is,row,col;
27   ISLocalToGlobalMapping ltg;
28   MPI_Comm               comm;
29   Mat                    A,Aii,Ais,Asi,*Aholder,iAii;
30   MatFactorInfo          info;
31   PetscScalar            *xsurf,*xint;
32   const PetscScalar      *rxint;
33 #if defined(PETSC_USE_DEBUG_foo)
34   PetscScalar            tmp;
35 #endif
36   PetscTable             ht;
37 
38   PetscFunctionBegin;
39   ierr = DMDAGetInfo(da,&dim,NULL,NULL,NULL,&mp,&np,&pp,&dof,NULL,NULL,NULL,NULL,NULL);CHKERRQ(ierr);
40   if (dof != 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only for single field problems");
41   if (dim != 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only coded for 3d problems");
42   ierr   = DMDAGetCorners(da,NULL,NULL,NULL,&m,&n,&p);CHKERRQ(ierr);
43   ierr   = DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);CHKERRQ(ierr);
44   istart = istart ? -1 : 0;
45   jstart = jstart ? -1 : 0;
46   kstart = kstart ? -1 : 0;
47 
48   /*
49     the columns of P are the interpolation of each coarse grid point (one for each vertex and edge)
50     to all the local degrees of freedom (this includes the vertices, edges and faces).
51 
52     Xint are the subset of the interpolation into the interior
53 
54     Xface are the interpolation onto faces but not into the interior
55 
56     Xsurf are the interpolation onto the vertices and edges (the surfbasket)
57                                       Xint
58     Symbolically one could write P = (Xface) after interchanging the rows to match the natural ordering on the domain
59                                       Xsurf
60   */
61   N     = (m - istart)*(n - jstart)*(p - kstart);
62   Nint  = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
63   Nface = 2*((m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart));
64   Nwire = 4*((m-2-istart) + (n-2-jstart) + (p-2-kstart)) + 8;
65   Nsurf = Nface + Nwire;
66   ierr  = MatCreateSeqDense(MPI_COMM_SELF,Nint,26,NULL,&Xint);CHKERRQ(ierr);
67   ierr  = MatCreateSeqDense(MPI_COMM_SELF,Nsurf,26,NULL,&Xsurf);CHKERRQ(ierr);
68   ierr  = MatDenseGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
69 
70   /*
71      Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
72      Xsurf will be all zero (thus making the coarse matrix singular).
73   */
74   if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
75   if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
76   if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");
77 
78   cnt = 0;
79 
80   xsurf[cnt++] = 1;
81   for (i=1; i<m-istart-1; i++) xsurf[cnt++ + Nsurf] = 1;
82   xsurf[cnt++ + 2*Nsurf] = 1;
83 
84   for (j=1; j<n-1-jstart; j++) {
85     xsurf[cnt++ + 3*Nsurf] = 1;
86     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 4*Nsurf] = 1;
87     xsurf[cnt++ + 5*Nsurf] = 1;
88   }
89 
90   xsurf[cnt++ + 6*Nsurf] = 1;
91   for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 7*Nsurf] = 1;
92   xsurf[cnt++ + 8*Nsurf] = 1;
93 
94   for (k=1; k<p-1-kstart; k++) {
95     xsurf[cnt++ + 9*Nsurf] = 1;
96     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 10*Nsurf] = 1;
97     xsurf[cnt++ + 11*Nsurf] = 1;
98 
99     for (j=1; j<n-1-jstart; j++) {
100       xsurf[cnt++ + 12*Nsurf] = 1;
101       /* these are the interior nodes */
102       xsurf[cnt++ + 13*Nsurf] = 1;
103     }
104 
105     xsurf[cnt++ + 14*Nsurf] = 1;
106     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 15*Nsurf] = 1;
107     xsurf[cnt++ + 16*Nsurf] = 1;
108   }
109 
110   xsurf[cnt++ + 17*Nsurf] = 1;
111   for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 18*Nsurf] = 1;
112   xsurf[cnt++ + 19*Nsurf] = 1;
113 
114   for (j=1;j<n-1-jstart;j++) {
115     xsurf[cnt++ + 20*Nsurf] = 1;
116     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 21*Nsurf] = 1;
117     xsurf[cnt++ + 22*Nsurf] = 1;
118   }
119 
120   xsurf[cnt++ + 23*Nsurf] = 1;
121   for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 24*Nsurf] = 1;
122   xsurf[cnt++ + 25*Nsurf] = 1;
123 
124 
125   /* interpolations only sum to 1 when using direct solver */
126 #if defined(PETSC_USE_DEBUG_foo)
127   for (i=0; i<Nsurf; i++) {
128     tmp = 0.0;
129     for (j=0; j<26; j++) tmp += xsurf[i+j*Nsurf];
130     if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
131   }
132 #endif
133   ierr = MatDenseRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
134   /* ierr = MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);*/
135 
136 
137   /*
138        I are the indices for all the needed vertices (in global numbering)
139        Iint are the indices for the interior values, I surf for the surface values
140             (This is just for the part of the global matrix obtained with MatCreateSubMatrix(), it
141              is NOT the local DMDA ordering.)
142        IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
143   */
144 #define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
145   ierr = PetscMalloc3(N,&II,Nint,&Iint,Nsurf,&Isurf);CHKERRQ(ierr);
146   ierr = PetscMalloc2(Nint,&IIint,Nsurf,&IIsurf);CHKERRQ(ierr);
147   for (k=0; k<p-kstart; k++) {
148     for (j=0; j<n-jstart; j++) {
149       for (i=0; i<m-istart; i++) {
150         II[c++] = i + j*mwidth + k*mwidth*nwidth;
151 
152         if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
153           IIint[cint]  = i + j*mwidth + k*mwidth*nwidth;
154           Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
155         } else {
156           IIsurf[csurf]  = i + j*mwidth + k*mwidth*nwidth;
157           Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
158         }
159       }
160     }
161   }
162   if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
163   if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
164   if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
165   ierr = DMGetLocalToGlobalMapping(da,&ltg);CHKERRQ(ierr);
166   ierr = ISLocalToGlobalMappingApply(ltg,N,II,II);CHKERRQ(ierr);
167   ierr = ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);CHKERRQ(ierr);
168   ierr = ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);CHKERRQ(ierr);
169   ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
170   ierr = ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
171   ierr = ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);CHKERRQ(ierr);
172   ierr = ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);CHKERRQ(ierr);
173   ierr = PetscFree3(II,Iint,Isurf);CHKERRQ(ierr);
174 
175   ierr = MatCreateSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);CHKERRQ(ierr);
176   A    = *Aholder;
177   ierr = PetscFree(Aholder);CHKERRQ(ierr);
178 
179   ierr = MatCreateSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);CHKERRQ(ierr);
180   ierr = MatCreateSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);CHKERRQ(ierr);
181   ierr = MatCreateSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);CHKERRQ(ierr);
182 
183   /*
184      Solve for the interpolation onto the interior Xint
185   */
186   ierr = MatMatMult(Ais,Xsurf,MAT_INITIAL_MATRIX,PETSC_DETERMINE,&Xint_tmp);CHKERRQ(ierr);
187   ierr = MatScale(Xint_tmp,-1.0);CHKERRQ(ierr);
188   if (exotic->directSolve) {
189     ierr = MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);CHKERRQ(ierr);
190     ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
191     ierr = MatGetOrdering(Aii,MATORDERINGND,&row,&col);CHKERRQ(ierr);
192     ierr = MatLUFactorSymbolic(iAii,Aii,row,col,&info);CHKERRQ(ierr);
193     ierr = ISDestroy(&row);CHKERRQ(ierr);
194     ierr = ISDestroy(&col);CHKERRQ(ierr);
195     ierr = MatLUFactorNumeric(iAii,Aii,&info);CHKERRQ(ierr);
196     ierr = MatMatSolve(iAii,Xint_tmp,Xint);CHKERRQ(ierr);
197     ierr = MatDestroy(&iAii);CHKERRQ(ierr);
198   } else {
199     Vec         b,x;
200     PetscScalar *xint_tmp;
201 
202     ierr = MatDenseGetArray(Xint,&xint);CHKERRQ(ierr);
203     ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,NULL,&x);CHKERRQ(ierr);
204     ierr = MatDenseGetArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
205     ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,NULL,&b);CHKERRQ(ierr);
206     ierr = KSPSetOperators(exotic->ksp,Aii,Aii);CHKERRQ(ierr);
207     for (i=0; i<26; i++) {
208       ierr = VecPlaceArray(x,xint+i*Nint);CHKERRQ(ierr);
209       ierr = VecPlaceArray(b,xint_tmp+i*Nint);CHKERRQ(ierr);
210       ierr = KSPSolve(exotic->ksp,b,x);CHKERRQ(ierr);
211       ierr = KSPCheckSolve(exotic->ksp,pc,x);CHKERRQ(ierr);
212       ierr = VecResetArray(x);CHKERRQ(ierr);
213       ierr = VecResetArray(b);CHKERRQ(ierr);
214     }
215     ierr = MatDenseRestoreArray(Xint,&xint);CHKERRQ(ierr);
216     ierr = MatDenseRestoreArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
217     ierr = VecDestroy(&x);CHKERRQ(ierr);
218     ierr = VecDestroy(&b);CHKERRQ(ierr);
219   }
220   ierr = MatDestroy(&Xint_tmp);CHKERRQ(ierr);
221 
222 #if defined(PETSC_USE_DEBUG_foo)
223   ierr = MatDenseGetArrayRead(Xint,&rxint);CHKERRQ(ierr);
224   for (i=0; i<Nint; i++) {
225     tmp = 0.0;
226     for (j=0; j<26; j++) tmp += rxint[i+j*Nint];
227 
228     if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
229   }
230   ierr = MatDenseRestoreArrayRead(Xint,&rxint);CHKERRQ(ierr);
231   /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
232 #endif
233 
234 
235   /*         total vertices             total faces                                  total edges */
236   Ntotal = (mp + 1)*(np + 1)*(pp + 1) + mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1) + mp*(np+1)*(pp+1) + np*(mp+1)*(pp+1) +  pp*(mp+1)*(np+1);
237 
238   /*
239       For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
240   */
241   cnt = 0;
242 
243   gl[cnt++] = 0;  { gl[cnt++] = 1;} gl[cnt++] = m-istart-1;
244   { gl[cnt++] = mwidth;  { gl[cnt++] = mwidth+1;} gl[cnt++] = mwidth + m-istart-1;}
245   gl[cnt++] = mwidth*(n-jstart-1);  { gl[cnt++] = mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*(n-jstart-1) + m-istart-1;
246   {
247     gl[cnt++] = mwidth*nwidth;  { gl[cnt++] = mwidth*nwidth+1;}  gl[cnt++] = mwidth*nwidth+ m-istart-1;
248     { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
249     gl[cnt++] = mwidth*nwidth+ mwidth*(n-jstart-1);   { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1) + m-istart-1;
250   }
251   gl[cnt++] = mwidth*nwidth*(p-kstart-1); { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) +  m-istart-1;
252   { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth;   { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1)+mwidth+m-istart-1;}
253   gl[cnt++] = mwidth*nwidth*(p-kstart-1) +  mwidth*(n-jstart-1);  { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+ mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth*(n-jstart-1) + m-istart-1;
254 
255   /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
256   /* convert that to global numbering and get them on all processes */
257   ierr = ISLocalToGlobalMappingApply(ltg,26,gl,gl);CHKERRQ(ierr);
258   /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
259   ierr = PetscMalloc1(26*mp*np*pp,&globals);CHKERRQ(ierr);
260   ierr = MPI_Allgather(gl,26,MPIU_INT,globals,26,MPIU_INT,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
261 
262   /* Number the coarse grid points from 0 to Ntotal */
263   ierr = MatGetSize(Aglobal,&Nt,NULL);CHKERRQ(ierr);
264   ierr = PetscTableCreate(Ntotal/3,Nt+1,&ht);CHKERRQ(ierr);
265   for (i=0; i<26*mp*np*pp; i++) {
266     ierr = PetscTableAddCount(ht,globals[i]+1);CHKERRQ(ierr);
267   }
268   ierr = PetscTableGetCount(ht,&cnt);CHKERRQ(ierr);
269   if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
270   ierr = PetscFree(globals);CHKERRQ(ierr);
271   for (i=0; i<26; i++) {
272     ierr = PetscTableFind(ht,gl[i]+1,&gl[i]);CHKERRQ(ierr);
273     gl[i]--;
274   }
275   ierr = PetscTableDestroy(&ht);CHKERRQ(ierr);
276   /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
277 
278   /* construct global interpolation matrix */
279   ierr = MatGetLocalSize(Aglobal,&Ng,NULL);CHKERRQ(ierr);
280   if (reuse == MAT_INITIAL_MATRIX) {
281     ierr = MatCreateAIJ(PetscObjectComm((PetscObject)da),Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,NULL,Nint+Nsurf,NULL,P);CHKERRQ(ierr);
282   } else {
283     ierr = MatZeroEntries(*P);CHKERRQ(ierr);
284   }
285   ierr = MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
286   ierr = MatDenseGetArrayRead(Xint,&rxint);CHKERRQ(ierr);
287   ierr = MatSetValues(*P,Nint,IIint,26,gl,rxint,INSERT_VALUES);CHKERRQ(ierr);
288   ierr = MatDenseRestoreArrayRead(Xint,&rxint);CHKERRQ(ierr);
289   ierr = MatDenseGetArrayRead(Xsurf,&rxint);CHKERRQ(ierr);
290   ierr = MatSetValues(*P,Nsurf,IIsurf,26,gl,rxint,INSERT_VALUES);CHKERRQ(ierr);
291   ierr = MatDenseRestoreArrayRead(Xsurf,&rxint);CHKERRQ(ierr);
292   ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
293   ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
294   ierr = PetscFree2(IIint,IIsurf);CHKERRQ(ierr);
295 
296 #if defined(PETSC_USE_DEBUG_foo)
297   {
298     Vec         x,y;
299     PetscScalar *yy;
300     ierr = VecCreateMPI(PetscObjectComm((PetscObject)da),Ng,PETSC_DETERMINE,&y);CHKERRQ(ierr);
301     ierr = VecCreateMPI(PetscObjectComm((PetscObject)da),PETSC_DETERMINE,Ntotal,&x);CHKERRQ(ierr);
302     ierr = VecSet(x,1.0);CHKERRQ(ierr);
303     ierr = MatMult(*P,x,y);CHKERRQ(ierr);
304     ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
305     for (i=0; i<Ng; i++) {
306       if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %g",i,(double)PetscAbsScalar(yy[i]));
307     }
308     ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
309     ierr = VecDestroy(x);CHKERRQ(ierr);
310     ierr = VecDestroy(y);CHKERRQ(ierr);
311   }
312 #endif
313 
314   ierr = MatDestroy(&Aii);CHKERRQ(ierr);
315   ierr = MatDestroy(&Ais);CHKERRQ(ierr);
316   ierr = MatDestroy(&Asi);CHKERRQ(ierr);
317   ierr = MatDestroy(&A);CHKERRQ(ierr);
318   ierr = ISDestroy(&is);CHKERRQ(ierr);
319   ierr = ISDestroy(&isint);CHKERRQ(ierr);
320   ierr = ISDestroy(&issurf);CHKERRQ(ierr);
321   ierr = MatDestroy(&Xint);CHKERRQ(ierr);
322   ierr = MatDestroy(&Xsurf);CHKERRQ(ierr);
323   PetscFunctionReturn(0);
324 }
325 
326 /*
327       DMDAGetFaceInterpolation - Gets the interpolation for a face based coarse space
328 
329 */
330 PetscErrorCode DMDAGetFaceInterpolation(PC pc,DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
331 {
332   PetscErrorCode         ierr;
333   PetscInt               dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
334   PetscInt               mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[6],*globals,Ng,*IIint,*IIsurf,Nt;
335   Mat                    Xint, Xsurf,Xint_tmp;
336   IS                     isint,issurf,is,row,col;
337   ISLocalToGlobalMapping ltg;
338   MPI_Comm               comm;
339   Mat                    A,Aii,Ais,Asi,*Aholder,iAii;
340   MatFactorInfo          info;
341   PetscScalar            *xsurf,*xint;
342   const PetscScalar      *rxint;
343 #if defined(PETSC_USE_DEBUG_foo)
344   PetscScalar            tmp;
345 #endif
346   PetscTable             ht;
347 
348   PetscFunctionBegin;
349   ierr = DMDAGetInfo(da,&dim,NULL,NULL,NULL,&mp,&np,&pp,&dof,NULL,NULL,NULL,NULL,NULL);CHKERRQ(ierr);
350   if (dof != 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only for single field problems");
351   if (dim != 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only coded for 3d problems");
352   ierr   = DMDAGetCorners(da,NULL,NULL,NULL,&m,&n,&p);CHKERRQ(ierr);
353   ierr   = DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);CHKERRQ(ierr);
354   istart = istart ? -1 : 0;
355   jstart = jstart ? -1 : 0;
356   kstart = kstart ? -1 : 0;
357 
358   /*
359     the columns of P are the interpolation of each coarse grid point (one for each vertex and edge)
360     to all the local degrees of freedom (this includes the vertices, edges and faces).
361 
362     Xint are the subset of the interpolation into the interior
363 
364     Xface are the interpolation onto faces but not into the interior
365 
366     Xsurf are the interpolation onto the vertices and edges (the surfbasket)
367                                       Xint
368     Symbolically one could write P = (Xface) after interchanging the rows to match the natural ordering on the domain
369                                       Xsurf
370   */
371   N     = (m - istart)*(n - jstart)*(p - kstart);
372   Nint  = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
373   Nface = 2*((m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart));
374   Nwire = 4*((m-2-istart) + (n-2-jstart) + (p-2-kstart)) + 8;
375   Nsurf = Nface + Nwire;
376   ierr  = MatCreateSeqDense(MPI_COMM_SELF,Nint,6,NULL,&Xint);CHKERRQ(ierr);
377   ierr  = MatCreateSeqDense(MPI_COMM_SELF,Nsurf,6,NULL,&Xsurf);CHKERRQ(ierr);
378   ierr  = MatDenseGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
379 
380   /*
381      Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
382      Xsurf will be all zero (thus making the coarse matrix singular).
383   */
384   if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
385   if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
386   if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");
387 
388   cnt = 0;
389   for (j=1; j<n-1-jstart; j++) {
390     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 0*Nsurf] = 1;
391   }
392 
393   for (k=1; k<p-1-kstart; k++) {
394     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 1*Nsurf] = 1;
395     for (j=1; j<n-1-jstart; j++) {
396       xsurf[cnt++ + 2*Nsurf] = 1;
397       /* these are the interior nodes */
398       xsurf[cnt++ + 3*Nsurf] = 1;
399     }
400     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 4*Nsurf] = 1;
401   }
402   for (j=1;j<n-1-jstart;j++) {
403     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 5*Nsurf] = 1;
404   }
405 
406 #if defined(PETSC_USE_DEBUG_foo)
407   for (i=0; i<Nsurf; i++) {
408     tmp = 0.0;
409     for (j=0; j<6; j++) tmp += xsurf[i+j*Nsurf];
410 
411     if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
412   }
413 #endif
414   ierr = MatDenseRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
415   /* ierr = MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);*/
416 
417 
418   /*
419        I are the indices for all the needed vertices (in global numbering)
420        Iint are the indices for the interior values, I surf for the surface values
421             (This is just for the part of the global matrix obtained with MatCreateSubMatrix(), it
422              is NOT the local DMDA ordering.)
423        IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
424   */
425 #define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
426   ierr = PetscMalloc3(N,&II,Nint,&Iint,Nsurf,&Isurf);CHKERRQ(ierr);
427   ierr = PetscMalloc2(Nint,&IIint,Nsurf,&IIsurf);CHKERRQ(ierr);
428   for (k=0; k<p-kstart; k++) {
429     for (j=0; j<n-jstart; j++) {
430       for (i=0; i<m-istart; i++) {
431         II[c++] = i + j*mwidth + k*mwidth*nwidth;
432 
433         if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
434           IIint[cint]  = i + j*mwidth + k*mwidth*nwidth;
435           Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
436         } else {
437           IIsurf[csurf]  = i + j*mwidth + k*mwidth*nwidth;
438           Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
439         }
440       }
441     }
442   }
443   if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
444   if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
445   if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
446   ierr = DMGetLocalToGlobalMapping(da,&ltg);CHKERRQ(ierr);
447   ierr = ISLocalToGlobalMappingApply(ltg,N,II,II);CHKERRQ(ierr);
448   ierr = ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);CHKERRQ(ierr);
449   ierr = ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);CHKERRQ(ierr);
450   ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
451   ierr = ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
452   ierr = ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);CHKERRQ(ierr);
453   ierr = ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);CHKERRQ(ierr);
454   ierr = PetscFree3(II,Iint,Isurf);CHKERRQ(ierr);
455 
456   ierr = ISSort(is);CHKERRQ(ierr);
457   ierr = MatCreateSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);CHKERRQ(ierr);
458   A    = *Aholder;
459   ierr = PetscFree(Aholder);CHKERRQ(ierr);
460 
461   ierr = MatCreateSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);CHKERRQ(ierr);
462   ierr = MatCreateSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);CHKERRQ(ierr);
463   ierr = MatCreateSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);CHKERRQ(ierr);
464 
465   /*
466      Solve for the interpolation onto the interior Xint
467   */
468   ierr = MatMatMult(Ais,Xsurf,MAT_INITIAL_MATRIX,PETSC_DETERMINE,&Xint_tmp);CHKERRQ(ierr);
469   ierr = MatScale(Xint_tmp,-1.0);CHKERRQ(ierr);
470 
471   if (exotic->directSolve) {
472     ierr = MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);CHKERRQ(ierr);
473     ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
474     ierr = MatGetOrdering(Aii,MATORDERINGND,&row,&col);CHKERRQ(ierr);
475     ierr = MatLUFactorSymbolic(iAii,Aii,row,col,&info);CHKERRQ(ierr);
476     ierr = ISDestroy(&row);CHKERRQ(ierr);
477     ierr = ISDestroy(&col);CHKERRQ(ierr);
478     ierr = MatLUFactorNumeric(iAii,Aii,&info);CHKERRQ(ierr);
479     ierr = MatMatSolve(iAii,Xint_tmp,Xint);CHKERRQ(ierr);
480     ierr = MatDestroy(&iAii);CHKERRQ(ierr);
481   } else {
482     Vec         b,x;
483     PetscScalar *xint_tmp;
484 
485     ierr = MatDenseGetArray(Xint,&xint);CHKERRQ(ierr);
486     ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,NULL,&x);CHKERRQ(ierr);
487     ierr = MatDenseGetArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
488     ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,NULL,&b);CHKERRQ(ierr);
489     ierr = KSPSetOperators(exotic->ksp,Aii,Aii);CHKERRQ(ierr);
490     for (i=0; i<6; i++) {
491       ierr = VecPlaceArray(x,xint+i*Nint);CHKERRQ(ierr);
492       ierr = VecPlaceArray(b,xint_tmp+i*Nint);CHKERRQ(ierr);
493       ierr = KSPSolve(exotic->ksp,b,x);CHKERRQ(ierr);
494       ierr = KSPCheckSolve(exotic->ksp,pc,x);CHKERRQ(ierr);
495       ierr = VecResetArray(x);CHKERRQ(ierr);
496       ierr = VecResetArray(b);CHKERRQ(ierr);
497     }
498     ierr = MatDenseRestoreArray(Xint,&xint);CHKERRQ(ierr);
499     ierr = MatDenseRestoreArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
500     ierr = VecDestroy(&x);CHKERRQ(ierr);
501     ierr = VecDestroy(&b);CHKERRQ(ierr);
502   }
503   ierr = MatDestroy(&Xint_tmp);CHKERRQ(ierr);
504 
505 #if defined(PETSC_USE_DEBUG_foo)
506   ierr = MatDenseGetArrayRead(Xint,&rxint);CHKERRQ(ierr);
507   for (i=0; i<Nint; i++) {
508     tmp = 0.0;
509     for (j=0; j<6; j++) tmp += rxint[i+j*Nint];
510 
511     if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
512   }
513   ierr = MatDenseRestoreArrayRead(Xint,&rxint);CHKERRQ(ierr);
514   /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
515 #endif
516 
517 
518   /*         total faces    */
519   Ntotal =  mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1);
520 
521   /*
522       For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
523   */
524   cnt = 0;
525   { gl[cnt++] = mwidth+1;}
526   {
527     { gl[cnt++] = mwidth*nwidth+1;}
528     { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
529     { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;}
530   }
531   { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;}
532 
533   /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
534   /* convert that to global numbering and get them on all processes */
535   ierr = ISLocalToGlobalMappingApply(ltg,6,gl,gl);CHKERRQ(ierr);
536   /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
537   ierr = PetscMalloc1(6*mp*np*pp,&globals);CHKERRQ(ierr);
538   ierr = MPI_Allgather(gl,6,MPIU_INT,globals,6,MPIU_INT,PetscObjectComm((PetscObject)da));CHKERRQ(ierr);
539 
540   /* Number the coarse grid points from 0 to Ntotal */
541   ierr = MatGetSize(Aglobal,&Nt,NULL);CHKERRQ(ierr);
542   ierr = PetscTableCreate(Ntotal/3,Nt+1,&ht);CHKERRQ(ierr);
543   for (i=0; i<6*mp*np*pp; i++) {
544     ierr = PetscTableAddCount(ht,globals[i]+1);CHKERRQ(ierr);
545   }
546   ierr = PetscTableGetCount(ht,&cnt);CHKERRQ(ierr);
547   if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
548   ierr = PetscFree(globals);CHKERRQ(ierr);
549   for (i=0; i<6; i++) {
550     ierr = PetscTableFind(ht,gl[i]+1,&gl[i]);CHKERRQ(ierr);
551     gl[i]--;
552   }
553   ierr = PetscTableDestroy(&ht);CHKERRQ(ierr);
554   /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
555 
556   /* construct global interpolation matrix */
557   ierr = MatGetLocalSize(Aglobal,&Ng,NULL);CHKERRQ(ierr);
558   if (reuse == MAT_INITIAL_MATRIX) {
559     ierr = MatCreateAIJ(PetscObjectComm((PetscObject)da),Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,NULL,Nint,NULL,P);CHKERRQ(ierr);
560   } else {
561     ierr = MatZeroEntries(*P);CHKERRQ(ierr);
562   }
563   ierr = MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
564   ierr = MatDenseGetArrayRead(Xint,&rxint);CHKERRQ(ierr);
565   ierr = MatSetValues(*P,Nint,IIint,6,gl,rxint,INSERT_VALUES);CHKERRQ(ierr);
566   ierr = MatDenseRestoreArrayRead(Xint,&rxint);CHKERRQ(ierr);
567   ierr = MatDenseGetArrayRead(Xsurf,&rxint);CHKERRQ(ierr);
568   ierr = MatSetValues(*P,Nsurf,IIsurf,6,gl,rxint,INSERT_VALUES);CHKERRQ(ierr);
569   ierr = MatDenseRestoreArrayRead(Xsurf,&rxint);CHKERRQ(ierr);
570   ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
571   ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
572   ierr = PetscFree2(IIint,IIsurf);CHKERRQ(ierr);
573 
574 
575 #if defined(PETSC_USE_DEBUG_foo)
576   {
577     Vec         x,y;
578     PetscScalar *yy;
579     ierr = VecCreateMPI(PetscObjectComm((PetscObject)da),Ng,PETSC_DETERMINE,&y);CHKERRQ(ierr);
580     ierr = VecCreateMPI(PetscObjectComm((PetscObject)da),PETSC_DETERMINE,Ntotal,&x);CHKERRQ(ierr);
581     ierr = VecSet(x,1.0);CHKERRQ(ierr);
582     ierr = MatMult(*P,x,y);CHKERRQ(ierr);
583     ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
584     for (i=0; i<Ng; i++) {
585       if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %g",i,(double)PetscAbsScalar(yy[i]));
586     }
587     ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
588     ierr = VecDestroy(x);CHKERRQ(ierr);
589     ierr = VecDestroy(y);CHKERRQ(ierr);
590   }
591 #endif
592 
593   ierr = MatDestroy(&Aii);CHKERRQ(ierr);
594   ierr = MatDestroy(&Ais);CHKERRQ(ierr);
595   ierr = MatDestroy(&Asi);CHKERRQ(ierr);
596   ierr = MatDestroy(&A);CHKERRQ(ierr);
597   ierr = ISDestroy(&is);CHKERRQ(ierr);
598   ierr = ISDestroy(&isint);CHKERRQ(ierr);
599   ierr = ISDestroy(&issurf);CHKERRQ(ierr);
600   ierr = MatDestroy(&Xint);CHKERRQ(ierr);
601   ierr = MatDestroy(&Xsurf);CHKERRQ(ierr);
602   PetscFunctionReturn(0);
603 }
604 
605 
606 /*@
607    PCExoticSetType - Sets the type of coarse grid interpolation to use
608 
609    Logically Collective on PC
610 
611    Input Parameters:
612 +  pc - the preconditioner context
613 -  type - either PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET (defaults to face)
614 
615    Notes:
616     The face based interpolation has 1 degree of freedom per face and ignores the
617      edge and vertex values completely in the coarse problem. For any seven point
618      stencil the interpolation of a constant on all faces into the interior is that constant.
619 
620      The wirebasket interpolation has 1 degree of freedom per vertex, per edge and
621      per face. A constant on the subdomain boundary is interpolated as that constant
622      in the interior of the domain.
623 
624      The coarse grid matrix is obtained via the Galerkin computation A_c = R A R^T, hence
625      if A is nonsingular A_c is also nonsingular.
626 
627      Both interpolations are suitable for only scalar problems.
628 
629    Level: intermediate
630 
631 
632 .seealso: PCEXOTIC, PCExoticType()
633 @*/
PetscErrorCode  PCExoticSetType(PC pc,PCExoticType type)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* standard PETSc argument checking; the enum check also verifies all ranks pass the same value */
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  PetscValidLogicalCollectiveEnum(pc,type,2);
  /* dispatch to PCExoticSetType_Exotic() if pc is a PCEXOTIC; silently no-op otherwise */
  ierr = PetscTryMethod(pc,"PCExoticSetType_C",(PC,PCExoticType),(pc,type));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
644 
645 static PetscErrorCode  PCExoticSetType_Exotic(PC pc,PCExoticType type)
646 {
647   PC_MG     *mg  = (PC_MG*)pc->data;
648   PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
649 
650   PetscFunctionBegin;
651   ctx->type = type;
652   PetscFunctionReturn(0);
653 }
654 
655 PetscErrorCode PCSetUp_Exotic(PC pc)
656 {
657   PetscErrorCode ierr;
658   Mat            A;
659   PC_MG          *mg   = (PC_MG*)pc->data;
660   PC_Exotic      *ex   = (PC_Exotic*) mg->innerctx;
661   MatReuse       reuse = (ex->P) ? MAT_REUSE_MATRIX : MAT_INITIAL_MATRIX;
662 
663   PetscFunctionBegin;
664   if (!pc->dm) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONGSTATE,"Need to call PCSetDM() before using this PC");
665   ierr = PCGetOperators(pc,NULL,&A);CHKERRQ(ierr);
666   if (ex->type == PC_EXOTIC_FACE) {
667     ierr = DMDAGetFaceInterpolation(pc,pc->dm,ex,A,reuse,&ex->P);CHKERRQ(ierr);
668   } else if (ex->type == PC_EXOTIC_WIREBASKET) {
669     ierr = DMDAGetWireBasketInterpolation(pc,pc->dm,ex,A,reuse,&ex->P);CHKERRQ(ierr);
670   } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_PLIB,"Unknown exotic coarse space %d",ex->type);
671   ierr = PCMGSetInterpolation(pc,1,ex->P);CHKERRQ(ierr);
672   /* if PC has attached DM we must remove it or the PCMG will use it to compute incorrect sized vectors and interpolations */
673   ierr = PCSetDM(pc,NULL);CHKERRQ(ierr);
674   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
675   PetscFunctionReturn(0);
676 }
677 
678 PetscErrorCode PCDestroy_Exotic(PC pc)
679 {
680   PetscErrorCode ierr;
681   PC_MG          *mg  = (PC_MG*)pc->data;
682   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
683 
684   PetscFunctionBegin;
685   ierr = MatDestroy(&ctx->P);CHKERRQ(ierr);
686   ierr = KSPDestroy(&ctx->ksp);CHKERRQ(ierr);
687   ierr = PetscFree(ctx);CHKERRQ(ierr);
688   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
689   PetscFunctionReturn(0);
690 }
691 
692 PetscErrorCode PCView_Exotic(PC pc,PetscViewer viewer)
693 {
694   PC_MG          *mg = (PC_MG*)pc->data;
695   PetscErrorCode ierr;
696   PetscBool      iascii;
697   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
698 
699   PetscFunctionBegin;
700   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
701   if (iascii) {
702     ierr = PetscViewerASCIIPrintf(viewer,"    Exotic type = %s\n",PCExoticTypes[ctx->type]);CHKERRQ(ierr);
703     if (ctx->directSolve) {
704       ierr = PetscViewerASCIIPrintf(viewer,"      Using direct solver to construct interpolation\n");CHKERRQ(ierr);
705     } else {
706       PetscViewer sviewer;
707       PetscMPIInt rank;
708 
709       ierr = PetscViewerASCIIPrintf(viewer,"      Using iterative solver to construct interpolation\n");CHKERRQ(ierr);
710       ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
711       ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);  /* should not need to push this twice? */
712       ierr = PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
713       ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)pc),&rank);CHKERRQ(ierr);
714       if (!rank) {
715         ierr = KSPView(ctx->ksp,sviewer);CHKERRQ(ierr);
716       }
717       ierr = PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
718       ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
719       ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
720     }
721   }
722   ierr = PCView_MG(pc,viewer);CHKERRQ(ierr);
723   PetscFunctionReturn(0);
724 }
725 
726 PetscErrorCode PCSetFromOptions_Exotic(PetscOptionItems *PetscOptionsObject,PC pc)
727 {
728   PetscErrorCode ierr;
729   PetscBool      flg;
730   PC_MG          *mg = (PC_MG*)pc->data;
731   PCExoticType   mgctype;
732   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
733 
734   PetscFunctionBegin;
735   ierr = PetscOptionsHead(PetscOptionsObject,"Exotic coarse space options");CHKERRQ(ierr);
736   ierr = PetscOptionsEnum("-pc_exotic_type","face or wirebasket","PCExoticSetType",PCExoticTypes,(PetscEnum)ctx->type,(PetscEnum*)&mgctype,&flg);CHKERRQ(ierr);
737   if (flg) {
738     ierr = PCExoticSetType(pc,mgctype);CHKERRQ(ierr);
739   }
740   ierr = PetscOptionsBool("-pc_exotic_direct_solver","use direct solver to construct interpolation","None",ctx->directSolve,&ctx->directSolve,NULL);CHKERRQ(ierr);
741   if (!ctx->directSolve) {
742     if (!ctx->ksp) {
743       const char *prefix;
744       ierr = KSPCreate(PETSC_COMM_SELF,&ctx->ksp);CHKERRQ(ierr);
745       ierr = KSPSetErrorIfNotConverged(ctx->ksp,pc->erroriffailure);CHKERRQ(ierr);
746       ierr = PetscObjectIncrementTabLevel((PetscObject)ctx->ksp,(PetscObject)pc,1);CHKERRQ(ierr);
747       ierr = PetscLogObjectParent((PetscObject)pc,(PetscObject)ctx->ksp);CHKERRQ(ierr);
748       ierr = PCGetOptionsPrefix(pc,&prefix);CHKERRQ(ierr);
749       ierr = KSPSetOptionsPrefix(ctx->ksp,prefix);CHKERRQ(ierr);
750       ierr = KSPAppendOptionsPrefix(ctx->ksp,"exotic_");CHKERRQ(ierr);
751     }
752     ierr = KSPSetFromOptions(ctx->ksp);CHKERRQ(ierr);
753   }
754   ierr = PetscOptionsTail();CHKERRQ(ierr);
755   PetscFunctionReturn(0);
756 }
757 
758 
759 /*MC
760      PCEXOTIC - Two level overlapping Schwarz preconditioner with exotic (non-standard) coarse grid spaces
761 
762      This uses the PCMG infrastructure restricted to two levels and the face and wirebasket based coarse
763    grid spaces.
764 
765    Notes:
766     By default this uses GMRES on the fine grid smoother so this should be used with KSPFGMRES or the smoother changed to not use GMRES
767 
768    References:
769 +  1. - These coarse grid spaces originate in the work of Bramble, Pasciak  and Schatz, "The Construction
   of Preconditioners for Elliptic Problems by Substructuring IV", Mathematics of Computation, volume 53, 1989.
771 .  2. - They were generalized slightly in "Domain Decomposition Method for Linear Elasticity", Ph. D. thesis, Barry Smith,
772    New York University, 1990.
773 .  3. - They were then explored in great detail in Dryja, Smith, Widlund, "Schwarz Analysis
774    of Iterative Substructuring Methods for Elliptic Problems in Three Dimensions, SIAM Journal on Numerical
775    Analysis, volume 31. 1994. These were developed in the context of iterative substructuring preconditioners.
776 .  4. - They were then ingeniously applied as coarse grid spaces for overlapping Schwarz methods by Dohrmann and Widlund.
777    They refer to them as GDSW (generalized Dryja, Smith, Widlund preconditioners). See, for example,
778    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Extending theory for domain decomposition algorithms to irregular subdomains. In Ulrich Langer, Marco
779    Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
780    of the 17th International Conference on Domain Decomposition Methods in
781    Science and Engineering, held in Strobl, Austria, 2006, number 60 in
782    Springer Verlag, Lecture Notes in Computational Science and Engineering, 2007.
783 .  5. -  Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. A family of energy minimizing coarse spaces for overlapping Schwarz preconditioners. In Ulrich Langer,
784    Marco Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
785    of the 17th International Conference on Domain Decomposition Methods
786    in Science and Engineering, held in Strobl, Austria, 2006, number 60 in
787    Springer Verlag, Lecture Notes in Computational Science and Engineering, 2007
788 .  6. - Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Domain decomposition
789    for less regular subdomains: Overlapping Schwarz in two dimensions. SIAM J.
790    Numer. Anal., 46(4), 2008.
791 -  7. - Clark R. Dohrmann and Olof B. Widlund. An overlapping Schwarz
792    algorithm for almost incompressible elasticity. Technical Report
793    TR2008 912, Department of Computer Science, Courant Institute
   of Mathematical Sciences, New York University, May 2008.
795 
796    Options Database: The usual PCMG options are supported, such as -mg_levels_pc_type <type> -mg_coarse_pc_type <type>
797       -pc_mg_type <type>
798 
799    Level: advanced
800 
801 .seealso:  PCMG, PCSetDM(), PCExoticType, PCExoticSetType()
802 M*/
803 
804 PETSC_EXTERN PetscErrorCode PCCreate_Exotic(PC pc)
805 {
806   PetscErrorCode ierr;
807   PC_Exotic      *ex;
808   PC_MG          *mg;
809 
810   PetscFunctionBegin;
811   /* if type was previously mg; must manually destroy it because call to PCSetType(pc,PCMG) will not destroy it */
812   if (pc->ops->destroy) {
813     ierr =  (*pc->ops->destroy)(pc);CHKERRQ(ierr);
814     pc->data = NULL;
815   }
816   ierr = PetscFree(((PetscObject)pc)->type_name);CHKERRQ(ierr);
817   ((PetscObject)pc)->type_name = NULL;
818 
819   ierr         = PCSetType(pc,PCMG);CHKERRQ(ierr);
820   ierr         = PCMGSetLevels(pc,2,NULL);CHKERRQ(ierr);
821   ierr         = PCMGSetGalerkin(pc,PC_MG_GALERKIN_PMAT);CHKERRQ(ierr);
822   ierr         = PetscNew(&ex);CHKERRQ(ierr); \
823   ex->type     = PC_EXOTIC_FACE;
824   mg           = (PC_MG*) pc->data;
825   mg->innerctx = ex;
826 
827 
828   pc->ops->setfromoptions = PCSetFromOptions_Exotic;
829   pc->ops->view           = PCView_Exotic;
830   pc->ops->destroy        = PCDestroy_Exotic;
831   pc->ops->setup          = PCSetUp_Exotic;
832 
833   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCExoticSetType_C",PCExoticSetType_Exotic);CHKERRQ(ierr);
834   PetscFunctionReturn(0);
835 }
836