xref: /petsc/src/ksp/pc/impls/wb/wb.c (revision 7d0a6c19129e7069c8a40e210b34ed62989173db)
1 
2 #include "petscpc.h"   /*I "petscpc.h" I*/
3 #include "petscmg.h"   /*I "petscmg.h" I*/
4 #include "petscdm.h"   /*I "petscdm.h" I*/
5 #include "../src/ksp/pc/impls/mg/mgimpl.h"
6 
/* Context for the "exotic" (wirebasket / face based) coarse grid preconditioner */
typedef struct {
  PCExoticType type;         /* PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET coarse space */
  Mat          P;            /* the constructed interpolation matrix */
  PetscBool    directSolve;  /* use direct LU factorization to construct interpolation */
  KSP          ksp;          /* inner solver used for the interior solves when directSolve is false */
} PC_Exotic;
13 
14 const char *PCExoticTypes[] = {"face","wirebasket","PCExoticType","PC_Exotic",0};
15 
16 
#undef __FUNCT__
#define __FUNCT__ "DMDAGetWireBasketInterpolation"
/*
      DMDAGetWireBasketInterpolation - Gets the interpolation for a wirebasket based coarse space

      Input Parameters:
+     da      - the DMDA; must be 3d with a single degree of freedom per grid point
.     exotic  - the PC_Exotic context; exotic->directSolve selects LU factorization versus
                exotic->ksp for the interior interpolation solves
.     Aglobal - the assembled fine grid operator
-     reuse   - MAT_INITIAL_MATRIX to create *P, MAT_REUSE_MATRIX to refill an existing *P

      Output Parameter:
.     P       - the interpolation from the coarse wirebasket space (26 values per subdomain:
                8 vertices, 12 edges and 6 faces) to all local fine grid unknowns

*/
PetscErrorCode DMDAGetWireBasketInterpolation(DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
{
  PetscErrorCode         ierr;
  PetscInt               dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
  PetscInt               mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[26],*globals,Ng,*IIint,*IIsurf;
  Mat                    Xint, Xsurf,Xint_tmp;
  IS                     isint,issurf,is,row,col;
  ISLocalToGlobalMapping ltg;
  MPI_Comm               comm;
  Mat                    A,Aii,Ais,Asi,*Aholder,iAii;
  MatFactorInfo          info;
  PetscScalar            *xsurf,*xint;
#if defined(PETSC_USE_DEBUG_foo)
  PetscScalar            tmp;
#endif
  PetscTable             ht;

  PetscFunctionBegin;
  ierr = DMDAGetInfo(da,&dim,0,0,0,&mp,&np,&pp,&dof,0,0,0,0,0);CHKERRQ(ierr);
  if (dof != 1) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only for single field problems");
  if (dim != 3) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only coded for 3d problems");
  ierr = DMDAGetCorners(da,0,0,0,&m,&n,&p);CHKERRQ(ierr);
  ierr = DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);CHKERRQ(ierr);
  /* istart/jstart/kstart become -1 exactly when the ghosted region extends below the owned
     region in that direction; (m-istart) etc. below then count one extra (ghost) layer */
  istart = istart ? -1 : 0;
  jstart = jstart ? -1 : 0;
  kstart = kstart ? -1 : 0;

  /*
    the columns of P are the interpolation of each coarse grid point (one for each vertex and edge)
    to all the local degrees of freedom (this includes the vertices, edges and faces).

    Xint are the subset of the interpolation into the interior

    Xface are the interpolation onto faces but not into the interior

    Xsurf are the interpolation onto the vertices and edges (the surfbasket)
                                        Xint
    Symbolically one could write P = (  Xface  ) after interchanging the rows to match the natural ordering on the domain
                                        Xsurf
  */
  /* N = all local points; Nint = strict interior; Nsurf = boundary (faces + wirebasket) */
  N     = (m - istart)*(n - jstart)*(p - kstart);
  Nint  = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
  Nface = 2*( (m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart) );
  Nwire = 4*( (m-2-istart) + (n-2-jstart) + (p-2-kstart) ) + 8;
  Nsurf = Nface + Nwire;
  /* one column per coarse wirebasket value: 8 vertices + 12 edges + 6 faces = 26 */
  ierr = MatCreateSeqDense(MPI_COMM_SELF,Nint,26,PETSC_NULL,&Xint);CHKERRQ(ierr);
  ierr = MatCreateSeqDense(MPI_COMM_SELF,Nsurf,26,PETSC_NULL,&Xsurf);CHKERRQ(ierr);
  ierr = MatGetArray(Xsurf,&xsurf);CHKERRQ(ierr);

  /*
     Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
     Xsurf will be all zero (thus making the coarse matrix singular).
  */
  if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
  if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
  if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");

  /* Walk the subdomain boundary in its natural ordering; cnt indexes rows of Xsurf while the
     k*Nsurf offsets select the column (coarse point 0..25) whose indicator the row receives.
     Xsurf is dense column-major, so row r of column c is xsurf[r + c*Nsurf]. */
  cnt = 0;
  xsurf[cnt++] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + Nsurf] = 1;} xsurf[cnt++ + 2*Nsurf] = 1;
  for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 3*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 4*Nsurf] = 1;} xsurf[cnt++ + 5*Nsurf] = 1;}
  xsurf[cnt++ + 6*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 7*Nsurf] = 1;} xsurf[cnt++ + 8*Nsurf] = 1;
  for (k=1;k<p-1-kstart;k++) {
    xsurf[cnt++ + 9*Nsurf] = 1;  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 10*Nsurf] = 1;}  xsurf[cnt++ + 11*Nsurf] = 1;
    for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 12*Nsurf] = 1; /* these are the interior nodes */ xsurf[cnt++ + 13*Nsurf] = 1;}
    xsurf[cnt++ + 14*Nsurf] = 1;  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 15*Nsurf] = 1;} xsurf[cnt++ + 16*Nsurf] = 1;
  }
  xsurf[cnt++ + 17*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 18*Nsurf] = 1;} xsurf[cnt++ + 19*Nsurf] = 1;
  for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 20*Nsurf] = 1;  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 21*Nsurf] = 1;} xsurf[cnt++ + 22*Nsurf] = 1;}
  xsurf[cnt++ + 23*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 24*Nsurf] = 1;} xsurf[cnt++ + 25*Nsurf] = 1;

  /* interpolations only sum to 1 when using direct solver */
#if defined(PETSC_USE_DEBUG_foo)
  for (i=0; i<Nsurf; i++) {
    tmp = 0.0;
    for (j=0; j<26; j++) {
      tmp += xsurf[i+j*Nsurf];
    }
    if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %G",i,PetscAbsScalar(tmp));
  }
#endif
  ierr = MatRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
  /* ierr = MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);*/


  /*
       I are the indices for all the needed vertices (in global numbering)
       Iint are the indices for the interior values, I surf for the surface values
            (This is just for the part of the global matrix obtained with MatGetSubMatrix(), it
             is NOT the local DMDA ordering.)
       IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
  */
#define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
  ierr = PetscMalloc3(N,PetscInt,&II,Nint,PetscInt,&Iint,Nsurf,PetscInt,&Isurf);CHKERRQ(ierr);
  ierr = PetscMalloc2(Nint,PetscInt,&IIint,Nsurf,PetscInt,&IIsurf);CHKERRQ(ierr);
  for (k=0; k<p-kstart; k++) {
    for (j=0; j<n-jstart; j++) {
      for (i=0; i<m-istart; i++) {
        /* mwidth/nwidth strides give the local (ghosted) DMDA index of the point */
        II[c++] = i + j*mwidth + k*mwidth*nwidth;

        if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
          IIint[cint]  = i + j*mwidth + k*mwidth*nwidth;
          Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
        } else {
          IIsurf[csurf]  = i + j*mwidth + k*mwidth*nwidth;
          Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
        }
      }
    }
  }
  /* sanity: the partition into interior + surface must account for every local point */
  if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
  if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
  if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
  ierr = DMGetLocalToGlobalMapping(da,&ltg);CHKERRQ(ierr);
  /* in-place conversion of the local indices to global numbering */
  ierr = ISLocalToGlobalMappingApply(ltg,N,II,II);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);CHKERRQ(ierr);
  ierr = PetscFree3(II,Iint,Isurf);CHKERRQ(ierr);

  /* extract the sequential subdomain matrix (owned plus ghost points) from Aglobal */
  ierr = MatGetSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);CHKERRQ(ierr);
  A    = *Aholder;
  ierr = PetscFree(Aholder);CHKERRQ(ierr);

  ierr = MatGetSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);CHKERRQ(ierr);
  ierr = MatGetSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);CHKERRQ(ierr);
  /* NOTE(review): Asi is extracted but never used below (only destroyed) — confirm whether
     it can be dropped or is kept for a planned restriction computation */
  ierr = MatGetSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);CHKERRQ(ierr);

  /*
     Solve for the interpolation onto the interior Xint
     Xint = -Aii^{-1} Ais Xsurf, i.e. the discrete harmonic extension of the surface values
  */
  ierr = MatDuplicate(Xint,MAT_DO_NOT_COPY_VALUES,&Xint_tmp);CHKERRQ(ierr);
  ierr = MatMatMult(Ais,Xsurf,MAT_REUSE_MATRIX,PETSC_DETERMINE,&Xint_tmp);CHKERRQ(ierr);
  ierr = MatScale(Xint_tmp,-1.0);CHKERRQ(ierr);
  if (exotic->directSolve) {
    /* one LU factorization of Aii, then a single multi-RHS solve for all 26 columns */
    ierr = MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);CHKERRQ(ierr);
    ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
    ierr = MatGetOrdering(Aii,MATORDERINGND,&row,&col);CHKERRQ(ierr);
    ierr = MatLUFactorSymbolic(iAii,Aii,row,col,&info);CHKERRQ(ierr);
    ierr = ISDestroy(row);CHKERRQ(ierr);
    ierr = ISDestroy(col);CHKERRQ(ierr);
    ierr = MatLUFactorNumeric(iAii,Aii,&info);CHKERRQ(ierr);
    ierr = MatMatSolve(iAii,Xint_tmp,Xint);CHKERRQ(ierr);
    ierr = MatDestroy(iAii);CHKERRQ(ierr);
  } else {
    Vec         b,x;
    PetscScalar *xint_tmp;

    /* solve column by column with the inner KSP; x and b wrap columns of the dense
       Xint / Xint_tmp arrays in place (created with a 0 array, filled via VecPlaceArray) */
    ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
    ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,Nint,0,&x);CHKERRQ(ierr);
    ierr = MatGetArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
    ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,Nint,0,&b);CHKERRQ(ierr);
    ierr = KSPSetOperators(exotic->ksp,Aii,Aii,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
    for (i=0; i<26; i++) {
      ierr = VecPlaceArray(x,xint+i*Nint);CHKERRQ(ierr);
      ierr = VecPlaceArray(b,xint_tmp+i*Nint);CHKERRQ(ierr);
      ierr = KSPSolve(exotic->ksp,b,x);CHKERRQ(ierr);
      ierr = VecResetArray(x);CHKERRQ(ierr);
      ierr = VecResetArray(b);CHKERRQ(ierr);
    }
    ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
    ierr = MatRestoreArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
    ierr = VecDestroy(x);CHKERRQ(ierr);
    ierr = VecDestroy(b);CHKERRQ(ierr);
  }
  ierr = MatDestroy(Xint_tmp);CHKERRQ(ierr);

#if defined(PETSC_USE_DEBUG_foo)
  ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
  for (i=0; i<Nint; i++) {
    tmp = 0.0;
    for (j=0; j<26; j++) {
      tmp += xint[i+j*Nint];
    }
    if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %G",i,PetscAbsScalar(tmp));
  }
  ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
  /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
#endif


  /*         total vertices             total faces                                  total edges */
  Ntotal = (mp + 1)*(np + 1)*(pp + 1) + mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1) + mp*(np+1)*(pp+1) + np*(mp+1)*(pp+1) +  pp*(mp+1)*(np+1);

  /*
      For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
      (the 26 representatives are laid out as a 3x3x3 grid of ghosted-local indices minus the center)
  */
  cnt = 0;
  gl[cnt++] = 0;  { gl[cnt++] = 1;} gl[cnt++] = m-istart-1;
  { gl[cnt++] = mwidth;  { gl[cnt++] = mwidth+1;} gl[cnt++] = mwidth + m-istart-1;}
  gl[cnt++] = mwidth*(n-jstart-1);  { gl[cnt++] = mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*(n-jstart-1) + m-istart-1;
  {
    gl[cnt++] = mwidth*nwidth;  { gl[cnt++] = mwidth*nwidth+1;}  gl[cnt++] = mwidth*nwidth+ m-istart-1;
    { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
    gl[cnt++] = mwidth*nwidth+ mwidth*(n-jstart-1);   { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1) + m-istart-1;
  }
  gl[cnt++] = mwidth*nwidth*(p-kstart-1); { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) +  m-istart-1;
  { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth;   { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1)+mwidth+m-istart-1;}
  gl[cnt++] = mwidth*nwidth*(p-kstart-1) +  mwidth*(n-jstart-1);  { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+ mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth*(n-jstart-1) + m-istart-1;

  /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
  /* convert that to global numbering and get them on all processes */
  ierr = ISLocalToGlobalMappingApply(ltg,26,gl,gl);CHKERRQ(ierr);
  /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
  ierr = PetscMalloc(26*mp*np*pp*sizeof(PetscInt),&globals);CHKERRQ(ierr);
  ierr = MPI_Allgather(gl,26,MPIU_INT,globals,26,MPIU_INT,((PetscObject)da)->comm);CHKERRQ(ierr);

  /* Number the coarse grid points from 0 to Ntotal; duplicates across processes collapse
     because PetscTableAddCount() only assigns a new count to unseen keys (+1 avoids key 0) */
  ierr = PetscTableCreate(Ntotal/3,&ht);CHKERRQ(ierr);
  for (i=0; i<26*mp*np*pp; i++){
    ierr = PetscTableAddCount(ht,globals[i]+1);CHKERRQ(ierr);
  }
  ierr = PetscTableGetCount(ht,&cnt);CHKERRQ(ierr);
  if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
  ierr = PetscFree(globals);CHKERRQ(ierr);
  for (i=0; i<26; i++) {
    ierr = PetscTableFind(ht,gl[i]+1,&gl[i]);CHKERRQ(ierr);
    gl[i]--;
  }
  ierr = PetscTableDestroy(ht);CHKERRQ(ierr);
  /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */

  /* construct global interpolation matrix */
  ierr = MatGetLocalSize(Aglobal,&Ng,PETSC_NULL);CHKERRQ(ierr);
  if (reuse == MAT_INITIAL_MATRIX) {
    /* NOTE(review): preallocates Nint+Nsurf nonzeros per row though each row has at most
       26 entries — appears overly conservative; confirm before tightening */
    ierr = MatCreateMPIAIJ(((PetscObject)da)->comm,Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,PETSC_NULL,Nint+Nsurf,PETSC_NULL,P);CHKERRQ(ierr);
  } else {
    ierr = MatZeroEntries(*P);CHKERRQ(ierr);
  }
  /* the dense Xint/Xsurf arrays are column-major, so insert in column orientation */
  ierr = MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
  ierr = MatSetValues(*P,Nint,IIint,26,gl,xint,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
  ierr = MatGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
  ierr = MatSetValues(*P,Nsurf,IIsurf,26,gl,xsurf,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree2(IIint,IIsurf);CHKERRQ(ierr);

#if defined(PETSC_USE_DEBUG_foo)
  {
    Vec         x,y;
    PetscScalar *yy;
    ierr = VecCreateMPI(((PetscObject)da)->comm,Ng,PETSC_DETERMINE,&y);CHKERRQ(ierr);
    ierr = VecCreateMPI(((PetscObject)da)->comm,PETSC_DETERMINE,Ntotal,&x);CHKERRQ(ierr);
    ierr = VecSet(x,1.0);CHKERRQ(ierr);
    ierr = MatMult(*P,x,y);CHKERRQ(ierr);
    ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
    for (i=0; i<Ng; i++) {
      if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %G",i,PetscAbsScalar(yy[i]));
    }
    ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
    ierr = VecDestroy(x);CHKERRQ(ierr);
    ierr = VecDestroy(y);CHKERRQ(ierr);
  }
#endif

  ierr = MatDestroy(Aii);CHKERRQ(ierr);
  ierr = MatDestroy(Ais);CHKERRQ(ierr);
  ierr = MatDestroy(Asi);CHKERRQ(ierr);
  ierr = MatDestroy(A);CHKERRQ(ierr);
  ierr = ISDestroy(is);CHKERRQ(ierr);
  ierr = ISDestroy(isint);CHKERRQ(ierr);
  ierr = ISDestroy(issurf);CHKERRQ(ierr);
  ierr = MatDestroy(Xint);CHKERRQ(ierr);
  ierr = MatDestroy(Xsurf);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
294 
#undef __FUNCT__
#define __FUNCT__ "DMDAGetFaceInterpolation"
/*
      DMDAGetFaceInterpolation - Gets the interpolation for a face based coarse space

      Like DMDAGetWireBasketInterpolation() but with only 6 coarse values per subdomain
      (one per face); vertex and edge boundary values receive no indicator column.

      Input Parameters:
+     da      - the DMDA; must be 3d with a single degree of freedom per grid point
.     exotic  - the PC_Exotic context; exotic->directSolve selects LU factorization versus
                exotic->ksp for the interior interpolation solves
.     Aglobal - the assembled fine grid operator
-     reuse   - MAT_INITIAL_MATRIX to create *P, MAT_REUSE_MATRIX to refill an existing *P

      Output Parameter:
.     P       - the interpolation from the 6 coarse face values to the local fine grid unknowns

*/
PetscErrorCode DMDAGetFaceInterpolation(DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
{
  PetscErrorCode         ierr;
  PetscInt               dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
  PetscInt               mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[6],*globals,Ng,*IIint,*IIsurf;
  Mat                    Xint, Xsurf,Xint_tmp;
  IS                     isint,issurf,is,row,col;
  ISLocalToGlobalMapping ltg;
  MPI_Comm               comm;
  Mat                    A,Aii,Ais,Asi,*Aholder,iAii;
  MatFactorInfo          info;
  PetscScalar            *xsurf,*xint;
#if defined(PETSC_USE_DEBUG_foo)
  PetscScalar            tmp;
#endif
  PetscTable             ht;

  PetscFunctionBegin;
  ierr = DMDAGetInfo(da,&dim,0,0,0,&mp,&np,&pp,&dof,0,0,0,0,0);CHKERRQ(ierr);
  if (dof != 1) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only for single field problems");
  if (dim != 3) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only coded for 3d problems");
  ierr = DMDAGetCorners(da,0,0,0,&m,&n,&p);CHKERRQ(ierr);
  ierr = DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);CHKERRQ(ierr);
  /* istart/jstart/kstart become -1 exactly when the ghosted region extends below the owned
     region in that direction; (m-istart) etc. below then count one extra (ghost) layer */
  istart = istart ? -1 : 0;
  jstart = jstart ? -1 : 0;
  kstart = kstart ? -1 : 0;

  /*
    the columns of P are the interpolation of each coarse grid point (one for each vertex and edge)
    to all the local degrees of freedom (this includes the vertices, edges and faces).

    Xint are the subset of the interpolation into the interior

    Xface are the interpolation onto faces but not into the interior

    Xsurf are the interpolation onto the vertices and edges (the surfbasket)
                                        Xint
    Symbolically one could write P = (  Xface  ) after interchanging the rows to match the natural ordering on the domain
                                        Xsurf
  */
  /* N = all local points; Nint = strict interior; Nsurf = boundary (faces + wirebasket) */
  N     = (m - istart)*(n - jstart)*(p - kstart);
  Nint  = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
  Nface = 2*( (m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart) );
  Nwire = 4*( (m-2-istart) + (n-2-jstart) + (p-2-kstart) ) + 8;
  Nsurf = Nface + Nwire;
  /* one column per coarse value: the 6 faces of the subdomain */
  ierr = MatCreateSeqDense(MPI_COMM_SELF,Nint,6,PETSC_NULL,&Xint);CHKERRQ(ierr);
  ierr = MatCreateSeqDense(MPI_COMM_SELF,Nsurf,6,PETSC_NULL,&Xsurf);CHKERRQ(ierr);
  ierr = MatGetArray(Xsurf,&xsurf);CHKERRQ(ierr);

  /*
     Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
     Xsurf will be all zero (thus making the coarse matrix singular).
  */
  if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
  if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
  if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");

  /* Walk the boundary in its natural ordering; only interior points of each of the 6 faces
     get an indicator entry (columns 0..5); edge/vertex rows of Xsurf stay zero.
     Xsurf is dense column-major, so row r of column c is xsurf[r + c*Nsurf].
     NOTE(review): cnt is NOT advanced over the edge/vertex rows here, unlike the wirebasket
     version — the row offsets rely on that; confirm against the original ordering */
  cnt = 0;
  for (j=1;j<n-1-jstart;j++) {  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 0*Nsurf] = 1;} }
   for (k=1;k<p-1-kstart;k++) {
    for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 1*Nsurf] = 1;}
    for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 2*Nsurf] = 1; /* these are the interior nodes */ xsurf[cnt++ + 3*Nsurf] = 1;}
    for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 4*Nsurf] = 1;}
  }
  for (j=1;j<n-1-jstart;j++) {for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 5*Nsurf] = 1;} }

#if defined(PETSC_USE_DEBUG_foo)
  for (i=0; i<Nsurf; i++) {
    tmp = 0.0;
    for (j=0; j<6; j++) {
      tmp += xsurf[i+j*Nsurf];
    }
    if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %G",i,PetscAbsScalar(tmp));
  }
#endif
  ierr = MatRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
  /* ierr = MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);*/


  /*
       I are the indices for all the needed vertices (in global numbering)
       Iint are the indices for the interior values, I surf for the surface values
            (This is just for the part of the global matrix obtained with MatGetSubMatrix(), it
             is NOT the local DMDA ordering.)
       IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
  */
#define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
  ierr = PetscMalloc3(N,PetscInt,&II,Nint,PetscInt,&Iint,Nsurf,PetscInt,&Isurf);CHKERRQ(ierr);
  ierr = PetscMalloc2(Nint,PetscInt,&IIint,Nsurf,PetscInt,&IIsurf);CHKERRQ(ierr);
  for (k=0; k<p-kstart; k++) {
    for (j=0; j<n-jstart; j++) {
      for (i=0; i<m-istart; i++) {
        /* mwidth/nwidth strides give the local (ghosted) DMDA index of the point */
        II[c++] = i + j*mwidth + k*mwidth*nwidth;

        if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
          IIint[cint]  = i + j*mwidth + k*mwidth*nwidth;
          Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
        } else {
          IIsurf[csurf]  = i + j*mwidth + k*mwidth*nwidth;
          Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
        }
      }
    }
  }
  /* sanity: the partition into interior + surface must account for every local point */
  if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
  if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
  if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
  ierr = DMGetLocalToGlobalMapping(da,&ltg);CHKERRQ(ierr);
  /* in-place conversion of the local indices to global numbering */
  ierr = ISLocalToGlobalMappingApply(ltg,N,II,II);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);CHKERRQ(ierr);
  ierr = PetscFree3(II,Iint,Isurf);CHKERRQ(ierr);

  /* extract the sequential subdomain matrix (owned plus ghost points) from Aglobal */
  ierr = MatGetSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);CHKERRQ(ierr);
  A    = *Aholder;
  ierr = PetscFree(Aholder);CHKERRQ(ierr);

  ierr = MatGetSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);CHKERRQ(ierr);
  ierr = MatGetSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);CHKERRQ(ierr);
  /* NOTE(review): Asi is extracted but never used below (only destroyed) — confirm whether
     it can be dropped or is kept for a planned restriction computation */
  ierr = MatGetSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);CHKERRQ(ierr);

  /*
     Solve for the interpolation onto the interior Xint
     Xint = -Aii^{-1} Ais Xsurf, i.e. the discrete harmonic extension of the face values
  */
  ierr = MatDuplicate(Xint,MAT_DO_NOT_COPY_VALUES,&Xint_tmp);CHKERRQ(ierr);
  ierr = MatMatMult(Ais,Xsurf,MAT_REUSE_MATRIX,PETSC_DETERMINE,&Xint_tmp);CHKERRQ(ierr);
  ierr = MatScale(Xint_tmp,-1.0);CHKERRQ(ierr);

  if (exotic->directSolve) {
    /* one LU factorization of Aii, then a single multi-RHS solve for all 6 columns */
    ierr = MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);CHKERRQ(ierr);
    ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
    ierr = MatGetOrdering(Aii,MATORDERINGND,&row,&col);CHKERRQ(ierr);
    ierr = MatLUFactorSymbolic(iAii,Aii,row,col,&info);CHKERRQ(ierr);
    ierr = ISDestroy(row);CHKERRQ(ierr);
    ierr = ISDestroy(col);CHKERRQ(ierr);
    ierr = MatLUFactorNumeric(iAii,Aii,&info);CHKERRQ(ierr);
    ierr = MatMatSolve(iAii,Xint_tmp,Xint);CHKERRQ(ierr);
    ierr = MatDestroy(iAii);CHKERRQ(ierr);
  } else {
    Vec         b,x;
    PetscScalar *xint_tmp;

    /* solve column by column with the inner KSP; x and b wrap columns of the dense
       Xint / Xint_tmp arrays in place (created with a 0 array, filled via VecPlaceArray) */
    ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
    ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,Nint,0,&x);CHKERRQ(ierr);
    ierr = MatGetArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
    ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,Nint,0,&b);CHKERRQ(ierr);
    ierr = KSPSetOperators(exotic->ksp,Aii,Aii,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
    for (i=0; i<6; i++) {
      ierr = VecPlaceArray(x,xint+i*Nint);CHKERRQ(ierr);
      ierr = VecPlaceArray(b,xint_tmp+i*Nint);CHKERRQ(ierr);
      ierr = KSPSolve(exotic->ksp,b,x);CHKERRQ(ierr);
      ierr = VecResetArray(x);CHKERRQ(ierr);
      ierr = VecResetArray(b);CHKERRQ(ierr);
    }
    ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
    ierr = MatRestoreArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
    ierr = VecDestroy(x);CHKERRQ(ierr);
    ierr = VecDestroy(b);CHKERRQ(ierr);
  }
  ierr = MatDestroy(Xint_tmp);CHKERRQ(ierr);

#if defined(PETSC_USE_DEBUG_foo)
  ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
  for (i=0; i<Nint; i++) {
    tmp = 0.0;
    for (j=0; j<6; j++) {
      tmp += xint[i+j*Nint];
    }
    if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %G",i,PetscAbsScalar(tmp));
  }
  ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
  /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
#endif


  /*         total faces    */
  Ntotal =  mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1);

  /*
      For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
      (only the 6 face-center representatives here, one per subdomain face)
  */
  cnt = 0;
  { gl[cnt++] = mwidth+1;}
  {
    { gl[cnt++] = mwidth*nwidth+1;}
    { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
    { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;}
  }
  { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;}

  /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
  /* convert that to global numbering and get them on all processes */
  ierr = ISLocalToGlobalMappingApply(ltg,6,gl,gl);CHKERRQ(ierr);
  /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
  ierr = PetscMalloc(6*mp*np*pp*sizeof(PetscInt),&globals);CHKERRQ(ierr);
  ierr = MPI_Allgather(gl,6,MPIU_INT,globals,6,MPIU_INT,((PetscObject)da)->comm);CHKERRQ(ierr);

  /* Number the coarse grid points from 0 to Ntotal; duplicates across processes collapse
     because PetscTableAddCount() only assigns a new count to unseen keys (+1 avoids key 0) */
  ierr = PetscTableCreate(Ntotal/3,&ht);CHKERRQ(ierr);
  for (i=0; i<6*mp*np*pp; i++){
    ierr = PetscTableAddCount(ht,globals[i]+1);CHKERRQ(ierr);
  }
  ierr = PetscTableGetCount(ht,&cnt);CHKERRQ(ierr);
  if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
  ierr = PetscFree(globals);CHKERRQ(ierr);
  for (i=0; i<6; i++) {
    ierr = PetscTableFind(ht,gl[i]+1,&gl[i]);CHKERRQ(ierr);
    gl[i]--;
  }
  ierr = PetscTableDestroy(ht);CHKERRQ(ierr);
  /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */

  /* construct global interpolation matrix */
  ierr = MatGetLocalSize(Aglobal,&Ng,PETSC_NULL);CHKERRQ(ierr);
  if (reuse == MAT_INITIAL_MATRIX) {
    /* NOTE(review): preallocates Nint+Nsurf / Nint nonzeros per row though each row has
       at most 6 entries — appears overly conservative; confirm before tightening */
    ierr = MatCreateMPIAIJ(((PetscObject)da)->comm,Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,PETSC_NULL,Nint,PETSC_NULL,P);CHKERRQ(ierr);
  } else {
    ierr = MatZeroEntries(*P);CHKERRQ(ierr);
  }
  /* the dense Xint/Xsurf arrays are column-major, so insert in column orientation */
  ierr = MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatGetArray(Xint,&xint);CHKERRQ(ierr);
  ierr = MatSetValues(*P,Nint,IIint,6,gl,xint,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatRestoreArray(Xint,&xint);CHKERRQ(ierr);
  ierr = MatGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
  ierr = MatSetValues(*P,Nsurf,IIsurf,6,gl,xsurf,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree2(IIint,IIsurf);CHKERRQ(ierr);


#if defined(PETSC_USE_DEBUG_foo)
  {
    Vec         x,y;
    PetscScalar *yy;
    ierr = VecCreateMPI(((PetscObject)da)->comm,Ng,PETSC_DETERMINE,&y);CHKERRQ(ierr);
    ierr = VecCreateMPI(((PetscObject)da)->comm,PETSC_DETERMINE,Ntotal,&x);CHKERRQ(ierr);
    ierr = VecSet(x,1.0);CHKERRQ(ierr);
    ierr = MatMult(*P,x,y);CHKERRQ(ierr);
    ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
    for (i=0; i<Ng; i++) {
      if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %G",i,PetscAbsScalar(yy[i]));
    }
    ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
    ierr = VecDestroy(x);CHKERRQ(ierr);
    ierr = VecDestroy(y);CHKERRQ(ierr);
  }
#endif

  ierr = MatDestroy(Aii);CHKERRQ(ierr);
  ierr = MatDestroy(Ais);CHKERRQ(ierr);
  ierr = MatDestroy(Asi);CHKERRQ(ierr);
  ierr = MatDestroy(A);CHKERRQ(ierr);
  ierr = ISDestroy(is);CHKERRQ(ierr);
  ierr = ISDestroy(isint);CHKERRQ(ierr);
  ierr = ISDestroy(issurf);CHKERRQ(ierr);
  ierr = MatDestroy(Xint);CHKERRQ(ierr);
  ierr = MatDestroy(Xsurf);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
565 
566 
567 #undef __FUNCT__
568 #define __FUNCT__ "PCExoticSetType"
569 /*@
570    PCExoticSetType - Sets the type of coarse grid interpolation to use
571 
572    Logically Collective on PC
573 
574    Input Parameters:
575 +  pc - the preconditioner context
576 -  type - either PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET (defaults to face)
577 
578    Notes: The face based interpolation has 1 degree of freedom per face and ignores the
579      edge and vertex values completely in the coarse problem. For any seven point
580      stencil the interpolation of a constant on all faces into the interior is that constant.
581 
582      The wirebasket interpolation has 1 degree of freedom per vertex, per edge and
583      per face. A constant on the subdomain boundary is interpolated as that constant
584      in the interior of the domain.
585 
586      The coarse grid matrix is obtained via the Galerkin computation A_c = R A R^T, hence
587      if A is nonsingular A_c is also nonsingular.
588 
589      Both interpolations are suitable for only scalar problems.
590 
591    Level: intermediate
592 
593 
594 .seealso: PCEXOTIC, PCExoticType()
595 @*/
596 PetscErrorCode  PCExoticSetType(PC pc,PCExoticType type)
597 {
598   PetscErrorCode ierr;
599 
600   PetscFunctionBegin;
601   PetscValidHeaderSpecific(pc,PC_CLASSID,1);
602   PetscValidLogicalCollectiveEnum(pc,type,2);
603   ierr = PetscTryMethod(pc,"PCExoticSetType_C",(PC,PCExoticType),(pc,type));CHKERRQ(ierr);
604   PetscFunctionReturn(0);
605 }
606 
607 #undef __FUNCT__
608 #define __FUNCT__ "PCExoticSetType_Exotic"
609 PetscErrorCode  PCExoticSetType_Exotic(PC pc,PCExoticType type)
610 {
611   PC_MG     *mg = (PC_MG*)pc->data;
612   PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
613 
614   PetscFunctionBegin;
615   ctx->type = type;
616   PetscFunctionReturn(0);
617 }
618 
619 #undef __FUNCT__
620 #define __FUNCT__ "PCSetUp_Exotic"
621 PetscErrorCode PCSetUp_Exotic(PC pc)
622 {
623   PetscErrorCode ierr;
624   Mat            A;
625   PC_MG          *mg = (PC_MG*)pc->data;
626   PC_Exotic      *ex = (PC_Exotic*) mg->innerctx;
627   MatReuse       reuse = (ex->P) ? MAT_REUSE_MATRIX : MAT_INITIAL_MATRIX;
628 
629   PetscFunctionBegin;
630   if (!pc->dm) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONGSTATE,"Need to call PCSetDM() before using this PC");
631   ierr = PCGetOperators(pc,PETSC_NULL,&A,PETSC_NULL);CHKERRQ(ierr);
632   if (ex->type == PC_EXOTIC_FACE) {
633     ierr = DMDAGetFaceInterpolation(pc->dm,ex,A,reuse,&ex->P);CHKERRQ(ierr);
634   } else if (ex->type == PC_EXOTIC_WIREBASKET) {
635     ierr = DMDAGetWireBasketInterpolation(pc->dm,ex,A,reuse,&ex->P);CHKERRQ(ierr);
636   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_PLIB,"Unknown exotic coarse space %d",ex->type);
637   ierr = PCMGSetInterpolation(pc,1,ex->P);CHKERRQ(ierr);
638   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
639   PetscFunctionReturn(0);
640 }
641 
642 #undef __FUNCT__
643 #define __FUNCT__ "PCDestroy_Exotic"
644 PetscErrorCode PCDestroy_Exotic(PC pc)
645 {
646   PetscErrorCode ierr;
647   PC_MG          *mg = (PC_MG*)pc->data;
648   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
649 
650   PetscFunctionBegin;
651   if (ctx->P) {ierr = MatDestroy(ctx->P);CHKERRQ(ierr);}
652   if (ctx->ksp) {ierr = KSPDestroy(ctx->ksp);CHKERRQ(ierr);}
653   ierr = PetscFree(ctx);CHKERRQ(ierr);
654   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
655   PetscFunctionReturn(0);
656 }
657 
#undef __FUNCT__
#define __FUNCT__ "PCView_Exotic"
/* Prints the exotic-specific options (coarse-space type, interpolation solver)
   to an ASCII viewer, then delegates to the standard PCMG viewer. */
PetscErrorCode PCView_Exotic(PC pc,PetscViewer viewer)
{
  PC_MG          *mg = (PC_MG*)pc->data;
  PetscErrorCode ierr;
  PetscBool      iascii;
  PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerASCIIPrintf(viewer,"    Exotic type = %s\n",PCExoticTypes[ctx->type]);CHKERRQ(ierr);
    if (ctx->directSolve) {
      ierr = PetscViewerASCIIPrintf(viewer,"      Using direct solver to construct interpolation\n");CHKERRQ(ierr);
    } else {
      PetscViewer sviewer;
      PetscMPIInt rank;

      ierr = PetscViewerASCIIPrintf(viewer,"      Using iterative solver to construct interpolation\n");CHKERRQ(ierr);
      ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);  /* should not need to push this twice? */
      /* ctx->ksp lives on PETSC_COMM_SELF, so only rank 0 views it through the
         singleton sub-viewer; all ranks must make the Get/Restore calls */
      ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
      ierr = MPI_Comm_rank(((PetscObject)pc)->comm,&rank);CHKERRQ(ierr);
      if (!rank) {
	ierr = KSPView(ctx->ksp,sviewer);CHKERRQ(ierr);
      }
      ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
    }
  }
  ierr = PCView_MG(pc,viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
693 
#undef __FUNCT__
#define __FUNCT__ "PCSetFromOptions_Exotic"
/* Processes the -pc_exotic_* options: the coarse-space type, and whether a
   direct factorization or an iterative KSP (created lazily here, prefix
   "exotic_") is used to construct the interpolation. */
PetscErrorCode PCSetFromOptions_Exotic(PC pc)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  PC_MG          *mg = (PC_MG*)pc->data;
  PCExoticType   mgctype;
  PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscOptionsHead("Exotic coarse space options");CHKERRQ(ierr);
    ierr = PetscOptionsEnum("-pc_exotic_type","face or wirebasket","PCExoticSetType",PCExoticTypes,(PetscEnum)ctx->type,(PetscEnum*)&mgctype,&flg);CHKERRQ(ierr);
    if (flg) {
      ierr = PCExoticSetType(pc,mgctype);CHKERRQ(ierr);
    }
    ierr = PetscOptionsBool("-pc_exotic_direct_solver","use direct solver to construct interpolation","None",ctx->directSolve,&ctx->directSolve,PETSC_NULL);CHKERRQ(ierr);
    if (!ctx->directSolve) {
      /* lazily create the sequential solver used to build the interpolation columns */
      if (!ctx->ksp) {
        const char *prefix;
        ierr = KSPCreate(PETSC_COMM_SELF,&ctx->ksp);CHKERRQ(ierr);
        ierr = PetscObjectIncrementTabLevel((PetscObject)ctx->ksp,(PetscObject)pc,1);CHKERRQ(ierr);
        ierr = PetscLogObjectParent(pc,ctx->ksp);CHKERRQ(ierr);
        /* inner solver options are namespaced under <pc prefix>exotic_ */
        ierr = PCGetOptionsPrefix(pc,&prefix);CHKERRQ(ierr);
        ierr = KSPSetOptionsPrefix(ctx->ksp,prefix);CHKERRQ(ierr);
        ierr = KSPAppendOptionsPrefix(ctx->ksp,"exotic_");CHKERRQ(ierr);
      }
      ierr = KSPSetFromOptions(ctx->ksp);CHKERRQ(ierr);
    }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
726 
727 
728 /*MC
729      PCEXOTIC - Two level overlapping Schwarz preconditioner with exotic (non-standard) coarse grid spaces
730 
731      This uses the PCMG infrastructure restricted to two levels and the face and wirebasket based coarse
732    grid spaces.
733 
734    Notes: By default this uses GMRES on the fine grid smoother so this should be used with KSPFGMRES or the smoother changed to not use GMRES
735 
   References: These coarse grid spaces originate in the work of Bramble, Pasciak  and Schatz, "The Construction
   of Preconditioners for Elliptic Problems by Substructuring IV", Mathematics of Computation, volume 53 pages 1--24, 1989.
738    They were generalized slightly in "Domain Decomposition Method for Linear Elasticity", Ph. D. thesis, Barry Smith,
739    New York University, 1990. They were then explored in great detail in Dryja, Smith, Widlund, "Schwarz Analysis
740    of Iterative Substructuring Methods for Elliptic Problems in Three Dimensions, SIAM Journal on Numerical
741    Analysis, volume 31. pages 1662-1694, 1994. These were developed in the context of iterative substructuring preconditioners.
742    They were then ingeniously applied as coarse grid spaces for overlapping Schwarz methods by Dohrmann and Widlund.
743    They refer to them as GDSW (generalized Dryja, Smith, Widlund preconditioners). See, for example,
744    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Extending theory for domain decomposition algorithms to irregular subdomains. In Ulrich Langer, Marco
745    Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
746    of the 17th International Conference on Domain Decomposition Methods in
747    Science and Engineering, held in Strobl, Austria, July 3-7, 2006, number 60 in
748    Springer-Verlag, Lecture Notes in Computational Science and Engineering, pages 255-261, 2007.
749    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. A family of energy min-
750    imizing coarse spaces for overlapping Schwarz preconditioners. In Ulrich Langer,
751    Marco Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
752    of the 17th International Conference on Domain Decomposition Methods
753    in Science and Engineering, held in Strobl, Austria, July 3-7, 2006, number 60 in
754    Springer-Verlag, Lecture Notes in Computational Science and Engineering, pages 247-254, 2007
755    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Domain decomposition
756    for less regular subdomains: Overlapping Schwarz in two dimensions. SIAM J.
757    Numer. Anal., 46(4):2153-2168, 2008.
758    Clark R. Dohrmann and Olof B. Widlund. An overlapping Schwarz
759    algorithm for almost incompressible elasticity. Technical Report
760    TR2008-912, Department of Computer Science, Courant Institute
761    of Mathematical Sciences, New York University, May 2008. URL:
762    http://cs.nyu.edu/csweb/Research/TechReports/TR2008-912/TR2008-912.pdf
763 
764    Options Database: The usual PCMG options are supported, such as -mg_levels_pc_type <type> -mg_coarse_pc_type <type>
765       -pc_mg_type <type>
766 
767    Level: advanced
768 
769 .seealso:  PCMG, PCSetDM(), PCExoticType, PCExoticSetType()
770 M*/
771 
772 EXTERN_C_BEGIN
773 #undef __FUNCT__
774 #define __FUNCT__ "PCCreate_Exotic"
775 PetscErrorCode  PCCreate_Exotic(PC pc)
776 {
777   PetscErrorCode ierr;
778   PC_Exotic      *ex;
779   PC_MG          *mg;
780 
781   PetscFunctionBegin;
782   /* if type was previously mg; must manually destroy it because call to PCSetType(pc,PCMG) will not destroy it */
783   if (pc->ops->destroy) { ierr =  (*pc->ops->destroy)(pc);CHKERRQ(ierr); pc->data = 0;}
784   ierr = PetscFree(((PetscObject)pc)->type_name);CHKERRQ(ierr);
785   ((PetscObject)pc)->type_name = 0;
786 
787   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr);
788   ierr = PCMGSetLevels(pc,2,PETSC_NULL);CHKERRQ(ierr);
789   ierr = PCMGSetGalerkin(pc);CHKERRQ(ierr);
790   ierr = PetscNew(PC_Exotic,&ex);CHKERRQ(ierr);\
791   ex->type = PC_EXOTIC_FACE;
792   mg = (PC_MG*) pc->data;
793   mg->innerctx = ex;
794 
795 
796   pc->ops->setfromoptions = PCSetFromOptions_Exotic;
797   pc->ops->view           = PCView_Exotic;
798   pc->ops->destroy        = PCDestroy_Exotic;
799   pc->ops->setup          = PCSetUp_Exotic;
800   ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCExoticSetType_C","PCExoticSetType_Exotic",PCExoticSetType_Exotic);CHKERRQ(ierr);
801   PetscFunctionReturn(0);
802 }
803 EXTERN_C_END
804