xref: /petsc/src/ksp/pc/impls/wb/wb.c (revision ccb4e88a40f0b86eaeca07ff64c64e4de2fae686)
1 
2 #include <petscdmda.h>   /*I "petscdmda.h" I*/
3 #include <petsc/private/pcmgimpl.h>   /*I "petscksp.h" I*/
4 #include <petscctable.h>
5 
/* Private context for the PCEXOTIC preconditioner: a two-level multigrid whose
   coarse space is built from either a face or wirebasket decomposition */
typedef struct {
  PCExoticType type;         /* which coarse space: PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET */
  Mat          P;            /* the constructed interpolation matrix */
  PetscBool    directSolve;  /* use direct LU factorization to construct interpolation */
  KSP          ksp;          /* iterative solver used to construct interpolation when directSolve is false */
} PC_Exotic;
12 
/* Printable names for PCExoticType values (indexed by the enum, e.g. in PCView_Exotic());
   the trailing entries are the enum-name/prefix strings PETSc's options machinery expects */
const char *const PCExoticTypes[] = {"face","wirebasket","PCExoticType","PC_Exotic",NULL};
14 
15 /*
16       DMDAGetWireBasketInterpolation - Gets the interpolation for a wirebasket based coarse space
17 
18 */
19 PetscErrorCode DMDAGetWireBasketInterpolation(PC pc,DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
20 {
21   PetscErrorCode         ierr;
22   PetscInt               dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
23   PetscInt               mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[26],*globals,Ng,*IIint,*IIsurf,Nt;
24   Mat                    Xint, Xsurf,Xint_tmp;
25   IS                     isint,issurf,is,row,col;
26   ISLocalToGlobalMapping ltg;
27   MPI_Comm               comm;
28   Mat                    A,Aii,Ais,Asi,*Aholder,iAii;
29   MatFactorInfo          info;
30   PetscScalar            *xsurf,*xint;
31   const PetscScalar      *rxint;
32 #if defined(PETSC_USE_DEBUG_foo)
33   PetscScalar            tmp;
34 #endif
35   PetscTable             ht;
36 
37   PetscFunctionBegin;
38   ierr = DMDAGetInfo(da,&dim,NULL,NULL,NULL,&mp,&np,&pp,&dof,NULL,NULL,NULL,NULL,NULL);CHKERRQ(ierr);
39   if (dof != 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only for single field problems");
40   if (dim != 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only coded for 3d problems");
41   ierr   = DMDAGetCorners(da,NULL,NULL,NULL,&m,&n,&p);CHKERRQ(ierr);
42   ierr   = DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);CHKERRQ(ierr);
43   istart = istart ? -1 : 0;
44   jstart = jstart ? -1 : 0;
45   kstart = kstart ? -1 : 0;
46 
47   /*
48     the columns of P are the interpolation of each coarse grid point (one for each vertex and edge)
49     to all the local degrees of freedom (this includes the vertices, edges and faces).
50 
51     Xint are the subset of the interpolation into the interior
52 
53     Xface are the interpolation onto faces but not into the interior
54 
55     Xsurf are the interpolation onto the vertices and edges (the surfbasket)
56                                       Xint
57     Symbolically one could write P = (Xface) after interchanging the rows to match the natural ordering on the domain
58                                       Xsurf
59   */
60   N     = (m - istart)*(n - jstart)*(p - kstart);
61   Nint  = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
62   Nface = 2*((m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart));
63   Nwire = 4*((m-2-istart) + (n-2-jstart) + (p-2-kstart)) + 8;
64   Nsurf = Nface + Nwire;
65   ierr  = MatCreateSeqDense(MPI_COMM_SELF,Nint,26,NULL,&Xint);CHKERRQ(ierr);
66   ierr  = MatCreateSeqDense(MPI_COMM_SELF,Nsurf,26,NULL,&Xsurf);CHKERRQ(ierr);
67   ierr  = MatDenseGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
68 
69   /*
70      Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
71      Xsurf will be all zero (thus making the coarse matrix singular).
72   */
73   if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
74   if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
75   if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");
76 
77   cnt = 0;
78 
79   xsurf[cnt++] = 1;
80   for (i=1; i<m-istart-1; i++) xsurf[cnt++ + Nsurf] = 1;
81   xsurf[cnt++ + 2*Nsurf] = 1;
82 
83   for (j=1; j<n-1-jstart; j++) {
84     xsurf[cnt++ + 3*Nsurf] = 1;
85     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 4*Nsurf] = 1;
86     xsurf[cnt++ + 5*Nsurf] = 1;
87   }
88 
89   xsurf[cnt++ + 6*Nsurf] = 1;
90   for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 7*Nsurf] = 1;
91   xsurf[cnt++ + 8*Nsurf] = 1;
92 
93   for (k=1; k<p-1-kstart; k++) {
94     xsurf[cnt++ + 9*Nsurf] = 1;
95     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 10*Nsurf] = 1;
96     xsurf[cnt++ + 11*Nsurf] = 1;
97 
98     for (j=1; j<n-1-jstart; j++) {
99       xsurf[cnt++ + 12*Nsurf] = 1;
100       /* these are the interior nodes */
101       xsurf[cnt++ + 13*Nsurf] = 1;
102     }
103 
104     xsurf[cnt++ + 14*Nsurf] = 1;
105     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 15*Nsurf] = 1;
106     xsurf[cnt++ + 16*Nsurf] = 1;
107   }
108 
109   xsurf[cnt++ + 17*Nsurf] = 1;
110   for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 18*Nsurf] = 1;
111   xsurf[cnt++ + 19*Nsurf] = 1;
112 
113   for (j=1;j<n-1-jstart;j++) {
114     xsurf[cnt++ + 20*Nsurf] = 1;
115     for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 21*Nsurf] = 1;
116     xsurf[cnt++ + 22*Nsurf] = 1;
117   }
118 
119   xsurf[cnt++ + 23*Nsurf] = 1;
120   for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 24*Nsurf] = 1;
121   xsurf[cnt++ + 25*Nsurf] = 1;
122 
123   /* interpolations only sum to 1 when using direct solver */
124 #if defined(PETSC_USE_DEBUG_foo)
125   for (i=0; i<Nsurf; i++) {
126     tmp = 0.0;
127     for (j=0; j<26; j++) tmp += xsurf[i+j*Nsurf];
128     if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
129   }
130 #endif
131   ierr = MatDenseRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
132   /* ierr = MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);*/
133 
134   /*
135        I are the indices for all the needed vertices (in global numbering)
136        Iint are the indices for the interior values, I surf for the surface values
137             (This is just for the part of the global matrix obtained with MatCreateSubMatrix(), it
138              is NOT the local DMDA ordering.)
139        IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
140   */
141 #define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
142   ierr = PetscMalloc3(N,&II,Nint,&Iint,Nsurf,&Isurf);CHKERRQ(ierr);
143   ierr = PetscMalloc2(Nint,&IIint,Nsurf,&IIsurf);CHKERRQ(ierr);
144   for (k=0; k<p-kstart; k++) {
145     for (j=0; j<n-jstart; j++) {
146       for (i=0; i<m-istart; i++) {
147         II[c++] = i + j*mwidth + k*mwidth*nwidth;
148 
149         if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
150           IIint[cint]  = i + j*mwidth + k*mwidth*nwidth;
151           Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
152         } else {
153           IIsurf[csurf]  = i + j*mwidth + k*mwidth*nwidth;
154           Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
155         }
156       }
157     }
158   }
159   if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
160   if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
161   if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
162   ierr = DMGetLocalToGlobalMapping(da,&ltg);CHKERRQ(ierr);
163   ierr = ISLocalToGlobalMappingApply(ltg,N,II,II);CHKERRQ(ierr);
164   ierr = ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);CHKERRQ(ierr);
165   ierr = ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);CHKERRQ(ierr);
166   ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
167   ierr = ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
168   ierr = ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);CHKERRQ(ierr);
169   ierr = ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);CHKERRQ(ierr);
170   ierr = PetscFree3(II,Iint,Isurf);CHKERRQ(ierr);
171 
172   ierr = MatCreateSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);CHKERRQ(ierr);
173   A    = *Aholder;
174   ierr = PetscFree(Aholder);CHKERRQ(ierr);
175 
176   ierr = MatCreateSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);CHKERRQ(ierr);
177   ierr = MatCreateSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);CHKERRQ(ierr);
178   ierr = MatCreateSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);CHKERRQ(ierr);
179 
180   /*
181      Solve for the interpolation onto the interior Xint
182   */
183   ierr = MatMatMult(Ais,Xsurf,MAT_INITIAL_MATRIX,PETSC_DETERMINE,&Xint_tmp);CHKERRQ(ierr);
184   ierr = MatScale(Xint_tmp,-1.0);CHKERRQ(ierr);
185   if (exotic->directSolve) {
186     ierr = MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);CHKERRQ(ierr);
187     ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
188     ierr = MatGetOrdering(Aii,MATORDERINGND,&row,&col);CHKERRQ(ierr);
189     ierr = MatLUFactorSymbolic(iAii,Aii,row,col,&info);CHKERRQ(ierr);
190     ierr = ISDestroy(&row);CHKERRQ(ierr);
191     ierr = ISDestroy(&col);CHKERRQ(ierr);
192     ierr = MatLUFactorNumeric(iAii,Aii,&info);CHKERRQ(ierr);
193     ierr = MatMatSolve(iAii,Xint_tmp,Xint);CHKERRQ(ierr);
194     ierr = MatDestroy(&iAii);CHKERRQ(ierr);
195   } else {
196     Vec         b,x;
197     PetscScalar *xint_tmp;
198 
199     ierr = MatDenseGetArray(Xint,&xint);CHKERRQ(ierr);
200     ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,NULL,&x);CHKERRQ(ierr);
201     ierr = MatDenseGetArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
202     ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,NULL,&b);CHKERRQ(ierr);
203     ierr = KSPSetOperators(exotic->ksp,Aii,Aii);CHKERRQ(ierr);
204     for (i=0; i<26; i++) {
205       ierr = VecPlaceArray(x,xint+i*Nint);CHKERRQ(ierr);
206       ierr = VecPlaceArray(b,xint_tmp+i*Nint);CHKERRQ(ierr);
207       ierr = KSPSolve(exotic->ksp,b,x);CHKERRQ(ierr);
208       ierr = KSPCheckSolve(exotic->ksp,pc,x);CHKERRQ(ierr);
209       ierr = VecResetArray(x);CHKERRQ(ierr);
210       ierr = VecResetArray(b);CHKERRQ(ierr);
211     }
212     ierr = MatDenseRestoreArray(Xint,&xint);CHKERRQ(ierr);
213     ierr = MatDenseRestoreArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
214     ierr = VecDestroy(&x);CHKERRQ(ierr);
215     ierr = VecDestroy(&b);CHKERRQ(ierr);
216   }
217   ierr = MatDestroy(&Xint_tmp);CHKERRQ(ierr);
218 
219 #if defined(PETSC_USE_DEBUG_foo)
220   ierr = MatDenseGetArrayRead(Xint,&rxint);CHKERRQ(ierr);
221   for (i=0; i<Nint; i++) {
222     tmp = 0.0;
223     for (j=0; j<26; j++) tmp += rxint[i+j*Nint];
224 
225     if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
226   }
227   ierr = MatDenseRestoreArrayRead(Xint,&rxint);CHKERRQ(ierr);
228   /* ierr = MatView(Xint,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
229 #endif
230 
231   /*         total vertices             total faces                                  total edges */
232   Ntotal = (mp + 1)*(np + 1)*(pp + 1) + mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1) + mp*(np+1)*(pp+1) + np*(mp+1)*(pp+1) +  pp*(mp+1)*(np+1);
233 
234   /*
235       For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
236   */
237   cnt = 0;
238 
239   gl[cnt++] = 0;  { gl[cnt++] = 1;} gl[cnt++] = m-istart-1;
240   { gl[cnt++] = mwidth;  { gl[cnt++] = mwidth+1;} gl[cnt++] = mwidth + m-istart-1;}
241   gl[cnt++] = mwidth*(n-jstart-1);  { gl[cnt++] = mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*(n-jstart-1) + m-istart-1;
242   {
243     gl[cnt++] = mwidth*nwidth;  { gl[cnt++] = mwidth*nwidth+1;}  gl[cnt++] = mwidth*nwidth+ m-istart-1;
244     { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
245     gl[cnt++] = mwidth*nwidth+ mwidth*(n-jstart-1);   { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1) + m-istart-1;
246   }
247   gl[cnt++] = mwidth*nwidth*(p-kstart-1); { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) +  m-istart-1;
248   { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth;   { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1)+mwidth+m-istart-1;}
249   gl[cnt++] = mwidth*nwidth*(p-kstart-1) +  mwidth*(n-jstart-1);  { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+ mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth*(n-jstart-1) + m-istart-1;
250 
251   /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
252   /* convert that to global numbering and get them on all processes */
253   ierr = ISLocalToGlobalMappingApply(ltg,26,gl,gl);CHKERRQ(ierr);
254   /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
255   ierr = PetscMalloc1(26*mp*np*pp,&globals);CHKERRQ(ierr);
256   ierr = MPI_Allgather(gl,26,MPIU_INT,globals,26,MPIU_INT,PetscObjectComm((PetscObject)da));CHKERRMPI(ierr);
257 
258   /* Number the coarse grid points from 0 to Ntotal */
259   ierr = MatGetSize(Aglobal,&Nt,NULL);CHKERRQ(ierr);
260   ierr = PetscTableCreate(Ntotal/3,Nt+1,&ht);CHKERRQ(ierr);
261   for (i=0; i<26*mp*np*pp; i++) {
262     ierr = PetscTableAddCount(ht,globals[i]+1);CHKERRQ(ierr);
263   }
264   ierr = PetscTableGetCount(ht,&cnt);CHKERRQ(ierr);
265   if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
266   ierr = PetscFree(globals);CHKERRQ(ierr);
267   for (i=0; i<26; i++) {
268     ierr = PetscTableFind(ht,gl[i]+1,&gl[i]);CHKERRQ(ierr);
269     gl[i]--;
270   }
271   ierr = PetscTableDestroy(&ht);CHKERRQ(ierr);
272   /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
273 
274   /* construct global interpolation matrix */
275   ierr = MatGetLocalSize(Aglobal,&Ng,NULL);CHKERRQ(ierr);
276   if (reuse == MAT_INITIAL_MATRIX) {
277     ierr = MatCreateAIJ(PetscObjectComm((PetscObject)da),Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,NULL,Nint+Nsurf,NULL,P);CHKERRQ(ierr);
278   } else {
279     ierr = MatZeroEntries(*P);CHKERRQ(ierr);
280   }
281   ierr = MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
282   ierr = MatDenseGetArrayRead(Xint,&rxint);CHKERRQ(ierr);
283   ierr = MatSetValues(*P,Nint,IIint,26,gl,rxint,INSERT_VALUES);CHKERRQ(ierr);
284   ierr = MatDenseRestoreArrayRead(Xint,&rxint);CHKERRQ(ierr);
285   ierr = MatDenseGetArrayRead(Xsurf,&rxint);CHKERRQ(ierr);
286   ierr = MatSetValues(*P,Nsurf,IIsurf,26,gl,rxint,INSERT_VALUES);CHKERRQ(ierr);
287   ierr = MatDenseRestoreArrayRead(Xsurf,&rxint);CHKERRQ(ierr);
288   ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
289   ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
290   ierr = PetscFree2(IIint,IIsurf);CHKERRQ(ierr);
291 
292 #if defined(PETSC_USE_DEBUG_foo)
293   {
294     Vec         x,y;
295     PetscScalar *yy;
296     ierr = VecCreateMPI(PetscObjectComm((PetscObject)da),Ng,PETSC_DETERMINE,&y);CHKERRQ(ierr);
297     ierr = VecCreateMPI(PetscObjectComm((PetscObject)da),PETSC_DETERMINE,Ntotal,&x);CHKERRQ(ierr);
298     ierr = VecSet(x,1.0);CHKERRQ(ierr);
299     ierr = MatMult(*P,x,y);CHKERRQ(ierr);
300     ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
301     for (i=0; i<Ng; i++) {
302       if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %g",i,(double)PetscAbsScalar(yy[i]));
303     }
304     ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
305     ierr = VecDestroy(x);CHKERRQ(ierr);
306     ierr = VecDestroy(y);CHKERRQ(ierr);
307   }
308 #endif
309 
310   ierr = MatDestroy(&Aii);CHKERRQ(ierr);
311   ierr = MatDestroy(&Ais);CHKERRQ(ierr);
312   ierr = MatDestroy(&Asi);CHKERRQ(ierr);
313   ierr = MatDestroy(&A);CHKERRQ(ierr);
314   ierr = ISDestroy(&is);CHKERRQ(ierr);
315   ierr = ISDestroy(&isint);CHKERRQ(ierr);
316   ierr = ISDestroy(&issurf);CHKERRQ(ierr);
317   ierr = MatDestroy(&Xint);CHKERRQ(ierr);
318   ierr = MatDestroy(&Xsurf);CHKERRQ(ierr);
319   PetscFunctionReturn(0);
320 }
321 
/*
      DMDAGetFaceInterpolation - Gets the interpolation for a face based coarse space

      The face coarse space has 6 degrees of freedom per subdomain: one per face of the
      subdomain box; edge and vertex values are ignored.

      Input Parameters:
        pc      - the preconditioner (used for KSPCheckSolve() error reporting)
        da      - the 3d DMDA the problem is defined on (must have dof == 1)
        exotic  - the PC_Exotic context; selects direct LU versus iterative construction
        Aglobal - the fine grid operator
        reuse   - MAT_INITIAL_MATRIX to create *P, MAT_REUSE_MATRIX to refill an existing *P

      Output Parameter:
        P       - the interpolation matrix from the coarse face space to the fine grid
*/
PetscErrorCode DMDAGetFaceInterpolation(PC pc,DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
{
  PetscErrorCode         ierr;
  PetscInt               dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
  PetscInt               mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[6],*globals,Ng,*IIint,*IIsurf,Nt;
  Mat                    Xint, Xsurf,Xint_tmp;
  IS                     isint,issurf,is,row,col;
  ISLocalToGlobalMapping ltg;
  MPI_Comm               comm;
  Mat                    A,Aii,Ais,Asi,*Aholder,iAii;
  MatFactorInfo          info;
  PetscScalar            *xsurf,*xint;
  const PetscScalar      *rxint;
#if defined(PETSC_USE_DEBUG_foo)   /* the _foo suffix keeps these consistency checks compiled out */
  PetscScalar            tmp;
#endif
  PetscTable             ht;

  PetscFunctionBegin;
  ierr = DMDAGetInfo(da,&dim,NULL,NULL,NULL,&mp,&np,&pp,&dof,NULL,NULL,NULL,NULL,NULL);CHKERRQ(ierr);
  if (dof != 1) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only for single field problems");
  if (dim != 3) SETERRQ(PetscObjectComm((PetscObject)da),PETSC_ERR_SUP,"Only coded for 3d problems");
  ierr   = DMDAGetCorners(da,NULL,NULL,NULL,&m,&n,&p);CHKERRQ(ierr);
  ierr   = DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);CHKERRQ(ierr);
  /* istart/jstart/kstart become -1 when this subdomain has a ghost layer on the low side,
     extending the local box by one in that direction; otherwise 0 */
  istart = istart ? -1 : 0;
  jstart = jstart ? -1 : 0;
  kstart = kstart ? -1 : 0;

  /*
    the columns of P are the interpolation of each coarse grid point (one for each vertex and edge)
    to all the local degrees of freedom (this includes the vertices, edges and faces).

    Xint are the subset of the interpolation into the interior

    Xface are the interpolation onto faces but not into the interior

    Xsurf are the interpolation onto the vertices and edges (the surfbasket)
                                      Xint
    Symbolically one could write P = (Xface) after interchanging the rows to match the natural ordering on the domain
                                      Xsurf
  */
  N     = (m - istart)*(n - jstart)*(p - kstart);
  Nint  = (m-2-istart)*(n-2-jstart)*(p-2-kstart);
  Nface = 2*((m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart));
  Nwire = 4*((m-2-istart) + (n-2-jstart) + (p-2-kstart)) + 8;
  Nsurf = Nface + Nwire;
  ierr  = MatCreateSeqDense(MPI_COMM_SELF,Nint,6,NULL,&Xint);CHKERRQ(ierr);
  ierr  = MatCreateSeqDense(MPI_COMM_SELF,Nsurf,6,NULL,&Xsurf);CHKERRQ(ierr);
  ierr  = MatDenseGetArray(Xsurf,&xsurf);CHKERRQ(ierr);

  /*
     Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
     Xsurf will be all zero (thus making the coarse matrix singular).
  */
  if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
  if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
  if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");

  /* fill the 6 columns of Xsurf (one per face): entries are 1 at the interior grid points of
     that face (edge/vertex rows are left zero), swept in the natural surface-node ordering */
  cnt = 0;
  for (j=1; j<n-1-jstart; j++) {
    for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 0*Nsurf] = 1;
  }

  for (k=1; k<p-1-kstart; k++) {
    for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 1*Nsurf] = 1;
    for (j=1; j<n-1-jstart; j++) {
      xsurf[cnt++ + 2*Nsurf] = 1;
      /* these are the interior nodes */
      xsurf[cnt++ + 3*Nsurf] = 1;
    }
    for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 4*Nsurf] = 1;
  }
  for (j=1;j<n-1-jstart;j++) {
    for (i=1; i<m-istart-1; i++) xsurf[cnt++ + 5*Nsurf] = 1;
  }

#if defined(PETSC_USE_DEBUG_foo)
  for (i=0; i<Nsurf; i++) {
    tmp = 0.0;
    for (j=0; j<6; j++) tmp += xsurf[i+j*Nsurf];

    if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
  }
#endif
  ierr = MatDenseRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
  /* ierr = MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);*/

  /*
       I are the indices for all the needed vertices (in global numbering)
       Iint are the indices for the interior values, I surf for the surface values
            (This is just for the part of the global matrix obtained with MatCreateSubMatrix(), it
             is NOT the local DMDA ordering.)
       IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
  */
#define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
  ierr = PetscMalloc3(N,&II,Nint,&Iint,Nsurf,&Isurf);CHKERRQ(ierr);
  ierr = PetscMalloc2(Nint,&IIint,Nsurf,&IIsurf);CHKERRQ(ierr);
  for (k=0; k<p-kstart; k++) {
    for (j=0; j<n-jstart; j++) {
      for (i=0; i<m-istart; i++) {
        II[c++] = i + j*mwidth + k*mwidth*nwidth;

        if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
          IIint[cint]  = i + j*mwidth + k*mwidth*nwidth;
          Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
        } else {
          IIsurf[csurf]  = i + j*mwidth + k*mwidth*nwidth;
          Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
        }
      }
    }
  }
  if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
  if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
  if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
  ierr = DMGetLocalToGlobalMapping(da,&ltg);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,N,II,II);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);CHKERRQ(ierr);
  ierr = PetscFree3(II,Iint,Isurf);CHKERRQ(ierr);

  /* MatCreateSubMatrices() requires a sorted index set */
  ierr = ISSort(is);CHKERRQ(ierr);
  ierr = MatCreateSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);CHKERRQ(ierr);
  A    = *Aholder;
  ierr = PetscFree(Aholder);CHKERRQ(ierr);

  ierr = MatCreateSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);CHKERRQ(ierr);
  ierr = MatCreateSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);CHKERRQ(ierr);
  ierr = MatCreateSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);CHKERRQ(ierr);

  /*
     Solve for the interpolation onto the interior Xint
  */
  ierr = MatMatMult(Ais,Xsurf,MAT_INITIAL_MATRIX,PETSC_DETERMINE,&Xint_tmp);CHKERRQ(ierr);
  ierr = MatScale(Xint_tmp,-1.0);CHKERRQ(ierr);

  if (exotic->directSolve) {
    /* Xint = -Aii^{-1} Ais Xsurf via a sequential LU factorization of Aii */
    ierr = MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);CHKERRQ(ierr);
    ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
    ierr = MatGetOrdering(Aii,MATORDERINGND,&row,&col);CHKERRQ(ierr);
    ierr = MatLUFactorSymbolic(iAii,Aii,row,col,&info);CHKERRQ(ierr);
    ierr = ISDestroy(&row);CHKERRQ(ierr);
    ierr = ISDestroy(&col);CHKERRQ(ierr);
    ierr = MatLUFactorNumeric(iAii,Aii,&info);CHKERRQ(ierr);
    ierr = MatMatSolve(iAii,Xint_tmp,Xint);CHKERRQ(ierr);
    ierr = MatDestroy(&iAii);CHKERRQ(ierr);
  } else {
    Vec         b,x;
    PetscScalar *xint_tmp;

    /* solve one linear system per coarse basis function (6 columns), placing the dense
       column storage directly into work vectors to avoid copies */
    ierr = MatDenseGetArray(Xint,&xint);CHKERRQ(ierr);
    ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,NULL,&x);CHKERRQ(ierr);
    ierr = MatDenseGetArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
    ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,NULL,&b);CHKERRQ(ierr);
    ierr = KSPSetOperators(exotic->ksp,Aii,Aii);CHKERRQ(ierr);
    for (i=0; i<6; i++) {
      ierr = VecPlaceArray(x,xint+i*Nint);CHKERRQ(ierr);
      ierr = VecPlaceArray(b,xint_tmp+i*Nint);CHKERRQ(ierr);
      ierr = KSPSolve(exotic->ksp,b,x);CHKERRQ(ierr);
      ierr = KSPCheckSolve(exotic->ksp,pc,x);CHKERRQ(ierr);
      ierr = VecResetArray(x);CHKERRQ(ierr);
      ierr = VecResetArray(b);CHKERRQ(ierr);
    }
    ierr = MatDenseRestoreArray(Xint,&xint);CHKERRQ(ierr);
    ierr = MatDenseRestoreArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
    ierr = VecDestroy(&x);CHKERRQ(ierr);
    ierr = VecDestroy(&b);CHKERRQ(ierr);
  }
  ierr = MatDestroy(&Xint_tmp);CHKERRQ(ierr);

#if defined(PETSC_USE_DEBUG_foo)
  ierr = MatDenseGetArrayRead(Xint,&rxint);CHKERRQ(ierr);
  for (i=0; i<Nint; i++) {
    tmp = 0.0;
    for (j=0; j<6; j++) tmp += rxint[i+j*Nint];

    if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %g",i,(double)PetscAbsScalar(tmp));
  }
  ierr = MatDenseRestoreArrayRead(Xint,&rxint);CHKERRQ(ierr);
  /* ierr = MatView(Xint,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
#endif

  /*         total faces    */
  Ntotal =  mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1);

  /*
      For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
  */
  /* gl[] holds the local (ghosted) index of one sample grid point on each of the 6 faces;
     after the local-to-global mapping these identify each face with a global coarse number */
  cnt = 0;
  { gl[cnt++] = mwidth+1;}
  {
    { gl[cnt++] = mwidth*nwidth+1;}
    { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
    { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;}
  }
  { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;}

  /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
  /* convert that to global numbering and get them on all processes */
  ierr = ISLocalToGlobalMappingApply(ltg,6,gl,gl);CHKERRQ(ierr);
  /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
  ierr = PetscMalloc1(6*mp*np*pp,&globals);CHKERRQ(ierr);
  ierr = MPI_Allgather(gl,6,MPIU_INT,globals,6,MPIU_INT,PetscObjectComm((PetscObject)da));CHKERRMPI(ierr);

  /* Number the coarse grid points from 0 to Ntotal */
  ierr = MatGetSize(Aglobal,&Nt,NULL);CHKERRQ(ierr);
  ierr = PetscTableCreate(Ntotal/3,Nt+1,&ht);CHKERRQ(ierr);
  for (i=0; i<6*mp*np*pp; i++) {
    /* +1 because PetscTable cannot store the key 0 */
    ierr = PetscTableAddCount(ht,globals[i]+1);CHKERRQ(ierr);
  }
  ierr = PetscTableGetCount(ht,&cnt);CHKERRQ(ierr);
  if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
  ierr = PetscFree(globals);CHKERRQ(ierr);
  for (i=0; i<6; i++) {
    ierr = PetscTableFind(ht,gl[i]+1,&gl[i]);CHKERRQ(ierr);
    gl[i]--;
  }
  ierr = PetscTableDestroy(&ht);CHKERRQ(ierr);
  /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */

  /* construct global interpolation matrix */
  ierr = MatGetLocalSize(Aglobal,&Ng,NULL);CHKERRQ(ierr);
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatCreateAIJ(PetscObjectComm((PetscObject)da),Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,NULL,Nint,NULL,P);CHKERRQ(ierr);
  } else {
    ierr = MatZeroEntries(*P);CHKERRQ(ierr);
  }
  /* the dense column-major arrays are inserted column-by-column */
  ierr = MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatDenseGetArrayRead(Xint,&rxint);CHKERRQ(ierr);
  ierr = MatSetValues(*P,Nint,IIint,6,gl,rxint,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatDenseRestoreArrayRead(Xint,&rxint);CHKERRQ(ierr);
  ierr = MatDenseGetArrayRead(Xsurf,&rxint);CHKERRQ(ierr);
  ierr = MatSetValues(*P,Nsurf,IIsurf,6,gl,rxint,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatDenseRestoreArrayRead(Xsurf,&rxint);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree2(IIint,IIsurf);CHKERRQ(ierr);

#if defined(PETSC_USE_DEBUG_foo)
  {
    Vec         x,y;
    PetscScalar *yy;
    ierr = VecCreateMPI(PetscObjectComm((PetscObject)da),Ng,PETSC_DETERMINE,&y);CHKERRQ(ierr);
    ierr = VecCreateMPI(PetscObjectComm((PetscObject)da),PETSC_DETERMINE,Ntotal,&x);CHKERRQ(ierr);
    ierr = VecSet(x,1.0);CHKERRQ(ierr);
    ierr = MatMult(*P,x,y);CHKERRQ(ierr);
    ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
    for (i=0; i<Ng; i++) {
      if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %g",i,(double)PetscAbsScalar(yy[i]));
    }
    ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
    ierr = VecDestroy(x);CHKERRQ(ierr);
    ierr = VecDestroy(y);CHKERRQ(ierr);
  }
#endif

  ierr = MatDestroy(&Aii);CHKERRQ(ierr);
  ierr = MatDestroy(&Ais);CHKERRQ(ierr);
  ierr = MatDestroy(&Asi);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  ierr = ISDestroy(&isint);CHKERRQ(ierr);
  ierr = ISDestroy(&issurf);CHKERRQ(ierr);
  ierr = MatDestroy(&Xint);CHKERRQ(ierr);
  ierr = MatDestroy(&Xsurf);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
597 
/*@
   PCExoticSetType - Sets the type of coarse grid interpolation to use

   Logically Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  type - either PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET (defaults to face)

   Notes:
    The face based interpolation has 1 degree of freedom per face and ignores the
     edge and vertex values completely in the coarse problem. For any seven point
     stencil the interpolation of a constant on all faces into the interior is that constant.

     The wirebasket interpolation has 1 degree of freedom per vertex, per edge and
     per face. A constant on the subdomain boundary is interpolated as that constant
     in the interior of the domain.

     The coarse grid matrix is obtained via the Galerkin computation A_c = R A R^T, hence
     if A is nonsingular A_c is also nonsingular.

     Both interpolations are suitable for only scalar problems.

   Level: intermediate

.seealso: PCEXOTIC, PCExoticType
@*/
PetscErrorCode  PCExoticSetType(PC pc,PCExoticType type)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  PetscValidLogicalCollectiveEnum(pc,type,2);
  /* forwards to PCExoticSetType_Exotic() when pc is a PCEXOTIC; silently a no-op otherwise */
  ierr = PetscTryMethod(pc,"PCExoticSetType_C",(PC,PCExoticType),(pc,type));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
635 
636 static PetscErrorCode  PCExoticSetType_Exotic(PC pc,PCExoticType type)
637 {
638   PC_MG     *mg  = (PC_MG*)pc->data;
639   PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
640 
641   PetscFunctionBegin;
642   ctx->type = type;
643   PetscFunctionReturn(0);
644 }
645 
/* Set up the PCEXOTIC preconditioner: build (or refresh) the coarse-space
   interpolation from the attached DMDA, hand it to the two-level PCMG, and
   run the standard multigrid setup. Requires PCSetDM() to have been called. */
PetscErrorCode PCSetUp_Exotic(PC pc)
{
  PetscErrorCode ierr;
  Mat            A;
  PC_MG          *mg   = (PC_MG*)pc->data;
  PC_Exotic      *ex   = (PC_Exotic*) mg->innerctx;
  MatReuse       reuse = (ex->P) ? MAT_REUSE_MATRIX : MAT_INITIAL_MATRIX;  /* reuse the interpolation on repeated setups */

  PetscFunctionBegin;
  if (!pc->dm) SETERRQ(PetscObjectComm((PetscObject)pc),PETSC_ERR_ARG_WRONGSTATE,"Need to call PCSetDM() before using this PC");
  ierr = PCGetOperators(pc,NULL,&A);CHKERRQ(ierr);
  /* dispatch on the selected coarse space to construct the interpolation ex->P */
  if (ex->type == PC_EXOTIC_FACE) {
    ierr = DMDAGetFaceInterpolation(pc,pc->dm,ex,A,reuse,&ex->P);CHKERRQ(ierr);
  } else if (ex->type == PC_EXOTIC_WIREBASKET) {
    ierr = DMDAGetWireBasketInterpolation(pc,pc->dm,ex,A,reuse,&ex->P);CHKERRQ(ierr);
  } else SETERRQ1(PetscObjectComm((PetscObject)pc),PETSC_ERR_PLIB,"Unknown exotic coarse space %d",ex->type);
  ierr = PCMGSetInterpolation(pc,1,ex->P);CHKERRQ(ierr);
  /* if PC has attached DM we must remove it or the PCMG will use it to compute incorrect sized vectors and interpolations */
  ierr = PCSetDM(pc,NULL);CHKERRQ(ierr);
  ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
668 
669 PetscErrorCode PCDestroy_Exotic(PC pc)
670 {
671   PetscErrorCode ierr;
672   PC_MG          *mg  = (PC_MG*)pc->data;
673   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
674 
675   PetscFunctionBegin;
676   ierr = MatDestroy(&ctx->P);CHKERRQ(ierr);
677   ierr = KSPDestroy(&ctx->ksp);CHKERRQ(ierr);
678   ierr = PetscFree(ctx);CHKERRQ(ierr);
679   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
680   PetscFunctionReturn(0);
681 }
682 
/*
   PCView_Exotic - prints the exotic-specific state (coarse space type, how the
   interpolation is constructed) and then the underlying PCMG view.
*/
PetscErrorCode PCView_Exotic(PC pc,PetscViewer viewer)
{
  PC_MG          *mg = (PC_MG*)pc->data;
  PetscErrorCode ierr;
  PetscBool      iascii;
  PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
  if (iascii) {
    ierr = PetscViewerASCIIPrintf(viewer,"    Exotic type = %s\n",PCExoticTypes[ctx->type]);CHKERRQ(ierr);
    if (ctx->directSolve) {
      ierr = PetscViewerASCIIPrintf(viewer,"      Using direct solver to construct interpolation\n");CHKERRQ(ierr);
    } else {
      PetscViewer sviewer;
      PetscMPIInt rank;

      ierr = PetscViewerASCIIPrintf(viewer,"      Using iterative solver to construct interpolation\n");CHKERRQ(ierr);
      ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);  /* should not need to push this twice? */
      /* the inner KSP lives on PETSC_COMM_SELF, so view it through a sequential
         subviewer and only from rank 0 to avoid duplicated output */
      ierr = PetscViewerGetSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
      ierr = MPI_Comm_rank(PetscObjectComm((PetscObject)pc),&rank);CHKERRMPI(ierr);
      if (rank == 0) {
        ierr = KSPView(ctx->ksp,sviewer);CHKERRQ(ierr);
      }
      ierr = PetscViewerRestoreSubViewer(viewer,PETSC_COMM_SELF,&sviewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
      ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
    }
  }
  ierr = PCView_MG(pc,viewer);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
716 
/*
   PCSetFromOptions_Exotic - processes -pc_exotic_type and -pc_exotic_direct_solver;
   when an iterative construction of the interpolation is requested, lazily creates
   the inner (sequential) KSP with option prefix "<pc prefix>exotic_" and lets it
   read its own options.
*/
PetscErrorCode PCSetFromOptions_Exotic(PetscOptionItems *PetscOptionsObject,PC pc)
{
  PetscErrorCode ierr;
  PetscBool      flg;
  PC_MG          *mg = (PC_MG*)pc->data;
  PCExoticType   mgctype;
  PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;

  PetscFunctionBegin;
  ierr = PetscOptionsHead(PetscOptionsObject,"Exotic coarse space options");CHKERRQ(ierr);
  ierr = PetscOptionsEnum("-pc_exotic_type","face or wirebasket","PCExoticSetType",PCExoticTypes,(PetscEnum)ctx->type,(PetscEnum*)&mgctype,&flg);CHKERRQ(ierr);
  if (flg) {
    ierr = PCExoticSetType(pc,mgctype);CHKERRQ(ierr);
  }
  ierr = PetscOptionsBool("-pc_exotic_direct_solver","use direct solver to construct interpolation","None",ctx->directSolve,&ctx->directSolve,NULL);CHKERRQ(ierr);
  if (!ctx->directSolve) {
    if (!ctx->ksp) {
      const char *prefix;
      /* interpolation is built from local (per-process) solves, hence PETSC_COMM_SELF */
      ierr = KSPCreate(PETSC_COMM_SELF,&ctx->ksp);CHKERRQ(ierr);
      ierr = KSPSetErrorIfNotConverged(ctx->ksp,pc->erroriffailure);CHKERRQ(ierr);
      ierr = PetscObjectIncrementTabLevel((PetscObject)ctx->ksp,(PetscObject)pc,1);CHKERRQ(ierr);
      ierr = PetscLogObjectParent((PetscObject)pc,(PetscObject)ctx->ksp);CHKERRQ(ierr);
      ierr = PCGetOptionsPrefix(pc,&prefix);CHKERRQ(ierr);
      ierr = KSPSetOptionsPrefix(ctx->ksp,prefix);CHKERRQ(ierr);
      ierr = KSPAppendOptionsPrefix(ctx->ksp,"exotic_");CHKERRQ(ierr);
    }
    ierr = KSPSetFromOptions(ctx->ksp);CHKERRQ(ierr);
  }
  ierr = PetscOptionsTail();CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
748 
749 /*MC
750      PCEXOTIC - Two level overlapping Schwarz preconditioner with exotic (non-standard) coarse grid spaces
751 
752      This uses the PCMG infrastructure restricted to two levels and the face and wirebasket based coarse
753    grid spaces.
754 
755    Notes:
756     By default this uses GMRES on the fine grid smoother so this should be used with KSPFGMRES or the smoother changed to not use GMRES
757 
758    References:
759 +  1. - These coarse grid spaces originate in the work of Bramble, Pasciak  and Schatz, "The Construction
760    of Preconditioners for Elliptic Problems by Substructuring IV", Mathematics of Computation, volume 53, 1989.
761 .  2. - They were generalized slightly in "Domain Decomposition Method for Linear Elasticity", Ph. D. thesis, Barry Smith,
762    New York University, 1990.
763 .  3. - They were then explored in great detail in Dryja, Smith, Widlund, "Schwarz Analysis
764    of Iterative Substructuring Methods for Elliptic Problems in Three Dimensions, SIAM Journal on Numerical
765    Analysis, volume 31. 1994. These were developed in the context of iterative substructuring preconditioners.
766 .  4. - They were then ingeniously applied as coarse grid spaces for overlapping Schwarz methods by Dohrmann and Widlund.
767    They refer to them as GDSW (generalized Dryja, Smith, Widlund preconditioners). See, for example,
768    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Extending theory for domain decomposition algorithms to irregular subdomains. In Ulrich Langer, Marco
769    Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
770    of the 17th International Conference on Domain Decomposition Methods in
771    Science and Engineering, held in Strobl, Austria, 2006, number 60 in
772    Springer Verlag, Lecture Notes in Computational Science and Engineering, 2007.
773 .  5. -  Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. A family of energy minimizing coarse spaces for overlapping Schwarz preconditioners. In Ulrich Langer,
774    Marco Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
775    of the 17th International Conference on Domain Decomposition Methods
776    in Science and Engineering, held in Strobl, Austria, 2006, number 60 in
777    Springer Verlag, Lecture Notes in Computational Science and Engineering, 2007
778 .  6. - Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Domain decomposition
779    for less regular subdomains: Overlapping Schwarz in two dimensions. SIAM J.
780    Numer. Anal., 46(4), 2008.
781 -  7. - Clark R. Dohrmann and Olof B. Widlund. An overlapping Schwarz
782    algorithm for almost incompressible elasticity. Technical Report
783    TR2008 912, Department of Computer Science, Courant Institute
784    of Mathematical Sciences, New York University, May 2008.
785 
786    Options Database: The usual PCMG options are supported, such as -mg_levels_pc_type <type> -mg_coarse_pc_type <type>
787       -pc_mg_type <type>
788 
789    Level: advanced
790 
791 .seealso:  PCMG, PCSetDM(), PCExoticType, PCExoticSetType()
792 M*/
793 
794 PETSC_EXTERN PetscErrorCode PCCreate_Exotic(PC pc)
795 {
796   PetscErrorCode ierr;
797   PC_Exotic      *ex;
798   PC_MG          *mg;
799 
800   PetscFunctionBegin;
801   /* if type was previously mg; must manually destroy it because call to PCSetType(pc,PCMG) will not destroy it */
802   if (pc->ops->destroy) {
803     ierr =  (*pc->ops->destroy)(pc);CHKERRQ(ierr);
804     pc->data = NULL;
805   }
806   ierr = PetscFree(((PetscObject)pc)->type_name);CHKERRQ(ierr);
807   ((PetscObject)pc)->type_name = NULL;
808 
809   ierr         = PCSetType(pc,PCMG);CHKERRQ(ierr);
810   ierr         = PCMGSetLevels(pc,2,NULL);CHKERRQ(ierr);
811   ierr         = PCMGSetGalerkin(pc,PC_MG_GALERKIN_PMAT);CHKERRQ(ierr);
812   ierr         = PetscNew(&ex);CHKERRQ(ierr); \
813   ex->type     = PC_EXOTIC_FACE;
814   mg           = (PC_MG*) pc->data;
815   mg->innerctx = ex;
816 
817   pc->ops->setfromoptions = PCSetFromOptions_Exotic;
818   pc->ops->view           = PCView_Exotic;
819   pc->ops->destroy        = PCDestroy_Exotic;
820   pc->ops->setup          = PCSetUp_Exotic;
821 
822   ierr = PetscObjectComposeFunction((PetscObject)pc,"PCExoticSetType_C",PCExoticSetType_Exotic);CHKERRQ(ierr);
823   PetscFunctionReturn(0);
824 }
825