xref: /petsc/src/ksp/pc/impls/wb/wb.c (revision 0e9bae810fdaeb60e2713eaa8ddb89f42e079fd1)
1 
2 #include <petscpcmg.h>   /*I "petscpcmg.h" I*/
3 #include <petscdmda.h>   /*I "petscdmda.h" I*/
4 #include <../src/ksp/pc/impls/mg/mgimpl.h>
5 
/* Private context for the PCEXOTIC preconditioner (stored as PC_MG's innerctx) */
typedef struct {
  PCExoticType type;         /* coarse space flavor: PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET */
  Mat          P;            /* the constructed interpolation matrix */
  PetscBool    directSolve;  /* use direct LU factorization to construct interpolation */
  KSP          ksp;          /* inner solver for the interior problems when directSolve is false */
} PC_Exotic;
12 
/* String names matching the PCExoticType enum values, used for options processing and viewing */
const char *const PCExoticTypes[] = {"face","wirebasket","PCExoticType","PC_Exotic",0};
14 
15 
16 #undef __FUNCT__
17 #define __FUNCT__ "DMDAGetWireBasketInterpolation"
/*
      DMDAGetWireBasketInterpolation - Gets the interpolation for a wirebasket based coarse space

      Input Parameters:
+     da      - the 3d DMDA (one dof per point) describing the fine grid
.     exotic  - the PC_Exotic context; supplies the directSolve flag and the inner KSP
.     Aglobal - the assembled fine grid operator
-     reuse   - MAT_INITIAL_MATRIX to create *P, MAT_REUSE_MATRIX to refill an existing one

      Output Parameter:
.     P - the interpolation from the wirebasket coarse space to the fine grid

      Notes: each process contributes 26 coarse basis functions, one for each
      vertex (8), edge (12) and face (6) of its local subdomain (3x3x3 - 1 = 26).
*/
PetscErrorCode DMDAGetWireBasketInterpolation(DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
{
  PetscErrorCode         ierr;
  PetscInt               dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
  PetscInt               mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[26],*globals,Ng,*IIint,*IIsurf,Nt;
  Mat                    Xint, Xsurf,Xint_tmp;
  IS                     isint,issurf,is,row,col;
  ISLocalToGlobalMapping ltg;
  MPI_Comm               comm;
  Mat                    A,Aii,Ais,Asi,*Aholder,iAii;
  MatFactorInfo          info;
  PetscScalar            *xsurf,*xint;
#if defined(PETSC_USE_DEBUG_foo)   /* deliberately never defined: the sanity checks below are compiled out */
  PetscScalar            tmp;
#endif
  PetscTable             ht;

  PetscFunctionBegin;
  ierr = DMDAGetInfo(da,&dim,0,0,0,&mp,&np,&pp,&dof,0,0,0,0,0);CHKERRQ(ierr);
  if (dof != 1) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only for single field problems");
  if (dim != 3) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only coded for 3d problems");
  ierr = DMDAGetCorners(da,0,0,0,&m,&n,&p);CHKERRQ(ierr);
  ierr = DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);CHKERRQ(ierr);
  /* a nonzero ghost corner means this process has a neighbor in that negative
     direction; set the offset to -1 so the loops below take in that ghost layer */
  istart = istart ? -1 : 0;
  jstart = jstart ? -1 : 0;
  kstart = kstart ? -1 : 0;

  /*
    the columns of P are the interpolation of each coarse grid point (one for each vertex and edge)
    to all the local degrees of freedom (this includes the vertices, edges and faces).

    Xint are the subset of the interpolation into the interior

    Xface are the interpolation onto faces but not into the interior

    Xsurf are the interpolation onto the vertices and edges (the surfbasket)
                                        Xint
    Symbolically one could write P = (  Xface  ) after interchanging the rows to match the natural ordering on the domain
                                        Xsurf
  */
  N     = (m - istart)*(n - jstart)*(p - kstart);   /* all local points, including the ghost layer */
  Nint  = (m-2-istart)*(n-2-jstart)*(p-2-kstart);   /* strictly interior points of the local patch */
  Nface = 2*( (m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart) );
  Nwire = 4*( (m-2-istart) + (n-2-jstart) + (p-2-kstart) ) + 8;   /* edges plus the 8 corners */
  Nsurf = Nface + Nwire;                            /* everything on the boundary of the local patch */
  ierr = MatCreateSeqDense(MPI_COMM_SELF,Nint,26,PETSC_NULL,&Xint);CHKERRQ(ierr);
  ierr = MatCreateSeqDense(MPI_COMM_SELF,Nsurf,26,PETSC_NULL,&Xsurf);CHKERRQ(ierr);
  ierr = MatDenseGetArray(Xsurf,&xsurf);CHKERRQ(ierr);

  /*
     Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
     Xsurf will be all zero (thus making the coarse matrix singular).
  */
  if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
  if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
  if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");

  /* march through the boundary points in lexicographic order; column j of Xsurf gets a 1
     for every boundary point that belongs to coarse object j (vertex, edge or face) */
  cnt = 0;
  xsurf[cnt++] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + Nsurf] = 1;} xsurf[cnt++ + 2*Nsurf] = 1;
  for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 3*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 4*Nsurf] = 1;} xsurf[cnt++ + 5*Nsurf] = 1;}
  xsurf[cnt++ + 6*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 7*Nsurf] = 1;} xsurf[cnt++ + 8*Nsurf] = 1;
  for (k=1;k<p-1-kstart;k++) {
    xsurf[cnt++ + 9*Nsurf] = 1;  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 10*Nsurf] = 1;}  xsurf[cnt++ + 11*Nsurf] = 1;
    for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 12*Nsurf] = 1; /* these are the interior nodes */ xsurf[cnt++ + 13*Nsurf] = 1;}
    xsurf[cnt++ + 14*Nsurf] = 1;  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 15*Nsurf] = 1;} xsurf[cnt++ + 16*Nsurf] = 1;
  }
  xsurf[cnt++ + 17*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 18*Nsurf] = 1;} xsurf[cnt++ + 19*Nsurf] = 1;
  for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 20*Nsurf] = 1;  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 21*Nsurf] = 1;} xsurf[cnt++ + 22*Nsurf] = 1;}
  xsurf[cnt++ + 23*Nsurf] = 1; for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 24*Nsurf] = 1;} xsurf[cnt++ + 25*Nsurf] = 1;

  /* interpolations only sum to 1 when using direct solver */
#if defined(PETSC_USE_DEBUG_foo)
  for (i=0; i<Nsurf; i++) {
    tmp = 0.0;
    for (j=0; j<26; j++) {
      tmp += xsurf[i+j*Nsurf];
    }
    if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %G",i,PetscAbsScalar(tmp));
  }
#endif
  ierr = MatDenseRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
  /* ierr = MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);*/


  /*
       I are the indices for all the needed vertices (in global numbering)
       Iint are the indices for the interior values, I surf for the surface values
            (This is just for the part of the global matrix obtained with MatGetSubMatrix(), it
             is NOT the local DMDA ordering.)
       IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
  */
#define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
  ierr = PetscMalloc3(N,PetscInt,&II,Nint,PetscInt,&Iint,Nsurf,PetscInt,&Isurf);CHKERRQ(ierr);
  ierr = PetscMalloc2(Nint,PetscInt,&IIint,Nsurf,PetscInt,&IIsurf);CHKERRQ(ierr);
  for (k=0; k<p-kstart; k++) {
    for (j=0; j<n-jstart; j++) {
      for (i=0; i<m-istart; i++) {
        II[c++] = i + j*mwidth + k*mwidth*nwidth;   /* local (ghosted) DMDA index of this point */

        if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
          IIint[cint]  = i + j*mwidth + k*mwidth*nwidth;
          Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
        } else {
          IIsurf[csurf]  = i + j*mwidth + k*mwidth*nwidth;
          Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
        }
      }
    }
  }
  /* consistency checks: the counts must match the closed-form sizes computed above */
  if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
  if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
  if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
  ierr = DMGetLocalToGlobalMapping(da,&ltg);CHKERRQ(ierr);
  /* translate local indices to the global numbering, in place */
  ierr = ISLocalToGlobalMappingApply(ltg,N,II,II);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);CHKERRQ(ierr);
  ierr = PetscFree3(II,Iint,Isurf);CHKERRQ(ierr);

  /* pull out the (sequential) subdomain matrix for the local patch plus ghost layer.
     NOTE(review): unlike DMDAGetFaceInterpolation() there is no ISSort(is) here --
     confirm that MatGetSubMatrices() tolerates the unsorted index set in this path */
  ierr = MatGetSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);CHKERRQ(ierr);
  A    = *Aholder;
  ierr = PetscFree(Aholder);CHKERRQ(ierr);

  /* interior-interior, interior-surface and surface-interior couplings of the patch */
  ierr = MatGetSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);CHKERRQ(ierr);
  ierr = MatGetSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);CHKERRQ(ierr);
  ierr = MatGetSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);CHKERRQ(ierr);

  /*
     Solve for the interpolation onto the interior Xint
  */
  ierr = MatMatMult(Ais,Xsurf,MAT_INITIAL_MATRIX,PETSC_DETERMINE,&Xint_tmp);CHKERRQ(ierr);
  ierr = MatScale(Xint_tmp,-1.0);CHKERRQ(ierr);
  if (exotic->directSolve) {
    /* Xint = -Aii^{-1} Ais Xsurf via a sparse LU with nested-dissection ordering */
    ierr = MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);CHKERRQ(ierr);
    ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
    ierr = MatGetOrdering(Aii,MATORDERINGND,&row,&col);CHKERRQ(ierr);
    ierr = MatLUFactorSymbolic(iAii,Aii,row,col,&info);CHKERRQ(ierr);
    ierr = ISDestroy(&row);CHKERRQ(ierr);
    ierr = ISDestroy(&col);CHKERRQ(ierr);
    ierr = MatLUFactorNumeric(iAii,Aii,&info);CHKERRQ(ierr);
    ierr = MatMatSolve(iAii,Xint_tmp,Xint);CHKERRQ(ierr);
    ierr = MatDestroy(&iAii);CHKERRQ(ierr);
  } else {
    Vec         b,x;
    PetscScalar *xint_tmp;

    /* iterative variant: solve Aii x = b column by column, aliasing the dense
       column storage of Xint (solution) and Xint_tmp (right hand side) */
    ierr = MatDenseGetArray(Xint,&xint);CHKERRQ(ierr);
    ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&x);CHKERRQ(ierr);
    ierr = MatDenseGetArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
    ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&b);CHKERRQ(ierr);
    ierr = KSPSetOperators(exotic->ksp,Aii,Aii,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
    for (i=0; i<26; i++) {
      ierr = VecPlaceArray(x,xint+i*Nint);CHKERRQ(ierr);
      ierr = VecPlaceArray(b,xint_tmp+i*Nint);CHKERRQ(ierr);
      ierr = KSPSolve(exotic->ksp,b,x);CHKERRQ(ierr);
      ierr = VecResetArray(x);CHKERRQ(ierr);
      ierr = VecResetArray(b);CHKERRQ(ierr);
    }
    ierr = MatDenseRestoreArray(Xint,&xint);CHKERRQ(ierr);
    ierr = MatDenseRestoreArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
    ierr = VecDestroy(&x);CHKERRQ(ierr);
    ierr = VecDestroy(&b);CHKERRQ(ierr);
  }
  ierr = MatDestroy(&Xint_tmp);CHKERRQ(ierr);

#if defined(PETSC_USE_DEBUG_foo)
  ierr = MatDenseGetArray(Xint,&xint);CHKERRQ(ierr);
  for (i=0; i<Nint; i++) {
    tmp = 0.0;
    for (j=0; j<26; j++) {
      tmp += xint[i+j*Nint];
    }
    if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %G",i,PetscAbsScalar(tmp));
  }
  ierr = MatDenseRestoreArray(Xint,&xint);CHKERRQ(ierr);
  /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
#endif


  /*         total vertices             total faces                                  total edges */
  Ntotal = (mp + 1)*(np + 1)*(pp + 1) + mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1) + mp*(np+1)*(pp+1) + np*(mp+1)*(pp+1) +  pp*(mp+1)*(np+1);

  /*
      For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
  */
  cnt = 0;
  gl[cnt++] = 0;  { gl[cnt++] = 1;} gl[cnt++] = m-istart-1;
  { gl[cnt++] = mwidth;  { gl[cnt++] = mwidth+1;} gl[cnt++] = mwidth + m-istart-1;}
  gl[cnt++] = mwidth*(n-jstart-1);  { gl[cnt++] = mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*(n-jstart-1) + m-istart-1;
  {
    gl[cnt++] = mwidth*nwidth;  { gl[cnt++] = mwidth*nwidth+1;}  gl[cnt++] = mwidth*nwidth+ m-istart-1;
    { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
    gl[cnt++] = mwidth*nwidth+ mwidth*(n-jstart-1);   { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1) + m-istart-1;
  }
  gl[cnt++] = mwidth*nwidth*(p-kstart-1); { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) +  m-istart-1;
  { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth;   { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1)+mwidth+m-istart-1;}
  gl[cnt++] = mwidth*nwidth*(p-kstart-1) +  mwidth*(n-jstart-1);  { gl[cnt++] = mwidth*nwidth*(p-kstart-1)+ mwidth*(n-jstart-1)+1;} gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth*(n-jstart-1) + m-istart-1;

  /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
  /* convert that to global numbering and get them on all processes */
  ierr = ISLocalToGlobalMappingApply(ltg,26,gl,gl);CHKERRQ(ierr);
  /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */
  ierr = PetscMalloc(26*mp*np*pp*sizeof(PetscInt),&globals);CHKERRQ(ierr);
  ierr = MPI_Allgather(gl,26,MPIU_INT,globals,26,MPIU_INT,((PetscObject)da)->comm);CHKERRQ(ierr);

  /* Number the coarse grid points from 0 to Ntotal */
  ierr = MatGetSize(Aglobal,&Nt,PETSC_NULL);CHKERRQ(ierr);
  /* keys are shifted by +1 because PetscTable cannot store the key 0 */
  ierr = PetscTableCreate(Ntotal/3,Nt+1,&ht);CHKERRQ(ierr);
  for (i=0; i<26*mp*np*pp; i++){
    ierr = PetscTableAddCount(ht,globals[i]+1);CHKERRQ(ierr);
  }
  ierr = PetscTableGetCount(ht,&cnt);CHKERRQ(ierr);
  if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
  ierr = PetscFree(globals);CHKERRQ(ierr);
  /* replace each representative fine-grid index by its coarse grid number (undo the +1 shift) */
  for (i=0; i<26; i++) {
    ierr = PetscTableFind(ht,gl[i]+1,&gl[i]);CHKERRQ(ierr);
    gl[i]--;
  }
  ierr = PetscTableDestroy(&ht);CHKERRQ(ierr);
  /* PetscIntView(26,gl,PETSC_VIEWER_STDOUT_WORLD); */

  /* construct global interpolation matrix */
  ierr = MatGetLocalSize(Aglobal,&Ng,PETSC_NULL);CHKERRQ(ierr);
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatCreateAIJ(((PetscObject)da)->comm,Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,PETSC_NULL,Nint+Nsurf,PETSC_NULL,P);CHKERRQ(ierr);
  } else {
    ierr = MatZeroEntries(*P);CHKERRQ(ierr);
  }
  /* column-oriented insertion: xint/xsurf are dense column-major arrays */
  ierr = MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatDenseGetArray(Xint,&xint);CHKERRQ(ierr);
  ierr = MatSetValues(*P,Nint,IIint,26,gl,xint,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatDenseRestoreArray(Xint,&xint);CHKERRQ(ierr);
  ierr = MatDenseGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
  ierr = MatSetValues(*P,Nsurf,IIsurf,26,gl,xsurf,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatDenseRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree2(IIint,IIsurf);CHKERRQ(ierr);

#if defined(PETSC_USE_DEBUG_foo)
  {
    Vec         x,y;
    PetscScalar *yy;
    ierr = VecCreateMPI(((PetscObject)da)->comm,Ng,PETSC_DETERMINE,&y);CHKERRQ(ierr);
    ierr = VecCreateMPI(((PetscObject)da)->comm,PETSC_DETERMINE,Ntotal,&x);CHKERRQ(ierr);
    ierr = VecSet(x,1.0);CHKERRQ(ierr);
    ierr = MatMult(*P,x,y);CHKERRQ(ierr);
    ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
    for (i=0; i<Ng; i++) {
      if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %G",i,PetscAbsScalar(yy[i]));
    }
    ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
    /* NOTE(review): dead code -- VecDestroy() takes Vec* in the live code above;
       change to VecDestroy(&x)/VecDestroy(&y) if this block is ever re-enabled */
    ierr = VecDestroy(x);CHKERRQ(ierr);
    ierr = VecDestroy(y);CHKERRQ(ierr);
  }
#endif

  ierr = MatDestroy(&Aii);CHKERRQ(ierr);
  ierr = MatDestroy(&Ais);CHKERRQ(ierr);
  ierr = MatDestroy(&Asi);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  ierr = ISDestroy(&isint);CHKERRQ(ierr);
  ierr = ISDestroy(&issurf);CHKERRQ(ierr);
  ierr = MatDestroy(&Xint);CHKERRQ(ierr);
  ierr = MatDestroy(&Xsurf);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
293 
294 #undef __FUNCT__
295 #define __FUNCT__ "DMDAGetFaceInterpolation"
/*
      DMDAGetFaceInterpolation - Gets the interpolation for a face based coarse space

      Input Parameters:
+     da      - the 3d DMDA (one dof per point) describing the fine grid
.     exotic  - the PC_Exotic context; supplies the directSolve flag and the inner KSP
.     Aglobal - the assembled fine grid operator
-     reuse   - MAT_INITIAL_MATRIX to create *P, MAT_REUSE_MATRIX to refill an existing one

      Output Parameter:
.     P - the interpolation from the face coarse space to the fine grid

      Notes: each process contributes 6 coarse basis functions, one per face of
      its local subdomain; edge and vertex points get no coarse degree of freedom.
*/
PetscErrorCode DMDAGetFaceInterpolation(DM da,PC_Exotic *exotic,Mat Aglobal,MatReuse reuse,Mat *P)
{
  PetscErrorCode         ierr;
  PetscInt               dim,i,j,k,m,n,p,dof,Nint,Nface,Nwire,Nsurf,*Iint,*Isurf,cint = 0,csurf = 0,istart,jstart,kstart,*II,N,c = 0;
  PetscInt               mwidth,nwidth,pwidth,cnt,mp,np,pp,Ntotal,gl[6],*globals,Ng,*IIint,*IIsurf,Nt;
  Mat                    Xint, Xsurf,Xint_tmp;
  IS                     isint,issurf,is,row,col;
  ISLocalToGlobalMapping ltg;
  MPI_Comm               comm;
  Mat                    A,Aii,Ais,Asi,*Aholder,iAii;
  MatFactorInfo          info;
  PetscScalar            *xsurf,*xint;
#if defined(PETSC_USE_DEBUG_foo)   /* deliberately never defined: the sanity checks below are compiled out */
  PetscScalar            tmp;
#endif
  PetscTable             ht;

  PetscFunctionBegin;
  ierr = DMDAGetInfo(da,&dim,0,0,0,&mp,&np,&pp,&dof,0,0,0,0,0);CHKERRQ(ierr);
  if (dof != 1) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only for single field problems");
  if (dim != 3) SETERRQ(((PetscObject)da)->comm,PETSC_ERR_SUP,"Only coded for 3d problems");
  ierr = DMDAGetCorners(da,0,0,0,&m,&n,&p);CHKERRQ(ierr);
  ierr = DMDAGetGhostCorners(da,&istart,&jstart,&kstart,&mwidth,&nwidth,&pwidth);CHKERRQ(ierr);
  /* a nonzero ghost corner means this process has a neighbor in that negative
     direction; set the offset to -1 so the loops below take in that ghost layer */
  istart = istart ? -1 : 0;
  jstart = jstart ? -1 : 0;
  kstart = kstart ? -1 : 0;

  /*
    the columns of P are the interpolation of each coarse grid point (one for each vertex and edge)
    to all the local degrees of freedom (this includes the vertices, edges and faces).

    Xint are the subset of the interpolation into the interior

    Xface are the interpolation onto faces but not into the interior

    Xsurf are the interpolation onto the vertices and edges (the surfbasket)
                                        Xint
    Symbolically one could write P = (  Xface  ) after interchanging the rows to match the natural ordering on the domain
                                        Xsurf
  */
  N     = (m - istart)*(n - jstart)*(p - kstart);   /* all local points, including the ghost layer */
  Nint  = (m-2-istart)*(n-2-jstart)*(p-2-kstart);   /* strictly interior points of the local patch */
  Nface = 2*( (m-2-istart)*(n-2-jstart) + (m-2-istart)*(p-2-kstart) + (n-2-jstart)*(p-2-kstart) );
  Nwire = 4*( (m-2-istart) + (n-2-jstart) + (p-2-kstart) ) + 8;   /* edges plus the 8 corners */
  Nsurf = Nface + Nwire;                            /* everything on the boundary of the local patch */
  ierr = MatCreateSeqDense(MPI_COMM_SELF,Nint,6,PETSC_NULL,&Xint);CHKERRQ(ierr);
  ierr = MatCreateSeqDense(MPI_COMM_SELF,Nsurf,6,PETSC_NULL,&Xsurf);CHKERRQ(ierr);
  ierr = MatDenseGetArray(Xsurf,&xsurf);CHKERRQ(ierr);

  /*
     Require that all 12 edges and 6 faces have at least one grid point. Otherwise some of the columns of
     Xsurf will be all zero (thus making the coarse matrix singular).
  */
  if (m-istart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in X direction must be at least 3");
  if (n-jstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Y direction must be at least 3");
  if (p-kstart < 3) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Number of grid points per process in Z direction must be at least 3");

  /* march through the boundary points in lexicographic order; column j of Xsurf
     gets a 1 for every face-interior point on face j (edges/vertices stay zero) */
  cnt = 0;
  for (j=1;j<n-1-jstart;j++) {  for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 0*Nsurf] = 1;} }
  for (k=1;k<p-1-kstart;k++) {
    for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 1*Nsurf] = 1;}
    for (j=1;j<n-1-jstart;j++) { xsurf[cnt++ + 2*Nsurf] = 1; /* these are the interior nodes */ xsurf[cnt++ + 3*Nsurf] = 1;}
    for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 4*Nsurf] = 1;}
  }
  for (j=1;j<n-1-jstart;j++) {for (i=1; i<m-istart-1; i++) { xsurf[cnt++ + 5*Nsurf] = 1;} }

#if defined(PETSC_USE_DEBUG_foo)
  for (i=0; i<Nsurf; i++) {
    tmp = 0.0;
    for (j=0; j<6; j++) {
      tmp += xsurf[i+j*Nsurf];
    }
    if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xsurf interpolation at i %D value %G",i,PetscAbsScalar(tmp));
  }
#endif
  ierr = MatDenseRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
  /* ierr = MatView(Xsurf,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);*/


  /*
       I are the indices for all the needed vertices (in global numbering)
       Iint are the indices for the interior values, I surf for the surface values
            (This is just for the part of the global matrix obtained with MatGetSubMatrix(), it
             is NOT the local DMDA ordering.)
       IIint and IIsurf are the same as the Iint, Isurf except they are in the global numbering
  */
#define Endpoint(a,start,b) (a == 0 || a == (b-1-start))
  ierr = PetscMalloc3(N,PetscInt,&II,Nint,PetscInt,&Iint,Nsurf,PetscInt,&Isurf);CHKERRQ(ierr);
  ierr = PetscMalloc2(Nint,PetscInt,&IIint,Nsurf,PetscInt,&IIsurf);CHKERRQ(ierr);
  for (k=0; k<p-kstart; k++) {
    for (j=0; j<n-jstart; j++) {
      for (i=0; i<m-istart; i++) {
        II[c++] = i + j*mwidth + k*mwidth*nwidth;   /* local (ghosted) DMDA index of this point */

        if (!Endpoint(i,istart,m) && !Endpoint(j,jstart,n) && !Endpoint(k,kstart,p)) {
          IIint[cint]  = i + j*mwidth + k*mwidth*nwidth;
          Iint[cint++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
        } else {
          IIsurf[csurf]  = i + j*mwidth + k*mwidth*nwidth;
          Isurf[csurf++] = i + j*(m-istart) + k*(m-istart)*(n-jstart);
        }
      }
    }
  }
  /* consistency checks: the counts must match the closed-form sizes computed above */
  if (c != N) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"c != N");
  if (cint != Nint) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"cint != Nint");
  if (csurf != Nsurf) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_PLIB,"csurf != Nsurf");
  ierr = DMGetLocalToGlobalMapping(da,&ltg);CHKERRQ(ierr);
  /* translate local indices to the global numbering, in place */
  ierr = ISLocalToGlobalMappingApply(ltg,N,II,II);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,Nint,IIint,IIint);CHKERRQ(ierr);
  ierr = ISLocalToGlobalMappingApply(ltg,Nsurf,IIsurf,IIsurf);CHKERRQ(ierr);
  ierr = PetscObjectGetComm((PetscObject)da,&comm);CHKERRQ(ierr);
  ierr = ISCreateGeneral(comm,N,II,PETSC_COPY_VALUES,&is);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Nint,Iint,PETSC_COPY_VALUES,&isint);CHKERRQ(ierr);
  ierr = ISCreateGeneral(PETSC_COMM_SELF,Nsurf,Isurf,PETSC_COPY_VALUES,&issurf);CHKERRQ(ierr);
  ierr = PetscFree3(II,Iint,Isurf);CHKERRQ(ierr);

  /* pull out the (sequential) subdomain matrix for the local patch plus ghost layer */
  ierr = ISSort(is);CHKERRQ(ierr);
  ierr = MatGetSubMatrices(Aglobal,1,&is,&is,MAT_INITIAL_MATRIX,&Aholder);CHKERRQ(ierr);
  A    = *Aholder;
  ierr = PetscFree(Aholder);CHKERRQ(ierr);

  /* interior-interior, interior-surface and surface-interior couplings of the patch */
  ierr = MatGetSubMatrix(A,isint,isint,MAT_INITIAL_MATRIX,&Aii);CHKERRQ(ierr);
  ierr = MatGetSubMatrix(A,isint,issurf,MAT_INITIAL_MATRIX,&Ais);CHKERRQ(ierr);
  ierr = MatGetSubMatrix(A,issurf,isint,MAT_INITIAL_MATRIX,&Asi);CHKERRQ(ierr);

  /*
     Solve for the interpolation onto the interior Xint
  */
  ierr = MatMatMult(Ais,Xsurf,MAT_INITIAL_MATRIX,PETSC_DETERMINE,&Xint_tmp);CHKERRQ(ierr);
  ierr = MatScale(Xint_tmp,-1.0);CHKERRQ(ierr);

  if (exotic->directSolve) {
    /* Xint = -Aii^{-1} Ais Xsurf via a sparse LU with nested-dissection ordering */
    ierr = MatGetFactor(Aii,MATSOLVERPETSC,MAT_FACTOR_LU,&iAii);CHKERRQ(ierr);
    ierr = MatFactorInfoInitialize(&info);CHKERRQ(ierr);
    ierr = MatGetOrdering(Aii,MATORDERINGND,&row,&col);CHKERRQ(ierr);
    ierr = MatLUFactorSymbolic(iAii,Aii,row,col,&info);CHKERRQ(ierr);
    ierr = ISDestroy(&row);CHKERRQ(ierr);
    ierr = ISDestroy(&col);CHKERRQ(ierr);
    ierr = MatLUFactorNumeric(iAii,Aii,&info);CHKERRQ(ierr);
    ierr = MatMatSolve(iAii,Xint_tmp,Xint);CHKERRQ(ierr);
    ierr = MatDestroy(&iAii);CHKERRQ(ierr);
  } else {
    Vec         b,x;
    PetscScalar *xint_tmp;

    /* iterative variant: solve Aii x = b column by column, aliasing the dense
       column storage of Xint (solution) and Xint_tmp (right hand side) */
    ierr = MatDenseGetArray(Xint,&xint);CHKERRQ(ierr);
    ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&x);CHKERRQ(ierr);
    ierr = MatDenseGetArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
    ierr = VecCreateSeqWithArray(PETSC_COMM_SELF,1,Nint,0,&b);CHKERRQ(ierr);
    ierr = KSPSetOperators(exotic->ksp,Aii,Aii,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
    for (i=0; i<6; i++) {
      ierr = VecPlaceArray(x,xint+i*Nint);CHKERRQ(ierr);
      ierr = VecPlaceArray(b,xint_tmp+i*Nint);CHKERRQ(ierr);
      ierr = KSPSolve(exotic->ksp,b,x);CHKERRQ(ierr);
      ierr = VecResetArray(x);CHKERRQ(ierr);
      ierr = VecResetArray(b);CHKERRQ(ierr);
    }
    ierr = MatDenseRestoreArray(Xint,&xint);CHKERRQ(ierr);
    ierr = MatDenseRestoreArray(Xint_tmp,&xint_tmp);CHKERRQ(ierr);
    ierr = VecDestroy(&x);CHKERRQ(ierr);
    ierr = VecDestroy(&b);CHKERRQ(ierr);
  }
  ierr = MatDestroy(&Xint_tmp);CHKERRQ(ierr);

#if defined(PETSC_USE_DEBUG_foo)
  ierr = MatDenseGetArray(Xint,&xint);CHKERRQ(ierr);
  for (i=0; i<Nint; i++) {
    tmp = 0.0;
    for (j=0; j<6; j++) {
      tmp += xint[i+j*Nint];
    }
    if (PetscAbsScalar(tmp-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong Xint interpolation at i %D value %G",i,PetscAbsScalar(tmp));
  }
  ierr = MatDenseRestoreArray(Xint,&xint);CHKERRQ(ierr);
  /* ierr =MatView(Xint,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr); */
#endif


  /*         total faces    */
  Ntotal =  mp*np*(pp+1) + mp*pp*(np+1) + np*pp*(mp+1);

  /*
      For each vertex, edge, face on process (in the same orderings as used above) determine its local number including ghost points
  */
  cnt = 0;
  { gl[cnt++] = mwidth+1;}
  {
    { gl[cnt++] = mwidth*nwidth+1;}
    { gl[cnt++] = mwidth*nwidth + mwidth; /* these are the interior nodes */ gl[cnt++] = mwidth*nwidth + mwidth+m-istart-1;}
    { gl[cnt++] = mwidth*nwidth+mwidth*(n-jstart-1)+1;}
  }
  { gl[cnt++] = mwidth*nwidth*(p-kstart-1) + mwidth+1;}

  /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
  /* convert that to global numbering and get them on all processes */
  ierr = ISLocalToGlobalMappingApply(ltg,6,gl,gl);CHKERRQ(ierr);
  /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */
  ierr = PetscMalloc(6*mp*np*pp*sizeof(PetscInt),&globals);CHKERRQ(ierr);
  ierr = MPI_Allgather(gl,6,MPIU_INT,globals,6,MPIU_INT,((PetscObject)da)->comm);CHKERRQ(ierr);

  /* Number the coarse grid points from 0 to Ntotal */
  ierr = MatGetSize(Aglobal,&Nt,PETSC_NULL);CHKERRQ(ierr);
  /* keys are shifted by +1 because PetscTable cannot store the key 0 */
  ierr = PetscTableCreate(Ntotal/3,Nt+1,&ht);CHKERRQ(ierr);
  for (i=0; i<6*mp*np*pp; i++){
    ierr = PetscTableAddCount(ht,globals[i]+1);CHKERRQ(ierr);
  }
  ierr = PetscTableGetCount(ht,&cnt);CHKERRQ(ierr);
  if (cnt != Ntotal) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hash table size %D not equal to total number coarse grid points %D",cnt,Ntotal);
  ierr = PetscFree(globals);CHKERRQ(ierr);
  /* replace each representative fine-grid index by its coarse grid number (undo the +1 shift) */
  for (i=0; i<6; i++) {
    ierr = PetscTableFind(ht,gl[i]+1,&gl[i]);CHKERRQ(ierr);
    gl[i]--;
  }
  ierr = PetscTableDestroy(&ht);CHKERRQ(ierr);
  /* PetscIntView(6,gl,PETSC_VIEWER_STDOUT_WORLD); */

  /* construct global interpolation matrix */
  ierr = MatGetLocalSize(Aglobal,&Ng,PETSC_NULL);CHKERRQ(ierr);
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatCreateAIJ(((PetscObject)da)->comm,Ng,PETSC_DECIDE,PETSC_DECIDE,Ntotal,Nint+Nsurf,PETSC_NULL,Nint,PETSC_NULL,P);CHKERRQ(ierr);
  } else {
    ierr = MatZeroEntries(*P);CHKERRQ(ierr);
  }
  /* column-oriented insertion: xint/xsurf are dense column-major arrays */
  ierr = MatSetOption(*P,MAT_ROW_ORIENTED,PETSC_FALSE);CHKERRQ(ierr);
  ierr = MatDenseGetArray(Xint,&xint);CHKERRQ(ierr);
  ierr = MatSetValues(*P,Nint,IIint,6,gl,xint,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatDenseRestoreArray(Xint,&xint);CHKERRQ(ierr);
  ierr = MatDenseGetArray(Xsurf,&xsurf);CHKERRQ(ierr);
  ierr = MatSetValues(*P,Nsurf,IIsurf,6,gl,xsurf,INSERT_VALUES);CHKERRQ(ierr);
  ierr = MatDenseRestoreArray(Xsurf,&xsurf);CHKERRQ(ierr);
  ierr = MatAssemblyBegin(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*P,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = PetscFree2(IIint,IIsurf);CHKERRQ(ierr);


#if defined(PETSC_USE_DEBUG_foo)
  {
    Vec         x,y;
    PetscScalar *yy;
    ierr = VecCreateMPI(((PetscObject)da)->comm,Ng,PETSC_DETERMINE,&y);CHKERRQ(ierr);
    ierr = VecCreateMPI(((PetscObject)da)->comm,PETSC_DETERMINE,Ntotal,&x);CHKERRQ(ierr);
    ierr = VecSet(x,1.0);CHKERRQ(ierr);
    ierr = MatMult(*P,x,y);CHKERRQ(ierr);
    ierr = VecGetArray(y,&yy);CHKERRQ(ierr);
    for (i=0; i<Ng; i++) {
      if (PetscAbsScalar(yy[i]-1.0) > 1.e-10) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Wrong p interpolation at i %D value %G",i,PetscAbsScalar(yy[i]));
    }
    ierr = VecRestoreArray(y,&yy);CHKERRQ(ierr);
    /* NOTE(review): dead code -- VecDestroy() takes Vec* in the live code above;
       change to VecDestroy(&x)/VecDestroy(&y) if this block is ever re-enabled */
    ierr = VecDestroy(x);CHKERRQ(ierr);
    ierr = VecDestroy(y);CHKERRQ(ierr);
  }
#endif

  ierr = MatDestroy(&Aii);CHKERRQ(ierr);
  ierr = MatDestroy(&Ais);CHKERRQ(ierr);
  ierr = MatDestroy(&Asi);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = ISDestroy(&is);CHKERRQ(ierr);
  ierr = ISDestroy(&isint);CHKERRQ(ierr);
  ierr = ISDestroy(&issurf);CHKERRQ(ierr);
  ierr = MatDestroy(&Xint);CHKERRQ(ierr);
  ierr = MatDestroy(&Xsurf);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
565 
566 
567 #undef __FUNCT__
568 #define __FUNCT__ "PCExoticSetType"
/*@
   PCExoticSetType - Sets the type of coarse grid interpolation to use

   Logically Collective on PC

   Input Parameters:
+  pc - the preconditioner context
-  type - either PC_EXOTIC_FACE or PC_EXOTIC_WIREBASKET (defaults to face)

   Notes: The face based interpolation has 1 degree of freedom per face and ignores the
     edge and vertex values completely in the coarse problem. For any seven point
     stencil the interpolation of a constant on all faces into the interior is that constant.

     The wirebasket interpolation has 1 degree of freedom per vertex, per edge and
     per face. A constant on the subdomain boundary is interpolated as that constant
     in the interior of the domain.

     The coarse grid matrix is obtained via the Galerkin computation A_c = R A R^T, hence
     if A is nonsingular A_c is also nonsingular.

     Both interpolations are suitable for only scalar problems.

   Level: intermediate


.seealso: PCEXOTIC, PCExoticType
@*/
PetscErrorCode  PCExoticSetType(PC pc,PCExoticType type)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(pc,PC_CLASSID,1);
  PetscValidLogicalCollectiveEnum(pc,type,2);
  /* dispatches to PCExoticSetType_Exotic() only when pc is actually of type PCEXOTIC;
     silently a no-op for other PC types (Try, not Use) */
  ierr = PetscTryMethod(pc,"PCExoticSetType_C",(PC,PCExoticType),(pc,type));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
606 
607 #undef __FUNCT__
608 #define __FUNCT__ "PCExoticSetType_Exotic"
609 PetscErrorCode  PCExoticSetType_Exotic(PC pc,PCExoticType type)
610 {
611   PC_MG     *mg = (PC_MG*)pc->data;
612   PC_Exotic *ctx = (PC_Exotic*) mg->innerctx;
613 
614   PetscFunctionBegin;
615   ctx->type = type;
616   PetscFunctionReturn(0);
617 }
618 
619 #undef __FUNCT__
620 #define __FUNCT__ "PCSetUp_Exotic"
621 PetscErrorCode PCSetUp_Exotic(PC pc)
622 {
623   PetscErrorCode ierr;
624   Mat            A;
625   PC_MG          *mg = (PC_MG*)pc->data;
626   PC_Exotic      *ex = (PC_Exotic*) mg->innerctx;
627   MatReuse       reuse = (ex->P) ? MAT_REUSE_MATRIX : MAT_INITIAL_MATRIX;
628 
629   PetscFunctionBegin;
630   if (!pc->dm) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONGSTATE,"Need to call PCSetDM() before using this PC");
631   ierr = PCGetOperators(pc,PETSC_NULL,&A,PETSC_NULL);CHKERRQ(ierr);
632   if (ex->type == PC_EXOTIC_FACE) {
633     ierr = DMDAGetFaceInterpolation(pc->dm,ex,A,reuse,&ex->P);CHKERRQ(ierr);
634   } else if (ex->type == PC_EXOTIC_WIREBASKET) {
635     ierr = DMDAGetWireBasketInterpolation(pc->dm,ex,A,reuse,&ex->P);CHKERRQ(ierr);
636   } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_PLIB,"Unknown exotic coarse space %d",ex->type);
637   ierr = PCMGSetInterpolation(pc,1,ex->P);CHKERRQ(ierr);
638   /* if PC has attached DM we must remove it or the PCMG will use it to compute incorrect sized vectors and interpolations */
639   ierr = PCSetDM(pc,PETSC_NULL);CHKERRQ(ierr);
640   ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
641   PetscFunctionReturn(0);
642 }
643 
644 #undef __FUNCT__
645 #define __FUNCT__ "PCDestroy_Exotic"
646 PetscErrorCode PCDestroy_Exotic(PC pc)
647 {
648   PetscErrorCode ierr;
649   PC_MG          *mg = (PC_MG*)pc->data;
650   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
651 
652   PetscFunctionBegin;
653   ierr = MatDestroy(&ctx->P);CHKERRQ(ierr);
654   ierr = KSPDestroy(&ctx->ksp);CHKERRQ(ierr);
655   ierr = PetscFree(ctx);CHKERRQ(ierr);
656   ierr = PCDestroy_MG(pc);CHKERRQ(ierr);
657   PetscFunctionReturn(0);
658 }
659 
660 #undef __FUNCT__
661 #define __FUNCT__ "PCView_Exotic"
662 PetscErrorCode PCView_Exotic(PC pc,PetscViewer viewer)
663 {
664   PC_MG          *mg = (PC_MG*)pc->data;
665   PetscErrorCode ierr;
666   PetscBool      iascii;
667   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
668 
669   PetscFunctionBegin;
670   ierr = PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&iascii);CHKERRQ(ierr);
671   if (iascii) {
672     ierr = PetscViewerASCIIPrintf(viewer,"    Exotic type = %s\n",PCExoticTypes[ctx->type]);CHKERRQ(ierr);
673     if (ctx->directSolve) {
674       ierr = PetscViewerASCIIPrintf(viewer,"      Using direct solver to construct interpolation\n");CHKERRQ(ierr);
675     } else {
676       PetscViewer sviewer;
677       PetscMPIInt rank;
678 
679       ierr = PetscViewerASCIIPrintf(viewer,"      Using iterative solver to construct interpolation\n");CHKERRQ(ierr);
680       ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);
681       ierr = PetscViewerASCIIPushTab(viewer);CHKERRQ(ierr);  /* should not need to push this twice? */
682       ierr = PetscViewerGetSingleton(viewer,&sviewer);CHKERRQ(ierr);
683       ierr = MPI_Comm_rank(((PetscObject)pc)->comm,&rank);CHKERRQ(ierr);
684       if (!rank) {
685 	ierr = KSPView(ctx->ksp,sviewer);CHKERRQ(ierr);
686       }
687       ierr = PetscViewerRestoreSingleton(viewer,&sviewer);CHKERRQ(ierr);
688       ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
689       ierr = PetscViewerASCIIPopTab(viewer);CHKERRQ(ierr);
690     }
691   }
692   ierr = PCView_MG(pc,viewer);CHKERRQ(ierr);
693   PetscFunctionReturn(0);
694 }
695 
696 #undef __FUNCT__
697 #define __FUNCT__ "PCSetFromOptions_Exotic"
698 PetscErrorCode PCSetFromOptions_Exotic(PC pc)
699 {
700   PetscErrorCode ierr;
701   PetscBool      flg;
702   PC_MG          *mg = (PC_MG*)pc->data;
703   PCExoticType   mgctype;
704   PC_Exotic      *ctx = (PC_Exotic*) mg->innerctx;
705 
706   PetscFunctionBegin;
707   ierr = PetscOptionsHead("Exotic coarse space options");CHKERRQ(ierr);
708     ierr = PetscOptionsEnum("-pc_exotic_type","face or wirebasket","PCExoticSetType",PCExoticTypes,(PetscEnum)ctx->type,(PetscEnum*)&mgctype,&flg);CHKERRQ(ierr);
709     if (flg) {
710       ierr = PCExoticSetType(pc,mgctype);CHKERRQ(ierr);
711     }
712     ierr = PetscOptionsBool("-pc_exotic_direct_solver","use direct solver to construct interpolation","None",ctx->directSolve,&ctx->directSolve,PETSC_NULL);CHKERRQ(ierr);
713     if (!ctx->directSolve) {
714       if (!ctx->ksp) {
715         const char *prefix;
716         ierr = KSPCreate(PETSC_COMM_SELF,&ctx->ksp);CHKERRQ(ierr);
717         ierr = PetscObjectIncrementTabLevel((PetscObject)ctx->ksp,(PetscObject)pc,1);CHKERRQ(ierr);
718         ierr = PetscLogObjectParent(pc,ctx->ksp);CHKERRQ(ierr);
719         ierr = PCGetOptionsPrefix(pc,&prefix);CHKERRQ(ierr);
720         ierr = KSPSetOptionsPrefix(ctx->ksp,prefix);CHKERRQ(ierr);
721         ierr = KSPAppendOptionsPrefix(ctx->ksp,"exotic_");CHKERRQ(ierr);
722       }
723       ierr = KSPSetFromOptions(ctx->ksp);CHKERRQ(ierr);
724     }
725   ierr = PetscOptionsTail();CHKERRQ(ierr);
726   PetscFunctionReturn(0);
727 }
728 
729 
730 /*MC
731      PCEXOTIC - Two level overlapping Schwarz preconditioner with exotic (non-standard) coarse grid spaces
732 
733      This uses the PCMG infrastructure restricted to two levels and the face and wirebasket based coarse
734    grid spaces.
735 
736    Notes: By default this uses GMRES on the fine grid smoother so this should be used with KSPFGMRES or the smoother changed to not use GMRES
737 
738    References: These coarse grid spaces originate in the work of Bramble, Pasciak  and Schatz, "The Construction
   of Preconditioners for Elliptic Problems by Substructuring IV", Mathematics of Computation, volume 53 pages 1--24, 1989.
740    They were generalized slightly in "Domain Decomposition Method for Linear Elasticity", Ph. D. thesis, Barry Smith,
741    New York University, 1990. They were then explored in great detail in Dryja, Smith, Widlund, "Schwarz Analysis
742    of Iterative Substructuring Methods for Elliptic Problems in Three Dimensions, SIAM Journal on Numerical
743    Analysis, volume 31. pages 1662-1694, 1994. These were developed in the context of iterative substructuring preconditioners.
744    They were then ingeniously applied as coarse grid spaces for overlapping Schwarz methods by Dohrmann and Widlund.
745    They refer to them as GDSW (generalized Dryja, Smith, Widlund preconditioners). See, for example,
746    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Extending theory for domain decomposition algorithms to irregular subdomains. In Ulrich Langer, Marco
747    Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
748    of the 17th International Conference on Domain Decomposition Methods in
749    Science and Engineering, held in Strobl, Austria, July 3-7, 2006, number 60 in
750    Springer-Verlag, Lecture Notes in Computational Science and Engineering, pages 255-261, 2007.
751    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. A family of energy min-
752    imizing coarse spaces for overlapping Schwarz preconditioners. In Ulrich Langer,
753    Marco Discacciati, David Keyes, Olof Widlund, and Walter Zulehner, editors, Proceedings
754    of the 17th International Conference on Domain Decomposition Methods
755    in Science and Engineering, held in Strobl, Austria, July 3-7, 2006, number 60 in
756    Springer-Verlag, Lecture Notes in Computational Science and Engineering, pages 247-254, 2007
757    Clark R. Dohrmann, Axel Klawonn, and Olof B. Widlund. Domain decomposition
758    for less regular subdomains: Overlapping Schwarz in two dimensions. SIAM J.
759    Numer. Anal., 46(4):2153-2168, 2008.
760    Clark R. Dohrmann and Olof B. Widlund. An overlapping Schwarz
761    algorithm for almost incompressible elasticity. Technical Report
762    TR2008-912, Department of Computer Science, Courant Institute
763    of Mathematical Sciences, New York University, May 2008. URL:
764    http://cs.nyu.edu/csweb/Research/TechReports/TR2008-912/TR2008-912.pdf
765 
766    Options Database: The usual PCMG options are supported, such as -mg_levels_pc_type <type> -mg_coarse_pc_type <type>
767       -pc_mg_type <type>
768 
769    Level: advanced
770 
771 .seealso:  PCMG, PCSetDM(), PCExoticType, PCExoticSetType()
772 M*/
773 
774 EXTERN_C_BEGIN
775 #undef __FUNCT__
776 #define __FUNCT__ "PCCreate_Exotic"
777 PetscErrorCode  PCCreate_Exotic(PC pc)
778 {
779   PetscErrorCode ierr;
780   PC_Exotic      *ex;
781   PC_MG          *mg;
782 
783   PetscFunctionBegin;
784   /* if type was previously mg; must manually destroy it because call to PCSetType(pc,PCMG) will not destroy it */
785   if (pc->ops->destroy) { ierr =  (*pc->ops->destroy)(pc);CHKERRQ(ierr); pc->data = 0;}
786   ierr = PetscFree(((PetscObject)pc)->type_name);CHKERRQ(ierr);
787   ((PetscObject)pc)->type_name = 0;
788 
789   ierr = PCSetType(pc,PCMG);CHKERRQ(ierr);
790   ierr = PCMGSetLevels(pc,2,PETSC_NULL);CHKERRQ(ierr);
791   ierr = PCMGSetGalerkin(pc,PETSC_TRUE);CHKERRQ(ierr);
792   ierr = PetscNew(PC_Exotic,&ex);CHKERRQ(ierr);\
793   ex->type = PC_EXOTIC_FACE;
794   mg = (PC_MG*) pc->data;
795   mg->innerctx = ex;
796 
797 
798   pc->ops->setfromoptions = PCSetFromOptions_Exotic;
799   pc->ops->view           = PCView_Exotic;
800   pc->ops->destroy        = PCDestroy_Exotic;
801   pc->ops->setup          = PCSetUp_Exotic;
802   ierr = PetscObjectComposeFunctionDynamic((PetscObject)pc,"PCExoticSetType_C","PCExoticSetType_Exotic",PCExoticSetType_Exotic);CHKERRQ(ierr);
803   PetscFunctionReturn(0);
804 }
805 EXTERN_C_END
806