xref: /petsc/src/dm/impls/da/daindex.c (revision bbd3f3368baff017a6ea7962313571cf23b2b4ec)
1 
2 /*
3   Code for manipulating distributed regular arrays in parallel.
4 */
5 
6 #include <petsc-private/dmdaimpl.h>    /*I   "petscdmda.h"   I*/
7 
8 #undef __FUNCT__
9 #define __FUNCT__ "DMDAGetNatural_Private"
10 /*
11    Gets the natural number for each global number on the process.
12 
13    Used by DMDAGetAO() and DMDAGlobalToNatural_Create()
14 */
15 PetscErrorCode DMDAGetNatural_Private(DM da,PetscInt *outNlocal,IS *isnatural)
16 {
17   PetscErrorCode ierr;
18   PetscInt       Nlocal,i,j,k,*lidx,lict = 0;
19   DM_DA          *dd = (DM_DA*)da->data;
20 
21   PetscFunctionBegin;
22   Nlocal = (dd->xe-dd->xs);
23   if (dd->dim > 1) Nlocal *= (dd->ye-dd->ys);
24   if (dd->dim > 2) Nlocal *= (dd->ze-dd->zs);
25 
26   ierr = PetscMalloc1(Nlocal,&lidx);CHKERRQ(ierr);
27 
28   if (dd->dim == 1) {
29     for (i=dd->xs; i<dd->xe; i++) {
30       /*  global number in natural ordering */
31       lidx[lict++] = i;
32     }
33   } else if (dd->dim == 2) {
34     for (j=dd->ys; j<dd->ye; j++) {
35       for (i=dd->xs; i<dd->xe; i++) {
36         /*  global number in natural ordering */
37         lidx[lict++] = i + j*dd->M*dd->w;
38       }
39     }
40   } else if (dd->dim == 3) {
41     for (k=dd->zs; k<dd->ze; k++) {
42       for (j=dd->ys; j<dd->ye; j++) {
43         for (i=dd->xs; i<dd->xe; i++) {
44           lidx[lict++] = i + j*dd->M*dd->w + k*dd->M*dd->N*dd->w;
45         }
46       }
47     }
48   }
49   *outNlocal = Nlocal;
50   ierr       = ISCreateGeneral(PetscObjectComm((PetscObject)da),Nlocal,lidx,PETSC_OWN_POINTER,isnatural);CHKERRQ(ierr);
51   PetscFunctionReturn(0);
52 }
53 
54 #undef __FUNCT__
55 #define __FUNCT__ "DMDAGetAO"
56 /*@
57    DMDAGetAO - Gets the application ordering context for a distributed array.
58 
59    Collective on DMDA
60 
61    Input Parameter:
62 .  da - the distributed array
63 
   Output Parameter:
.  ao - the application ordering context for DMDAs
66 
67    Level: intermediate
68 
69    Notes:
70    In this case, the AO maps to the natural grid ordering that would be used
71    for the DMDA if only 1 processor were employed (ordering most rapidly in the
72    x-direction, then y, then z).  Multiple degrees of freedom are numbered
73    for each node (rather than 1 component for the whole grid, then the next
74    component, etc.)
75 
76 .keywords: distributed array, get, global, indices, local-to-global
77 
.seealso: DMDACreate2d(), DMDAGetGhostCorners(), DMDAGetCorners(), DMDALocalToGlobal(),
79           DMGlobalToLocalBegin(), DMGlobalToLocalEnd(), DMLocalToLocalBegin(), DMLocalToLocalEnd(), DMDAGetOwnershipRanges(),
80           AO, AOPetscToApplication(), AOApplicationToPetsc()
81 @*/
82 PetscErrorCode  DMDAGetAO(DM da,AO *ao)
83 {
84   DM_DA *dd = (DM_DA*)da->data;
85 
86   PetscFunctionBegin;
87   PetscValidHeaderSpecific(da,DM_CLASSID,1);
88   PetscValidPointer(ao,2);
89 
90   /*
91      Build the natural ordering to PETSc ordering mappings.
92   */
93   if (!dd->ao) {
94     IS             ispetsc,isnatural;
95     PetscErrorCode ierr;
96     PetscInt       Nlocal;
97 
98     ierr = DMDAGetNatural_Private(da,&Nlocal,&isnatural);CHKERRQ(ierr);
99     ierr = ISCreateStride(PetscObjectComm((PetscObject)da),Nlocal,dd->base,1,&ispetsc);CHKERRQ(ierr);
100     ierr = AOCreateBasicIS(isnatural,ispetsc,&dd->ao);CHKERRQ(ierr);
101     ierr = PetscLogObjectParent((PetscObject)da,(PetscObject)dd->ao);CHKERRQ(ierr);
102     ierr = ISDestroy(&ispetsc);CHKERRQ(ierr);
103     ierr = ISDestroy(&isnatural);CHKERRQ(ierr);
104   }
105   *ao = dd->ao;
106   PetscFunctionReturn(0);
107 }
108 
109 
110