
static char help[] = "Partition a tiny grid using hierarchical partitioning.\n\n";

/*T
   Concepts: partitioning
   Processors: 4
T*/
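
/*
  Example usage (assuming the executable is built as ex82):
      mpiexec -n 4 ./ex82
*/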

/*
  Include "petscmat.h" so that we can use matrices.  Note that this file
  automatically includes:
     petscsys.h    - base PETSc routines
     petscvec.h    - vectors
     petscis.h     - index sets
     petscviewer.h - viewers
*/
#include <petscmat.h>

int main(int argc,char **args)
{
  Mat             A;                      /* adjacency graph of the grid */
  PetscErrorCode  ierr;
  PetscMPIInt     rank,size;
  PetscInt        *ia,*ja;                /* CSR row offsets and column indices */
  MatPartitioning part;
  IS              is,isn,isrows;
  IS              coarseparts,fineparts;
  MPI_Comm        comm;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  comm = PETSC_COMM_WORLD;
  CHKERRMPI(MPI_Comm_size(comm,&size));
  PetscCheckFalse(size != 4,comm,PETSC_ERR_WRONG_MPI_SIZE,"Must run with 4 processors");
  CHKERRMPI(MPI_Comm_rank(comm,&rank));

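  /*
    Build the distributed adjacency graph of a 4 x 4 grid (16 vertices with
    5-point-stencil connectivity).  Each process owns 4 consecutive vertices:
    rank 0 owns vertices 0-3, rank 1 owns 4-7, and so on.  ia[] holds the CSR
    row offsets of the local rows and ja[] holds the neighbor indices; on
    rank 0, for example, vertex 0 has the two neighbors ja[0..1] = {1,4}.
  */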
  CHKERRQ(PetscMalloc1(5,&ia));
  CHKERRQ(PetscMalloc1(16,&ja));
  if (rank == 0) {
    ja[0] = 1; ja[1] = 4; ja[2] = 0; ja[3] = 2; ja[4] = 5; ja[5] = 1; ja[6] = 3; ja[7] = 6;
    ja[8] = 2; ja[9] = 7;
    ia[0] = 0; ia[1] = 2; ia[2] = 5; ia[3] = 8; ia[4] = 10;
  } else if (rank == 1) {
    ja[0] = 0; ja[1] = 5; ja[2] = 8; ja[3] = 1; ja[4] = 4; ja[5] = 6; ja[6] = 9; ja[7] = 2;
    ja[8] = 5; ja[9] = 7; ja[10] = 10; ja[11] = 3; ja[12] = 6; ja[13] = 11;
    ia[0] = 0; ia[1] = 3; ia[2] = 7; ia[3] = 11; ia[4] = 14;
  } else if (rank == 2) {
    ja[0] = 4; ja[1] = 9; ja[2] = 12; ja[3] = 5; ja[4] = 8; ja[5] = 10; ja[6] = 13; ja[7] = 6;
    ja[8] = 9; ja[9] = 11; ja[10] = 14; ja[11] = 7; ja[12] = 10; ja[13] = 15;
    ia[0] = 0; ia[1] = 3; ia[2] = 7; ia[3] = 11; ia[4] = 14;
  } else {
    ja[0] = 8; ja[1] = 13; ja[2] = 9; ja[3] = 12; ja[4] = 14; ja[5] = 10; ja[6] = 13; ja[7] = 15;
    ja[8] = 11; ja[9] = 14;
    ia[0] = 0; ia[1] = 2; ia[2] = 5; ia[3] = 8; ia[4] = 10;
  }
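  /*
    MatCreateMPIAdj() takes ownership of ia and ja: they must be allocated
    with PetscMalloc1() and are freed when the matrix is destroyed, so the
    caller must not free them.
  */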
  CHKERRQ(MatCreateMPIAdj(comm,4,16,ia,ja,NULL,&A));
  CHKERRQ(MatView(A,PETSC_VIEWER_STDOUT_WORLD));
  /*
   Partition the graph of the matrix
  */
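  /*
    Hierarchical partitioning first splits the graph into ncoarseparts
    coarse parts and then splits each coarse part into nfineparts fine
    parts, so the settings below produce 2 * 2 = 4 parts in total, one
    per process.
  */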
  CHKERRQ(MatPartitioningCreate(comm,&part));
  CHKERRQ(MatPartitioningSetAdjacency(part,A));
  CHKERRQ(MatPartitioningSetType(part,MATPARTITIONINGHIERARCH));
  CHKERRQ(MatPartitioningHierarchicalSetNcoarseparts(part,2));
  CHKERRQ(MatPartitioningHierarchicalSetNfineparts(part,2));
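  /*
    Allow runtime options to override the settings above, e.g.
    -mat_partitioning_hierarchical_ncoarseparts and
    -mat_partitioning_hierarchical_nfineparts.
  */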
  CHKERRQ(MatPartitioningSetFromOptions(part));
  /* apply the partitioning; the resulting IS gives the new owning process of each local vertex */
  CHKERRQ(MatPartitioningApply(part,&is));
  /* the coarse-level part of each vertex */
  CHKERRQ(MatPartitioningHierarchicalGetCoarseparts(part,&coarseparts));
  CHKERRQ(ISView(coarseparts,PETSC_VIEWER_STDOUT_WORLD));
  /* the fine-level part of each vertex */
  CHKERRQ(MatPartitioningHierarchicalGetFineparts(part,&fineparts));
  CHKERRQ(ISView(fineparts,PETSC_VIEWER_STDOUT_WORLD));
  /* the combined partitioning */
  CHKERRQ(ISView(is,PETSC_VIEWER_STDOUT_WORLD));
  /* map each old global vertex number to its new global number */
  CHKERRQ(ISPartitioningToNumbering(is,&isn));
  CHKERRQ(ISView(isn,PETSC_VIEWER_STDOUT_WORLD));
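  /*
    ISBuildTwoSided() inverts the partitioning: it gathers onto each
    process the global indices of the vertices that the partitioning
    assigned to it.
  */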
  CHKERRQ(ISBuildTwoSided(is,NULL,&isrows));
  CHKERRQ(ISView(isrows,PETSC_VIEWER_STDOUT_WORLD));
  CHKERRQ(ISDestroy(&is));
  CHKERRQ(ISDestroy(&coarseparts));
  CHKERRQ(ISDestroy(&fineparts));
  CHKERRQ(ISDestroy(&isrows));
  CHKERRQ(ISDestroy(&isn));
  CHKERRQ(MatPartitioningDestroy(&part));
  /*
    Free work space.  All PETSc objects should be destroyed when they
    are no longer needed.
  */
  CHKERRQ(MatDestroy(&A));
  ierr = PetscFinalize();
  return ierr;
}

/*TEST

   test:
      nsize: 4
      requires: parmetis
      TODO: tests cannot use parmetis because it produces different results on different machines

TEST*/