#include <../src/mat/impls/aij/mpi/mpiaij.h>
/*@C
  MatCreateMPIAIJPERM - Creates a sparse parallel matrix whose local
  portions are stored as `MATSEQAIJPERM` matrices (a matrix class that inherits
  from SEQAIJ but includes some optimizations to allow more effective
  vectorization).

  Collective

  Input Parameters:
+ comm  - MPI communicator
. m     - number of local rows (or `PETSC_DECIDE` to have calculated if `M` is given).
          This value should be the same as the local size used in creating the
          y vector for the matrix-vector product y = Ax.
. n     - number of local columns (or `PETSC_DECIDE` to have calculated if `N` is given).
          This value should be the same as the local size used in creating the
          x vector for the matrix-vector product y = Ax. For square matrices `n` is almost always `m`.
. M     - number of global rows (or `PETSC_DETERMINE` to have calculated if `m` is given)
. N     - number of global columns (or `PETSC_DETERMINE` to have calculated if `n` is given)
. d_nz  - number of nonzeros per row in the DIAGONAL portion of the local submatrix
          (same value is used for all local rows)
. d_nnz - array containing the number of nonzeros in the various rows of the
          DIAGONAL portion of the local submatrix (possibly different for each row)
          or `NULL`, if `d_nz` is used to specify the nonzero structure.
          The size of this array is equal to the number of local rows, i.e. `m`.
          For matrices you plan to factor you must leave room for the diagonal entry and
          put in the entry even if it is zero.
. o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of the local
          submatrix (same value is used for all local rows).
- o_nnz - array containing the number of nonzeros in the various rows of the
          OFF-DIAGONAL portion of the local submatrix (possibly different for
          each row) or `NULL`, if `o_nz` is used to specify the nonzero
          structure. The size of this array is equal to the number
          of local rows, i.e. `m`.

  Output Parameter:
. A - the matrix

  Options Database Keys:
+ -mat_no_inode            - Do not use inodes
- -mat_inode_limit <limit> - Sets inode limit (max limit=5)

  Level: intermediate

  Notes:
  If the *_nnz parameter is given, then the *_nz parameter is ignored.

  The `m`, `n`, `M`, and `N` parameters specify the size of the matrix and its partitioning
  across processors, while the `d_nz`, `d_nnz`, `o_nz`, and `o_nnz` parameters specify the
  approximate storage requirements for this matrix.

  If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one
  processor then it must be used on all processors that share the object for
  that argument.

  The user MUST specify either the local or global matrix dimensions
  (possibly both).

  The parallel matrix is partitioned such that the first m0 rows belong to
  process 0, the next m1 rows belong to process 1, the next m2 rows belong
  to process 2, and so on, where m0, m1, m2, ... are the values of the input
  parameter `m` on each process.

  The DIAGONAL portion of the local submatrix of a processor can be defined
  as the submatrix which is obtained by extracting the part corresponding
  to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
  first row that belongs to the processor, and r2 is the last row belonging
  to this processor. This is a square m x m matrix. The remaining portion of
  the local submatrix (an m x (N-m) block) constitutes the OFF-DIAGONAL portion.

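  For example, consider the following 4 x 4 global matrix distributed over two processes,
  each owning `m` = 2 rows (an illustrative layout only, not part of the interface):
.vb
            cols 0-1 | cols 2-3
   proc0  [  1   2  |  3   4  ]   rows 0-1
          [  5   6  |  7   8  ]
          ------------------------
   proc1  [  9  10  | 11  12  ]   rows 2-3
          [ 13  14  | 15  16  ]
.ve
  On process 0 the DIAGONAL portion is the 2 x 2 block holding entries 1, 2, 5, 6, while
  entries 3, 4, 7, 8 form its OFF-DIAGONAL portion; on process 1 the DIAGONAL portion
  holds entries 11, 12, 15, 16 and entries 9, 10, 13, 14 form its OFF-DIAGONAL portion.
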
  If `d_nnz` and `o_nnz` are specified, then `d_nz` and `o_nz` are ignored.

  When calling this routine with a single process communicator, a matrix of
  type `MATSEQAIJPERM` is returned.  If a matrix of type `MATMPIAIJPERM` is desired
  for this type of communicator, use the construction mechanism
.vb
   MatCreate(...,&A);
   MatSetType(A,MATMPIAIJPERM);
   MatMPIAIJSetPreallocation(A,...);
.ve

  By default, this format uses inodes (identical nodes) when possible.
  We search for consecutive rows with the same nonzero structure, thereby
  reusing matrix information to achieve increased efficiency.

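  Example Usage:
  A minimal sketch of calling this routine; the local size of 2 rows per process and the
  per-row nonzero estimates below are illustrative assumptions, not requirements:
.vb
   Mat A;
   MatCreateMPIAIJPERM(PETSC_COMM_WORLD, 2, 2, PETSC_DETERMINE, PETSC_DETERMINE, 3, NULL, 1, NULL, &A);
   // insert entries with MatSetValues(), then assemble
   MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
   MatDestroy(&A);
.ve
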
.seealso: [](ch_matrices), `Mat`, [Sparse Matrix Creation](sec_matsparse), `MATMPIAIJPERM`, `MatCreate()`, `MatCreateSeqAIJPERM()`, `MatSetValues()`
@*/
PetscErrorCode MatCreateMPIAIJPERM(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[], Mat *A)
{
  PetscMPIInt size;

  PetscFunctionBegin;
  PetscCall(MatCreate(comm, A));
  PetscCall(MatSetSizes(*A, m, n, M, N));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size > 1) {
    PetscCall(MatSetType(*A, MATMPIAIJPERM));
    PetscCall(MatMPIAIJSetPreallocation(*A, d_nz, d_nnz, o_nz, o_nnz));
  } else {
    PetscCall(MatSetType(*A, MATSEQAIJPERM));
    PetscCall(MatSeqAIJSetPreallocation(*A, d_nz, d_nnz));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

static PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJPERM(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;

  PetscFunctionBegin;
  /* preallocate as a standard MPIAIJ matrix, then convert the local diagonal (b->A) and
     off-diagonal (b->B) blocks to MATSEQAIJPERM in place */
  PetscCall(MatMPIAIJSetPreallocation_MPIAIJ(B, d_nz, d_nnz, o_nz, o_nnz));
  PetscCall(MatConvert_SeqAIJ_SeqAIJPERM(b->A, MATSEQAIJPERM, MAT_INPLACE_MATRIX, &b->A));
  PetscCall(MatConvert_SeqAIJ_SeqAIJPERM(b->B, MATSEQAIJPERM, MAT_INPLACE_MATRIX, &b->B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat A, MatType type, MatReuse reuse, Mat *newmat)
{
  Mat B = *newmat;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) PetscCall(MatDuplicate(A, MAT_COPY_VALUES, &B));

  /* rename the type and install the AIJPERM-specific preallocation routine */
  PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPIAIJPERM));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocation_C", MatMPIAIJSetPreallocation_MPIAIJPERM));
  *newmat = B;
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJPERM(Mat A)
{
  PetscFunctionBegin;
  /* build a standard MPIAIJ matrix first, then convert it in place to MPIAIJPERM */
  PetscCall(MatSetType(A, MATMPIAIJ));
  PetscCall(MatConvert_MPIAIJ_MPIAIJPERM(A, MATMPIAIJPERM, MAT_INPLACE_MATRIX, &A));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
   MATAIJPERM - "AIJPERM" - A matrix type to be used for sparse matrices.

   This matrix type is identical to `MATSEQAIJPERM` when constructed with a single process communicator,
   and `MATMPIAIJPERM` otherwise.  As a result, for single process communicators,
   `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity.

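   A minimal creation sketch following that recommendation; `A`, `m`, `n`, `nz`, `d_nz`, and
   `o_nz` below are placeholders to be declared and chosen by the caller:
.vb
   MatCreate(PETSC_COMM_WORLD, &A);
   MatSetSizes(A, m, n, PETSC_DETERMINE, PETSC_DETERMINE);
   MatSetType(A, MATAIJPERM);
   MatSeqAIJSetPreallocation(A, nz, NULL);               // used when the communicator has one process
   MatMPIAIJSetPreallocation(A, d_nz, NULL, o_nz, NULL); // used when it has more than one
.ve
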
   Options Database Key:
. -mat_type aijperm - sets the matrix type to `MATAIJPERM`

   Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatCreateMPIAIJPERM()`, `MATSEQAIJPERM`, `MATMPIAIJPERM`, `MATSEQAIJ`, `MATMPIAIJ`, `MATSEQAIJMKL`, `MATMPIAIJMKL`, `MATSEQAIJSELL`, `MATMPIAIJSELL`
M*/