
#include <../src/mat/impls/aij/mpi/mpiaij.h>
/*@C
   MatCreateMPIAIJPERM - Creates a sparse parallel matrix whose local
   portions are stored as `MATSEQAIJPERM` matrices (a matrix class that inherits
   from `MATSEQAIJ` but includes some optimizations to allow more effective
   vectorization).

   Collective

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or `PETSC_DECIDE` to have it calculated if `M` is given)
           This value should be the same as the local size used in creating the
           y vector for the matrix-vector product y = Ax.
.  n - number of local columns (or `PETSC_DECIDE` to have it calculated if `N` is given)
       This value should be the same as the local size used in creating the
       x vector for the matrix-vector product y = Ax. For square matrices `n` is almost always `m`.
.  M - number of global rows (or `PETSC_DETERMINE` to have it calculated if `m` is given)
.  N - number of global columns (or `PETSC_DETERMINE` to have it calculated if `n` is given)
.  d_nz  - number of nonzeros per row in the DIAGONAL portion of the local submatrix
           (the same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row)
           or `NULL`, if `d_nz` is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e. `m`.
           For matrices you plan to factor you must leave room for the diagonal entry
           and put in the entry even if it is zero.
.  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of the local
           submatrix (the same value is used for all local rows).
-  o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for
           each row) or `NULL`, if `o_nz` is used to specify the nonzero
           structure. The size of this array is equal to the number
           of local rows, i.e. `m`.

   Output Parameter:
.  A - the matrix

   Options Database Keys:
+  -mat_no_inode - Do not use inodes
-  -mat_inode_limit <limit> - Sets inode limit (max limit=5)

   Level: intermediate

   Notes:
   If the *_nnz parameter is given then the *_nz parameter is ignored.

   The `m`,`n`,`M`,`N` parameters specify the size of the matrix, and its partitioning
   across processors, while the `d_nz`,`d_nnz`,`o_nz`,`o_nnz` parameters specify the
   approximate storage requirements for this matrix.

   If `PETSC_DECIDE` or `PETSC_DETERMINE` is used for a particular argument on one
   processor then it must be used on all processors that share the object for
   that argument.

   The user MUST specify either the local or global matrix dimensions
   (possibly both).

   The parallel matrix is partitioned such that the first m0 rows belong to
   process 0, the next m1 rows belong to process 1, the next m2 rows belong
   to process 2, etc., where m0, m1, m2, ... are the values of the input
   parameter `m` on each process.

   The DIAGONAL portion of the local submatrix of a processor can be defined
   as the submatrix which is obtained by extracting the part corresponding
   to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
   first row that belongs to the processor, and r2 is the last row belonging
   to this processor. This is a square m x m matrix. The remaining portion of
   the local submatrix (m x (N-m)) constitutes the OFF-DIAGONAL portion.

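   As an illustration (the matrix and its nonzero pattern here are hypothetical),
   consider a 6 x 6 matrix split across two processes with three rows each, so that
   on process 0 the DIAGONAL block covers rows 0-2 and columns 0-2, while columns 3-5
   of those rows fall in the OFF-DIAGONAL block. If the rows owned by process 0 have
   nonzeros in the following global columns
.vb
   row 0: cols {0, 3}     -> d_nnz[0] = 1, o_nnz[0] = 1
   row 1: cols {0, 2, 4}  -> d_nnz[1] = 2, o_nnz[1] = 1
   row 2: cols {1, 2, 5}  -> d_nnz[2] = 2, o_nnz[2] = 1
.ve
   then process 0 would pass d_nnz = {1, 2, 2} and o_nnz = {1, 1, 1}.
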
   If `o_nnz` and `d_nnz` are specified, then `o_nz` and `d_nz` are ignored.

   When calling this routine with a single process communicator, a matrix of
   type `MATSEQAIJPERM` is returned.  If a matrix of type `MATMPIAIJPERM` is desired
   for this type of communicator, use the construction mechanism
.vb
   MatCreate(..., &A);
   MatSetType(A, MATMPIAIJPERM);
   MatSetSizes(A, m, n, M, N);
   MatMPIAIJSetPreallocation(A, ...);
.ve

   By default, this format uses inodes (identical nodes) when possible.
   We search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

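   Example usage (a minimal sketch; the sizes and nonzero counts below are
   illustrative, not required):
.vb
   Mat A;
   /* square matrix with 5 local rows and columns, at most 3 nonzeros per row in the
      DIAGONAL block and 2 in the OFF-DIAGONAL block; the global sizes are derived
      from the local ones */
   MatCreateMPIAIJPERM(PETSC_COMM_WORLD, 5, 5, PETSC_DETERMINE, PETSC_DETERMINE, 3, NULL, 2, NULL, &A);
   /* ... insert values with MatSetValues(), then assemble ... */
   MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
   MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
   MatDestroy(&A);
.ve
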
.seealso: [](ch_matrices), `Mat`, [Sparse Matrix Creation](sec_matsparse), `MATMPIAIJPERM`, `MatCreate()`, `MatCreateSeqAIJPERM()`, `MatSetValues()`
@*/
PetscErrorCode MatCreateMPIAIJPERM(MPI_Comm comm, PetscInt m, PetscInt n, PetscInt M, PetscInt N, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[], Mat *A)
{
  PetscMPIInt size;

  PetscFunctionBegin;
  PetscCall(MatCreate(comm, A));
  PetscCall(MatSetSizes(*A, m, n, M, N));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  if (size > 1) {
    PetscCall(MatSetType(*A, MATMPIAIJPERM));
    PetscCall(MatMPIAIJSetPreallocation(*A, d_nz, d_nnz, o_nz, o_nnz));
  } else {
    PetscCall(MatSetType(*A, MATSEQAIJPERM));
    PetscCall(MatSeqAIJSetPreallocation(*A, d_nz, d_nnz));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

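/*
   MatMPIAIJSetPreallocation_MPIAIJPERM - performs the usual MATMPIAIJ preallocation and
   then converts the local diagonal block (b->A) and off-diagonal block (b->B) to
   MATSEQAIJPERM in place, so that the permuted storage is used for the local kernels.
*/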
PetscErrorCode MatMPIAIJSetPreallocation_MPIAIJPERM(Mat B, PetscInt d_nz, const PetscInt d_nnz[], PetscInt o_nz, const PetscInt o_nnz[])
{
  Mat_MPIAIJ *b = (Mat_MPIAIJ *)B->data;

  PetscFunctionBegin;
  PetscCall(MatMPIAIJSetPreallocation_MPIAIJ(B, d_nz, d_nnz, o_nz, o_nnz));
  PetscCall(MatConvert_SeqAIJ_SeqAIJPERM(b->A, MATSEQAIJPERM, MAT_INPLACE_MATRIX, &b->A));
  PetscCall(MatConvert_SeqAIJ_SeqAIJPERM(b->B, MATSEQAIJPERM, MAT_INPLACE_MATRIX, &b->B));
  PetscFunctionReturn(PETSC_SUCCESS);
}

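/*
   MatConvert_MPIAIJ_MPIAIJPERM - converts a MATMPIAIJ matrix to MATMPIAIJPERM by changing
   the type name and composing the AIJPERM-specific preallocation routine, which converts
   the local blocks once preallocation is performed.
*/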
PETSC_INTERN PetscErrorCode MatConvert_MPIAIJ_MPIAIJPERM(Mat A, MatType type, MatReuse reuse, Mat *newmat)
{
  Mat B = *newmat;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) PetscCall(MatDuplicate(A, MAT_COPY_VALUES, &B));

  PetscCall(PetscObjectChangeTypeName((PetscObject)B, MATMPIAIJPERM));
  PetscCall(PetscObjectComposeFunction((PetscObject)B, "MatMPIAIJSetPreallocation_C", MatMPIAIJSetPreallocation_MPIAIJPERM));
  *newmat = B;
  PetscFunctionReturn(PETSC_SUCCESS);
}

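/*
   MatCreate_MPIAIJPERM - the constructor invoked by MatSetType() for MATMPIAIJPERM:
   builds a MATMPIAIJ matrix and converts it in place to MATMPIAIJPERM.
*/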
PETSC_EXTERN PetscErrorCode MatCreate_MPIAIJPERM(Mat A)
{
  PetscFunctionBegin;
  PetscCall(MatSetType(A, MATMPIAIJ));
  PetscCall(MatConvert_MPIAIJ_MPIAIJPERM(A, MATMPIAIJPERM, MAT_INPLACE_MATRIX, &A));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*MC
   MATAIJPERM - "AIJPERM" - A matrix type to be used for sparse matrices.

   This matrix type is identical to `MATSEQAIJPERM` when constructed with a single process communicator,
   and `MATMPIAIJPERM` otherwise.  As a result, for single process communicators,
   `MatSeqAIJSetPreallocation()` is supported, and similarly `MatMPIAIJSetPreallocation()` is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity.

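   For example, code intended to run with any number of processes can be written as
   follows (a sketch; `comm`, the sizes, and the nonzero counts are placeholders, and
   error checking is omitted):
.vb
   MatCreate(comm, &A);
   MatSetSizes(A, m, n, M, N);
   MatSetType(A, MATAIJPERM);
   MatSeqAIJSetPreallocation(A, nz, NULL);
   MatMPIAIJSetPreallocation(A, d_nz, NULL, o_nz, NULL);
.ve
   The preallocation call that does not match the actual matrix type is safely ignored.
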
   Options Database Key:
. -mat_type aijperm - sets the matrix type to `MATAIJPERM`

   Level: beginner

.seealso: [](ch_matrices), `Mat`, `MatCreateMPIAIJPERM()`, `MATSEQAIJPERM`, `MATMPIAIJPERM`, `MATSEQAIJ`, `MATMPIAIJ`, `MATSEQAIJMKL`, `MATMPIAIJMKL`, `MATSEQAIJSELL`, `MATMPIAIJSELL`
M*/
154