xref: /petsc/src/mat/impls/aij/seq/crl/crl.c (revision 2205254efee3a00a594e5e2a3a70f74dcb40bc03)
1 
2 /*
3   Defines a matrix-vector product for the MATSEQAIJCRL matrix class.
4   This class is derived from the MATSEQAIJ class and retains the
5   compressed row storage (aka Yale sparse matrix format) but augments
6   it with a column oriented storage that is more efficient for
7   matrix vector products on Vector machines.
8 
  CRL stands for "constant row length"; that is, the same number of columns
  is kept (padded with zeros) for each row of the sparse matrix.
11 */
12 #include <../src/mat/impls/aij/seq/crl/crl.h>
13 
14 #undef __FUNCT__
15 #define __FUNCT__ "MatDestroy_SeqAIJCRL"
16 PetscErrorCode MatDestroy_SeqAIJCRL(Mat A)
17 {
18   PetscErrorCode ierr;
19   Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
20 
21   /* Free everything in the Mat_AIJCRL data structure. */
22   if (aijcrl) {
23     ierr = PetscFree2(aijcrl->acols,aijcrl->icols);CHKERRQ(ierr);
24   }
25   ierr = PetscFree(A->spptr);CHKERRQ(ierr);
26   ierr = PetscObjectChangeTypeName((PetscObject)A, MATSEQAIJ);CHKERRQ(ierr);
27   ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
28   PetscFunctionReturn(0);
29 }
30 
31 PetscErrorCode MatDuplicate_AIJCRL(Mat A, MatDuplicateOption op, Mat *M)
32 {
33   PetscFunctionBegin;
34   SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot duplicate AIJCRL matrices yet");
35   PetscFunctionReturn(0);
36 }
37 
#undef __FUNCT__
#define __FUNCT__ "MatSeqAIJCRL_create_aijcrl"
/*
  Builds (or rebuilds) the CRL (constant-row-length) arrays from the CSR
  data of the underlying SeqAIJ matrix: rmax "columns" of length m, where
  slot [j*m+i] holds the j-th nonzero of row i, rows shorter than rmax
  being padded with explicit zeros.  Overwrites any previous CRL arrays.
*/
PetscErrorCode MatSeqAIJCRL_create_aijcrl(Mat A)
{
  Mat_SeqAIJ     *a      = (Mat_SeqAIJ*)(A)->data;
  Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
  PetscInt       m       = A->rmap->n; /* Number of rows in the matrix. */
  PetscInt       *aj     = a->j; /* From the CSR representation; the column indices of the nonzeros. */
  PetscInt       i, j,rmax = a->rmax,*icols, *ilen = a->ilen; /* rmax = longest row; ilen[i] = nonzeros in row i */
  MatScalar      *aa = a->a; /* CSR nonzero values, row by row */
  PetscScalar    *acols;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  aijcrl->nz   = a->nz;
  aijcrl->m    = A->rmap->n;
  aijcrl->rmax = rmax;

  /* Discard any previous CRL arrays, then allocate rmax*m slots for both
     values and column indices (padding included). */
  ierr  = PetscFree2(aijcrl->acols,aijcrl->icols);CHKERRQ(ierr);
  ierr  = PetscMalloc2(rmax*m,PetscScalar,&aijcrl->acols,rmax*m,PetscInt,&aijcrl->icols);CHKERRQ(ierr);
  acols = aijcrl->acols;
  icols = aijcrl->icols;
  for (i=0; i<m; i++) {
    /* Scatter row i of the CSR data into column-major CRL slots;
       aa/aj advance sequentially through the CSR arrays. */
    for (j=0; j<ilen[i]; j++) {
      acols[j*m+i] = *aa++;
      icols[j*m+i] = *aj++;
    }
    for (; j<rmax; j++) { /* empty column entries */
      /* Pad with a zero value and repeat the previous column index so the
         multiply's x[icols[...]] load stays in bounds. */
      acols[j*m+i] = 0.0;
      icols[j*m+i] = (j) ? icols[(j-1)*m+i] : 0;  /* handle case where row is EMPTY */
    }
  }
  ierr = PetscInfo2(A,"Percentage of 0's introduced for vectorized multiply %g. Rmax= %D\n",1.0-((double)a->nz)/((double)(rmax*m)),rmax);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
73 
74 extern PetscErrorCode MatAssemblyEnd_SeqAIJ(Mat,MatAssemblyType);
75 
76 #undef __FUNCT__
77 #define __FUNCT__ "MatAssemblyEnd_SeqAIJCRL"
78 PetscErrorCode MatAssemblyEnd_SeqAIJCRL(Mat A, MatAssemblyType mode)
79 {
80   PetscErrorCode ierr;
81   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
82 
83   PetscFunctionBegin;
84   a->inode.use = PETSC_FALSE;
85 
86   ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr);
87   if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0);
88 
89   /* Now calculate the permutation and grouping information. */
90   ierr = MatSeqAIJCRL_create_aijcrl(A);CHKERRQ(ierr);
91   PetscFunctionReturn(0);
92 }
93 
94 #include <../src/mat/impls/aij/seq/crl/ftn-kernels/fmultcrl.h>
95 
96 #undef __FUNCT__
97 #define __FUNCT__ "MatMult_AIJCRL"
98 /*
99     Shared by both sequential and parallel versions of CRL matrix: MATMPIAIJCRL and MATSEQAIJCRL
100     - the scatter is used only in the parallel version
101 
102 */
103 PetscErrorCode MatMult_AIJCRL(Mat A,Vec xx,Vec yy)
104 {
105   Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
106   PetscInt       m       = aijcrl->m; /* Number of rows in the matrix. */
107   PetscInt       rmax    = aijcrl->rmax,*icols = aijcrl->icols;
108   PetscScalar    *acols  = aijcrl->acols;
109   PetscErrorCode ierr;
110   PetscScalar    *x,*y;
111 #if !defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
112   PetscInt i,j,ii;
113 #endif
114 
115 
116 #if defined(PETSC_HAVE_PRAGMA_DISJOINT)
117 #pragma disjoint(*x,*y,*aa)
118 #endif
119 
120   PetscFunctionBegin;
121   if (aijcrl->xscat) {
122     ierr = VecCopy(xx,aijcrl->xwork);CHKERRQ(ierr);
123     /* get remote values needed for local part of multiply */
124     ierr = VecScatterBegin(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
125     ierr = VecScatterEnd(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
126     xx   = aijcrl->xwork;
127   }
128 
129   ierr = VecGetArray(xx,&x);CHKERRQ(ierr);
130   ierr = VecGetArray(yy,&y);CHKERRQ(ierr);
131 
132 #if defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
133   fortranmultcrl_(&m,&rmax,x,y,icols,acols);
134 #else
135 
136   /* first column */
137   for (j=0; j<m; j++) y[j] = acols[j]*x[icols[j]];
138 
139   /* other columns */
140 #if defined(PETSC_HAVE_CRAY_VECTOR)
141 #pragma _CRI preferstream
142 #endif
143   for (i=1; i<rmax; i++) {
144     ii = i*m;
145 #if defined(PETSC_HAVE_CRAY_VECTOR)
146 #pragma _CRI prefervector
147 #endif
148     for (j=0; j<m; j++) y[j] = y[j] + acols[ii+j]*x[icols[ii+j]];
149   }
150 #if defined(PETSC_HAVE_CRAY_VECTOR)
151 #pragma _CRI ivdep
152 #endif
153 
154 #endif
155   ierr = PetscLogFlops(2.0*aijcrl->nz - m);CHKERRQ(ierr);
156   ierr = VecRestoreArray(xx,&x);CHKERRQ(ierr);
157   ierr = VecRestoreArray(yy,&y);CHKERRQ(ierr);
158   PetscFunctionReturn(0);
159 }
160 
161 
162 /* MatConvert_SeqAIJ_SeqAIJCRL converts a SeqAIJ matrix into a
163  * SeqAIJCRL matrix.  This routine is called by the MatCreate_SeqAIJCRL()
164  * routine, but can also be used to convert an assembled SeqAIJ matrix
165  * into a SeqAIJCRL one. */
166 EXTERN_C_BEGIN
167 #undef __FUNCT__
168 #define __FUNCT__ "MatConvert_SeqAIJ_SeqAIJCRL"
169 PetscErrorCode  MatConvert_SeqAIJ_SeqAIJCRL(Mat A,MatType type,MatReuse reuse,Mat *newmat)
170 {
171   PetscErrorCode ierr;
172   Mat            B = *newmat;
173   Mat_AIJCRL     *aijcrl;
174 
175   PetscFunctionBegin;
176   if (reuse == MAT_INITIAL_MATRIX) {
177     ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
178   }
179 
180   ierr     = PetscNewLog(B,Mat_AIJCRL,&aijcrl);CHKERRQ(ierr);
181   B->spptr = (void*) aijcrl;
182 
183   /* Set function pointers for methods that we inherit from AIJ but override. */
184   B->ops->duplicate   = MatDuplicate_AIJCRL;
185   B->ops->assemblyend = MatAssemblyEnd_SeqAIJCRL;
186   B->ops->destroy     = MatDestroy_SeqAIJCRL;
187   B->ops->mult        = MatMult_AIJCRL;
188 
189   /* If A has already been assembled, compute the permutation. */
190   if (A->assembled) {
191     ierr = MatSeqAIJCRL_create_aijcrl(B);CHKERRQ(ierr);
192   }
193   ierr    = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCRL);CHKERRQ(ierr);
194   *newmat = B;
195   PetscFunctionReturn(0);
196 }
197 EXTERN_C_END
198 
199 
200 #undef __FUNCT__
201 #define __FUNCT__ "MatCreateSeqAIJCRL"
202 /*@C
203    MatCreateSeqAIJCRL - Creates a sparse matrix of type SEQAIJCRL.
204    This type inherits from AIJ, but stores some additional
205    information that is used to allow better vectorization of
206    the matrix-vector product. At the cost of increased storage, the AIJ formatted
207    matrix can be copied to a format in which pieces of the matrix are
208    stored in ELLPACK format, allowing the vectorized matrix multiply
209    routine to use stride-1 memory accesses.  As with the AIJ type, it is
210    important to preallocate matrix storage in order to get good assembly
211    performance.
212 
213    Collective on MPI_Comm
214 
215    Input Parameters:
216 +  comm - MPI communicator, set to PETSC_COMM_SELF
217 .  m - number of rows
218 .  n - number of columns
219 .  nz - number of nonzeros per row (same for all rows)
220 -  nnz - array containing the number of nonzeros in the various rows
221          (possibly different for each row) or PETSC_NULL
222 
223    Output Parameter:
224 .  A - the matrix
225 
226    Notes:
227    If nnz is given then nz is ignored
228 
229    Level: intermediate
230 
231 .keywords: matrix, cray, sparse, parallel
232 
233 .seealso: MatCreate(), MatCreateMPIAIJPERM(), MatSetValues()
234 @*/
235 PetscErrorCode  MatCreateSeqAIJCRL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
236 {
237   PetscErrorCode ierr;
238 
239   PetscFunctionBegin;
240   ierr = MatCreate(comm,A);CHKERRQ(ierr);
241   ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
242   ierr = MatSetType(*A,MATSEQAIJCRL);CHKERRQ(ierr);
243   ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,nnz);CHKERRQ(ierr);
244   PetscFunctionReturn(0);
245 }
246 
247 
248 EXTERN_C_BEGIN
249 #undef __FUNCT__
250 #define __FUNCT__ "MatCreate_SeqAIJCRL"
251 PetscErrorCode  MatCreate_SeqAIJCRL(Mat A)
252 {
253   PetscErrorCode ierr;
254 
255   PetscFunctionBegin;
256   ierr = MatSetType(A,MATSEQAIJ);CHKERRQ(ierr);
257   ierr = MatConvert_SeqAIJ_SeqAIJCRL(A,MATSEQAIJCRL,MAT_REUSE_MATRIX,&A);CHKERRQ(ierr);
258   PetscFunctionReturn(0);
259 }
260 EXTERN_C_END
261 
262