xref: /petsc/src/mat/impls/aij/seq/crl/crl.c (revision bebe2cf65d55febe21a5af8db2bd2e168caaa2e7)
1 
2 /*
3   Defines a matrix-vector product for the MATSEQAIJCRL matrix class.
4   This class is derived from the MATSEQAIJ class and retains the
5   compressed row storage (aka Yale sparse matrix format) but augments
6   it with a column oriented storage that is more efficient for
7   matrix vector products on Vector machines.
8 
9   CRL stands for constant row length (that is, the same number of columns
10   is kept (padded with zeros) for each row of the sparse matrix).
11 */
12 #include <../src/mat/impls/aij/seq/crl/crl.h>
13 
14 #undef __FUNCT__
15 #define __FUNCT__ "MatDestroy_SeqAIJCRL"
16 PetscErrorCode MatDestroy_SeqAIJCRL(Mat A)
17 {
18   PetscErrorCode ierr;
19   Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
20 
21   /* Free everything in the Mat_AIJCRL data structure. */
22   if (aijcrl) {
23     ierr = PetscFree2(aijcrl->acols,aijcrl->icols);CHKERRQ(ierr);
24   }
25   ierr = PetscFree(A->spptr);CHKERRQ(ierr);
26   ierr = PetscObjectChangeTypeName((PetscObject)A, MATSEQAIJ);CHKERRQ(ierr);
27   ierr = MatDestroy_SeqAIJ(A);CHKERRQ(ierr);
28   PetscFunctionReturn(0);
29 }
30 
31 PetscErrorCode MatDuplicate_AIJCRL(Mat A, MatDuplicateOption op, Mat *M)
32 {
33   PetscFunctionBegin;
34   SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"Cannot duplicate AIJCRL matrices yet");
35   PetscFunctionReturn(0);
36 }
37 
38 #undef __FUNCT__
39 #define __FUNCT__ "MatSeqAIJCRL_create_aijcrl"
40 PetscErrorCode MatSeqAIJCRL_create_aijcrl(Mat A)
41 {
42   Mat_SeqAIJ     *a      = (Mat_SeqAIJ*)(A)->data;
43   Mat_AIJCRL     *aijcrl = (Mat_AIJCRL*) A->spptr;
44   PetscInt       m       = A->rmap->n; /* Number of rows in the matrix. */
45   PetscInt       *aj     = a->j; /* From the CSR representation; points to the beginning  of each row. */
46   PetscInt       i, j,rmax = a->rmax,*icols, *ilen = a->ilen;
47   MatScalar      *aa = a->a;
48   PetscScalar    *acols;
49   PetscErrorCode ierr;
50 
51   PetscFunctionBegin;
52   aijcrl->nz   = a->nz;
53   aijcrl->m    = A->rmap->n;
54   aijcrl->rmax = rmax;
55 
56   ierr  = PetscFree2(aijcrl->acols,aijcrl->icols);CHKERRQ(ierr);
57   ierr  = PetscMalloc2(rmax*m,&aijcrl->acols,rmax*m,&aijcrl->icols);CHKERRQ(ierr);
58   acols = aijcrl->acols;
59   icols = aijcrl->icols;
60   for (i=0; i<m; i++) {
61     for (j=0; j<ilen[i]; j++) {
62       acols[j*m+i] = *aa++;
63       icols[j*m+i] = *aj++;
64     }
65     for (; j<rmax; j++) { /* empty column entries */
66       acols[j*m+i] = 0.0;
67       icols[j*m+i] = (j) ? icols[(j-1)*m+i] : 0;  /* handle case where row is EMPTY */
68     }
69   }
70   ierr = PetscInfo2(A,"Percentage of 0's introduced for vectorized multiply %g. Rmax= %D\n",1.0-((double)a->nz)/((double)(rmax*m)),rmax);CHKERRQ(ierr);
71   PetscFunctionReturn(0);
72 }
73 
74 extern PetscErrorCode MatAssemblyEnd_SeqAIJ(Mat,MatAssemblyType);
75 
76 #undef __FUNCT__
77 #define __FUNCT__ "MatAssemblyEnd_SeqAIJCRL"
78 PetscErrorCode MatAssemblyEnd_SeqAIJCRL(Mat A, MatAssemblyType mode)
79 {
80   PetscErrorCode ierr;
81   Mat_SeqAIJ     *a = (Mat_SeqAIJ*)A->data;
82 
83   PetscFunctionBegin;
84   a->inode.use = PETSC_FALSE;
85 
86   ierr = MatAssemblyEnd_SeqAIJ(A,mode);CHKERRQ(ierr);
87   if (mode == MAT_FLUSH_ASSEMBLY) PetscFunctionReturn(0);
88 
89   /* Now calculate the permutation and grouping information. */
90   ierr = MatSeqAIJCRL_create_aijcrl(A);CHKERRQ(ierr);
91   PetscFunctionReturn(0);
92 }
93 
94 #include <../src/mat/impls/aij/seq/crl/ftn-kernels/fmultcrl.h>
95 
96 #undef __FUNCT__
97 #define __FUNCT__ "MatMult_AIJCRL"
98 /*
99     Shared by both sequential and parallel versions of CRL matrix: MATMPIAIJCRL and MATSEQAIJCRL
100     - the scatter is used only in the parallel version
101 
102 */
103 PetscErrorCode MatMult_AIJCRL(Mat A,Vec xx,Vec yy)
104 {
105   Mat_AIJCRL        *aijcrl = (Mat_AIJCRL*) A->spptr;
106   PetscInt          m       = aijcrl->m; /* Number of rows in the matrix. */
107   PetscInt          rmax    = aijcrl->rmax,*icols = aijcrl->icols;
108   PetscScalar       *acols  = aijcrl->acols;
109   PetscErrorCode    ierr;
110   PetscScalar       *y;
111   const PetscScalar *x;
112 #if !defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
113   PetscInt          i,j,ii;
114 #endif
115 
116 #if defined(PETSC_HAVE_PRAGMA_DISJOINT)
117 #pragma disjoint(*x,*y,*aa)
118 #endif
119 
120   PetscFunctionBegin;
121   if (aijcrl->xscat) {
122     ierr = VecCopy(xx,aijcrl->xwork);CHKERRQ(ierr);
123     /* get remote values needed for local part of multiply */
124     ierr = VecScatterBegin(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
125     ierr = VecScatterEnd(aijcrl->xscat,xx,aijcrl->fwork,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
126     xx   = aijcrl->xwork;
127   }
128 
129   ierr = VecGetArrayRead(xx,&x);CHKERRQ(ierr);
130   ierr = VecGetArray(yy,&y);CHKERRQ(ierr);
131 
132 #if defined(PETSC_USE_FORTRAN_KERNEL_MULTCRL)
133   fortranmultcrl_(&m,&rmax,x,y,icols,acols);
134 #else
135 
136   /* first column */
137   for (j=0; j<m; j++) y[j] = acols[j]*x[icols[j]];
138 
139   /* other columns */
140 #if defined(PETSC_HAVE_CRAY_VECTOR)
141 #pragma _CRI preferstream
142 #endif
143   for (i=1; i<rmax; i++) {
144     ii = i*m;
145 #if defined(PETSC_HAVE_CRAY_VECTOR)
146 #pragma _CRI prefervector
147 #endif
148     for (j=0; j<m; j++) y[j] = y[j] + acols[ii+j]*x[icols[ii+j]];
149   }
150 #if defined(PETSC_HAVE_CRAY_VECTOR)
151 #pragma _CRI ivdep
152 #endif
153 
154 #endif
155   ierr = PetscLogFlops(2.0*aijcrl->nz - m);CHKERRQ(ierr);
156   ierr = VecRestoreArrayRead(xx,&x);CHKERRQ(ierr);
157   ierr = VecRestoreArray(yy,&y);CHKERRQ(ierr);
158   PetscFunctionReturn(0);
159 }
160 
161 
162 /* MatConvert_SeqAIJ_SeqAIJCRL converts a SeqAIJ matrix into a
163  * SeqAIJCRL matrix.  This routine is called by the MatCreate_SeqAIJCRL()
164  * routine, but can also be used to convert an assembled SeqAIJ matrix
165  * into a SeqAIJCRL one. */
166 #undef __FUNCT__
167 #define __FUNCT__ "MatConvert_SeqAIJ_SeqAIJCRL"
168 PETSC_EXTERN PetscErrorCode MatConvert_SeqAIJ_SeqAIJCRL(Mat A,MatType type,MatReuse reuse,Mat *newmat)
169 {
170   PetscErrorCode ierr;
171   Mat            B = *newmat;
172   Mat_AIJCRL     *aijcrl;
173 
174   PetscFunctionBegin;
175   if (reuse == MAT_INITIAL_MATRIX) {
176     ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
177   }
178 
179   ierr     = PetscNewLog(B,&aijcrl);CHKERRQ(ierr);
180   B->spptr = (void*) aijcrl;
181 
182   /* Set function pointers for methods that we inherit from AIJ but override. */
183   B->ops->duplicate   = MatDuplicate_AIJCRL;
184   B->ops->assemblyend = MatAssemblyEnd_SeqAIJCRL;
185   B->ops->destroy     = MatDestroy_SeqAIJCRL;
186   B->ops->mult        = MatMult_AIJCRL;
187 
188   /* If A has already been assembled, compute the permutation. */
189   if (A->assembled) {
190     ierr = MatSeqAIJCRL_create_aijcrl(B);CHKERRQ(ierr);
191   }
192   ierr    = PetscObjectChangeTypeName((PetscObject)B,MATSEQAIJCRL);CHKERRQ(ierr);
193   *newmat = B;
194   PetscFunctionReturn(0);
195 }
196 
197 #undef __FUNCT__
198 #define __FUNCT__ "MatCreateSeqAIJCRL"
199 /*@C
200    MatCreateSeqAIJCRL - Creates a sparse matrix of type SEQAIJCRL.
201    This type inherits from AIJ, but stores some additional
202    information that is used to allow better vectorization of
203    the matrix-vector product. At the cost of increased storage, the AIJ formatted
204    matrix can be copied to a format in which pieces of the matrix are
205    stored in ELLPACK format, allowing the vectorized matrix multiply
206    routine to use stride-1 memory accesses.  As with the AIJ type, it is
207    important to preallocate matrix storage in order to get good assembly
208    performance.
209 
210    Collective on MPI_Comm
211 
212    Input Parameters:
213 +  comm - MPI communicator, set to PETSC_COMM_SELF
214 .  m - number of rows
215 .  n - number of columns
216 .  nz - number of nonzeros per row (same for all rows)
217 -  nnz - array containing the number of nonzeros in the various rows
218          (possibly different for each row) or NULL
219 
220    Output Parameter:
221 .  A - the matrix
222 
223    Notes:
224    If nnz is given then nz is ignored
225 
226    Level: intermediate
227 
228 .keywords: matrix, cray, sparse, parallel
229 
230 .seealso: MatCreate(), MatCreateMPIAIJPERM(), MatSetValues()
231 @*/
232 PetscErrorCode  MatCreateSeqAIJCRL(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt nz,const PetscInt nnz[],Mat *A)
233 {
234   PetscErrorCode ierr;
235 
236   PetscFunctionBegin;
237   ierr = MatCreate(comm,A);CHKERRQ(ierr);
238   ierr = MatSetSizes(*A,m,n,m,n);CHKERRQ(ierr);
239   ierr = MatSetType(*A,MATSEQAIJCRL);CHKERRQ(ierr);
240   ierr = MatSeqAIJSetPreallocation_SeqAIJ(*A,nz,nnz);CHKERRQ(ierr);
241   PetscFunctionReturn(0);
242 }
243 
244 #undef __FUNCT__
245 #define __FUNCT__ "MatCreate_SeqAIJCRL"
246 PETSC_EXTERN PetscErrorCode MatCreate_SeqAIJCRL(Mat A)
247 {
248   PetscErrorCode ierr;
249 
250   PetscFunctionBegin;
251   ierr = MatSetType(A,MATSEQAIJ);CHKERRQ(ierr);
252   ierr = MatConvert_SeqAIJ_SeqAIJCRL(A,MATSEQAIJCRL,MAT_REUSE_MATRIX,&A);CHKERRQ(ierr);
253   PetscFunctionReturn(0);
254 }
255 
256 
257