#ifdef PETSC_RCS_HEADER
static char vcid[] = "$Id: baijfact.c,v 1.64 1998/07/14 14:49:24 bsmith Exp bsmith $";
#endif
/*
    Factorization code for BAIJ format.
*/

#include "src/mat/impls/baij/seq/baij.h"
#include "src/vec/vecimpl.h"
#include "src/inline/ilu.h"


/*
    The symbolic factorization code is identical to that for AIJ format,
  except for very small changes since this is now a SeqBAIJ datastructure.
  NOT good code reuse.
*/
#undef __FUNC__
#define __FUNC__ "MatLUFactorSymbolic_SeqBAIJ"
/*
   MatLUFactorSymbolic_SeqBAIJ - computes the nonzero structure (in block units)
   of the LU factors of A under the row permutation isrow and column permutation
   iscol, and creates the factor matrix *B with that structure.

   Input:
     A     - the SeqBAIJ matrix (only its block sparsity pattern a->i/a->j is read)
     isrow - row permutation
     iscol - column permutation
     f     - expected fill ratio; used to size the initial column-index array
   Output:
     B     - new SeqBAIJ matrix holding the factor structure; its numerical
             values are uninitialized (filled later by MatLUFactorNumeric_*)

   The active row under construction is kept as a sorted singly linked list in
   fill[]: fill[col] is the next (larger) column in the row, with fill[n] the
   list head sentinel.  im[row] records how far into row `row` of the factor we
   must look when it is used to update later rows.
*/
int MatLUFactorSymbolic_SeqBAIJ(Mat A,IS isrow,IS iscol,double f,Mat *B)
{
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ *) A->data, *b;
  IS          isicol;
  int         *r,*ic, ierr, i, n = a->mbs, *ai = a->i, *aj = a->j;
  int         *ainew,*ajnew, jmax,*fill, *ajtmp, nz, bs = a->bs, bs2=a->bs2;
  int         *idnew, idx, row,m,fm, nnz, nzi,realloc = 0,nzbd,*im;

  PetscFunctionBegin;
  PetscValidHeaderSpecific(isrow,IS_COOKIE);
  PetscValidHeaderSpecific(iscol,IS_COOKIE);
  /* the factor is stored by rows of the permuted matrix, so we need the
     inverse of the column permutation to relabel column indices */
  ierr = ISInvertPermutation(iscol,&isicol); CHKERRQ(ierr);
  ISGetIndices(isrow,&r); ISGetIndices(isicol,&ic);

  /* get new row pointers */
  ainew = (int *) PetscMalloc( (n+1)*sizeof(int) ); CHKPTRQ(ainew);
  ainew[0] = 0;
  /* don't know how many column pointers are needed so estimate */
  jmax = (int) (f*ai[n] + 1);
  ajnew = (int *) PetscMalloc( (jmax)*sizeof(int) ); CHKPTRQ(ajnew);
  /* fill is a linked list of nonzeros in active row */
  fill = (int *) PetscMalloc( (2*n+1)*sizeof(int)); CHKPTRQ(fill);
  /* im shares the same allocation as fill: im[0..n-1] lives past the sentinel */
  im = fill + n + 1;
  /* idnew is location of diagonal in factor */
  idnew = (int *) PetscMalloc( (n+1)*sizeof(int)); CHKPTRQ(idnew);
  idnew[0] = 0;

  for ( i=0; i<n; i++ ) {
    /* first copy previous fill into linked list */
    nnz = nz = ai[r[i]+1] - ai[r[i]];
    if (!nz) SETERRQ(PETSC_ERR_MAT_LU_ZRPVT,1,"Empty row in matrix");
    ajtmp = aj + ai[r[i]];
    fill[n] = n;                    /* empty list: head sentinel points to itself */
    while (nz--) {
      fm = n;
      idx = ic[*ajtmp++];           /* permuted column index */
      /* walk the sorted list to the insertion point for idx */
      do {
        m  = fm;
        fm = fill[m];
      } while (fm < idx);
      fill[m]   = idx;
      fill[idx] = fm;
    }
    /* now eliminate: for each existing factor row `row` < i that appears in the
       active row, merge in that row's upper-triangular structure */
    row = fill[n];
    while ( row < i ) {
      ajtmp = ajnew + idnew[row] + 1;         /* entries right of row's diagonal */
      nzbd  = 1 + idnew[row] - ainew[row];    /* count up to and including diagonal */
      nz    = im[row] - nzbd;                 /* only need entries up to column i */
      fm    = row;
      while (nz-- > 0) {
        idx = *ajtmp++;
        nzbd++;
        if (idx == i) im[row] = nzbd;         /* remember bound for future rows */
        /* merge idx into the sorted active-row list, counting new fill */
        do {
          m  = fm;
          fm = fill[m];
        } while (fm < idx);
        if (fm != idx) {                      /* not already present: insert */
          fill[m]   = idx;
          fill[idx] = fm;
          fm        = idx;
          nnz++;
        }
      }
      row = fill[row];                        /* next pivot row in the active row */
    }
    /* copy new filled row into permanent storage */
    ainew[i+1] = ainew[i] + nnz;
    if (ainew[i+1] > jmax) {

      /* estimate how much additional space we will need */
      /* use the strategy suggested by David Hysom <hysom@perch-t.icase.edu> */
      /* just double the memory each time */
      int maxadd = jmax;
      /* maxadd = (int) ((f*(ai[n]+1)*(n-i+5))/n); */
      if (maxadd < nnz) maxadd = (n-i)*(nnz+1);
      jmax += maxadd;

      /* allocate a longer ajnew */
      ajtmp = (int *) PetscMalloc( jmax*sizeof(int) );CHKPTRQ(ajtmp);
      PetscMemcpy(ajtmp,ajnew,ainew[i]*sizeof(int));
      PetscFree(ajnew);
      ajnew = ajtmp;
      realloc++; /* count how many times we realloc */
    }
    /* drain the linked list into ajnew in ascending order; nzi counts the
       strictly-lower-triangular entries so idnew[i] lands on the diagonal */
    ajtmp = ajnew + ainew[i];
    fm    = fill[n];
    nzi   = 0;
    im[i] = nnz;
    while (nnz--) {
      if (fm < i) nzi++;
      *ajtmp++ = fm;
      fm = fill[fm];
    }
    idnew[i] = ainew[i] + nzi;
  }

  if (ai[n] != 0) {
    double af = ((double)ainew[n])/((double)ai[n]);
    PLogInfo(A,"MatLUFactorSymbolic_SeqBAIJ:Reallocs %d Fill ratio:given %g needed %g\n",
             realloc,f,af);
    PLogInfo(A,"MatLUFactorSymbolic_SeqBAIJ:Run with -pc_lu_fill %g or use \n",af);
    PLogInfo(A,"MatLUFactorSymbolic_SeqBAIJ:PCLUSetFill(pc,%g);\n",af);
    PLogInfo(A,"MatLUFactorSymbolic_SeqBAIJ:for best performance.\n");
  } else {
    PLogInfo(A,"MatLUFactorSymbolic_SeqBAIJ:Empty matrix.\n");
  }

  ierr = ISRestoreIndices(isrow,&r); CHKERRQ(ierr);
  ierr = ISRestoreIndices(isicol,&ic); CHKERRQ(ierr);

  PetscFree(fill);

  /* put together the new matrix */
  ierr = MatCreateSeqBAIJ(A->comm,bs,bs*n,bs*n,0,PETSC_NULL,B); CHKERRQ(ierr);
  PLogObjectParent(*B,isicol);
  b = (Mat_SeqBAIJ *) (*B)->data;
  PetscFree(b->imax);
  b->singlemalloc = 0;
  /* the next line frees the default space generated by the Create() */
  PetscFree(b->a); PetscFree(b->ilen);
  b->a          = (Scalar *) PetscMalloc((ainew[n]+1)*sizeof(Scalar)*bs2);CHKPTRQ(b->a);
  b->j          = ajnew;
  b->i          = ainew;
  b->diag       = idnew;
  b->ilen       = 0;
  b->imax       = 0;
  /* the factor keeps references to the permutations; isicol is owned here */
  b->row        = isrow;
  b->col        = iscol;
  b->icol       = isicol;
  b->solve_work = (Scalar *) PetscMalloc( (bs*n+bs)*sizeof(Scalar));CHKPTRQ(b->solve_work);
  /* In b structure:  Free imax, ilen, old a, old j.
     Allocate idnew, solve_work, new a, new j */
  PLogObjectMemory(*B,(ainew[n]-n)*(sizeof(int)+sizeof(Scalar)));
  b->maxnz = b->nz = ainew[n];

  (*B)->factor                 = FACTOR_LU;
  (*B)->info.factor_mallocs    = realloc;
  (*B)->info.fill_ratio_given  = f;
  if (ai[n] != 0) {
    (*B)->info.fill_ratio_needed = ((double)ainew[n])/((double)ai[n]);
  } else {
    (*B)->info.fill_ratio_needed = 0.0;
  }

  PetscFunctionReturn(0);
}
/* ----------------------------------------------------------- */
#undef __FUNC__
#define __FUNC__ "MatLUFactorNumeric_SeqBAIJ_N"
/*
   MatLUFactorNumeric_SeqBAIJ_N - numeric LU factorization for SeqBAIJ
   matrices with arbitrary block size bs.

   Fills the factor C = *B (whose structure b->i/b->j/b->diag was computed
   by MatLUFactorSymbolic_SeqBAIJ) with numerical values from A.  Row by
   row: scatter the permuted row of A into the dense work row rtmp (one
   bs*bs block per block column), eliminate against all previously
   factored rows < i, gather the result into b->a, then invert the
   diagonal block in place (so the triangular solves can multiply rather
   than divide).  Dense block operations are delegated to the Kernel_*
   routines; NOTE(review): no zero-pivot check here — Kernel_A_gets_inverse_A's
   return value is not examined, unlike the bs-specific variants below.
*/
int MatLUFactorNumeric_SeqBAIJ_N(Mat A,Mat *B)
{
  Mat         C = *B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ *) A->data,*b = (Mat_SeqBAIJ *)C->data;
  IS          isrow = b->row, isicol = b->icol;
  int         *r,*ic, ierr, i, j, n = a->mbs, *bi = b->i, *bj = b->j;
  int         *ajtmpold, *ajtmp, nz, row, bslog,*ai=a->i,*aj=a->j,k,flg;
  int         *diag_offset=b->diag,diag,bs=a->bs,bs2 = a->bs2,*v_pivots;
  register int *pj;
  register Scalar *pv,*v,*rtmp,*multiplier,*v_work,*pc,*w;
  Scalar      *ba = b->a,*aa = a->a;

  PetscFunctionBegin;
  ierr = ISGetIndices(isrow,&r); CHKERRQ(ierr);
  ierr = ISGetIndices(isicol,&ic); CHKERRQ(ierr);
  /* rtmp: one dense bs2 block per block column of the active row */
  rtmp = (Scalar *) PetscMalloc(bs2*(n+1)*sizeof(Scalar));CHKPTRQ(rtmp);
  PetscMemzero(rtmp,bs2*(n+1)*sizeof(Scalar));
  /* generate work space needed by dense LU factorization */
  v_work     = (Scalar *) PetscMalloc(bs*sizeof(int) + (bs+bs2)*sizeof(Scalar));
  CHKPTRQ(v_work);
  multiplier = v_work + bs;
  v_pivots   = (int *) (multiplier + bs2);

  /* flops in while loop */
  bslog = 2*bs*bs2;

  for ( i=0; i<n; i++ ) {
    /* zero the work blocks for every column in row i of the factor */
    nz    = bi[i+1] - bi[i];
    ajtmp = bj + bi[i];
    for ( j=0; j<nz; j++ ) {
      PetscMemzero(rtmp+bs2*ajtmp[j],bs2*sizeof(Scalar));
    }
    /* load in initial (unfactored row) */
    nz       = ai[r[i]+1] - ai[r[i]];
    ajtmpold = aj + ai[r[i]];
    v        = aa + bs2*ai[r[i]];
    for ( j=0; j<nz; j++ ) {
      PetscMemcpy(rtmp+bs2*ic[ajtmpold[j]],v+bs2*j,bs2*sizeof(Scalar));
    }
    /* eliminate against each factored row < i that appears in this row */
    row = *ajtmp++;
    while (row < i) {
      pc = rtmp + bs2*row;
      /* if (*pc) { */
      /* skip the update entirely when the whole multiplier block is zero */
      for ( flg=0,k=0; k<bs2; k++ ) { if (pc[k]!=0.0) { flg =1; break; }}
      if (flg) {
        pv = ba + bs2*diag_offset[row];
        pj = bj + diag_offset[row] + 1;
        /* pc <- pc * inv(diag block of row)  (diagonal is stored inverted) */
        Kernel_A_gets_A_times_B(bs,pc,pv,multiplier);
        nz = bi[row+1] - diag_offset[row] - 1;
        pv += bs2;
        /* rtmp[col] -= pc * U(row,col) for the entries right of the diagonal */
        for (j=0; j<nz; j++) {
          Kernel_A_gets_A_minus_B_times_C(bs,rtmp+bs2*pj[j],pc,pv+bs2*j);
        }
        PLogFlops(bslog*(nz+1)-bs);
      }
      row = *ajtmp++;
    }
    /* finished row so stick it into b->a */
    pv = ba + bs2*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];
    for ( j=0; j<nz; j++ ) {
      PetscMemcpy(pv+bs2*j,rtmp+bs2*pj[j],bs2*sizeof(Scalar));
    }
    diag = diag_offset[i] - bi[i];
    /* invert diagonal block */
    w = pv + bs2*diag;
    Kernel_A_gets_inverse_A(bs,w,v_pivots,v_work);
  }

  PetscFree(rtmp); PetscFree(v_work);
  ierr = ISRestoreIndices(isicol,&ic); CHKERRQ(ierr);
  ierr = ISRestoreIndices(isrow,&r); CHKERRQ(ierr);
  C->factor    = FACTOR_LU;
  C->assembled = PETSC_TRUE;
  PLogFlops(1.3333*bs*bs2*b->mbs); /* from inverting diagonal blocks */
  PetscFunctionReturn(0);
}
/* ------------------------------------------------------------*/
/*
     Version for when blocks are 5 by 5
*/
#undef __FUNC__
#define __FUNC__ "MatLUFactorNumeric_SeqBAIJ_5"
/*
   MatLUFactorNumeric_SeqBAIJ_5 - numeric LU factorization for SeqBAIJ with
   5x5 blocks.  Same row-by-row algorithm as MatLUFactorNumeric_SeqBAIJ_N,
   but with all dense 5x5 block operations fully unrolled in registers:
   p1..p25 hold the multiplier block, x1..x25 the pivot-row block, m1..m25
   the computed multiplier (also written back to pc[]).  Blocks appear to be
   stored column-major within the 25-Scalar slot (the multiply uses
   pc[0] = p1*x1 + p6*x2 + ... stride-5 access) — consistent with the other
   unrolled variants in this file.  Diagonal blocks are inverted in place by
   Kernel_A_gets_inverse_A_5.
*/
int MatLUFactorNumeric_SeqBAIJ_5(Mat A,Mat *B)
{
  Mat         C = *B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ *) A->data,*b = (Mat_SeqBAIJ *)C->data;
  IS          isrow = b->row, isicol = b->icol;
  int         *r,*ic, ierr, i, j, n = a->mbs, *bi = b->i, *bj = b->j;
  int         *ajtmpold, *ajtmp, nz, row;
  int         *diag_offset = b->diag,idx,*ai=a->i,*aj=a->j;
  register int *pj;
  register Scalar *pv,*v,*rtmp,*pc,*w,*x;
  Scalar      p1,p2,p3,p4,m1,m2,m3,m4,m5,m6,m7,m8,m9,x1,x2,x3,x4;
  Scalar      p5,p6,p7,p8,p9,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16;
  Scalar      x17,x18,x19,x20,x21,x22,x23,x24,x25,p10,p11,p12,p13,p14;
  Scalar      p15,p16,p17,p18,p19,p20,p21,p22,p23,p24,p25,m10,m11,m12;
  Scalar      m13,m14,m15,m16,m17,m18,m19,m20,m21,m22,m23,m24,m25;
  Scalar      *ba = b->a,*aa = a->a;

  PetscFunctionBegin;
  ierr = ISGetIndices(isrow,&r); CHKERRQ(ierr);
  ierr = ISGetIndices(isicol,&ic); CHKERRQ(ierr);
  rtmp = (Scalar *) PetscMalloc(25*(n+1)*sizeof(Scalar));CHKPTRQ(rtmp);

  for ( i=0; i<n; i++ ) {
    /* zero the 5x5 work blocks for every column in row i of the factor */
    nz    = bi[i+1] - bi[i];
    ajtmp = bj + bi[i];
    for ( j=0; j<nz; j++ ) {
      x = rtmp+25*ajtmp[j];
      x[0]  = x[1]  = x[2]  = x[3]  = x[4]  = x[5]  = x[6]  = x[7]  = x[8]  = x[9]  = 0.0;
      x[10] = x[11] = x[12] = x[13] = x[14] = x[15] = x[16] = x[17] = 0.0;
      x[18] = x[19] = x[20] = x[21] = x[22] = x[23] = x[24] = 0.0;
    }
    /* load in initial (unfactored row) */
    idx      = r[i];
    nz       = ai[idx+1] - ai[idx];
    ajtmpold = aj + ai[idx];
    v        = aa + 25*ai[idx];
    for ( j=0; j<nz; j++ ) {
      x = rtmp+25*ic[ajtmpold[j]];
      x[0]  = v[0];  x[1]  = v[1];  x[2]  = v[2];  x[3]  = v[3];
      x[4]  = v[4];  x[5]  = v[5];  x[6]  = v[6];  x[7]  = v[7];  x[8] = v[8];
      x[9]  = v[9];  x[10] = v[10]; x[11] = v[11]; x[12] = v[12]; x[13] = v[13];
      x[14] = v[14]; x[15] = v[15]; x[16] = v[16]; x[17] = v[17];
      x[18] = v[18]; x[19] = v[19]; x[20] = v[20]; x[21] = v[21];
      x[22] = v[22]; x[23] = v[23]; x[24] = v[24];
      v += 25;
    }
    /* eliminate against each factored row < i appearing in this row */
    row = *ajtmp++;
    while (row < i) {
      pc = rtmp + 25*row;
      p1  = pc[0];  p2  = pc[1];  p3  = pc[2];  p4  = pc[3];
      p5  = pc[4];  p6  = pc[5];  p7  = pc[6];  p8  = pc[7];  p9  = pc[8];
      p10 = pc[9];  p11 = pc[10]; p12 = pc[11]; p13 = pc[12]; p14 = pc[13];
      p15 = pc[14]; p16 = pc[15]; p17 = pc[16]; p18 = pc[17]; p19 = pc[18];
      p20 = pc[19]; p21 = pc[20]; p22 = pc[21]; p23 = pc[22]; p24 = pc[23];
      p25 = pc[24];
      /* skip the update when the whole multiplier block is zero */
      if (p1 != 0.0 || p2 != 0.0 || p3 != 0.0 || p4 != 0.0 || p5 != 0.0 ||
          p6 != 0.0 || p7 != 0.0 || p8 != 0.0 || p9 != 0.0 || p10 != 0.0 ||
          p11 != 0.0 || p12 != 0.0 || p13 != 0.0 || p14 != 0.0 || p15 != 0.0
          || p16 != 0.0 || p17 != 0.0 || p18 != 0.0 || p19 != 0.0 ||
          p20 != 0.0 || p21 != 0.0 || p22 != 0.0 || p23 != 0.0 ||
          p24 != 0.0 || p25 != 0.0) {
        pv = ba + 25*diag_offset[row];
        pj = bj + diag_offset[row] + 1;
        x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
        x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9  = pv[8];
        x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12]; x14 = pv[13];
        x15 = pv[14]; x16 = pv[15]; x17 = pv[16]; x18 = pv[17];
        x19 = pv[18]; x20 = pv[19]; x21 = pv[20]; x22 = pv[21];
        x23 = pv[22]; x24 = pv[23]; x25 = pv[24];
        /* multiplier block:  pc <- pc * inv(diag block of `row`)
           (the diagonal block in ba is already stored inverted) */
        pc[0] = m1 = p1*x1 + p6*x2   + p11*x3  + p16*x4  + p21*x5;
        pc[1] = m2 = p2*x1 + p7*x2   + p12*x3  + p17*x4  + p22*x5;
        pc[2] = m3 = p3*x1 + p8*x2   + p13*x3  + p18*x4  + p23*x5;
        pc[3] = m4 = p4*x1 + p9*x2   + p14*x3  + p19*x4  + p24*x5;
        pc[4] = m5 = p5*x1 + p10*x2  + p15*x3  + p20*x4  + p25*x5;

        pc[5] = m6  = p1*x6 + p6*x7  + p11*x8  + p16*x9  + p21*x10;
        pc[6] = m7  = p2*x6 + p7*x7  + p12*x8  + p17*x9  + p22*x10;
        pc[7] = m8  = p3*x6 + p8*x7  + p13*x8  + p18*x9  + p23*x10;
        pc[8] = m9  = p4*x6 + p9*x7  + p14*x8  + p19*x9  + p24*x10;
        pc[9] = m10 = p5*x6 + p10*x7 + p15*x8  + p20*x9  + p25*x10;

        pc[10] = m11 = p1*x11 + p6*x12  + p11*x13 + p16*x14 + p21*x15;
        pc[11] = m12 = p2*x11 + p7*x12  + p12*x13 + p17*x14 + p22*x15;
        pc[12] = m13 = p3*x11 + p8*x12  + p13*x13 + p18*x14 + p23*x15;
        pc[13] = m14 = p4*x11 + p9*x12  + p14*x13 + p19*x14 + p24*x15;
        pc[14] = m15 = p5*x11 + p10*x12 + p15*x13 + p20*x14 + p25*x15;

        pc[15] = m16 = p1*x16 + p6*x17  + p11*x18 + p16*x19 + p21*x20;
        pc[16] = m17 = p2*x16 + p7*x17  + p12*x18 + p17*x19 + p22*x20;
        pc[17] = m18 = p3*x16 + p8*x17  + p13*x18 + p18*x19 + p23*x20;
        pc[18] = m19 = p4*x16 + p9*x17  + p14*x18 + p19*x19 + p24*x20;
        pc[19] = m20 = p5*x16 + p10*x17 + p15*x18 + p20*x19 + p25*x20;

        pc[20] = m21 = p1*x21 + p6*x22  + p11*x23 + p16*x24 + p21*x25;
        pc[21] = m22 = p2*x21 + p7*x22  + p12*x23 + p17*x24 + p22*x25;
        pc[22] = m23 = p3*x21 + p8*x22  + p13*x23 + p18*x24 + p23*x25;
        pc[23] = m24 = p4*x21 + p9*x22  + p14*x23 + p19*x24 + p24*x25;
        pc[24] = m25 = p5*x21 + p10*x22 + p15*x23 + p20*x24 + p25*x25;

        /* rank-5 update of the rest of the active row:
           rtmp[col] -= m * U(row,col) for entries right of the diagonal */
        nz = bi[row+1] - diag_offset[row] - 1;
        pv += 25;
        for (j=0; j<nz; j++) {
          x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
          x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9  = pv[8];
          x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12];
          x14 = pv[13]; x15 = pv[14]; x16 = pv[15]; x17 = pv[16];
          x18 = pv[17]; x19 = pv[18]; x20 = pv[19]; x21 = pv[20];
          x22 = pv[21]; x23 = pv[22]; x24 = pv[23]; x25 = pv[24];
          x = rtmp + 25*pj[j];
          x[0] -= m1*x1 + m6*x2   + m11*x3  + m16*x4  + m21*x5;
          x[1] -= m2*x1 + m7*x2   + m12*x3  + m17*x4  + m22*x5;
          x[2] -= m3*x1 + m8*x2   + m13*x3  + m18*x4  + m23*x5;
          x[3] -= m4*x1 + m9*x2   + m14*x3  + m19*x4  + m24*x5;
          x[4] -= m5*x1 + m10*x2  + m15*x3  + m20*x4  + m25*x5;

          x[5] -= m1*x6 + m6*x7   + m11*x8  + m16*x9  + m21*x10;
          x[6] -= m2*x6 + m7*x7   + m12*x8  + m17*x9  + m22*x10;
          x[7] -= m3*x6 + m8*x7   + m13*x8  + m18*x9  + m23*x10;
          x[8] -= m4*x6 + m9*x7   + m14*x8  + m19*x9  + m24*x10;
          x[9] -= m5*x6 + m10*x7  + m15*x8  + m20*x9  + m25*x10;

          x[10] -= m1*x11 + m6*x12  + m11*x13 + m16*x14 + m21*x15;
          x[11] -= m2*x11 + m7*x12  + m12*x13 + m17*x14 + m22*x15;
          x[12] -= m3*x11 + m8*x12  + m13*x13 + m18*x14 + m23*x15;
          x[13] -= m4*x11 + m9*x12  + m14*x13 + m19*x14 + m24*x15;
          x[14] -= m5*x11 + m10*x12 + m15*x13 + m20*x14 + m25*x15;

          x[15] -= m1*x16 + m6*x17  + m11*x18 + m16*x19 + m21*x20;
          x[16] -= m2*x16 + m7*x17  + m12*x18 + m17*x19 + m22*x20;
          x[17] -= m3*x16 + m8*x17  + m13*x18 + m18*x19 + m23*x20;
          x[18] -= m4*x16 + m9*x17  + m14*x18 + m19*x19 + m24*x20;
          x[19] -= m5*x16 + m10*x17 + m15*x18 + m20*x19 + m25*x20;

          x[20] -= m1*x21 + m6*x22  + m11*x23 + m16*x24 + m21*x25;
          x[21] -= m2*x21 + m7*x22  + m12*x23 + m17*x24 + m22*x25;
          x[22] -= m3*x21 + m8*x22  + m13*x23 + m18*x24 + m23*x25;
          x[23] -= m4*x21 + m9*x22  + m14*x23 + m19*x24 + m24*x25;
          x[24] -= m5*x21 + m10*x22 + m15*x23 + m20*x24 + m25*x25;

          pv += 25;
        }
        PLogFlops(250*nz+225);
      }
      row = *ajtmp++;
    }
    /* finished row so stick it into b->a */
    pv = ba + 25*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];
    for ( j=0; j<nz; j++ ) {
      x = rtmp+25*pj[j];
      pv[0]  = x[0];  pv[1]  = x[1];  pv[2]  = x[2];  pv[3]  = x[3];
      pv[4]  = x[4];  pv[5]  = x[5];  pv[6]  = x[6];  pv[7]  = x[7];  pv[8] = x[8];
      pv[9]  = x[9];  pv[10] = x[10]; pv[11] = x[11]; pv[12] = x[12];
      pv[13] = x[13]; pv[14] = x[14]; pv[15] = x[15]; pv[16] = x[16];
      pv[17] = x[17]; pv[18] = x[18]; pv[19] = x[19]; pv[20] = x[20];
      pv[21] = x[21]; pv[22] = x[22]; pv[23] = x[23]; pv[24] = x[24];
      pv += 25;
    }
    /* invert diagonal block */
    w = ba + 25*diag_offset[i];
    ierr = Kernel_A_gets_inverse_A_5(w); CHKERRQ(ierr);
  }

  PetscFree(rtmp);
  ierr = ISRestoreIndices(isicol,&ic); CHKERRQ(ierr);
  ierr = ISRestoreIndices(isrow,&r); CHKERRQ(ierr);
  C->factor    = FACTOR_LU;
  C->assembled = PETSC_TRUE;
  PLogFlops(1.3333*125*b->mbs); /* from inverting diagonal blocks */
  PetscFunctionReturn(0);
}
/* ------------------------------------------------------------*/
/*
     Version for when blocks are 4 by 4
*/
#undef __FUNC__
#define __FUNC__ "MatLUFactorNumeric_SeqBAIJ_4"
/*
   MatLUFactorNumeric_SeqBAIJ_4 - numeric LU factorization for SeqBAIJ with
   4x4 blocks.  Same row-by-row algorithm as MatLUFactorNumeric_SeqBAIJ_N,
   with all dense 4x4 block operations fully unrolled: p1..p16 hold the
   multiplier block, x1..x16 the pivot-row block, m1..m16 the computed
   multiplier.  Diagonal blocks are inverted in place by
   Kernel_A_gets_inverse_A_4.
*/
int MatLUFactorNumeric_SeqBAIJ_4(Mat A,Mat *B)
{
  Mat         C = *B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ *) A->data,*b = (Mat_SeqBAIJ *)C->data;
  IS          isrow = b->row, isicol = b->icol;
  int         *r,*ic, ierr, i, j, n = a->mbs, *bi = b->i, *bj = b->j;
  int         *ajtmpold, *ajtmp, nz, row;
  int         *diag_offset = b->diag,idx,*ai=a->i,*aj=a->j;
  register int *pj;
  register Scalar *pv,*v,*rtmp,*pc,*w,*x;
  Scalar      p1,p2,p3,p4,m1,m2,m3,m4,m5,m6,m7,m8,m9,x1,x2,x3,x4;
  Scalar      p5,p6,p7,p8,p9,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16;
  Scalar      p10,p11,p12,p13,p14,p15,p16,m10,m11,m12;
  Scalar      m13,m14,m15,m16;
  Scalar      *ba = b->a,*aa = a->a;

  PetscFunctionBegin;
  ierr = ISGetIndices(isrow,&r); CHKERRQ(ierr);
  ierr = ISGetIndices(isicol,&ic); CHKERRQ(ierr);
  rtmp = (Scalar *) PetscMalloc(16*(n+1)*sizeof(Scalar));CHKPTRQ(rtmp);

  for ( i=0; i<n; i++ ) {
    /* zero the 4x4 work blocks for every column in row i of the factor */
    nz    = bi[i+1] - bi[i];
    ajtmp = bj + bi[i];
    for ( j=0; j<nz; j++ ) {
      x = rtmp+16*ajtmp[j];
      x[0]  = x[1]  = x[2]  = x[3]  = x[4]  = x[5]  = x[6]  = x[7]  = x[8]  = x[9]  = 0.0;
      x[10] = x[11] = x[12] = x[13] = x[14] = x[15] = 0.0;
    }
    /* load in initial (unfactored row) */
    idx      = r[i];
    nz       = ai[idx+1] - ai[idx];
    ajtmpold = aj + ai[idx];
    v        = aa + 16*ai[idx];
    for ( j=0; j<nz; j++ ) {
      x = rtmp+16*ic[ajtmpold[j]];
      x[0]  = v[0];  x[1]  = v[1];  x[2]  = v[2];  x[3]  = v[3];
      x[4]  = v[4];  x[5]  = v[5];  x[6]  = v[6];  x[7]  = v[7];  x[8] = v[8];
      x[9]  = v[9];  x[10] = v[10]; x[11] = v[11]; x[12] = v[12]; x[13] = v[13];
      x[14] = v[14]; x[15] = v[15];
      v += 16;
    }
    /* eliminate against each factored row < i appearing in this row */
    row = *ajtmp++;
    while (row < i) {
      pc  = rtmp + 16*row;
      p1  = pc[0];  p2  = pc[1];  p3  = pc[2];  p4  = pc[3];
      p5  = pc[4];  p6  = pc[5];  p7  = pc[6];  p8  = pc[7];  p9  = pc[8];
      p10 = pc[9];  p11 = pc[10]; p12 = pc[11]; p13 = pc[12]; p14 = pc[13];
      p15 = pc[14]; p16 = pc[15];
      /* skip the update when the whole multiplier block is zero */
      if (p1 != 0.0 || p2 != 0.0 || p3 != 0.0 || p4 != 0.0 || p5 != 0.0 ||
          p6 != 0.0 || p7 != 0.0 || p8 != 0.0 || p9 != 0.0 || p10 != 0.0 ||
          p11 != 0.0 || p12 != 0.0 || p13 != 0.0 || p14 != 0.0 || p15 != 0.0
          || p16 != 0.0) {
        pv  = ba + 16*diag_offset[row];
        pj  = bj + diag_offset[row] + 1;
        x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
        x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9  = pv[8];
        x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12]; x14 = pv[13];
        x15 = pv[14]; x16 = pv[15];
        /* multiplier block:  pc <- pc * inv(diag block of `row`)
           (the diagonal block in ba is already stored inverted) */
        pc[0] = m1 = p1*x1 + p5*x2 + p9*x3  + p13*x4;
        pc[1] = m2 = p2*x1 + p6*x2 + p10*x3 + p14*x4;
        pc[2] = m3 = p3*x1 + p7*x2 + p11*x3 + p15*x4;
        pc[3] = m4 = p4*x1 + p8*x2 + p12*x3 + p16*x4;

        pc[4] = m5 = p1*x5 + p5*x6 + p9*x7  + p13*x8;
        pc[5] = m6 = p2*x5 + p6*x6 + p10*x7 + p14*x8;
        pc[6] = m7 = p3*x5 + p7*x6 + p11*x7 + p15*x8;
        pc[7] = m8 = p4*x5 + p8*x6 + p12*x7 + p16*x8;

        pc[8]  = m9  = p1*x9 + p5*x10 + p9*x11  + p13*x12;
        pc[9]  = m10 = p2*x9 + p6*x10 + p10*x11 + p14*x12;
        pc[10] = m11 = p3*x9 + p7*x10 + p11*x11 + p15*x12;
        pc[11] = m12 = p4*x9 + p8*x10 + p12*x11 + p16*x12;

        pc[12] = m13 = p1*x13 + p5*x14 + p9*x15  + p13*x16;
        pc[13] = m14 = p2*x13 + p6*x14 + p10*x15 + p14*x16;
        pc[14] = m15 = p3*x13 + p7*x14 + p11*x15 + p15*x16;
        pc[15] = m16 = p4*x13 + p8*x14 + p12*x15 + p16*x16;

        /* rank-4 update of the rest of the active row */
        nz = bi[row+1] - diag_offset[row] - 1;
        pv += 16;
        for (j=0; j<nz; j++) {
          x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
          x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9 = pv[8];
          x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12];
          x14 = pv[13]; x15 = pv[14]; x16 = pv[15];
          x = rtmp + 16*pj[j];
          x[0] -= m1*x1 + m5*x2 + m9*x3  + m13*x4;
          x[1] -= m2*x1 + m6*x2 + m10*x3 + m14*x4;
          x[2] -= m3*x1 + m7*x2 + m11*x3 + m15*x4;
          x[3] -= m4*x1 + m8*x2 + m12*x3 + m16*x4;

          x[4] -= m1*x5 + m5*x6 + m9*x7  + m13*x8;
          x[5] -= m2*x5 + m6*x6 + m10*x7 + m14*x8;
          x[6] -= m3*x5 + m7*x6 + m11*x7 + m15*x8;
          x[7] -= m4*x5 + m8*x6 + m12*x7 + m16*x8;

          x[8]  -= m1*x9 + m5*x10 + m9*x11  + m13*x12;
          x[9]  -= m2*x9 + m6*x10 + m10*x11 + m14*x12;
          x[10] -= m3*x9 + m7*x10 + m11*x11 + m15*x12;
          x[11] -= m4*x9 + m8*x10 + m12*x11 + m16*x12;

          x[12] -= m1*x13 + m5*x14 + m9*x15  + m13*x16;
          x[13] -= m2*x13 + m6*x14 + m10*x15 + m14*x16;
          x[14] -= m3*x13 + m7*x14 + m11*x15 + m15*x16;
          x[15] -= m4*x13 + m8*x14 + m12*x15 + m16*x16;

          pv += 16;
        }
        PLogFlops(128*nz+112);
      }
      row = *ajtmp++;
    }
    /* finished row so stick it into b->a */
    pv = ba + 16*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];
    for ( j=0; j<nz; j++ ) {
      x = rtmp+16*pj[j];
      pv[0]  = x[0];  pv[1]  = x[1];  pv[2]  = x[2];  pv[3]  = x[3];
      pv[4]  = x[4];  pv[5]  = x[5];  pv[6]  = x[6];  pv[7]  = x[7];  pv[8] = x[8];
      pv[9]  = x[9];  pv[10] = x[10]; pv[11] = x[11]; pv[12] = x[12];
      pv[13] = x[13]; pv[14] = x[14]; pv[15] = x[15];
      pv += 16;
    }
    /* invert diagonal block */
    w = ba + 16*diag_offset[i];
    ierr = Kernel_A_gets_inverse_A_4(w); CHKERRQ(ierr);
  }

  PetscFree(rtmp);
  ierr = ISRestoreIndices(isicol,&ic); CHKERRQ(ierr);
  ierr = ISRestoreIndices(isrow,&r); CHKERRQ(ierr);
  C->factor    = FACTOR_LU;
  C->assembled = PETSC_TRUE;
  PLogFlops(1.3333*64*b->mbs); /* from inverting diagonal blocks */
  PetscFunctionReturn(0);
}
/*
     Version for when blocks are 4 by 4 Using natural ordering
*/
#undef __FUNC__
#define __FUNC__ "MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering"
/*
   MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering - numeric LU factorization
   for SeqBAIJ with 4x4 blocks when no row/column permutation is applied.
   Identical to MatLUFactorNumeric_SeqBAIJ_4 except that the index sets
   b->row/b->icol are never consulted: row i of A is loaded directly via
   ai[i]/aj[] with no index translation, saving the ISGetIndices overhead
   and the indirect addressing in the scatter.
*/
int MatLUFactorNumeric_SeqBAIJ_4_NaturalOrdering(Mat A,Mat *B)
{
  Mat         C = *B;
  Mat_SeqBAIJ *a = (Mat_SeqBAIJ *) A->data,*b = (Mat_SeqBAIJ *)C->data;
  int         ierr, i, j, n = a->mbs, *bi = b->i, *bj = b->j;
  int         *ajtmpold, *ajtmp, nz, row;
  int         *diag_offset = b->diag,*ai=a->i,*aj=a->j;
  register int *pj;
  register Scalar *pv,*v,*rtmp,*pc,*w,*x;
  Scalar      p1,p2,p3,p4,m1,m2,m3,m4,m5,m6,m7,m8,m9,x1,x2,x3,x4;
  Scalar      p5,p6,p7,p8,p9,x5,x6,x7,x8,x9,x10,x11,x12,x13,x14,x15,x16;
  Scalar      p10,p11,p12,p13,p14,p15,p16,m10,m11,m12;
  Scalar      m13,m14,m15,m16;
  Scalar      *ba = b->a,*aa = a->a;

  PetscFunctionBegin;
  rtmp = (Scalar *) PetscMalloc(16*(n+1)*sizeof(Scalar));CHKPTRQ(rtmp);

  for ( i=0; i<n; i++ ) {
    /* zero the 4x4 work blocks for every column in row i of the factor */
    nz    = bi[i+1] - bi[i];
    ajtmp = bj + bi[i];
    for ( j=0; j<nz; j++ ) {
      x = rtmp+16*ajtmp[j];
      x[0]  = x[1]  = x[2]  = x[3]  = x[4]  = x[5]  = x[6]  = x[7]  = x[8]  = x[9]  = 0.0;
      x[10] = x[11] = x[12] = x[13] = x[14] = x[15] = 0.0;
    }
    /* load in initial (unfactored row) -- natural ordering, no permutation */
    nz       = ai[i+1] - ai[i];
    ajtmpold = aj + ai[i];
    v        = aa + 16*ai[i];
    for ( j=0; j<nz; j++ ) {
      x = rtmp+16*ajtmpold[j];
      x[0]  = v[0];  x[1]  = v[1];  x[2]  = v[2];  x[3]  = v[3];
      x[4]  = v[4];  x[5]  = v[5];  x[6]  = v[6];  x[7]  = v[7];  x[8] = v[8];
      x[9]  = v[9];  x[10] = v[10]; x[11] = v[11]; x[12] = v[12]; x[13] = v[13];
      x[14] = v[14]; x[15] = v[15];
      v += 16;
    }
    /* eliminate against each factored row < i appearing in this row */
    row = *ajtmp++;
    while (row < i) {
      pc  = rtmp + 16*row;
      p1  = pc[0];  p2  = pc[1];  p3  = pc[2];  p4  = pc[3];
      p5  = pc[4];  p6  = pc[5];  p7  = pc[6];  p8  = pc[7];  p9  = pc[8];
      p10 = pc[9];  p11 = pc[10]; p12 = pc[11]; p13 = pc[12]; p14 = pc[13];
      p15 = pc[14]; p16 = pc[15];
      /* skip the update when the whole multiplier block is zero */
      if (p1 != 0.0 || p2 != 0.0 || p3 != 0.0 || p4 != 0.0 || p5 != 0.0 ||
          p6 != 0.0 || p7 != 0.0 || p8 != 0.0 || p9 != 0.0 || p10 != 0.0 ||
          p11 != 0.0 || p12 != 0.0 || p13 != 0.0 || p14 != 0.0 || p15 != 0.0
          || p16 != 0.0) {
        pv  = ba + 16*diag_offset[row];
        pj  = bj + diag_offset[row] + 1;
        x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
        x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9  = pv[8];
        x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12]; x14 = pv[13];
        x15 = pv[14]; x16 = pv[15];
        /* multiplier block:  pc <- pc * inv(diag block of `row`)
           (the diagonal block in ba is already stored inverted) */
        pc[0] = m1 = p1*x1 + p5*x2 + p9*x3  + p13*x4;
        pc[1] = m2 = p2*x1 + p6*x2 + p10*x3 + p14*x4;
        pc[2] = m3 = p3*x1 + p7*x2 + p11*x3 + p15*x4;
        pc[3] = m4 = p4*x1 + p8*x2 + p12*x3 + p16*x4;

        pc[4] = m5 = p1*x5 + p5*x6 + p9*x7  + p13*x8;
        pc[5] = m6 = p2*x5 + p6*x6 + p10*x7 + p14*x8;
        pc[6] = m7 = p3*x5 + p7*x6 + p11*x7 + p15*x8;
        pc[7] = m8 = p4*x5 + p8*x6 + p12*x7 + p16*x8;

        pc[8]  = m9  = p1*x9 + p5*x10 + p9*x11  + p13*x12;
        pc[9]  = m10 = p2*x9 + p6*x10 + p10*x11 + p14*x12;
        pc[10] = m11 = p3*x9 + p7*x10 + p11*x11 + p15*x12;
        pc[11] = m12 = p4*x9 + p8*x10 + p12*x11 + p16*x12;

        pc[12] = m13 = p1*x13 + p5*x14 + p9*x15  + p13*x16;
        pc[13] = m14 = p2*x13 + p6*x14 + p10*x15 + p14*x16;
        pc[14] = m15 = p3*x13 + p7*x14 + p11*x15 + p15*x16;
        pc[15] = m16 = p4*x13 + p8*x14 + p12*x15 + p16*x16;

        /* rank-4 update of the rest of the active row */
        nz = bi[row+1] - diag_offset[row] - 1;
        pv += 16;
        for (j=0; j<nz; j++) {
          x1  = pv[0];  x2  = pv[1];  x3  = pv[2];  x4  = pv[3];
          x5  = pv[4];  x6  = pv[5];  x7  = pv[6];  x8  = pv[7];  x9 = pv[8];
          x10 = pv[9];  x11 = pv[10]; x12 = pv[11]; x13 = pv[12];
          x14 = pv[13]; x15 = pv[14]; x16 = pv[15];
          x = rtmp + 16*pj[j];
          x[0] -= m1*x1 + m5*x2 + m9*x3  + m13*x4;
          x[1] -= m2*x1 + m6*x2 + m10*x3 + m14*x4;
          x[2] -= m3*x1 + m7*x2 + m11*x3 + m15*x4;
          x[3] -= m4*x1 + m8*x2 + m12*x3 + m16*x4;

          x[4] -= m1*x5 + m5*x6 + m9*x7  + m13*x8;
          x[5] -= m2*x5 + m6*x6 + m10*x7 + m14*x8;
          x[6] -= m3*x5 + m7*x6 + m11*x7 + m15*x8;
          x[7] -= m4*x5 + m8*x6 + m12*x7 + m16*x8;

          x[8]  -= m1*x9 + m5*x10 + m9*x11  + m13*x12;
          x[9]  -= m2*x9 + m6*x10 + m10*x11 + m14*x12;
          x[10] -= m3*x9 + m7*x10 + m11*x11 + m15*x12;
          x[11] -= m4*x9 + m8*x10 + m12*x11 + m16*x12;

          x[12] -= m1*x13 + m5*x14 + m9*x15  + m13*x16;
          x[13] -= m2*x13 + m6*x14 + m10*x15 + m14*x16;
          x[14] -= m3*x13 + m7*x14 + m11*x15 + m15*x16;
          x[15] -= m4*x13 + m8*x14 + m12*x15 + m16*x16;

          pv += 16;
        }
        PLogFlops(128*nz+112);
      }
      row = *ajtmp++;
    }
    /* finished row so stick it into b->a */
    pv = ba + 16*bi[i];
    pj = bj + bi[i];
    nz = bi[i+1] - bi[i];
    for ( j=0; j<nz; j++ ) {
      x = rtmp+16*pj[j];
      pv[0]  = x[0];  pv[1]  = x[1];  pv[2]  = x[2];  pv[3]  = x[3];
      pv[4]  = x[4];  pv[5]  = x[5];  pv[6]  = x[6];  pv[7]  = x[7];  pv[8] = x[8];
      pv[9]  = x[9];  pv[10] = x[10]; pv[11] = x[11]; pv[12] = x[12];
      pv[13] = x[13]; pv[14] = x[14]; pv[15] = x[15];
      pv += 16;
    }
    /* invert diagonal block */
    w = ba + 16*diag_offset[i];
    ierr = Kernel_A_gets_inverse_A_4(w); CHKERRQ(ierr);
  }

  PetscFree(rtmp);
  C->factor    = FACTOR_LU;
  C->assembled = PETSC_TRUE;
  PLogFlops(1.3333*64*b->mbs); /* from inverting diagonal blocks */
  PetscFunctionReturn(0);
}
688 pv = ba + 16*bi[i]; 689 pj = bj + bi[i]; 690 nz = bi[i+1] - bi[i]; 691 for ( j=0; j<nz; j++ ) { 692 x = rtmp+16*pj[j]; 693 pv[0] = x[0]; pv[1] = x[1]; pv[2] = x[2]; pv[3] = x[3]; 694 pv[4] = x[4]; pv[5] = x[5]; pv[6] = x[6]; pv[7] = x[7]; pv[8] = x[8]; 695 pv[9] = x[9]; pv[10] = x[10]; pv[11] = x[11]; pv[12] = x[12]; 696 pv[13] = x[13]; pv[14] = x[14]; pv[15] = x[15]; 697 pv += 16; 698 } 699 /* invert diagonal block */ 700 w = ba + 16*diag_offset[i]; 701 ierr = Kernel_A_gets_inverse_A_4(w); CHKERRQ(ierr); 702 } 703 704 PetscFree(rtmp); 705 C->factor = FACTOR_LU; 706 C->assembled = PETSC_TRUE; 707 PLogFlops(1.3333*64*b->mbs); /* from inverting diagonal blocks */ 708 PetscFunctionReturn(0); 709 } 710 711 /* ------------------------------------------------------------*/ 712 /* 713 Version for when blocks are 3 by 3 714 */ 715 #undef __FUNC__ 716 #define __FUNC__ "MatLUFactorNumeric_SeqBAIJ_3" 717 int MatLUFactorNumeric_SeqBAIJ_3(Mat A,Mat *B) 718 { 719 Mat C = *B; 720 Mat_SeqBAIJ *a = (Mat_SeqBAIJ *) A->data,*b = (Mat_SeqBAIJ *)C->data; 721 IS isrow = b->row, isicol = b->icol; 722 int *r,*ic, ierr, i, j, n = a->mbs, *bi = b->i, *bj = b->j; 723 int *ajtmpold, *ajtmp, nz, row, *ai=a->i,*aj=a->j; 724 int *diag_offset = b->diag,idx; 725 register int *pj; 726 register Scalar *pv,*v,*rtmp,*pc,*w,*x; 727 Scalar p1,p2,p3,p4,m1,m2,m3,m4,m5,m6,m7,m8,m9,x1,x2,x3,x4; 728 Scalar p5,p6,p7,p8,p9,x5,x6,x7,x8,x9; 729 Scalar *ba = b->a,*aa = a->a; 730 731 PetscFunctionBegin; 732 ierr = ISGetIndices(isrow,&r); CHKERRQ(ierr); 733 ierr = ISGetIndices(isicol,&ic); CHKERRQ(ierr); 734 rtmp = (Scalar *) PetscMalloc(9*(n+1)*sizeof(Scalar));CHKPTRQ(rtmp); 735 736 for ( i=0; i<n; i++ ) { 737 nz = bi[i+1] - bi[i]; 738 ajtmp = bj + bi[i]; 739 for ( j=0; j<nz; j++ ) { 740 x = rtmp + 9*ajtmp[j]; 741 x[0] = x[1] = x[2] = x[3] = x[4] = x[5] = x[6] = x[7] = x[8] = x[9] = 0.0; 742 } 743 /* load in initial (unfactored row) */ 744 idx = r[i]; 745 nz = ai[idx+1] - ai[idx]; 746 ajtmpold = aj + ai[idx]; 
747 v = aa + 9*ai[idx]; 748 for ( j=0; j<nz; j++ ) { 749 x = rtmp + 9*ic[ajtmpold[j]]; 750 x[0] = v[0]; x[1] = v[1]; x[2] = v[2]; x[3] = v[3]; 751 x[4] = v[4]; x[5] = v[5]; x[6] = v[6]; x[7] = v[7]; x[8] = v[8]; 752 v += 9; 753 } 754 row = *ajtmp++; 755 while (row < i) { 756 pc = rtmp + 9*row; 757 p1 = pc[0]; p2 = pc[1]; p3 = pc[2]; p4 = pc[3]; 758 p5 = pc[4]; p6 = pc[5]; p7 = pc[6]; p8 = pc[7]; p9 = pc[8]; 759 if (p1 != 0.0 || p2 != 0.0 || p3 != 0.0 || p4 != 0.0 || p5 != 0.0 || 760 p6 != 0.0 || p7 != 0.0 || p8 != 0.0 || p9 != 0.0) { 761 pv = ba + 9*diag_offset[row]; 762 pj = bj + diag_offset[row] + 1; 763 x1 = pv[0]; x2 = pv[1]; x3 = pv[2]; x4 = pv[3]; 764 x5 = pv[4]; x6 = pv[5]; x7 = pv[6]; x8 = pv[7]; x9 = pv[8]; 765 pc[0] = m1 = p1*x1 + p4*x2 + p7*x3; 766 pc[1] = m2 = p2*x1 + p5*x2 + p8*x3; 767 pc[2] = m3 = p3*x1 + p6*x2 + p9*x3; 768 769 pc[3] = m4 = p1*x4 + p4*x5 + p7*x6; 770 pc[4] = m5 = p2*x4 + p5*x5 + p8*x6; 771 pc[5] = m6 = p3*x4 + p6*x5 + p9*x6; 772 773 pc[6] = m7 = p1*x7 + p4*x8 + p7*x9; 774 pc[7] = m8 = p2*x7 + p5*x8 + p8*x9; 775 pc[8] = m9 = p3*x7 + p6*x8 + p9*x9; 776 nz = bi[row+1] - diag_offset[row] - 1; 777 pv += 9; 778 for (j=0; j<nz; j++) { 779 x1 = pv[0]; x2 = pv[1]; x3 = pv[2]; x4 = pv[3]; 780 x5 = pv[4]; x6 = pv[5]; x7 = pv[6]; x8 = pv[7]; x9 = pv[8]; 781 x = rtmp + 9*pj[j]; 782 x[0] -= m1*x1 + m4*x2 + m7*x3; 783 x[1] -= m2*x1 + m5*x2 + m8*x3; 784 x[2] -= m3*x1 + m6*x2 + m9*x3; 785 786 x[3] -= m1*x4 + m4*x5 + m7*x6; 787 x[4] -= m2*x4 + m5*x5 + m8*x6; 788 x[5] -= m3*x4 + m6*x5 + m9*x6; 789 790 x[6] -= m1*x7 + m4*x8 + m7*x9; 791 x[7] -= m2*x7 + m5*x8 + m8*x9; 792 x[8] -= m3*x7 + m6*x8 + m9*x9; 793 pv += 9; 794 } 795 PLogFlops(54*nz+36); 796 } 797 row = *ajtmp++; 798 } 799 /* finished row so stick it into b->a */ 800 pv = ba + 9*bi[i]; 801 pj = bj + bi[i]; 802 nz = bi[i+1] - bi[i]; 803 for ( j=0; j<nz; j++ ) { 804 x = rtmp + 9*pj[j]; 805 pv[0] = x[0]; pv[1] = x[1]; pv[2] = x[2]; pv[3] = x[3]; 806 pv[4] = x[4]; pv[5] = x[5]; pv[6] = x[6]; pv[7] = 
x[7]; pv[8] = x[8]; 807 pv += 9; 808 } 809 /* invert diagonal block */ 810 w = ba + 9*diag_offset[i]; 811 ierr = Kernel_A_gets_inverse_A_3(w); CHKERRQ(ierr); 812 } 813 814 PetscFree(rtmp); 815 ierr = ISRestoreIndices(isicol,&ic); CHKERRQ(ierr); 816 ierr = ISRestoreIndices(isrow,&r); CHKERRQ(ierr); 817 C->factor = FACTOR_LU; 818 C->assembled = PETSC_TRUE; 819 PLogFlops(1.3333*27*b->mbs); /* from inverting diagonal blocks */ 820 PetscFunctionReturn(0); 821 } 822 823 /* ------------------------------------------------------------*/ 824 /* 825 Version for when blocks are 2 by 2 826 */ 827 #undef __FUNC__ 828 #define __FUNC__ "MatLUFactorNumeric_SeqBAIJ_2" 829 int MatLUFactorNumeric_SeqBAIJ_2(Mat A,Mat *B) 830 { 831 Mat C = *B; 832 Mat_SeqBAIJ *a = (Mat_SeqBAIJ *) A->data,*b = (Mat_SeqBAIJ *)C->data; 833 IS isrow = b->row, isicol = b->icol; 834 int *r,*ic, ierr, i, j, n = a->mbs, *bi = b->i, *bj = b->j; 835 int *ajtmpold, *ajtmp, nz, row, v_pivots[2]; 836 int *diag_offset=b->diag,bs = 2,idx,*ai=a->i,*aj=a->j; 837 register int *pj; 838 register Scalar *pv,*v,*rtmp,m1,m2,m3,m4,*pc,*w,*x,x1,x2,x3,x4; 839 Scalar p1,p2,p3,p4,v_work[2]; 840 Scalar *ba = b->a,*aa = a->a; 841 842 PetscFunctionBegin; 843 ierr = ISGetIndices(isrow,&r); CHKERRQ(ierr); 844 ierr = ISGetIndices(isicol,&ic); CHKERRQ(ierr); 845 rtmp = (Scalar *) PetscMalloc(4*(n+1)*sizeof(Scalar));CHKPTRQ(rtmp); 846 847 for ( i=0; i<n; i++ ) { 848 nz = bi[i+1] - bi[i]; 849 ajtmp = bj + bi[i]; 850 for ( j=0; j<nz; j++ ) { 851 x = rtmp+4*ajtmp[j]; x[0] = x[1] = x[2] = x[3] = 0.0; 852 } 853 /* load in initial (unfactored row) */ 854 idx = r[i]; 855 nz = ai[idx+1] - ai[idx]; 856 ajtmpold = aj + ai[idx]; 857 v = aa + 4*ai[idx]; 858 for ( j=0; j<nz; j++ ) { 859 x = rtmp+4*ic[ajtmpold[j]]; 860 x[0] = v[0]; x[1] = v[1]; x[2] = v[2]; x[3] = v[3]; 861 v += 4; 862 } 863 row = *ajtmp++; 864 while (row < i) { 865 pc = rtmp + 4*row; 866 p1 = pc[0]; p2 = pc[1]; p3 = pc[2]; p4 = pc[3]; 867 if (p1 != 0.0 || p2 != 0.0 || p3 != 0.0 || p4 
!= 0.0) { 868 pv = ba + 4*diag_offset[row]; 869 pj = bj + diag_offset[row] + 1; 870 x1 = pv[0]; x2 = pv[1]; x3 = pv[2]; x4 = pv[3]; 871 pc[0] = m1 = p1*x1 + p3*x2; 872 pc[1] = m2 = p2*x1 + p4*x2; 873 pc[2] = m3 = p1*x3 + p3*x4; 874 pc[3] = m4 = p2*x3 + p4*x4; 875 nz = bi[row+1] - diag_offset[row] - 1; 876 pv += 4; 877 for (j=0; j<nz; j++) { 878 x1 = pv[0]; x2 = pv[1]; x3 = pv[2]; x4 = pv[3]; 879 x = rtmp + 4*pj[j]; 880 x[0] -= m1*x1 + m3*x2; 881 x[1] -= m2*x1 + m4*x2; 882 x[2] -= m1*x3 + m3*x4; 883 x[3] -= m2*x3 + m4*x4; 884 pv += 4; 885 } 886 PLogFlops(16*nz+12); 887 } 888 row = *ajtmp++; 889 } 890 /* finished row so stick it into b->a */ 891 pv = ba + 4*bi[i]; 892 pj = bj + bi[i]; 893 nz = bi[i+1] - bi[i]; 894 for ( j=0; j<nz; j++ ) { 895 x = rtmp+4*pj[j]; 896 pv[0] = x[0]; pv[1] = x[1]; pv[2] = x[2]; pv[3] = x[3]; 897 pv += 4; 898 } 899 /* invert diagonal block */ 900 w = ba + 4*diag_offset[i]; 901 Kernel_A_gets_inverse_A(bs,w,v_pivots,v_work); 902 } 903 904 PetscFree(rtmp); 905 ierr = ISRestoreIndices(isicol,&ic); CHKERRQ(ierr); 906 ierr = ISRestoreIndices(isrow,&r); CHKERRQ(ierr); 907 C->factor = FACTOR_LU; 908 C->assembled = PETSC_TRUE; 909 PLogFlops(1.3333*8*b->mbs); /* from inverting diagonal blocks */ 910 PetscFunctionReturn(0); 911 } 912 913 /* ----------------------------------------------------------- */ 914 /* 915 Version for when blocks are 1 by 1. 
916 */ 917 #undef __FUNC__ 918 #define __FUNC__ "MatLUFactorNumeric_SeqBAIJ_1" 919 int MatLUFactorNumeric_SeqBAIJ_1(Mat A,Mat *B) 920 { 921 Mat C = *B; 922 Mat_SeqBAIJ *a = (Mat_SeqBAIJ *) A->data, *b = (Mat_SeqBAIJ *)C->data; 923 IS isrow = b->row, isicol = b->icol; 924 int *r,*ic, ierr, i, j, n = a->mbs, *bi = b->i, *bj = b->j; 925 int *ajtmpold, *ajtmp, nz, row,*ai = a->i,*aj = a->j; 926 int *diag_offset = b->diag,diag; 927 register int *pj; 928 register Scalar *pv,*v,*rtmp,multiplier,*pc; 929 Scalar *ba = b->a,*aa = a->a; 930 931 PetscFunctionBegin; 932 ierr = ISGetIndices(isrow,&r); CHKERRQ(ierr); 933 ierr = ISGetIndices(isicol,&ic); CHKERRQ(ierr); 934 rtmp = (Scalar *) PetscMalloc((n+1)*sizeof(Scalar));CHKPTRQ(rtmp); 935 936 for ( i=0; i<n; i++ ) { 937 nz = bi[i+1] - bi[i]; 938 ajtmp = bj + bi[i]; 939 for ( j=0; j<nz; j++ ) rtmp[ajtmp[j]] = 0.0; 940 941 /* load in initial (unfactored row) */ 942 nz = ai[r[i]+1] - ai[r[i]]; 943 ajtmpold = aj + ai[r[i]]; 944 v = aa + ai[r[i]]; 945 for ( j=0; j<nz; j++ ) rtmp[ic[ajtmpold[j]]] = v[j]; 946 947 row = *ajtmp++; 948 while (row < i) { 949 pc = rtmp + row; 950 if (*pc != 0.0) { 951 pv = ba + diag_offset[row]; 952 pj = bj + diag_offset[row] + 1; 953 multiplier = *pc * *pv++; 954 *pc = multiplier; 955 nz = bi[row+1] - diag_offset[row] - 1; 956 for (j=0; j<nz; j++) rtmp[pj[j]] -= multiplier * pv[j]; 957 PLogFlops(1+2*nz); 958 } 959 row = *ajtmp++; 960 } 961 /* finished row so stick it into b->a */ 962 pv = ba + bi[i]; 963 pj = bj + bi[i]; 964 nz = bi[i+1] - bi[i]; 965 for ( j=0; j<nz; j++ ) {pv[j] = rtmp[pj[j]];} 966 diag = diag_offset[i] - bi[i]; 967 /* check pivot entry for current row */ 968 if (pv[diag] == 0.0) { 969 SETERRQ(PETSC_ERR_MAT_LU_ZRPVT,0,"Zero pivot"); 970 } 971 pv[diag] = 1.0/pv[diag]; 972 } 973 974 PetscFree(rtmp); 975 ierr = ISRestoreIndices(isicol,&ic); CHKERRQ(ierr); 976 ierr = ISRestoreIndices(isrow,&r); CHKERRQ(ierr); 977 C->factor = FACTOR_LU; 978 C->assembled = PETSC_TRUE; 979 PLogFlops(b->n); 980 
PetscFunctionReturn(0); 981 } 982 983 /* ----------------------------------------------------------- */ 984 #undef __FUNC__ 985 #define __FUNC__ "MatLUFactor_SeqBAIJ" 986 int MatLUFactor_SeqBAIJ(Mat A,IS row,IS col,double f) 987 { 988 Mat_SeqBAIJ *mat = (Mat_SeqBAIJ *) A->data; 989 int ierr; 990 Mat C; 991 PetscOps *Abops; 992 MatOps Aops; 993 994 PetscFunctionBegin; 995 ierr = MatLUFactorSymbolic(A,row,col,f,&C); CHKERRQ(ierr); 996 ierr = MatLUFactorNumeric(A,&C); CHKERRQ(ierr); 997 998 /* free all the data structures from mat */ 999 PetscFree(mat->a); 1000 if (!mat->singlemalloc) {PetscFree(mat->i); PetscFree(mat->j);} 1001 if (mat->diag) PetscFree(mat->diag); 1002 if (mat->ilen) PetscFree(mat->ilen); 1003 if (mat->imax) PetscFree(mat->imax); 1004 if (mat->solve_work) PetscFree(mat->solve_work); 1005 if (mat->mult_work) PetscFree(mat->mult_work); 1006 if (mat->icol) {ierr = ISDestroy(mat->icol);CHKERRQ(ierr);} 1007 PetscFree(mat); 1008 1009 ierr = MapDestroy(A->rmap);CHKERRQ(ierr); 1010 ierr = MapDestroy(A->cmap);CHKERRQ(ierr); 1011 1012 /* 1013 This is horrible, horrible code. We need to keep the 1014 A pointers for the bops and ops but copy everything 1015 else from C. 1016 */ 1017 Abops = A->bops; 1018 Aops = A->ops; 1019 PetscMemcpy(A,C,sizeof(struct _p_Mat)); 1020 A->bops = Abops; 1021 A->ops = Aops; 1022 A->qlist = 0; 1023 1024 PetscHeaderDestroy(C); 1025 PetscFunctionReturn(0); 1026 } 1027