/*
   Inverts a 4 by 4 matrix using partial pivoting.

   Used by the sparse factorization routines in
   src/mat/impls/baij/seq

   This is a combination of the LINPACK routines
   dgefa() and dgedi() specialized for a size of 4.
*/
#include <petscsys.h>

#undef __FUNCT__
#define __FUNCT__ "PetscKernel_A_gets_inverse_A_4"
PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4(MatScalar *a,PetscReal shift,PetscBool allowzeropivot,PetscBool *zeropivotdetected)
{
  PetscInt  i__2,i__3,kp1,j,k,l,ll,i,ipvt[4],kb,k3;
  PetscInt  k4,j3;
  MatScalar *aa,*ax,*ay,work[16],stmp;
  MatReal   tmp,max;

  /* Gaussian elimination with partial pivoting */

  PetscFunctionBegin;
  shift = .25*shift*(1.e-12 + PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));

  /* Parameter adjustments */
  a -= 5;

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4*k;
    k4  = k3 + k;

    /* find l = pivot index */
    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll=1; ll<i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) { max = tmp; l = ll+1;}
    }
    l        += k - 1;
    ipvt[k-1] = l;

    if (a[l + k3] == 0.0) {
      if (shift == 0.0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",k-1);
      else {
        /* SHIFT is applied to SINGLE diagonal entry; does this make any sense? */
        a[l + k3] = shift;
      }
    }

    /* interchange if necessary */
    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */
    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;

    /* row elimination with column indexing */
    ax = &a[k4+1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4*j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1+k+j3];
      for (ll=0; ll<i__3; ll++) ay[ll] += stmp*ax[ll];
    }
  }
  ipvt[3] = 4;
  if (a[20] == 0.0) {
    PetscErrorCode ierr;
    if (allowzeropivot) {
      ierr = PetscInfo1(NULL,"Zero pivot, row %D\n",3);CHKERRQ(ierr);
      *zeropivotdetected = PETSC_TRUE;
    } else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",3);
  }

  /*
     Now form the inverse
  */

  /* compute inverse(u) */
  for (k = 1; k <= 4; ++k) {
    k3    = 4*k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4*j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll=0; ll<k; ll++) ay[ll] += stmp*ax[ll];
    }
  }

  /* form inverse(u)*inverse(l) */
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4*k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i-1] = aa[i];
      aa[i]     = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp   = work[j-1];
      ax     = &a[4*j + 1];
      ay     = &a[k3 + 1];
      ay[0] += stmp*ax[0];
      ay[1] += stmp*ax[1];
      ay[2] += stmp*ax[2];
      ay[3] += stmp*ax[3];
    }
    l = ipvt[k-1];
    if (l != k) {
      ax   = &a[k3 + 1];
      ay   = &a[4*l + 1];
      stmp = ax[0]; ax[0] = ay[0]; ay[0] = stmp;
      stmp = ax[1]; ax[1] = ay[1]; ay[1] = stmp;
      stmp = ax[2]; ax[2] = ay[2]; ay[2] = stmp;
      stmp = ax[3]; ax[3] = ay[3]; ay[3] = stmp;
    }
  }
  PetscFunctionReturn(0);
}
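
/*
   Usage sketch (illustrative only; blk and zeropivot are placeholder names):
   the kernel overwrites the 16 contiguous MatScalar entries of a 4x4 block
   with the block's inverse.

     MatScalar      blk[16];            entries of the 4x4 block
     PetscBool      zeropivot;
     PetscErrorCode ierr;

     ierr = PetscKernel_A_gets_inverse_A_4(blk,0.0,PETSC_TRUE,&zeropivot);CHKERRQ(ierr);

   The shift, allowzeropivot and zeropivotdetected arguments control how a
   zero pivot is handled: replaced by a small shift, reported through
   zeropivotdetected, or raised as PETSC_ERR_MAT_LU_ZRPVT.
*/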

#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE

#undef __FUNCT__
#define __FUNCT__ "PetscKernel_A_gets_inverse_A_4_SSE"
PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
     Order Number: 245043-001
     March 1999
     http://www.intel.com

     Inverse of a 4x4 matrix via Cramer's rule:
     bool Invert4x4(SMLXMatrix &);
  */
  PetscFunctionBegin;
  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_4,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_12,XMM5)

  SSE_COPY_PS(XMM3,XMM0)
  SSE_SHUFFLE(XMM3,XMM5,0x88)

  SSE_SHUFFLE(XMM5,XMM0,0xDD)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_2,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_10,XMM6)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM6)

  SSE_COPY_PS(XMM4,XMM0)
  SSE_SHUFFLE(XMM4,XMM6,0x88)

  SSE_SHUFFLE(XMM6,XMM0,0xDD)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM5)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM1,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_4,XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)

  SSE_SHUFFLE(XMM0,XMM0,0xB1)

  SSE_COPY_PS(XMM2,XMM6)
  SSE_MULT_PS(XMM2,XMM0)
  SSE_ADD_PS(XMM2,XMM1)

  SSE_COPY_PS(XMM7,XMM3)
  SSE_MULT_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM0)
  SSE_SUB_PS(XMM2,XMM1)

  SSE_MULT_PS(XMM0,XMM3)
  SSE_SUB_PS(XMM0,XMM7)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM0)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7,XMM5)
  SSE_SHUFFLE(XMM7,XMM5,0x4E)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_COPY_PS(XMM0,XMM4)
  SSE_MULT_PS(XMM0,XMM7)
  SSE_ADD_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM4)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM0,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_0,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM5)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM0,XMM6)
  SSE_MULT_PS(XMM0,XMM1)
  SSE_ADD_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_SUB_PS_M(XMM2,SSE_ARG_1,FLOAT_12)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM6)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_SUB_PS(XMM2,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM6)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_4,XMM0)
  SSE_SUB_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM5)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_ADD_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_ADD_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM5)
  SSE_SUB_PS(XMM2,XMM1)

  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4,XMM3)

  SSE_SHUFFLE(XMM4,XMM4,0xB1)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM4)
  SSE_ADD_PS(XMM1,XMM7)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_12,XMM7)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_MULT_PS(XMM6,XMM4)
  SSE_SUB_PS(XMM1,XMM6)

  SSE_MULT_PS(XMM5,XMM4)
  SSE_ADD_PS(XMM5,XMM7)

  /* ----------------------------------------------- */

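  /*
     The block below forms the determinant as the dot product of XMM3
     (gathered from the input at the top of the routine) with the cofactor
     values stored earlier at FLOAT_0..FLOAT_3.  SSE_RECIP_SS gives an
     approximate reciprocal of the determinant, refined with one
     Newton-Raphson step (writing x0 for the estimate: x1 = x0*(2 - det*x0));
     the refined 1/det is broadcast and used to scale the four result rows
     as they are stored back over a[].  (This reading follows the Intel
     application note cited above.)
  */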
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_MULT_PS(XMM3,XMM0)

  SSE_COPY_PS(XMM4,XMM3)
  SSE_SHUFFLE(XMM4,XMM3,0x4E)
  SSE_ADD_PS(XMM4,XMM3)

  SSE_COPY_PS(XMM6,XMM4)
  SSE_SHUFFLE(XMM6,XMM4,0xB1)
  SSE_ADD_SS(XMM6,XMM4)

  SSE_COPY_PS(XMM3,XMM6)
  SSE_RECIP_SS(XMM3,XMM6)
  SSE_COPY_SS(XMM4,XMM3)
  SSE_ADD_SS(XMM4,XMM3)
  SSE_MULT_SS(XMM3,XMM3)
  SSE_MULT_SS(XMM6,XMM3)
  SSE_SUB_SS(XMM4,XMM6)

  SSE_SHUFFLE(XMM4,XMM4,0x00)

  SSE_MULT_PS(XMM0,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM1)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM1)

  SSE_MULT_PS(XMM2,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM2)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM2)

  SSE_MULT_PS(XMM4,XMM5)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;
  PetscFunctionReturn(0);
}

#endif