/*
     Inverts 4 by 4 matrix using partial pivoting.

     Used by the sparse factorization routines in
     src/mat/impls/baij/seq

     This is a combination of the Linpack routines
     dgefa() and dgedi() specialized for a size of 4.
*/
#include "petscsys.h"

#undef __FUNCT__
#define __FUNCT__ "Kernel_A_gets_inverse_A_4"
PetscErrorCode Kernel_A_gets_inverse_A_4(MatScalar *a,PetscReal shift)
{
  PetscInt  i__2,i__3,kp1,j,k,l,ll,i,ipvt[4],kb,k3;
  PetscInt  k4,j3;
  MatScalar *aa,*ax,*ay,work[16],stmp;
  MatReal   tmp,max;

  /* gaussian elimination with partial pivoting */

  PetscFunctionBegin;
  shift = .25*shift*(1.e-12 + PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));
  /* Parameter adjustments */
  a -= 5;

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4*k;
    k4  = k3 + k;

    /* find l = pivot index */

    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll=1; ll<i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) { max = tmp; l = ll+1; }
    }
    l        += k - 1;
    ipvt[k-1] = l;

    if (a[l + k3] == 0.0) {
      if (shift == 0.0) {
        SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",k-1);
      } else {
        /* SHIFT is applied to SINGLE diagonal entry; does this make any sense? */
        a[l + k3] = shift;
      }
    }

    /* interchange if necessary */

    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */

    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll=0; ll<i__2; ll++) {
      aa[ll] *= stmp;
    }

    /* row elimination with column indexing */

    ax = &a[k4+1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4*j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1+k+j3];
      for (ll=0; ll<i__3; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }
  ipvt[3] = 4;
  if (a[20] == 0.0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",3);

  /*
     Now form the inverse
  */

  /* compute inverse(u) */

  for (k = 1; k <= 4; ++k) {
    k3    = 4*k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4*j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll=0; ll<k; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }

  /* form inverse(u)*inverse(l) */

  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4*k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i-1] = aa[i];
      aa[i]     = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp = work[j-1];
      ax   = &a[4*j + 1];
      ay   = &a[k3 + 1];
      ay[0] += stmp*ax[0];
      ay[1] += stmp*ax[1];
      ay[2] += stmp*ax[2];
      ay[3] += stmp*ax[3];
    }
    l = ipvt[k-1];
    if (l != k) {
      ax = &a[k3 + 1];
      ay = &a[4*l + 1];
      stmp = ax[0]; ax[0] = ay[0]; ay[0] = stmp;
      stmp = ax[1]; ax[1] = ay[1]; ay[1] = stmp;
      stmp = ax[2]; ax[2] = ay[2]; ay[2] = stmp;
      stmp = ax[3]; ax[3] = ay[3]; ay[3] = stmp;
    }
  }
  PetscFunctionReturn(0);
}
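
/*
   Illustrative usage sketch (not part of this file): invert one 4x4 block in
   place.  The example routine name, the diagonal test matrix, and the use of
   CHKERRQ are assumptions about a typical caller; the block is assumed to be
   stored as 16 contiguous MatScalar entries with a[0], a[5], a[10], a[15] on
   the diagonal, as the shift computation above expects.  Guarded out with
   #if 0 so it does not affect the build.
*/
#if 0
static PetscErrorCode ExampleInvert4x4Block(void)
{
  PetscErrorCode ierr;
  MatScalar      block[16] = {4.0,0.0,0.0,0.0,
                              0.0,3.0,0.0,0.0,
                              0.0,0.0,2.0,0.0,
                              0.0,0.0,0.0,1.0};

  PetscFunctionBegin;
  ierr = Kernel_A_gets_inverse_A_4(block,0.0);CHKERRQ(ierr); /* shift of 0.0: error out on a zero pivot */
  /* block now holds its inverse: diag(0.25, 1/3, 0.5, 1.0) */
  PetscFunctionReturn(0);
}
#endif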

#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE

#undef __FUNCT__
#define __FUNCT__ "Kernel_A_gets_inverse_A_4_SSE"
PetscErrorCode Kernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
     Order Number: 245043-001
     March 1999
     http://www.intel.com

     Inverse of a 4x4 matrix via Cramer's Rule:
     bool Invert4x4(SMLXMatrix &);
  */
  PetscFunctionBegin;

  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_4,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_12,XMM5)

  SSE_COPY_PS(XMM3,XMM0)
  SSE_SHUFFLE(XMM3,XMM5,0x88)

  SSE_SHUFFLE(XMM5,XMM0,0xDD)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_2,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_10,XMM6)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM6)

  SSE_COPY_PS(XMM4,XMM0)
  SSE_SHUFFLE(XMM4,XMM6,0x88)

  SSE_SHUFFLE(XMM6,XMM0,0xDD)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM5)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM1,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_4,XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)

  SSE_SHUFFLE(XMM0,XMM0,0xB1)

  SSE_COPY_PS(XMM2,XMM6)
  SSE_MULT_PS(XMM2,XMM0)
  SSE_ADD_PS(XMM2,XMM1)

  SSE_COPY_PS(XMM7,XMM3)
  SSE_MULT_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM0)
  SSE_SUB_PS(XMM2,XMM1)

  SSE_MULT_PS(XMM0,XMM3)
  SSE_SUB_PS(XMM0,XMM7)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM0)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7,XMM5)
  SSE_SHUFFLE(XMM7,XMM5,0x4E)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_COPY_PS(XMM0,XMM4)
  SSE_MULT_PS(XMM0,XMM7)
  SSE_ADD_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM4)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM0,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_0,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM5)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM0,XMM6)
  SSE_MULT_PS(XMM0,XMM1)
  SSE_ADD_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_SUB_PS_M(XMM2,SSE_ARG_1,FLOAT_12)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM6)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_SUB_PS(XMM2,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM6)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_4,XMM0)
  SSE_SUB_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM5)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_ADD_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_ADD_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM5)
  SSE_SUB_PS(XMM2,XMM1)

  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4,XMM3)

  SSE_SHUFFLE(XMM4,XMM4,0xB1)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM4)
  SSE_ADD_PS(XMM1,XMM7)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_12,XMM7)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_MULT_PS(XMM6,XMM4)
  SSE_SUB_PS(XMM1,XMM6)

  SSE_MULT_PS(XMM5,XMM4)
  SSE_ADD_PS(XMM5,XMM7)

  /* ----------------------------------------------- */

  /* Determinant via cofactor expansion; its reciprocal is the rcpss estimate
     refined with one Newton-Raphson step, broadcast to all four lanes, and
     used to scale the cofactor rows as they are stored back over the input. */

  SSE_LOAD_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_MULT_PS(XMM3,XMM0)

  SSE_COPY_PS(XMM4,XMM3)
  SSE_SHUFFLE(XMM4,XMM3,0x4E)
  SSE_ADD_PS(XMM4,XMM3)

  SSE_COPY_PS(XMM6,XMM4)
  SSE_SHUFFLE(XMM6,XMM4,0xB1)
  SSE_ADD_SS(XMM6,XMM4)

  SSE_COPY_PS(XMM3,XMM6)
  SSE_RECIP_SS(XMM3,XMM6)
  SSE_COPY_SS(XMM4,XMM3)
  SSE_ADD_SS(XMM4,XMM3)
  SSE_MULT_SS(XMM3,XMM3)
  SSE_MULT_SS(XMM6,XMM3)
  SSE_SUB_SS(XMM4,XMM6)

  SSE_SHUFFLE(XMM4,XMM4,0x00)

  SSE_MULT_PS(XMM0,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM1)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM1)

  SSE_MULT_PS(XMM2,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM2)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM2)

  SSE_MULT_PS(XMM4,XMM5)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;

  PetscFunctionReturn(0);
}

#endif
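
/*
   Illustrative usage sketch (not part of this file): invert one single-precision
   4x4 block in place with the SSE kernel.  The example routine name and the
   diagonal test matrix are assumptions about a typical caller; the code presumes
   PETSC_HAVE_SSE is defined, and the array may need 16-byte alignment depending
   on how the SSE_* load/store macros are defined.  Guarded out with #if 0 so it
   does not affect the build.
*/
#if 0
static PetscErrorCode ExampleInvert4x4BlockSSE(void)
{
  PetscErrorCode ierr;
  float          block[16] = {4.0f,0.0f,0.0f,0.0f,
                              0.0f,3.0f,0.0f,0.0f,
                              0.0f,0.0f,2.0f,0.0f,
                              0.0f,0.0f,0.0f,1.0f};

  PetscFunctionBegin;
  ierr = Kernel_A_gets_inverse_A_4_SSE(block);CHKERRQ(ierr);
  /* block now holds its inverse: diag(0.25, 1/3, 0.5, 1.0) */
  PetscFunctionReturn(0);
}
#endif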