/*
    Inverts a 4x4 matrix using partial pivoting.

    Used by the sparse factorization routines in
    src/mat/impls/baij/seq and src/mat/impls/bdiag/seq

    See also src/inline/ilu.h

    This is a combination of the Linpack routines
    dgefa() and dgedi() specialized for a size of 4.
*/
#include "petsc.h"

#undef __FUNCT__
#define __FUNCT__ "Kernel_A_gets_inverse_A_4"
PetscErrorCode Kernel_A_gets_inverse_A_4(MatScalar *a)
{
  PetscInt  i__2,i__3,kp1,j,k,l,ll,i,ipvt[4],kb,k3;
  PetscInt  k4,j3;
  MatScalar *aa,*ax,*ay,work[16],stmp;
  MatReal   tmp,max;

  /* Gaussian elimination with partial pivoting */

  PetscFunctionBegin;
  /* Parameter adjustment: shift to the 1-based, column-major indexing
     of the Linpack original, so entry (i,j) is a[i + 4*j] */
  a -= 5;

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4*k;
    k4  = k3 + k;

    /* find l = pivot index over the 5-k entries on and below the diagonal */
    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll=1; ll<i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) { max = tmp; l = ll+1; }
    }
    l        += k - 1;
    ipvt[k-1] = l;

    if (a[l + k3] == 0.0) {
      SETERRQ1(PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",k-1);
    }

    /* interchange if necessary */
    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */
    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll=0; ll<i__2; ll++) {
      aa[ll] *= stmp;
    }

    /* row elimination with column indexing */
    ax = &a[k4+1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4*j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1+k+j3];
      for (ll=0; ll<i__3; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }
  ipvt[3] = 4;
  if (a[20] == 0.0) {
    SETERRQ1(PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",3);
  }

  /*
     Now form the inverse
  */

  /* compute inverse(u) */
  for (k = 1; k <= 4; ++k) {
    k3    = 4*k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4*j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll=0; ll<k; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }

  /* form inverse(u)*inverse(l) */
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4*k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i-1] = aa[i];
      aa[i]     = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp   = work[j-1];
      ax     = &a[4*j + 1];
      ay     = &a[k3 + 1];
      ay[0] += stmp*ax[0];
      ay[1] += stmp*ax[1];
      ay[2] += stmp*ax[2];
      ay[3] += stmp*ax[3];
    }
    l = ipvt[k-1];
    if (l != k) {
      ax = &a[k3 + 1];
      ay = &a[4*l + 1];
      stmp = ax[0]; ax[0] = ay[0]; ay[0] = stmp;
      stmp = ax[1]; ax[1] = ay[1]; ay[1] = stmp;
      stmp = ax[2]; ax[2] = ay[2]; ay[2] = stmp;
      stmp = ax[3]; ax[3] = ay[3]; ay[3] = stmp;
    }
  }
  PetscFunctionReturn(0);
}
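/*
   Usage sketch (illustrative only, not part of the original source):
   the kernel inverts one dense 4x4 block in place.  Indexing follows
   the Linpack convention, i.e. the block is stored in column-major
   order with entry (i,j) at a[i + 4*j] (0-based).  The #if 0 guard
   keeps the example out of the build; the function name
   Example_Invert4 is hypothetical and real scalars are assumed.
*/
#if 0
static PetscErrorCode Example_Invert4(void)
{
  /* column-major storage of diag(1,2,4,8) */
  MatScalar      a[16] = {1.0,0.0,0.0,0.0,
                          0.0,2.0,0.0,0.0,
                          0.0,0.0,4.0,0.0,
                          0.0,0.0,0.0,8.0};
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = Kernel_A_gets_inverse_A_4(a);CHKERRQ(ierr);
  /* a now holds diag(1,1/2,1/4,1/8); a zero pivot would have
     triggered the PETSC_ERR_MAT_LU_ZRPVT error above */
  PetscFunctionReturn(0);
}
#endif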

#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE

#undef __FUNCT__
#define __FUNCT__ "Kernel_A_gets_inverse_A_4_SSE"
PetscErrorCode Kernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
     Order Number: 245043-001
     March 1999
     http://www.intel.com

     Inverse of a 4x4 matrix via Cramer's Rule:
     bool Invert4x4(SMLXMatrix &);
  */
  PetscFunctionBegin;

  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */

  /* Gather the four rows of the block into XMM3, XMM5, XMM4, XMM6;
     as in the Intel original, two of the rows are kept rotated by two
     elements, the arrangement used by the 2x2 sub-determinant
     products below */

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_4,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_12,XMM5)

  SSE_COPY_PS(XMM3,XMM0)
  SSE_SHUFFLE(XMM3,XMM5,0x88)

  SSE_SHUFFLE(XMM5,XMM0,0xDD)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_2,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_10,XMM6)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM6)

  SSE_COPY_PS(XMM4,XMM0)
  SSE_SHUFFLE(XMM4,XMM6,0x88)

  SSE_SHUFFLE(XMM6,XMM0,0xDD)

  /* ----------------------------------------------- */

  /* Accumulate the cofactor (adjoint) rows from products of
     2x2 sub-determinants */

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM5)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM1,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_4,XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)

  SSE_SHUFFLE(XMM0,XMM0,0xB1)

  SSE_COPY_PS(XMM2,XMM6)
  SSE_MULT_PS(XMM2,XMM0)
  SSE_ADD_PS(XMM2,XMM1)

  SSE_COPY_PS(XMM7,XMM3)
  SSE_MULT_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM0)
  SSE_SUB_PS(XMM2,XMM1)

  SSE_MULT_PS(XMM0,XMM3)
  SSE_SUB_PS(XMM0,XMM7)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM0)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7,XMM5)
  SSE_SHUFFLE(XMM7,XMM5,0x4E)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_COPY_PS(XMM0,XMM4)
  SSE_MULT_PS(XMM0,XMM7)
  SSE_ADD_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM4)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM0,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_0,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM5)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM0,XMM6)
  SSE_MULT_PS(XMM0,XMM1)
  SSE_ADD_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_SUB_PS_M(XMM2,SSE_ARG_1,FLOAT_12)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM6)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_SUB_PS(XMM2,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM6)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_4,XMM0)
  SSE_SUB_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM5)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_ADD_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_ADD_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM5)
  SSE_SUB_PS(XMM2,XMM1)

  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4,XMM3)

  SSE_SHUFFLE(XMM4,XMM4,0xB1)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM4)
  SSE_ADD_PS(XMM1,XMM7)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_12,XMM7)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_MULT_PS(XMM6,XMM4)
  SSE_SUB_PS(XMM1,XMM6)

  SSE_MULT_PS(XMM5,XMM4)
  SSE_ADD_PS(XMM5,XMM7)

  /* ----------------------------------------------- */
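  /* At this point the four cofactor rows are complete, the first
     parked in memory at FLOAT_0 and the others held in XMM1, XMM2
     and XMM5.  The code below forms det(A) as the dot product of the
     first gathered row (XMM3) with the first cofactor row,
     approximates 1/det with a reciprocal instruction, refines it with
     one Newton-Raphson step (x1 = 2*x0 - det*x0*x0), broadcasts the
     result, and scales all four cofactor rows to produce the
     inverse. */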

  SSE_LOAD_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_MULT_PS(XMM3,XMM0)

  SSE_COPY_PS(XMM4,XMM3)
  SSE_SHUFFLE(XMM4,XMM3,0x4E)
  SSE_ADD_PS(XMM4,XMM3)

  SSE_COPY_PS(XMM6,XMM4)
  SSE_SHUFFLE(XMM6,XMM4,0xB1)
  SSE_ADD_SS(XMM6,XMM4)

  SSE_COPY_PS(XMM3,XMM6)
  SSE_RECIP_SS(XMM3,XMM6)
  SSE_COPY_SS(XMM4,XMM3)
  SSE_ADD_SS(XMM4,XMM3)
  SSE_MULT_SS(XMM3,XMM3)
  SSE_MULT_SS(XMM6,XMM3)
  SSE_SUB_SS(XMM4,XMM6)

  SSE_SHUFFLE(XMM4,XMM4,0x00)

  SSE_MULT_PS(XMM0,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM1)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM1)

  SSE_MULT_PS(XMM2,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM2)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM2)

  SSE_MULT_PS(XMM4,XMM5)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;

  PetscFunctionReturn(0);
}

#endif
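
/*
   Note on the SSE variant (a sketch of its constraints, not a
   statement from the original source): unlike the scalar kernel
   above, the Cramer's-rule code performs no pivoting and no
   zero-pivot test, so a singular block silently yields Inf/NaN
   through the reciprocal; it should only be applied to blocks known
   to be safely invertible.  Because the body issues full-width
   SSE_LOAD_PS/SSE_STORE_PS accesses, the float array is assumed to
   require 16-byte alignment.  A hypothetical call site:

     float a[16];   one 4x4 block, 16-byte aligned, inverted in place
     ierr = Kernel_A_gets_inverse_A_4_SSE(a);CHKERRQ(ierr);
*/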