/*
   Inverts 4 by 4 matrix using Gaussian elimination with partial pivoting.

   Used by the sparse factorization routines in
        src/mat/impls/baij/seq

   This is a combination of the LINPACK routines
   dgefa() and dgedi() specialized for a size of 4.
*/
#include <petscsys.h>

PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4(MatScalar *a, PetscReal shift, PetscBool allowzeropivot, PetscBool *zeropivotdetected)
{
  PetscInt   i__2, i__3, kp1, j, k, l, ll, i, ipvt[4], kb, k3;
  PetscInt   k4, j3;
  MatScalar *aa, *ax, *ay, work[16], stmp;
  MatReal    tmp, max;

  PetscFunctionBegin;
  if (zeropivotdetected) *zeropivotdetected = PETSC_FALSE;
  shift = .25 * shift * (1.e-12 + PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));

  /* Parameter adjustments (1-based, column-major indexing: a[row + 4*col]) */
  a -= 5;

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4 * k;
    k4  = k3 + k;

    /* find l = pivot index */
    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll = 1; ll < i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) {
        max = tmp;
        l   = ll + 1;
      }
    }
    l += k - 1;
    ipvt[k - 1] = l;

    if (a[l + k3] == 0.0) {
      if (shift == 0.0) {
        if (allowzeropivot) {
          PetscCall(PetscInfo(NULL, "Zero pivot, row %" PetscInt_FMT "\n", k - 1));
          if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
        } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_MAT_LU_ZRPVT, "Zero pivot, row %" PetscInt_FMT, k - 1);
      } else {
        /* SHIFT is applied to SINGLE diagonal entry; does this make any sense? */
        a[l + k3] = shift;
      }
    }

    /* interchange if necessary */
    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */
    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll = 0; ll < i__2; ll++) aa[ll] *= stmp;

    /* row elimination with column indexing */
    ax = &a[k4 + 1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4 * j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1 + k + j3];
      for (ll = 0; ll < i__3; ll++) ay[ll] += stmp * ax[ll];
    }
  }
  ipvt[3] = 4;
  if (a[20] == 0.0) {
    if (PetscLikely(allowzeropivot)) {
      PetscCall(PetscInfo(NULL, "Zero pivot, row 3\n"));
      if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
    } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_MAT_LU_ZRPVT, "Zero pivot, row 3");
  }

  /* Now form the inverse */
  /* compute inverse(u) */
  for (k = 1; k <= 4; ++k) {
    k3    = 4 * k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll = 0; ll < i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4 * j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll = 0; ll < k; ll++) ay[ll] += stmp * ax[ll];
    }
  }

  /* form inverse(u)*inverse(l) */
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4 * k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i - 1] = aa[i];
      aa[i]       = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp = work[j - 1];
      ax   = &a[4 * j + 1];
      ay   = &a[k3 + 1];
      ay[0] += stmp * ax[0];
      ay[1] += stmp * ax[1];
      ay[2] += stmp * ax[2];
      ay[3] += stmp * ax[3];
    }
    l = ipvt[k - 1];
    if (l != k) {
      ax    = &a[k3 + 1];
      ay    = &a[4 * l + 1];
      stmp  = ax[0];
      ax[0] = ay[0];
      ay[0] = stmp;
      stmp  = ax[1];
      ax[1] = ay[1];
      ay[1] = stmp;
      stmp  = ax[2];
      ax[2] = ay[2];
      ay[2] = stmp;
      stmp  = ax[3];
      ax[3] = ay[3];
      ay[3] = stmp;
    }
  }
  PetscFunctionReturn(0);
}
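
/*
   Usage sketch (illustrative; B and zeropivot are hypothetical names, not
   part of PETSc).  The kernel inverts a dense 4x4 block in place; the block
   is indexed a[i + 4*j] (column-major, LINPACK convention), and since
   inv(A^T) = inv(A)^T the same call also takes a row-major block to its
   row-major inverse.

     MatScalar B[16];       -- fill with the 4x4 block; overwritten by inverse
     PetscBool zeropivot;
     PetscCall(PetscKernel_A_gets_inverse_A_4(B, 0.0, PETSC_TRUE, &zeropivot));
     if (zeropivot) ...     -- a zero pivot was encountered and reported
*/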

#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE

PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
          Order Number: 245043-001
          March 1999
          https://www.intel.com/content/www/us/en/homepage.html

     Inverse of a 4x4 matrix via Cramer's Rule:
     bool Invert4x4(SMLXMatrix &);
  */
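  /*
     Outline of the macro sequence below: shuffle a[] into registers holding
     interleaved rows/columns of A; accumulate the cofactor (adjugate) rows
     from products of 2x2 sub-determinants; form det(A) as the dot product
     of the first cofactor row with the matching row of A; refine an
     approximate reciprocal of det(A); and scale the adjugate by 1/det,
     overwriting a[] with inverse(A).
  */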
  PetscFunctionBegin;
  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_4, XMM0)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_8, XMM5)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_12, XMM5)

  SSE_COPY_PS(XMM3, XMM0)
  SSE_SHUFFLE(XMM3, XMM5, 0x88)

  SSE_SHUFFLE(XMM5, XMM0, 0xDD)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_2, XMM0)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_6, XMM0)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_10, XMM6)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_14, XMM6)

  SSE_COPY_PS(XMM4, XMM0)
  SSE_SHUFFLE(XMM4, XMM6, 0x88)

  SSE_SHUFFLE(XMM6, XMM0, 0xDD)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7, XMM4)
  SSE_MULT_PS(XMM7, XMM6)

  SSE_SHUFFLE(XMM7, XMM7, 0xB1)

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM7)

  SSE_COPY_PS(XMM2, XMM3)
  SSE_MULT_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  SSE_COPY_PS(XMM1, XMM5)
  SSE_MULT_PS(XMM1, XMM7)
  SSE_SUB_PS(XMM1, XMM0)

  SSE_MULT_PS(XMM7, XMM3)
  SSE_SUB_PS(XMM7, XMM2)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_4, XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM4)

  SSE_SHUFFLE(XMM0, XMM0, 0xB1)

  SSE_COPY_PS(XMM2, XMM6)
  SSE_MULT_PS(XMM2, XMM0)
  SSE_ADD_PS(XMM2, XMM1)

  SSE_COPY_PS(XMM7, XMM3)
  SSE_MULT_PS(XMM7, XMM0)

  SSE_SHUFFLE(XMM0, XMM0, 0x4E)

  SSE_COPY_PS(XMM1, XMM6)
  SSE_MULT_PS(XMM1, XMM0)
  SSE_SUB_PS(XMM2, XMM1)

  SSE_MULT_PS(XMM0, XMM3)
  SSE_SUB_PS(XMM0, XMM7)

  SSE_SHUFFLE(XMM0, XMM0, 0x4E)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_12, XMM0)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7, XMM5)
  SSE_SHUFFLE(XMM7, XMM5, 0x4E)
  SSE_MULT_PS(XMM7, XMM6)

  SSE_SHUFFLE(XMM7, XMM7, 0xB1)

  SSE_SHUFFLE(XMM4, XMM4, 0x4E)

  SSE_COPY_PS(XMM0, XMM4)
  SSE_MULT_PS(XMM0, XMM7)
  SSE_ADD_PS(XMM0, XMM2)

  SSE_COPY_PS(XMM2, XMM3)
  SSE_MULT_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  SSE_COPY_PS(XMM1, XMM4)
  SSE_MULT_PS(XMM1, XMM7)
  SSE_SUB_PS(XMM0, XMM1)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_0, XMM0)

  SSE_MULT_PS(XMM7, XMM3)
  SSE_SUB_PS(XMM7, XMM2)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1, XMM3)
  SSE_MULT_PS(XMM1, XMM5)

  SSE_SHUFFLE(XMM1, XMM1, 0xB1)

  SSE_COPY_PS(XMM0, XMM6)
  SSE_MULT_PS(XMM0, XMM1)
  SSE_ADD_PS(XMM0, XMM7)

  SSE_COPY_PS(XMM2, XMM4)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_SUB_PS_M(XMM2, SSE_ARG_1, FLOAT_12)

  SSE_SHUFFLE(XMM1, XMM1, 0x4E)

  SSE_COPY_PS(XMM7, XMM6)
  SSE_MULT_PS(XMM7, XMM1)
  SSE_SUB_PS(XMM7, XMM0)

  SSE_MULT_PS(XMM1, XMM4)
  SSE_SUB_PS(XMM2, XMM1)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_12, XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1, XMM3)
  SSE_MULT_PS(XMM1, XMM6)

  SSE_SHUFFLE(XMM1, XMM1, 0xB1)

  SSE_COPY_PS(XMM2, XMM4)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_4, XMM0)
  SSE_SUB_PS(XMM0, XMM2)

  SSE_COPY_PS(XMM2, XMM5)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_ADD_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM1, XMM1, 0x4E)

  SSE_COPY_PS(XMM7, XMM4)
  SSE_MULT_PS(XMM7, XMM1)
  SSE_ADD_PS(XMM7, XMM0)

  SSE_MULT_PS(XMM1, XMM5)
  SSE_SUB_PS(XMM2, XMM1)

  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4, XMM3)

  SSE_SHUFFLE(XMM4, XMM4, 0xB1)

  SSE_COPY_PS(XMM1, XMM6)
  SSE_MULT_PS(XMM1, XMM4)
  SSE_ADD_PS(XMM1, XMM7)

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM4)
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_12, XMM7)
  SSE_SUB_PS(XMM7, XMM0)

  SSE_SHUFFLE(XMM4, XMM4, 0x4E)

  SSE_MULT_PS(XMM6, XMM4)
  SSE_SUB_PS(XMM1, XMM6)

  SSE_MULT_PS(XMM5, XMM4)
  SSE_ADD_PS(XMM5, XMM7)

  /* ----------------------------------------------- */
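  /*
     Final stage: dot the cofactor row stored at FLOAT_0 with the matching
     row of A (still held in XMM3) to get det(A), refine the low-precision
     RCPSS reciprocal with one Newton-Raphson step, x1 = 2*x0 - det*x0*x0,
     then broadcast 1/det to all four lanes and scale the adjugate rows.
  */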
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_MULT_PS(XMM3, XMM0)

  SSE_COPY_PS(XMM4, XMM3)
  SSE_SHUFFLE(XMM4, XMM3, 0x4E)
  SSE_ADD_PS(XMM4, XMM3)

  SSE_COPY_PS(XMM6, XMM4)
  SSE_SHUFFLE(XMM6, XMM4, 0xB1)
  SSE_ADD_SS(XMM6, XMM4)

  SSE_COPY_PS(XMM3, XMM6)
  SSE_RECIP_SS(XMM3, XMM6)
  SSE_COPY_SS(XMM4, XMM3)
  SSE_ADD_SS(XMM4, XMM3)
  SSE_MULT_SS(XMM3, XMM3)
  SSE_MULT_SS(XMM6, XMM3)
  SSE_SUB_SS(XMM4, XMM6)

  SSE_SHUFFLE(XMM4, XMM4, 0x00)

  SSE_MULT_PS(XMM0, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_2, XMM0)

  SSE_MULT_PS(XMM1, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_4, XMM1)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_6, XMM1)

  SSE_MULT_PS(XMM2, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_8, XMM2)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_10, XMM2)

  SSE_MULT_PS(XMM4, XMM5)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_12, XMM4)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_14, XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;
  PetscFunctionReturn(0);
}

#endif
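
/*
   Note: unlike the scalar kernel above, the SSE variant performs no pivoting,
   zero-pivot detection, or shifting; Cramer's Rule divides by det(A) directly
   (through the refined reciprocal), so callers must ensure the block is
   nonsingular and reasonably well conditioned.
*/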