/*
  Inverts 4 by 4 matrix using Gaussian elimination with partial pivoting.

  Used by the sparse factorization routines in
  src/mat/impls/baij/seq

  This is a combination of the Linpack routines
  dgefa() and dgedi() specialized for a size of 4.

*/
#include <petscsys.h>

PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4(MatScalar *a, PetscReal shift, PetscBool allowzeropivot, PetscBool *zeropivotdetected)
{
  PetscInt   i__2, i__3, kp1, j, k, l, ll, i, ipvt[4], kb, k3;
  PetscInt   k4, j3;
  MatScalar *aa, *ax, *ay, work[16], stmp;
  MatReal    tmp, max;

  PetscFunctionBegin;
  if (zeropivotdetected) *zeropivotdetected = PETSC_FALSE;
  shift = .25 * shift * (1.e-12 + PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));

  /* Parameter adjustments: shift the base pointer so that a[i + 4*j] addresses
     the entry at offset (i-1) + 4*(j-1) of the original array, matching the
     1-based Fortran indexing of dgefa()/dgedi() */
  a -= 5;

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4 * k;
    k4  = k3 + k;

    /* find l = pivot index */
    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll = 1; ll < i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) {
        max = tmp;
        l   = ll + 1;
      }
    }
    l += k - 1;
    ipvt[k - 1] = l;

    if (a[l + k3] == 0.0) {
      if (shift == 0.0) {
        if (allowzeropivot) {
          PetscCall(PetscInfo(NULL, "Zero pivot, row %" PetscInt_FMT "\n", k - 1));
          if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
        } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_MAT_LU_ZRPVT, "Zero pivot, row %" PetscInt_FMT, k - 1);
      } else {
        /* SHIFT is applied to SINGLE diagonal entry; does this make any sense? */
        a[l + k3] = shift;
      }
    }

    /* interchange if necessary */
    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */
    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll = 0; ll < i__2; ll++) aa[ll] *= stmp;

    /* row elimination with column indexing */
    ax = &a[k4 + 1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4 * j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1 + k + j3];
      for (ll = 0; ll < i__3; ll++) ay[ll] += stmp * ax[ll];
    }
  }
  ipvt[3] = 4;
  if (a[20] == 0.0) {
    if (PetscLikely(allowzeropivot)) {
      PetscCall(PetscInfo(NULL, "Zero pivot, row 3\n"));
      if (zeropivotdetected) *zeropivotdetected = PETSC_TRUE;
    } else SETERRQ(PETSC_COMM_SELF, PETSC_ERR_MAT_LU_ZRPVT, "Zero pivot, row 3");
  }

  /* Now form the inverse */
  /* compute inverse(u) */
  for (k = 1; k <= 4; ++k) {
    k3    = 4 * k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll = 0; ll < i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4 * j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll = 0; ll < k; ll++) ay[ll] += stmp * ax[ll];
    }
  }

  /* form inverse(u)*inverse(l) */
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4 * k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i - 1] = aa[i];
      aa[i]       = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp = work[j - 1];
      ax   = &a[4 * j + 1];
      ay   = &a[k3 + 1];
      ay[0] += stmp * ax[0];
      ay[1] += stmp * ax[1];
      ay[2] += stmp * ax[2];
      ay[3] += stmp * ax[3];
    }
    l = ipvt[k - 1];
    if (l != k) {
      ax    = &a[k3 + 1];
      ay    = &a[4 * l + 1];
      stmp  = ax[0];
      ax[0] = ay[0];
      ay[0] = stmp;
      stmp  = ax[1];
      ax[1] = ay[1];
      ay[1] = stmp;
      stmp  = ax[2];
      ax[2] = ay[2];
      ay[2] = stmp;
      stmp  = ax[3];
      ax[3] = ay[3];
      ay[3] = stmp;
    }
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
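/*
  Illustrative only (not part of PETSc): a minimal sketch of how the kernel above
  could be driven to invert one 4x4 block in place.  The helper name
  ExampleInvertDiagonalBlock and the sample values are hypothetical; the 16
  entries are assumed to be stored contiguously with the diagonal at a[0], a[5],
  a[10], a[15], as the shift computation above expects.
*/
#if 0
static PetscErrorCode ExampleInvertDiagonalBlock(void)
{
  /* diag(2,4,5,10); its inverse is diag(0.5,0.25,0.2,0.1) in either row- or column-major storage */
  MatScalar A[16] = {2, 0, 0, 0, 0, 4, 0, 0, 0, 0, 5, 0, 0, 0, 0, 10};
  PetscBool zeropivot;

  PetscFunctionBegin;
  /* no diagonal shift, tolerate a zero pivot, and report whether one was hit */
  PetscCall(PetscKernel_A_gets_inverse_A_4(A, 0.0, PETSC_TRUE, &zeropivot));
  /* A now holds the inverse of the original block; zeropivot is PETSC_FALSE for this input */
  PetscFunctionReturn(PETSC_SUCCESS);
}
#endif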

#if defined(PETSC_HAVE_SSE)
  #include PETSC_HAVE_SSE

PETSC_EXTERN PetscErrorCode PetscKernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
     Order Number: 245043-001
     March 1999
     https://www.intel.com/content/www/us/en/homepage.html

     Inverse of a 4x4 matrix via Cramer's Rule:
     bool Invert4x4(SMLXMatrix &);
  */
  PetscFunctionBegin;
  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_4, XMM0)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_8, XMM5)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_12, XMM5)

  SSE_COPY_PS(XMM3, XMM0)
  SSE_SHUFFLE(XMM3, XMM5, 0x88)

  SSE_SHUFFLE(XMM5, XMM0, 0xDD)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_2, XMM0)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_6, XMM0)

  SSE_LOADL_PS(SSE_ARG_1, FLOAT_10, XMM6)
  SSE_LOADH_PS(SSE_ARG_1, FLOAT_14, XMM6)

  SSE_COPY_PS(XMM4, XMM0)
  SSE_SHUFFLE(XMM4, XMM6, 0x88)

  SSE_SHUFFLE(XMM6, XMM0, 0xDD)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7, XMM4)
  SSE_MULT_PS(XMM7, XMM6)

  SSE_SHUFFLE(XMM7, XMM7, 0xB1)

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM7)

  SSE_COPY_PS(XMM2, XMM3)
  SSE_MULT_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  SSE_COPY_PS(XMM1, XMM5)
  SSE_MULT_PS(XMM1, XMM7)
  SSE_SUB_PS(XMM1, XMM0)

  SSE_MULT_PS(XMM7, XMM3)
  SSE_SUB_PS(XMM7, XMM2)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_4, XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM4)

  SSE_SHUFFLE(XMM0, XMM0, 0xB1)

  SSE_COPY_PS(XMM2, XMM6)
  SSE_MULT_PS(XMM2, XMM0)
  SSE_ADD_PS(XMM2, XMM1)

  SSE_COPY_PS(XMM7, XMM3)
  SSE_MULT_PS(XMM7, XMM0)

  SSE_SHUFFLE(XMM0, XMM0, 0x4E)

  SSE_COPY_PS(XMM1, XMM6)
  SSE_MULT_PS(XMM1, XMM0)
  SSE_SUB_PS(XMM2, XMM1)

  SSE_MULT_PS(XMM0, XMM3)
  SSE_SUB_PS(XMM0, XMM7)

  SSE_SHUFFLE(XMM0, XMM0, 0x4E)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_12, XMM0)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7, XMM5)
  SSE_SHUFFLE(XMM7, XMM5, 0x4E)
  SSE_MULT_PS(XMM7, XMM6)

  SSE_SHUFFLE(XMM7, XMM7, 0xB1)

  SSE_SHUFFLE(XMM4, XMM4, 0x4E)

  SSE_COPY_PS(XMM0, XMM4)
  SSE_MULT_PS(XMM0, XMM7)
  SSE_ADD_PS(XMM0, XMM2)

  SSE_COPY_PS(XMM2, XMM3)
  SSE_MULT_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  SSE_COPY_PS(XMM1, XMM4)
  SSE_MULT_PS(XMM1, XMM7)
  SSE_SUB_PS(XMM0, XMM1)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_0, XMM0)

  SSE_MULT_PS(XMM7, XMM3)
  SSE_SUB_PS(XMM7, XMM2)

  SSE_SHUFFLE(XMM7, XMM7, 0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1, XMM3)
  SSE_MULT_PS(XMM1, XMM5)

  SSE_SHUFFLE(XMM1, XMM1, 0xB1)

  SSE_COPY_PS(XMM0, XMM6)
  SSE_MULT_PS(XMM0, XMM1)
  SSE_ADD_PS(XMM0, XMM7)

  SSE_COPY_PS(XMM2, XMM4)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_SUB_PS_M(XMM2, SSE_ARG_1, FLOAT_12)

  SSE_SHUFFLE(XMM1, XMM1, 0x4E)

  SSE_COPY_PS(XMM7, XMM6)
  SSE_MULT_PS(XMM7, XMM1)
  SSE_SUB_PS(XMM7, XMM0)

  SSE_MULT_PS(XMM1, XMM4)
  SSE_SUB_PS(XMM2, XMM1)
  SSE_STORE_PS(SSE_ARG_1, FLOAT_12, XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1, XMM3)
  SSE_MULT_PS(XMM1, XMM6)

  SSE_SHUFFLE(XMM1, XMM1, 0xB1)

  SSE_COPY_PS(XMM2, XMM4)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_4, XMM0)
  SSE_SUB_PS(XMM0, XMM2)

  SSE_COPY_PS(XMM2, XMM5)
  SSE_MULT_PS(XMM2, XMM1)
  SSE_ADD_PS(XMM2, XMM7)

  SSE_SHUFFLE(XMM1, XMM1, 0x4E)

  SSE_COPY_PS(XMM7, XMM4)
  SSE_MULT_PS(XMM7, XMM1)
  SSE_ADD_PS(XMM7, XMM0)

  SSE_MULT_PS(XMM1, XMM5)
  SSE_SUB_PS(XMM2, XMM1)

  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4, XMM3)

  SSE_SHUFFLE(XMM4, XMM4, 0xB1)

  SSE_COPY_PS(XMM1, XMM6)
  SSE_MULT_PS(XMM1, XMM4)
  SSE_ADD_PS(XMM1, XMM7)

  SSE_COPY_PS(XMM0, XMM5)
  SSE_MULT_PS(XMM0, XMM4)
  SSE_LOAD_PS(SSE_ARG_1, FLOAT_12, XMM7)
  SSE_SUB_PS(XMM7, XMM0)

  SSE_SHUFFLE(XMM4, XMM4, 0x4E)

  SSE_MULT_PS(XMM6, XMM4)
  SSE_SUB_PS(XMM1, XMM6)

  SSE_MULT_PS(XMM5, XMM4)
  SSE_ADD_PS(XMM5, XMM7)

  /* ----------------------------------------------- */
  /* determinant as a 4-wide dot product with horizontal sum, then 1/det from a
     rcpss estimate refined by one Newton-Raphson step */

  SSE_LOAD_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_MULT_PS(XMM3, XMM0)

  SSE_COPY_PS(XMM4, XMM3)
  SSE_SHUFFLE(XMM4, XMM3, 0x4E)
  SSE_ADD_PS(XMM4, XMM3)

  SSE_COPY_PS(XMM6, XMM4)
  SSE_SHUFFLE(XMM6, XMM4, 0xB1)
  SSE_ADD_SS(XMM6, XMM4)

  SSE_COPY_PS(XMM3, XMM6)
  SSE_RECIP_SS(XMM3, XMM6)
  SSE_COPY_SS(XMM4, XMM3)
  SSE_ADD_SS(XMM4, XMM3)
  SSE_MULT_SS(XMM3, XMM3)
  SSE_MULT_SS(XMM6, XMM3)
  SSE_SUB_SS(XMM4, XMM6)

  SSE_SHUFFLE(XMM4, XMM4, 0x00)

  /* XMM4 now holds 1/det in all four lanes; scale the cofactors and store the
     inverse in place */

  SSE_MULT_PS(XMM0, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_0, XMM0)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_2, XMM0)

  SSE_MULT_PS(XMM1, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_4, XMM1)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_6, XMM1)

  SSE_MULT_PS(XMM2, XMM4)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_8, XMM2)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_10, XMM2)

  SSE_MULT_PS(XMM4, XMM5)
  SSE_STOREL_PS(SSE_ARG_1, FLOAT_12, XMM4)
  SSE_STOREH_PS(SSE_ARG_1, FLOAT_14, XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;
  PetscFunctionReturn(PETSC_SUCCESS);
}

#endif
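/*
  Illustrative only (not part of PETSc): calling convention of the SSE variant
  above (available only when PETSC_HAVE_SSE is defined).  It inverts a
  single-precision 4x4 block in place via Cramer's rule and, unlike the scalar
  kernel, performs no zero-pivot or singularity check, so the block is assumed
  to be safely invertible.  The sample values are hypothetical, and the aligned
  load/store macros suggest the array should be 16-byte aligned (an assumption
  about the SSE_* macros, not verified here).
*/
#if 0
  float B[16] = {2.f, 0.f, 0.f, 0.f, 0.f, 4.f, 0.f, 0.f, 0.f, 0.f, 5.f, 0.f, 0.f, 0.f, 0.f, 10.f};

  PetscCall(PetscKernel_A_gets_inverse_A_4_SSE(B));
  /* B is now approximately diag(0.5f, 0.25f, 0.2f, 0.1f); 1/det comes from rcpss
     plus one Newton-Raphson step, so the result is accurate only to roughly
     single precision */
#endif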