#define PETSCMAT_DLL

/*
     Inverts 4 by 4 matrix using partial pivoting.

     Used by the sparse factorization routines in
     src/mat/impls/baij/seq

     This is a combination of the Linpack routines
     dgefa() and dgedi() specialized for a size of 4.
*/
#include "petsc.h"

#undef __FUNCT__
#define __FUNCT__ "Kernel_A_gets_inverse_A_4"
PetscErrorCode Kernel_A_gets_inverse_A_4(MatScalar *a,PetscReal shift)
{
  PetscInt  i__2,i__3,kp1,j,k,l,ll,i,ipvt[4],kb,k3;
  PetscInt  k4,j3;
  MatScalar *aa,*ax,*ay,work[16],stmp;
  MatReal   tmp,max;

  /* gaussian elimination with partial pivoting */

  PetscFunctionBegin;
  shift = .25*shift*(PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));
  /* Parameter adjustments */
  a -= 5;

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4*k;
    k4  = k3 + k;

    /* find l = pivot index */

    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll=1; ll<i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) { max = tmp; l = ll+1; }
    }
    l        += k - 1;
    ipvt[k-1] = l;

    if (a[l + k3] == 0.0) {
      if (shift == 0.0) {
        SETERRQ1(PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",k-1);
      } else {
        /* SHIFT is applied to SINGLE diagonal entry; does this make any sense? */
        a[l + k3] = shift;
      }
    }

    /* interchange if necessary */

    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */

    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll=0; ll<i__2; ll++) {
      aa[ll] *= stmp;
    }

    /* row elimination with column indexing */

    ax = &a[k4+1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4*j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1+k+j3];
      for (ll=0; ll<i__3; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }
  ipvt[3] = 4;
  if (a[20] == 0.0) {
    SETERRQ1(PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",3);
  }

  /*
     Now form the inverse
  */

  /* compute inverse(u) */

  for (k = 1; k <= 4; ++k) {
    k3    = 4*k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4*j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll=0; ll<k; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }

  /* form inverse(u)*inverse(l) */

  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4*k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i-1] = aa[i];
      aa[i]     = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp   = work[j-1];
      ax     = &a[4*j + 1];
      ay     = &a[k3 + 1];
      ay[0] += stmp*ax[0];
      ay[1] += stmp*ax[1];
      ay[2] += stmp*ax[2];
      ay[3] += stmp*ax[3];
    }
    l = ipvt[k-1];
    if (l != k) {
      ax   = &a[k3 + 1];
      ay   = &a[4*l + 1];
      stmp = ax[0]; ax[0] = ay[0]; ay[0] = stmp;
      stmp = ax[1]; ax[1] = ay[1]; ay[1] = stmp;
      stmp = ax[2]; ax[2] = ay[2]; ay[2] = stmp;
      stmp = ax[3]; ax[3] = ay[3]; ay[3] = stmp;
    }
  }
  PetscFunctionReturn(0);
}
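/*
   Usage sketch (illustrative only; the guard macro and function name below
   are hypothetical, not part of PETSc): invert one 4x4 block in place with
   the kernel above and form A*inv(A), which should come out as the
   identity.  A symmetric test matrix is used so the row-major versus
   column-major question does not matter.
*/
#if defined(KERNEL_A_INVERSE_4_USAGE_SKETCH)
static PetscErrorCode Kernel_A_gets_inverse_A_4_usage(void)
{
  MatScalar      a[16] = {4,1,0,0, 1,4,1,0, 0,1,4,1, 0,0,1,4}; /* symmetric, diagonally dominant */
  MatScalar      b[16],c;
  PetscInt       i,j,k;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  for (i=0; i<16; i++) b[i] = a[i];                      /* keep a copy of A */
  ierr = Kernel_A_gets_inverse_A_4(a,0.0);CHKERRQ(ierr); /* a now holds inv(A); shift 0 -> error on zero pivot */
  for (i=0; i<4; i++) {                                  /* c = (A*inv(A))(i,j), entry by entry */
    for (j=0; j<4; j++) {
      c = 0.0;
      for (k=0; k<4; k++) c += b[i+4*k]*a[k+4*j];
      /* expect c ~ 1 when i == j and c ~ 0 otherwise */
    }
  }
  PetscFunctionReturn(0);
}
#endif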
#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE

#undef __FUNCT__
#define __FUNCT__ "Kernel_A_gets_inverse_A_4_SSE"
PetscErrorCode Kernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
          Order Number: 245043-001
          March 1999
          http://www.intel.com

     Inverse of a 4x4 matrix via Cramer's Rule:
       bool Invert4x4(SMLXMatrix &);
  */
  PetscFunctionBegin;

  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */
  /* load the 16 floats and gather the stride-4 element groups into
     XMM3,XMM5,XMM4,XMM6 (a shuffled 4x4 transpose: shuffle mask 0x88
     picks the even lanes, 0xDD the odd lanes) */

    SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
    SSE_LOADH_PS(SSE_ARG_1,FLOAT_4,XMM0)

    SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
    SSE_LOADH_PS(SSE_ARG_1,FLOAT_12,XMM5)

    SSE_COPY_PS(XMM3,XMM0)
    SSE_SHUFFLE(XMM3,XMM5,0x88)

    SSE_SHUFFLE(XMM5,XMM0,0xDD)

    SSE_LOADL_PS(SSE_ARG_1,FLOAT_2,XMM0)
    SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM0)

    SSE_LOADL_PS(SSE_ARG_1,FLOAT_10,XMM6)
    SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM6)

    SSE_COPY_PS(XMM4,XMM0)
    SSE_SHUFFLE(XMM4,XMM6,0x88)

    SSE_SHUFFLE(XMM6,XMM0,0xDD)

  /* ----------------------------------------------- */
  /* the blocks below accumulate the cofactor (adjugate) rows from
     products of 2x2 sub-determinants; shuffle mask 0xB1 swaps lanes
     within each pair and 0x4E swaps the two halves, rotating operands
     so every product lands in its proper lane */

    SSE_COPY_PS(XMM7,XMM4)
    SSE_MULT_PS(XMM7,XMM6)

    SSE_SHUFFLE(XMM7,XMM7,0xB1)

    SSE_COPY_PS(XMM0,XMM5)
    SSE_MULT_PS(XMM0,XMM7)

    SSE_COPY_PS(XMM2,XMM3)
    SSE_MULT_PS(XMM2,XMM7)

    SSE_SHUFFLE(XMM7,XMM7,0x4E)

    SSE_COPY_PS(XMM1,XMM5)
    SSE_MULT_PS(XMM1,XMM7)
    SSE_SUB_PS(XMM1,XMM0)

    SSE_MULT_PS(XMM7,XMM3)
    SSE_SUB_PS(XMM7,XMM2)

    SSE_SHUFFLE(XMM7,XMM7,0x4E)
    SSE_STORE_PS(SSE_ARG_1,FLOAT_4,XMM7)

  /* ----------------------------------------------- */

    SSE_COPY_PS(XMM0,XMM5)
    SSE_MULT_PS(XMM0,XMM4)

    SSE_SHUFFLE(XMM0,XMM0,0xB1)

    SSE_COPY_PS(XMM2,XMM6)
    SSE_MULT_PS(XMM2,XMM0)
    SSE_ADD_PS(XMM2,XMM1)

    SSE_COPY_PS(XMM7,XMM3)
    SSE_MULT_PS(XMM7,XMM0)

    SSE_SHUFFLE(XMM0,XMM0,0x4E)

    SSE_COPY_PS(XMM1,XMM6)
    SSE_MULT_PS(XMM1,XMM0)
    SSE_SUB_PS(XMM2,XMM1)

    SSE_MULT_PS(XMM0,XMM3)
    SSE_SUB_PS(XMM0,XMM7)

    SSE_SHUFFLE(XMM0,XMM0,0x4E)
    SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM0)

  /* ----------------------------------------------- */

    SSE_COPY_PS(XMM7,XMM5)
    SSE_SHUFFLE(XMM7,XMM5,0x4E)
    SSE_MULT_PS(XMM7,XMM6)

    SSE_SHUFFLE(XMM7,XMM7,0xB1)

    SSE_SHUFFLE(XMM4,XMM4,0x4E)

    SSE_COPY_PS(XMM0,XMM4)
    SSE_MULT_PS(XMM0,XMM7)
    SSE_ADD_PS(XMM0,XMM2)

    SSE_COPY_PS(XMM2,XMM3)
    SSE_MULT_PS(XMM2,XMM7)

    SSE_SHUFFLE(XMM7,XMM7,0x4E)

    SSE_COPY_PS(XMM1,XMM4)
    SSE_MULT_PS(XMM1,XMM7)
    SSE_SUB_PS(XMM0,XMM1)
    SSE_STORE_PS(SSE_ARG_1,FLOAT_0,XMM0)

    SSE_MULT_PS(XMM7,XMM3)
    SSE_SUB_PS(XMM7,XMM2)

    SSE_SHUFFLE(XMM7,XMM7,0x4E)

  /* ----------------------------------------------- */

    SSE_COPY_PS(XMM1,XMM3)
    SSE_MULT_PS(XMM1,XMM5)

    SSE_SHUFFLE(XMM1,XMM1,0xB1)

    SSE_COPY_PS(XMM0,XMM6)
    SSE_MULT_PS(XMM0,XMM1)
    SSE_ADD_PS(XMM0,XMM7)

    SSE_COPY_PS(XMM2,XMM4)
    SSE_MULT_PS(XMM2,XMM1)
    SSE_SUB_PS_M(XMM2,SSE_ARG_1,FLOAT_12)

    SSE_SHUFFLE(XMM1,XMM1,0x4E)

    SSE_COPY_PS(XMM7,XMM6)
    SSE_MULT_PS(XMM7,XMM1)
    SSE_SUB_PS(XMM7,XMM0)

    SSE_MULT_PS(XMM1,XMM4)
    SSE_SUB_PS(XMM2,XMM1)
    SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM2)

  /* ----------------------------------------------- */

    SSE_COPY_PS(XMM1,XMM3)
    SSE_MULT_PS(XMM1,XMM6)

    SSE_SHUFFLE(XMM1,XMM1,0xB1)

    SSE_COPY_PS(XMM2,XMM4)
    SSE_MULT_PS(XMM2,XMM1)
    SSE_LOAD_PS(SSE_ARG_1,FLOAT_4,XMM0)
    SSE_SUB_PS(XMM0,XMM2)

    SSE_COPY_PS(XMM2,XMM5)
    SSE_MULT_PS(XMM2,XMM1)
    SSE_ADD_PS(XMM2,XMM7)

    SSE_SHUFFLE(XMM1,XMM1,0x4E)

    SSE_COPY_PS(XMM7,XMM4)
    SSE_MULT_PS(XMM7,XMM1)
    SSE_ADD_PS(XMM7,XMM0)

    SSE_MULT_PS(XMM1,XMM5)
    SSE_SUB_PS(XMM2,XMM1)

  /* ----------------------------------------------- */

    SSE_MULT_PS(XMM4,XMM3)

    SSE_SHUFFLE(XMM4,XMM4,0xB1)

    SSE_COPY_PS(XMM1,XMM6)
    SSE_MULT_PS(XMM1,XMM4)
    SSE_ADD_PS(XMM1,XMM7)

    SSE_COPY_PS(XMM0,XMM5)
    SSE_MULT_PS(XMM0,XMM4)
    SSE_LOAD_PS(SSE_ARG_1,FLOAT_12,XMM7)
    SSE_SUB_PS(XMM7,XMM0)

    SSE_SHUFFLE(XMM4,XMM4,0x4E)

    SSE_MULT_PS(XMM6,XMM4)
    SSE_SUB_PS(XMM1,XMM6)

    SSE_MULT_PS(XMM5,XMM4)
    SSE_ADD_PS(XMM5,XMM7)

  /* ----------------------------------------------- */
  /* determinant and scaling: dot a source row (XMM3) with the matching
     cofactor row to get det(A); approximate 1/det with rcpss, sharpen it
     with one Newton-Raphson step (x*(2 - det*x)), broadcast the result,
     then scale all four cofactor rows and store the inverse */

    SSE_LOAD_PS(SSE_ARG_1,FLOAT_0,XMM0)
    SSE_MULT_PS(XMM3,XMM0)

    SSE_COPY_PS(XMM4,XMM3)
    SSE_SHUFFLE(XMM4,XMM3,0x4E)
    SSE_ADD_PS(XMM4,XMM3)

    SSE_COPY_PS(XMM6,XMM4)
    SSE_SHUFFLE(XMM6,XMM4,0xB1)
    SSE_ADD_SS(XMM6,XMM4)

    SSE_COPY_PS(XMM3,XMM6)
    SSE_RECIP_SS(XMM3,XMM6)
    SSE_COPY_SS(XMM4,XMM3)
    SSE_ADD_SS(XMM4,XMM3)
    SSE_MULT_SS(XMM3,XMM3)
    SSE_MULT_SS(XMM6,XMM3)
    SSE_SUB_SS(XMM4,XMM6)

    SSE_SHUFFLE(XMM4,XMM4,0x00)

    SSE_MULT_PS(XMM0,XMM4)
    SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM0)
    SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM0)

    SSE_MULT_PS(XMM1,XMM4)
    SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM1)
    SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM1)

    SSE_MULT_PS(XMM2,XMM4)
    SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM2)
    SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM2)

    SSE_MULT_PS(XMM4,XMM5)
    SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
    SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;

  PetscFunctionReturn(0);
}

#endif
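/*
   Portable scalar companion to the SSE stream above (illustrative only;
   the guard macro and function name are hypothetical, not part of PETSc).
   It spells out the same Cramer's Rule computation: build the 2x2
   sub-determinants, expand them into the cofactor (adjugate) matrix, and
   scale by 1/det.  m is treated as a 4x4 block stored row-major; for a
   column-major block the same code returns the transpose of the inverse,
   since inv(A^T) = inv(A)^T.
*/
#if defined(KERNEL_INVERSE_4_SCALAR_REFERENCE)
static int Invert4x4_CramerReference(float *m)
{
  float inv[16],det,s0,s1,s2,s3,s4,s5,c0,c1,c2,c3,c4,c5;
  int   i;

  /* 2x2 sub-determinants from rows 0-1 (s*) and rows 2-3 (c*) */
  s0 = m[0]*m[5]   - m[4]*m[1];
  s1 = m[0]*m[6]   - m[4]*m[2];
  s2 = m[0]*m[7]   - m[4]*m[3];
  s3 = m[1]*m[6]   - m[5]*m[2];
  s4 = m[1]*m[7]   - m[5]*m[3];
  s5 = m[2]*m[7]   - m[6]*m[3];
  c5 = m[10]*m[15] - m[14]*m[11];
  c4 = m[9]*m[15]  - m[13]*m[11];
  c3 = m[9]*m[14]  - m[13]*m[10];
  c2 = m[8]*m[15]  - m[12]*m[11];
  c1 = m[8]*m[14]  - m[12]*m[10];
  c0 = m[8]*m[13]  - m[12]*m[9];

  /* determinant as a Laplace expansion in the sub-determinants */
  det = s0*c5 - s1*c4 + s2*c3 + s3*c2 - s4*c1 + s5*c0;
  if (det == 0.0f) return 1; /* singular: no inverse */
  det = 1.0f/det;            /* the SSE version uses rcpss plus one Newton step here */

  /* adjugate entries, each a 3x3 cofactor written with the sub-determinants */
  inv[0]  = ( m[5]*c5  - m[6]*c4  + m[7]*c3 )*det;
  inv[1]  = (-m[1]*c5  + m[2]*c4  - m[3]*c3 )*det;
  inv[2]  = ( m[13]*s5 - m[14]*s4 + m[15]*s3)*det;
  inv[3]  = (-m[9]*s5  + m[10]*s4 - m[11]*s3)*det;
  inv[4]  = (-m[4]*c5  + m[6]*c2  - m[7]*c1 )*det;
  inv[5]  = ( m[0]*c5  - m[2]*c2  + m[3]*c1 )*det;
  inv[6]  = (-m[12]*s5 + m[14]*s2 - m[15]*s1)*det;
  inv[7]  = ( m[8]*s5  - m[10]*s2 + m[11]*s1)*det;
  inv[8]  = ( m[4]*c4  - m[5]*c2  + m[7]*c0 )*det;
  inv[9]  = (-m[0]*c4  + m[1]*c2  - m[3]*c0 )*det;
  inv[10] = ( m[12]*s4 - m[13]*s2 + m[15]*s0)*det;
  inv[11] = (-m[8]*s4  + m[9]*s2  - m[11]*s0)*det;
  inv[12] = (-m[4]*c3  + m[5]*c1  - m[6]*c0 )*det;
  inv[13] = ( m[0]*c3  - m[1]*c1  + m[2]*c0 )*det;
  inv[14] = (-m[12]*s3 + m[13]*s1 - m[14]*s0)*det;
  inv[15] = ( m[8]*s3  - m[9]*s1  + m[10]*s0)*det;

  for (i=0; i<16; i++) m[i] = inv[i]; /* overwrite the input in place, as the kernels above do */
  return 0;
}
#endif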