#define PETSCMAT_DLL

/*
   Inverts a 4 by 4 matrix using partial pivoting.

   Used by the sparse factorization routines in
   src/mat/impls/baij/seq

   This is a combination of the Linpack routines
   dgefa() and dgedi() specialized for a size of 4.
*/
#include "petscsys.h"

#undef __FUNCT__
#define __FUNCT__ "Kernel_A_gets_inverse_A_4"
PetscErrorCode Kernel_A_gets_inverse_A_4(MatScalar *a,PetscReal shift)
{
  PetscInt  i__2,i__3,kp1,j,k,l,ll,i,ipvt[4],kb,k3;
  PetscInt  k4,j3;
  MatScalar *aa,*ax,*ay,work[16],stmp;
  MatReal   tmp,max;

  /* gaussian elimination with partial pivoting */

  PetscFunctionBegin;
  shift = .25*shift*(1.e-12 + PetscAbsScalar(a[0]) + PetscAbsScalar(a[5]) + PetscAbsScalar(a[10]) + PetscAbsScalar(a[15]));
  /* Parameter adjustments */
  a -= 5;

  for (k = 1; k <= 3; ++k) {
    kp1 = k + 1;
    k3  = 4*k;
    k4  = k3 + k;

    /* find l = pivot index */
    i__2 = 5 - k;
    aa   = &a[k4];
    max  = PetscAbsScalar(aa[0]);
    l    = 1;
    for (ll=1; ll<i__2; ll++) {
      tmp = PetscAbsScalar(aa[ll]);
      if (tmp > max) { max = tmp; l = ll+1; }
    }
    l        += k - 1;
    ipvt[k-1] = l;

    if (a[l + k3] == 0.0) {
      if (shift == 0.0) {
        SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",k-1);
      } else {
        /* SHIFT is applied to SINGLE diagonal entry; does this make any sense? */
        a[l + k3] = shift;
      }
    }

    /* interchange if necessary */
    if (l != k) {
      stmp      = a[l + k3];
      a[l + k3] = a[k4];
      a[k4]     = stmp;
    }

    /* compute multipliers */
    stmp = -1. / a[k4];
    i__2 = 4 - k;
    aa   = &a[1 + k4];
    for (ll=0; ll<i__2; ll++) {
      aa[ll] *= stmp;
    }

    /* row elimination with column indexing */
    ax = &a[k4+1];
    for (j = kp1; j <= 4; ++j) {
      j3   = 4*j;
      stmp = a[l + j3];
      if (l != k) {
        a[l + j3] = a[k + j3];
        a[k + j3] = stmp;
      }

      i__3 = 4 - k;
      ay   = &a[1+k+j3];
      for (ll=0; ll<i__3; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }
  ipvt[3] = 4;
  if (a[20] == 0.0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_MAT_LU_ZRPVT,"Zero pivot, row %D",3);

  /*
     Now form the inverse
  */

  /* compute inverse(u) */
  for (k = 1; k <= 4; ++k) {
    k3    = 4*k;
    k4    = k3 + k;
    a[k4] = 1.0 / a[k4];
    stmp  = -a[k4];
    i__2  = k - 1;
    aa    = &a[k3 + 1];
    for (ll=0; ll<i__2; ll++) aa[ll] *= stmp;
    kp1 = k + 1;
    if (4 < kp1) continue;
    ax = aa;
    for (j = kp1; j <= 4; ++j) {
      j3        = 4*j;
      stmp      = a[k + j3];
      a[k + j3] = 0.0;
      ay        = &a[j3 + 1];
      for (ll=0; ll<k; ll++) {
        ay[ll] += stmp*ax[ll];
      }
    }
  }

  /* form inverse(u)*inverse(l) */
  for (kb = 1; kb <= 3; ++kb) {
    k   = 4 - kb;
    k3  = 4*k;
    kp1 = k + 1;
    aa  = a + k3;
    for (i = kp1; i <= 4; ++i) {
      work[i-1] = aa[i];
      aa[i]     = 0.0;
    }
    for (j = kp1; j <= 4; ++j) {
      stmp   = work[j-1];
      ax     = &a[4*j + 1];
      ay     = &a[k3 + 1];
      ay[0] += stmp*ax[0];
      ay[1] += stmp*ax[1];
      ay[2] += stmp*ax[2];
      ay[3] += stmp*ax[3];
    }
    l = ipvt[k-1];
    if (l != k) {
      ax = &a[k3 + 1];
      ay = &a[4*l + 1];
      stmp = ax[0]; ax[0] = ay[0]; ay[0] = stmp;
      stmp = ax[1]; ax[1] = ay[1]; ay[1] = stmp;
      stmp = ax[2]; ax[2] = ay[2]; ay[2] = stmp;
      stmp = ax[3]; ax[3] = ay[3]; ay[3] = stmp;
    }
  }
  PetscFunctionReturn(0);
}
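/*
   Usage sketch (hypothetical caller, not part of this file).  The block is
   passed as 16 contiguous MatScalar entries in column-major order (the PETSc
   BAIJ block convention); with shift == 0.0 an exactly zero pivot raises an
   error, while a positive shift substitutes a small scaled value instead:

     MatScalar      blk[16];        fill with a nonsingular 4x4 block
     PetscErrorCode ierr;

     ierr = Kernel_A_gets_inverse_A_4(blk,0.0);CHKERRQ(ierr);
     blk now holds inverse(blk), overwriting the input
*/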
#if defined(PETSC_HAVE_SSE)
#include PETSC_HAVE_SSE

#undef __FUNCT__
#define __FUNCT__ "Kernel_A_gets_inverse_A_4_SSE"
PetscErrorCode Kernel_A_gets_inverse_A_4_SSE(float *a)
{
  /*
     This routine is converted from Intel's Small Matrix Library.
     See: Streaming SIMD Extensions -- Inverse of 4x4 Matrix
          Order Number: 245043-001
          March 1999
          http://www.intel.com

     Inverse of a 4x4 matrix via Cramer's Rule:
       bool Invert4x4(SMLXMatrix &);
  */
  PetscFunctionBegin;

  SSE_SCOPE_BEGIN;
  SSE_INLINE_BEGIN_1(a)

  /* ----------------------------------------------- */
  /* gather the matrix into four transposed registers
     XMM3, XMM5, XMM4, XMM6 (two of them in the rotated
     element order the cofactor computation expects) */

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_4,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_8,XMM5)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_12,XMM5)

  SSE_COPY_PS(XMM3,XMM0)
  SSE_SHUFFLE(XMM3,XMM5,0x88)

  SSE_SHUFFLE(XMM5,XMM0,0xDD)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_2,XMM0)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_6,XMM0)

  SSE_LOADL_PS(SSE_ARG_1,FLOAT_10,XMM6)
  SSE_LOADH_PS(SSE_ARG_1,FLOAT_14,XMM6)

  SSE_COPY_PS(XMM4,XMM0)
  SSE_SHUFFLE(XMM4,XMM6,0x88)

  SSE_SHUFFLE(XMM6,XMM0,0xDD)

  /* ----------------------------------------------- */
  /* compute, four at a time, the cofactors that form
     the rows of the adjugate */

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM5)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM1,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_4,XMM7)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)

  SSE_SHUFFLE(XMM0,XMM0,0xB1)

  SSE_COPY_PS(XMM2,XMM6)
  SSE_MULT_PS(XMM2,XMM0)
  SSE_ADD_PS(XMM2,XMM1)

  SSE_COPY_PS(XMM7,XMM3)
  SSE_MULT_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM0)
  SSE_SUB_PS(XMM2,XMM1)

  SSE_MULT_PS(XMM0,XMM3)
  SSE_SUB_PS(XMM0,XMM7)

  SSE_SHUFFLE(XMM0,XMM0,0x4E)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM0)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM7,XMM5)
  SSE_SHUFFLE(XMM7,XMM5,0x4E)
  SSE_MULT_PS(XMM7,XMM6)

  SSE_SHUFFLE(XMM7,XMM7,0xB1)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_COPY_PS(XMM0,XMM4)
  SSE_MULT_PS(XMM0,XMM7)
  SSE_ADD_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM3)
  SSE_MULT_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  SSE_COPY_PS(XMM1,XMM4)
  SSE_MULT_PS(XMM1,XMM7)
  SSE_SUB_PS(XMM0,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_0,XMM0)

  SSE_MULT_PS(XMM7,XMM3)
  SSE_SUB_PS(XMM7,XMM2)

  SSE_SHUFFLE(XMM7,XMM7,0x4E)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM5)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM0,XMM6)
  SSE_MULT_PS(XMM0,XMM1)
  SSE_ADD_PS(XMM0,XMM7)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_SUB_PS_M(XMM2,SSE_ARG_1,FLOAT_12)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM6)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_SUB_PS(XMM2,XMM1)
  SSE_STORE_PS(SSE_ARG_1,FLOAT_12,XMM2)

  /* ----------------------------------------------- */

  SSE_COPY_PS(XMM1,XMM3)
  SSE_MULT_PS(XMM1,XMM6)

  SSE_SHUFFLE(XMM1,XMM1,0xB1)

  SSE_COPY_PS(XMM2,XMM4)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_4,XMM0)
  SSE_SUB_PS(XMM0,XMM2)

  SSE_COPY_PS(XMM2,XMM5)
  SSE_MULT_PS(XMM2,XMM1)
  SSE_ADD_PS(XMM2,XMM7)

  SSE_SHUFFLE(XMM1,XMM1,0x4E)

  SSE_COPY_PS(XMM7,XMM4)
  SSE_MULT_PS(XMM7,XMM1)
  SSE_ADD_PS(XMM7,XMM0)

  SSE_MULT_PS(XMM1,XMM5)
  SSE_SUB_PS(XMM2,XMM1)

  /* ----------------------------------------------- */

  SSE_MULT_PS(XMM4,XMM3)

  SSE_SHUFFLE(XMM4,XMM4,0xB1)

  SSE_COPY_PS(XMM1,XMM6)
  SSE_MULT_PS(XMM1,XMM4)
  SSE_ADD_PS(XMM1,XMM7)

  SSE_COPY_PS(XMM0,XMM5)
  SSE_MULT_PS(XMM0,XMM4)
  SSE_LOAD_PS(SSE_ARG_1,FLOAT_12,XMM7)
  SSE_SUB_PS(XMM7,XMM0)

  SSE_SHUFFLE(XMM4,XMM4,0x4E)

  SSE_MULT_PS(XMM6,XMM4)
  SSE_SUB_PS(XMM1,XMM6)

  SSE_MULT_PS(XMM5,XMM4)
  SSE_ADD_PS(XMM5,XMM7)

  /* ----------------------------------------------- */
  /* determinant: dot product of the first gathered row
     with its cofactors, then 1/det via rcpss refined by
     one Newton-Raphson iteration, x1 = 2*x0 - det*x0*x0 */

  SSE_LOAD_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_MULT_PS(XMM3,XMM0)

  SSE_COPY_PS(XMM4,XMM3)
  SSE_SHUFFLE(XMM4,XMM3,0x4E)
  SSE_ADD_PS(XMM4,XMM3)

  SSE_COPY_PS(XMM6,XMM4)
  SSE_SHUFFLE(XMM6,XMM4,0xB1)
  SSE_ADD_SS(XMM6,XMM4)

  SSE_COPY_PS(XMM3,XMM6)
  SSE_RECIP_SS(XMM3,XMM6)
  SSE_COPY_SS(XMM4,XMM3)
  SSE_ADD_SS(XMM4,XMM3)
  SSE_MULT_SS(XMM3,XMM3)
  SSE_MULT_SS(XMM6,XMM3)
  SSE_SUB_SS(XMM4,XMM6)

  SSE_SHUFFLE(XMM4,XMM4,0x00)

  /* scale the four cofactor rows by 1/det and store the inverse */

  SSE_MULT_PS(XMM0,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_0,XMM0)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_2,XMM0)

  SSE_MULT_PS(XMM1,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_4,XMM1)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_6,XMM1)

  SSE_MULT_PS(XMM2,XMM4)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_8,XMM2)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_10,XMM2)

  SSE_MULT_PS(XMM4,XMM5)
  SSE_STOREL_PS(SSE_ARG_1,FLOAT_12,XMM4)
  SSE_STOREH_PS(SSE_ARG_1,FLOAT_14,XMM4)

  /* ----------------------------------------------- */

  SSE_INLINE_END_1;
  SSE_SCOPE_END;

  PetscFunctionReturn(0);
}

#endif
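/*
   A minimal self-check sketch (hypothetical: the guard macro
   KERNEL_INV4_SELFTEST and the GCC alignment attribute are assumptions, not
   part of PETSc).  It inverts a diagonal matrix with the SSE kernel and
   verifies that A*inv(A) is the identity to single precision.  SSE_LOAD_PS
   above uses aligned loads, so the array handed to the kernel must be
   16-byte aligned.
*/
#if defined(PETSC_HAVE_SSE) && defined(KERNEL_INV4_SELFTEST)
static int TestKernelAInverse4SSE(void)
{
  float a[16] __attribute__((aligned(16))) = {4,0,0,0, 0,3,0,0, 0,0,2,0, 0,0,0,1};
  float b[16] __attribute__((aligned(16)));
  float c,d;
  int   i,j,k;

  for (i=0; i<16; i++) b[i] = a[i];
  Kernel_A_gets_inverse_A_4_SSE(b);           /* b = inverse(a), in place */
  for (j=0; j<4; j++) {                       /* check a*b == I, column-major */
    for (i=0; i<4; i++) {
      c = 0.0f;
      for (k=0; k<4; k++) c += a[i+4*k]*b[k+4*j];
      d = c - ((i == j) ? 1.0f : 0.0f);
      if (d < 0.0f) d = -d;
      if (d > 1.e-5f) return 1;               /* failure */
    }
  }
  return 0;                                   /* success */
}
#endif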