/*
   Support for the parallel dense matrix vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>

PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat) {
  Mat_MPIDense *mdn = (Mat_MPIDense *)mat->data;

  PetscFunctionBegin;
  if (!mdn->Mvctx) {
    /* Create local vector that is used to scatter into */
    PetscCall(VecDestroy(&mdn->lvec));
    if (mdn->A) {
      PetscCall(MatCreateVecs(mdn->A, &mdn->lvec, NULL));
      PetscCall(PetscLogObjectParent((PetscObject)mat, (PetscObject)mdn->lvec));
    }
    PetscCall(PetscLayoutSetUp(mat->cmap));
    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)mat), &mdn->Mvctx));
    PetscCall(PetscSFSetGraphWithPattern(mdn->Mvctx, mat->cmap, PETSCSF_PATTERN_ALLGATHER));
    PetscCall(PetscLogObjectParent((PetscObject)mat, (PetscObject)mdn->Mvctx));
  }
  PetscFunctionReturn(0);
}
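
/*
  Editorial sketch (not part of the original file): how the objects created in
  MatSetUpMultiply_MPIDense() are typically consumed by the parallel
  matrix-vector product.  The PetscSF built with PETSCSF_PATTERN_ALLGATHER
  gathers the whole distributed input vector xx into the sequential work
  vector mdn->lvec on every rank; the owned part of the product then reduces
  to a purely local dense multiply with mdn->A.  The helper name
  SketchGatherInputVector_MPIDense is an illustrative assumption, not the
  actual MatMult_MPIDense() implementation.
*/
PETSC_UNUSED static PetscErrorCode SketchGatherInputVector_MPIDense(Mat mat, Vec xx) {
  Mat_MPIDense      *mdn = (Mat_MPIDense *)mat->data;
  const PetscScalar *x;  /* root data: locally owned entries of xx */
  PetscScalar       *lx; /* leaf data: all N entries, replicated in lvec */

  PetscFunctionBegin;
  PetscCall(VecGetArrayRead(xx, &x));
  PetscCall(VecGetArray(mdn->lvec, &lx));
  /* all-gather the distributed input vector into the local work vector */
  PetscCall(PetscSFBcastBegin(mdn->Mvctx, MPIU_SCALAR, x, lx, MPI_REPLACE));
  PetscCall(PetscSFBcastEnd(mdn->Mvctx, MPIU_SCALAR, x, lx, MPI_REPLACE));
  PetscCall(VecRestoreArray(mdn->lvec, &lx));
  PetscCall(VecRestoreArrayRead(xx, &x));
  /* every rank now holds the full input vector in mdn->lvec; the owned rows of
     the product follow from a local dense multiply against mdn->A */
  PetscFunctionReturn(0);
}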

static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat, PetscInt, const IS[], const IS[], MatReuse, Mat *);

PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submat[]) {
  PetscInt nmax, nstages_local, nstages, i, pos, max_no;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) PetscCall(PetscCalloc1(ismax + 1, submat));
  /* Determine the number of stages in which the submatrices are extracted: at most nmax
     index sets per stage, chosen so that roughly nmax * N * sizeof(PetscInt) ~ 20 MB of
     index workspace is needed per stage (for example, with N = 1e6 columns and 8-byte
     PetscInt this gives nmax = 2) */
  nmax = 20 * 1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax / nmax + ((ismax % nmax) ? 1 : 0);

  /* Make sure every processor loops through the nstages */
  PetscCall(MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C)));

  for (i = 0, pos = 0; i < nstages; i++) {
    if (pos + nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else max_no = ismax - pos;
    PetscCall(MatCreateSubMatrices_MPIDense_Local(C, max_no, isrow + pos, iscol + pos, scall, *submat + pos));
    pos += max_no;
  }
  PetscFunctionReturn(0);
}
/* -------------------------------------------------------------------------*/
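
/*
  Message layout used by MatCreateSubMatrices_MPIDense_Local() below (editorial
  summary of the code that follows):

    Request sent to process p (MPIU_INT):
      buf[0]                 number of index sets that need rows owned by p
      buf[2*k-1], buf[2*k]   for the k-th such set: the IS number and the number
                             of requested rows (k = 1, ..., buf[0])
      remaining entries      the requested global row indices, grouped by IS

    Reply from process p (MPIU_SCALAR):
      N values per requested row (the complete row of C), in the same order as
      the rows appear in the request
*/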

PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submats) {
  Mat_MPIDense    *c = (Mat_MPIDense *)C->data;
  Mat              A = c->A;
  Mat_SeqDense    *a = (Mat_SeqDense *)A->data, *mat;
  PetscMPIInt      rank, size, tag0, tag1, idex, end, i;
  PetscInt         N = C->cmap->N, rstart = C->rmap->rstart, count;
  const PetscInt **irow, **icol, *irow_i;
  PetscInt        *nrow, *ncol, *w1, *w3, *w4, *rtable, start;
  PetscInt       **sbuf1, m, j, k, l, ct1, **rbuf1, row, proc;
  PetscInt         nrqs, msz, **ptr, *ctr, *pa, *tmp, bsz, nrqr;
  PetscInt         is_no, jmax, **rmap, *rmap_i;
  PetscInt         ctr_j, *sbuf1_j, *rbuf1_i;
  MPI_Request     *s_waits1, *r_waits1, *s_waits2, *r_waits2;
  MPI_Status      *r_status1, *r_status2, *s_status1, *s_status2;
  MPI_Comm         comm;
  PetscScalar    **rbuf2, **sbuf2;
  PetscBool        sorted;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  tag0 = ((PetscObject)C)->tag;
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  m = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag1));

  /* Check that the row and col indices are sorted */
  for (i = 0; i < ismax; i++) {
    PetscCall(ISSorted(isrow[i], &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "ISrow is not sorted");
    PetscCall(ISSorted(iscol[i], &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "IScol is not sorted");
  }

  PetscCall(PetscMalloc5(ismax, (PetscInt ***)&irow, ismax, (PetscInt ***)&icol, ismax, &nrow, ismax, &ncol, m, &rtable));
  for (i = 0; i < ismax; i++) {
    PetscCall(ISGetIndices(isrow[i], &irow[i]));
    PetscCall(ISGetIndices(iscol[i], &icol[i]));
    PetscCall(ISGetLocalSize(isrow[i], &nrow[i]));
    PetscCall(ISGetLocalSize(iscol[i], &ncol[i]));
  }

  /* Create lookup table for the mapping: row -> proc */
  for (i = 0, j = 0; i < size; i++) {
    jmax = C->rmap->range[i + 1];
    for (; j < jmax; j++) rtable[j] = i;
  }

  /* Evaluate communication - message to whom, length of message, and buffer space
     required. Based on this, buffers are allocated, and data copied into them */
  PetscCall(PetscMalloc3(2 * size, &w1, size, &w3, size, &w4));
  PetscCall(PetscArrayzero(w1, size * 2)); /* initialize work vector */
  PetscCall(PetscArrayzero(w3, size));     /* initialize work vector */
  for (i = 0; i < ismax; i++) {
    PetscCall(PetscArrayzero(w4, size)); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j = 0; j < jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j = 0; j < size; j++) {
      if (w4[j]) {
        w1[2 * j] += w4[j];
        w3[j]++;
      }
    }
  }

  nrqs         = 0; /* number of outgoing messages */
  msz          = 0; /* total message length (for all procs) */
  w1[2 * rank] = 0; /* no message sent to self */
  w3[rank]     = 0;
  for (i = 0; i < size; i++) {
    if (w1[2 * i]) { /* there exists a message to proc i */
      w1[2 * i + 1] = 1;
      nrqs++;
    }
  }
  PetscCall(PetscMalloc1(nrqs + 1, &pa)); /* proc array */
  for (i = 0, j = 0; i < size; i++) {
    if (w1[2 * i]) {
      pa[j] = i;
      j++;
    }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i = 0; i < nrqs; i++) {
    j = pa[i];
    w1[2 * j] += w1[2 * j + 1] + 2 * w3[j];
    msz += w1[2 * j];
  }
  /* Do a global reduction to determine how many messages to expect */
  PetscCall(PetscMaxSum(comm, w1, &bsz, &nrqr));

  /* Allocate memory for recv buffers. Make sure rbuf1[0] exists by adding 1 to the buffer length */
  PetscCall(PetscMalloc1(nrqr + 1, &rbuf1));
  PetscCall(PetscMalloc1(nrqr * bsz, &rbuf1[0]));
  for (i = 1; i < nrqr; ++i) rbuf1[i] = rbuf1[i - 1] + bsz;

  /* Post the receives */
  PetscCall(PetscMalloc1(nrqr + 1, &r_waits1));
  for (i = 0; i < nrqr; ++i) PetscCallMPI(MPI_Irecv(rbuf1[i], bsz, MPIU_INT, MPI_ANY_SOURCE, tag0, comm, r_waits1 + i));

  /* Allocate memory for outgoing messages */
  PetscCall(PetscMalloc4(size, &sbuf1, size, &ptr, 2 * msz, &tmp, size, &ctr));
  PetscCall(PetscArrayzero(sbuf1, size));
  PetscCall(PetscArrayzero(ptr, size));
  {
    PetscInt *iptr = tmp, ict = 0;
    for (i = 0; i < nrqs; i++) {
      j = pa[i];
      iptr += ict;
      sbuf1[j] = iptr;
      ict      = w1[2 * j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i = 0; i < nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    PetscCall(PetscArrayzero(sbuf1[j] + 1, 2 * w3[j]));
    ptr[j] = sbuf1[j] + 2 * w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i = 0; i < ismax; i++) {
    PetscCall(PetscArrayzero(ctr, size));
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j = 0; j < jmax; j++) { /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buffer */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j = 0; j < size; j++) { /* Can optimise this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j            = sbuf1[j];
        k                  = ++sbuf1_j[0];
        sbuf1_j[2 * k]     = ctr_j;
        sbuf1_j[2 * k - 1] = i;
      }
    }
  }

  /* Now post the sends */
  PetscCall(PetscMalloc1(nrqs + 1, &s_waits1));
  for (i = 0; i < nrqs; ++i) {
    j = pa[i];
    PetscCallMPI(MPI_Isend(sbuf1[j], w1[2 * j], MPIU_INT, j, tag0, comm, s_waits1 + i));
  }

  /* Post receives to capture the row_data from other procs */
  PetscCall(PetscMalloc1(nrqs + 1, &r_waits2));
  PetscCall(PetscMalloc1(nrqs + 1, &rbuf2));
  for (i = 0; i < nrqs; i++) {
    j     = pa[i];
    count = (w1[2 * j] - (2 * sbuf1[j][0] + 1)) * N;
    PetscCall(PetscMalloc1(count + 1, &rbuf2[i]));
    PetscCallMPI(MPI_Irecv(rbuf2[i], count, MPIU_SCALAR, j, tag1, comm, r_waits2 + i));
  }

  /* Receive the messages (row numbers), then pack and send off the row values
     to the requesting processors */

  PetscCall(PetscMalloc1(nrqr + 1, &s_waits2));
  PetscCall(PetscMalloc1(nrqr + 1, &r_status1));
  PetscCall(PetscMalloc1(nrqr + 1, &sbuf2));

  {
    PetscScalar *sbuf2_i, *v_start;
    PetscInt     s_proc;
    for (i = 0; i < nrqr; ++i) {
      PetscCallMPI(MPI_Waitany(nrqr, r_waits1, &idex, r_status1 + i));
      s_proc  = r_status1[i].MPI_SOURCE; /* sending processor */
      rbuf1_i = rbuf1[idex];             /* actual message from s_proc */
      /* number of requested rows = end - start: start skips the header
         (1 + 2*rbuf1_i[0] entries) and end is the total length of the received message */
      start = 2 * rbuf1_i[0] + 1;
      PetscCallMPI(MPI_Get_count(r_status1 + i, MPIU_INT, &end));
      /* allocate memory sufficient to hold all the row values */
      PetscCall(PetscMalloc1((end - start) * N, &sbuf2[idex]));
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
      for (j = start; j < end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k = 0; k < N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start += a->lda;
        }
      }
      /* Now send off the data */
      PetscCallMPI(MPI_Isend(sbuf2[idex], (end - start) * N, MPIU_SCALAR, s_proc, tag1, comm, s_waits2 + i));
    }
  }
  /* End Send-Recv of IS + row_numbers */
  PetscCall(PetscFree(r_status1));
  PetscCall(PetscFree(r_waits1));
  PetscCall(PetscMalloc1(nrqs + 1, &s_status1));
  if (nrqs) PetscCallMPI(MPI_Waitall(nrqs, s_waits1, s_status1));
  PetscCall(PetscFree(s_status1));
  PetscCall(PetscFree(s_waits1));

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i = 0; i < ismax; i++) {
      mat = (Mat_SeqDense *)(submats[i]->data);
      PetscCheck(!(submats[i]->rmap->n != nrow[i]) && !(submats[i]->cmap->n != ncol[i]), PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Cannot reuse matrix. wrong size");
      PetscCall(PetscArrayzero(mat->v, submats[i]->rmap->n * submats[i]->cmap->n));

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (i = 0; i < ismax; i++) {
      PetscCall(MatCreate(PETSC_COMM_SELF, submats + i));
      PetscCall(MatSetSizes(submats[i], nrow[i], ncol[i], nrow[i], ncol[i]));
      PetscCall(MatSetType(submats[i], ((PetscObject)A)->type_name));
      PetscCall(MatSeqDenseSetPreallocation(submats[i], NULL));
    }
  }

  /* Assemble the matrices from locally owned rows */
  {
    PetscInt     col;
    PetscScalar *imat_v, *mat_v, *imat_vi, *mat_vi;

    for (i = 0; i < ismax; i++) {
      mat    = (Mat_SeqDense *)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j = 0; j < m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k = 0; k < ncol[i]; k++) {
            col            = icol[i][k];
            imat_vi[k * m] = mat_vi[col * a->lda];
          }
        }
      }
    }
  }

  /* Create row map: this maps c->row to submat->row for each submat */
  /* this is a very expensive operation wrt memory usage */
  PetscCall(PetscMalloc1(ismax, &rmap));
  PetscCall(PetscCalloc1(ismax * C->rmap->N, &rmap[0]));
  for (i = 1; i < ismax; i++) rmap[i] = rmap[i - 1] + C->rmap->N;
  for (i = 0; i < ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j = 0; j < jmax; j++) rmap_i[irow_i[j]] = j;
  }

  /* Now receive the row_values and assemble the rest of the matrix */
  PetscCall(PetscMalloc1(nrqs + 1, &r_status2));
  {
    PetscInt     is_max, tmp1, col, *sbuf1_i, is_sz;
    PetscScalar *rbuf2_i, *imat_v, *imat_vi;

    for (tmp1 = 0; tmp1 < nrqs; tmp1++) { /* For each message */
      PetscCallMPI(MPI_Waitany(nrqs, r_waits2, &i, r_status2 + tmp1));
      /* Now dig out the corresponding sbuf1, which contains the IS data_structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2 * is_max + 1;
      rbuf2_i = rbuf2[i];
      for (j = 1; j <= is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2 * j - 1];
        is_sz  = sbuf1_i[2 * j];
        mat    = (Mat_SeqDense *)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k = 0; k < is_sz; k++, rbuf2_i += N) { /* For each row */
          row = sbuf1_i[ct1];
          ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l = 0; l < ncol[is_no]; l++) { /* For each col */
            col            = icol[is_no][l];
            imat_vi[l * m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  PetscCall(PetscFree(r_status2));
  PetscCall(PetscFree(r_waits2));
  PetscCall(PetscMalloc1(nrqr + 1, &s_status2));
  if (nrqr) PetscCallMPI(MPI_Waitall(nrqr, s_waits2, s_status2));
  PetscCall(PetscFree(s_status2));
  PetscCall(PetscFree(s_waits2));

  /* Restore the indices */
  for (i = 0; i < ismax; i++) {
    PetscCall(ISRestoreIndices(isrow[i], irow + i));
    PetscCall(ISRestoreIndices(iscol[i], icol + i));
  }

  PetscCall(PetscFree5(*(PetscInt ***)&irow, *(PetscInt ***)&icol, nrow, ncol, rtable));
  PetscCall(PetscFree3(w1, w3, w4));
  PetscCall(PetscFree(pa));

  for (i = 0; i < nrqs; ++i) PetscCall(PetscFree(rbuf2[i]));
  PetscCall(PetscFree(rbuf2));
  PetscCall(PetscFree4(sbuf1, ptr, tmp, ctr));
  PetscCall(PetscFree(rbuf1[0]));
  PetscCall(PetscFree(rbuf1));

  for (i = 0; i < nrqr; ++i) PetscCall(PetscFree(sbuf2[i]));

  PetscCall(PetscFree(sbuf2));
  PetscCall(PetscFree(rmap[0]));
  PetscCall(PetscFree(rmap));

  for (i = 0; i < ismax; i++) {
    PetscCall(MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY));
  }
  PetscFunctionReturn(0);
}

PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat inA, PetscScalar alpha) {
  Mat_MPIDense *A = (Mat_MPIDense *)inA->data;

  PetscFunctionBegin;
  PetscCall(MatScale(A->A, alpha));
  PetscFunctionReturn(0);
}
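
/*
  Editorial usage sketch (not part of PETSc): extracting a sequential dense
  submatrix from an MPIDENSE matrix C through the public MatCreateSubMatrices()
  interface, which dispatches to MatCreateSubMatrices_MPIDense() above.  Both
  the row and column index sets must be sorted, as checked in
  MatCreateSubMatrices_MPIDense_Local().  The helper name and the stride-IS
  choice are illustrative assumptions.
*/
PETSC_UNUSED static PetscErrorCode SketchExtractPrincipalBlock(Mat C, PetscInt nb, Mat **block) {
  IS is;

  PetscFunctionBegin;
  /* first nb rows and columns, in increasing (hence sorted) order */
  PetscCall(ISCreateStride(PETSC_COMM_SELF, nb, 0, 1, &is));
  /* collective call: every rank requests the same sequential submatrix here */
  PetscCall(MatCreateSubMatrices(C, 1, &is, &is, MAT_INITIAL_MATRIX, block));
  PetscCall(ISDestroy(&is));
  /* the caller frees the result with MatDestroySubMatrices(1, block) when done */
  PetscFunctionReturn(0);
}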