/*
   Support for the parallel dense matrix vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>

PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense *mdn = (Mat_MPIDense *)mat->data;

  PetscFunctionBegin;
  if (!mdn->Mvctx) {
    /* Create local vector that is used to scatter into */
    PetscCall(VecDestroy(&mdn->lvec));
    if (mdn->A) { PetscCall(MatCreateVecs(mdn->A, &mdn->lvec, NULL)); }
    PetscCall(PetscLayoutSetUp(mat->cmap));
    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)mat), &mdn->Mvctx));
    PetscCall(PetscSFSetGraphWithPattern(mdn->Mvctx, mat->cmap, PETSCSF_PATTERN_ALLGATHER));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
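
/* A hedged sketch (assumed usage, not a verbatim copy of MatMult_MPIDense) of how the
   all-gather SF built above is consumed at multiply time: the distributed input vector
   (called xx here) is broadcast into the sequential vector mdn->lvec, after which the
   product with the local dense block mdn->A needs no further communication; xx and yy
   are hypothetical names for the input and output vectors of the multiply.

     const PetscScalar *ax;
     PetscScalar       *al;

     PetscCall(VecGetArrayRead(xx, &ax));
     PetscCall(VecGetArray(mdn->lvec, &al));
     PetscCall(PetscSFBcastBegin(mdn->Mvctx, MPIU_SCALAR, ax, al, MPI_REPLACE));
     PetscCall(PetscSFBcastEnd(mdn->Mvctx, MPIU_SCALAR, ax, al, MPI_REPLACE));
     PetscCall(VecRestoreArray(mdn->lvec, &al));
     PetscCall(VecRestoreArrayRead(xx, &ax));
     PetscCall(MatMult(mdn->A, mdn->lvec, yy));
*/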

static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat, PetscInt, const IS[], const IS[], MatReuse, Mat *);

PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submat[])
{
  PetscInt nmax, nstages_local, nstages, i, pos, max_no;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) PetscCall(PetscCalloc1(ismax + 1, submat));
  /* Determine the number of stages through which the submatrices are created,
     limiting each stage to roughly 20 MB of index data */
  nmax = 20 * 1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax / nmax + ((ismax % nmax) ? 1 : 0);
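  /* Illustrative arithmetic (an annotation, not from the source): with N = C->cmap->N
     = 1,000,000 global columns and a 4-byte PetscInt, nmax = 20e6 / 4e6 = 5, so at
     most 5 index sets are processed per stage; ismax = 12 then gives nstages_local = 3. */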

  /* Make sure every processor loops through the nstages */
  PetscCall(MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C)));

  for (i = 0, pos = 0; i < nstages; i++) {
    if (pos + nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else max_no = ismax - pos;
    PetscCall(MatCreateSubMatrices_MPIDense_Local(C, max_no, isrow + pos, iscol + pos, scall, *submat + pos));
    pos += max_no;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
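
/* A hedged usage sketch of the staged driver above (the index sets and sizes are made
   up for illustration): extract one 5x5 sequential dense submatrix on every process,
   via MatCreateSubMatrices, the public entry point that reaches this code for
   MATMPIDENSE matrices. Note both index sets must be sorted; stride ISs are.

     IS  rows, cols;
     Mat *subs;

     PetscCall(ISCreateStride(PETSC_COMM_SELF, 5, 0, 1, &rows));
     PetscCall(ISCreateStride(PETSC_COMM_SELF, 5, 0, 1, &cols));
     PetscCall(MatCreateSubMatrices(C, 1, &rows, &cols, MAT_INITIAL_MATRIX, &subs));
     ... use subs[0], a 5x5 SEQDENSE matrix on each process ...
     PetscCall(MatDestroySubMatrices(1, &subs));
     PetscCall(ISDestroy(&rows));
     PetscCall(ISDestroy(&cols));
*/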

static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submats)
{
  Mat_MPIDense    *c = (Mat_MPIDense *)C->data;
  Mat              A = c->A;
  Mat_SeqDense    *a = (Mat_SeqDense *)A->data, *mat;
  PetscMPIInt      rank, size, tag0, tag1, idex, end, i;
  PetscInt         N = C->cmap->N, rstart = C->rmap->rstart, count;
  const PetscInt **irow, **icol, *irow_i;
  PetscInt        *nrow, *ncol, *w1, *w3, *w4, *rtable, start;
  PetscInt       **sbuf1, m, j, k, l, ct1, **rbuf1, row, proc;
  PetscInt         nrqs, msz, **ptr, *ctr, *pa, *tmp, bsz, nrqr;
  PetscInt         is_no, jmax, **rmap, *rmap_i;
  PetscInt         ctr_j, *sbuf1_j, *rbuf1_i;
  MPI_Request     *s_waits1, *r_waits1, *s_waits2, *r_waits2;
  MPI_Status      *r_status1, *r_status2, *s_status1, *s_status2;
  MPI_Comm         comm;
  PetscScalar    **rbuf2, **sbuf2;
  PetscBool        sorted;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  tag0 = ((PetscObject)C)->tag;
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  m = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag1));

  /* Check that the row and column indices are sorted */
  for (i = 0; i < ismax; i++) {
    PetscCall(ISSorted(isrow[i], &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "ISrow is not sorted");
    PetscCall(ISSorted(iscol[i], &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "IScol is not sorted");
  }

  PetscCall(PetscMalloc5(ismax, (PetscInt ***)&irow, ismax, (PetscInt ***)&icol, ismax, &nrow, ismax, &ncol, m, &rtable));
  for (i = 0; i < ismax; i++) {
    PetscCall(ISGetIndices(isrow[i], &irow[i]));
    PetscCall(ISGetIndices(iscol[i], &icol[i]));
    PetscCall(ISGetLocalSize(isrow[i], &nrow[i]));
    PetscCall(ISGetLocalSize(iscol[i], &ncol[i]));
  }

  /* Create hash table for the mapping: row -> proc */
  for (i = 0, j = 0; i < size; i++) {
    jmax = C->rmap->range[i + 1];
    for (; j < jmax; j++) rtable[j] = i;
  }
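
  /* Illustrative example (an annotation, not from the source): on two ranks owning
     rows [0,3) and [3,8), rtable becomes {0,0,0,1,1,1,1,1}, so rtable[row] is the
     rank that owns global row 'row'. */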

  /* Evaluate communication: to whom we send messages, how long they are, and how much
     buffer space is required. Based on this, buffers are allocated and data is copied
     into them. */
  PetscCall(PetscMalloc3(2 * size, &w1, size, &w3, size, &w4));
  PetscCall(PetscArrayzero(w1, size * 2)); /* initialize work vector */
  PetscCall(PetscArrayzero(w3, size));     /* initialize work vector */
  for (i = 0; i < ismax; i++) {
    PetscCall(PetscArrayzero(w4, size)); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j = 0; j < jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j = 0; j < size; j++) {
      if (w4[j]) {
        w1[2 * j] += w4[j]; /* w1[2*j] accumulates the number of rows needed from proc j */
        w3[j]++;            /* w3[j] counts the index sets that need rows from proc j */
      }
    }
  }

  nrqs         = 0; /* number of outgoing messages */
  msz          = 0; /* total message length (for all procs) */
  w1[2 * rank] = 0; /* no message sent to self */
  w3[rank]     = 0;
  for (i = 0; i < size; i++) {
    if (w1[2 * i]) { /* there exists a message to proc i */
      w1[2 * i + 1] = 1;
      nrqs++;
    }
  }
  PetscCall(PetscMalloc1(nrqs + 1, &pa)); /* proc array: the ranks we send to */
  for (i = 0, j = 0; i < size; i++) {
    if (w1[2 * i]) {
      pa[j] = i;
      j++;
    }
  }

  /* Each message would have a header = 1 + 2*(no of IS) + data */
  for (i = 0; i < nrqs; i++) {
    j = pa[i];
    w1[2 * j] += w1[2 * j + 1] + 2 * w3[j];
    msz += w1[2 * j];
  }
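
  /* Illustrative message layout implied by the header formula above (an annotation,
     not from the source): a message to proc j is

       [ nis | is_0 n_0 | is_1 n_1 | ... | requested global row indices ... ]

     where nis is the number of index sets needing rows from proc j and each pair
     (is_k, n_k) records an IS number and its row count; n_0 + n_1 + ... row indices
     follow the header. */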
  /* Do a global reduction to determine how many messages to expect */
  PetscCall(PetscMaxSum(comm, w1, &bsz, &nrqr));

  /* Allocate memory for recv buffers. The +1 guarantees rbuf1[0] exists even when nrqr is zero */
  PetscCall(PetscMalloc1(nrqr + 1, &rbuf1));
  PetscCall(PetscMalloc1(nrqr * bsz, &rbuf1[0]));
  for (i = 1; i < nrqr; ++i) rbuf1[i] = rbuf1[i - 1] + bsz;

  /* Post the receives */
  PetscCall(PetscMalloc1(nrqr + 1, &r_waits1));
  for (i = 0; i < nrqr; ++i) PetscCallMPI(MPI_Irecv(rbuf1[i], bsz, MPIU_INT, MPI_ANY_SOURCE, tag0, comm, r_waits1 + i));

  /* Allocate memory for outgoing messages */
  PetscCall(PetscMalloc4(size, &sbuf1, size, &ptr, 2 * msz, &tmp, size, &ctr));
  PetscCall(PetscArrayzero(sbuf1, size));
  PetscCall(PetscArrayzero(ptr, size));
  {
    PetscInt *iptr = tmp, ict = 0;
    for (i = 0; i < nrqs; i++) {
      j = pa[i];
      iptr += ict;
      sbuf1[j] = iptr;
      ict      = w1[2 * j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i = 0; i < nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    PetscCall(PetscArrayzero(sbuf1[j] + 1, 2 * w3[j]));
    ptr[j] = sbuf1[j] + 2 * w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i = 0; i < ismax; i++) {
    PetscCall(PetscArrayzero(ctr, size));
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j = 0; j < jmax; j++) { /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buffer */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j = 0; j < size; j++) { /* this loop could be optimized too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j            = sbuf1[j];
        k                  = ++sbuf1_j[0];
        sbuf1_j[2 * k]     = ctr_j;
        sbuf1_j[2 * k - 1] = i;
      }
    }
  }

  /* Now post the sends */
  PetscCall(PetscMalloc1(nrqs + 1, &s_waits1));
  for (i = 0; i < nrqs; ++i) {
    j = pa[i];
    PetscCallMPI(MPI_Isend(sbuf1[j], w1[2 * j], MPIU_INT, j, tag0, comm, s_waits1 + i));
  }

  /* Post receives to capture the row_data from other procs */
  PetscCall(PetscMalloc1(nrqs + 1, &r_waits2));
  PetscCall(PetscMalloc1(nrqs + 1, &rbuf2));
  for (i = 0; i < nrqs; i++) {
    j     = pa[i];
    count = (w1[2 * j] - (2 * sbuf1[j][0] + 1)) * N;
    PetscCall(PetscMalloc1(count + 1, &rbuf2[i]));
    PetscCallMPI(MPI_Irecv(rbuf2[i], count, MPIU_SCALAR, j, tag1, comm, r_waits2 + i));
  }

  /* Receive the messages (row numbers), then pack and send off the row values
     to the requesting processors */

  PetscCall(PetscMalloc1(nrqr + 1, &s_waits2));
  PetscCall(PetscMalloc1(nrqr + 1, &r_status1));
  PetscCall(PetscMalloc1(nrqr + 1, &sbuf2));

  {
    PetscScalar *sbuf2_i, *v_start;
    PetscInt     s_proc;
    for (i = 0; i < nrqr; ++i) {
      PetscCallMPI(MPI_Waitany(nrqr, r_waits1, &idex, r_status1 + i));
      s_proc  = r_status1[i].MPI_SOURCE; /* processor to send the values to */
      rbuf1_i = rbuf1[idex];             /* actual message from s_proc */
      /* number of rows = end - start: start is the index of the first row entry
         (just past the header), while end is the length of the received buffer */
      start = 2 * rbuf1_i[0] + 1;
      PetscCallMPI(MPI_Get_count(r_status1 + i, MPIU_INT, &end));
      /* allocate memory sufficient to hold all the row values */
      PetscCall(PetscMalloc1((end - start) * N, &sbuf2[idex]));
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
      for (j = start; j < end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k = 0; k < N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start += a->lda;
        }
      }
      /* Now send off the data */
      PetscCallMPI(MPI_Isend(sbuf2[idex], (end - start) * N, MPIU_SCALAR, s_proc, tag1, comm, s_waits2 + i));
    }
  }
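
  /* The packing above relies on the column-major (LAPACK-style) storage of the local
     dense block: entry (r, c) lives at a->v[r + c * a->lda], so one row is traversed
     with stride lda. A hedged equivalent of the inner loop (names as above):

       for (k = 0; k < N; k++) *sbuf2_i++ = a->v[row + k * a->lda];
  */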
  /* End Send-Recv of IS + row_numbers */
  PetscCall(PetscFree(r_status1));
  PetscCall(PetscFree(r_waits1));
  PetscCall(PetscMalloc1(nrqs + 1, &s_status1));
  if (nrqs) PetscCallMPI(MPI_Waitall(nrqs, s_waits1, s_status1));
  PetscCall(PetscFree(s_status1));
  PetscCall(PetscFree(s_waits1));

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i = 0; i < ismax; i++) {
      mat = (Mat_SeqDense *)submats[i]->data;
      PetscCheck(submats[i]->rmap->n == nrow[i] && submats[i]->cmap->n == ncol[i], PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Cannot reuse matrix, wrong size");
      PetscCall(PetscArrayzero(mat->v, submats[i]->rmap->n * submats[i]->cmap->n));

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (i = 0; i < ismax; i++) {
      PetscCall(MatCreate(PETSC_COMM_SELF, submats + i));
      PetscCall(MatSetSizes(submats[i], nrow[i], ncol[i], nrow[i], ncol[i]));
      PetscCall(MatSetType(submats[i], ((PetscObject)A)->type_name));
      PetscCall(MatSeqDenseSetPreallocation(submats[i], NULL));
    }
  }

  /* Assemble the matrices */
  {
    PetscInt     col;
    PetscScalar *imat_v, *mat_v, *imat_vi, *mat_vi;

    for (i = 0; i < ismax; i++) {
      mat    = (Mat_SeqDense *)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j = 0; j < m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) { /* copy the locally owned rows directly */
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k = 0; k < ncol[i]; k++) {
            col            = icol[i][k];
            imat_vi[k * m] = mat_vi[col * a->lda];
          }
        }
      }
    }
  }
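
  /* Addressing used above (an annotation, not from the source): both the source and
     the submatrix are column-major, so entry (j, k) of submats[i] is imat_v[j + k * m]
     while the matching source entry (row - rstart, col) is a->v[(row - rstart) + col * a->lda]. */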

  /* Create row map: this maps a row of C to the corresponding row of each submatrix */
  /* this is a very expensive operation with respect to memory usage */
  PetscCall(PetscMalloc1(ismax, &rmap));
  PetscCall(PetscCalloc1(ismax * C->rmap->N, &rmap[0]));
  for (i = 1; i < ismax; i++) rmap[i] = rmap[i - 1] + C->rmap->N;
  for (i = 0; i < ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j = 0; j < jmax; j++) rmap_i[irow_i[j]] = j;
  }
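
  /* Illustrative example (an annotation, not from the source): with isrow[i] = {2, 5, 7},
     rmap[i][2] = 0, rmap[i][5] = 1, and rmap[i][7] = 2; every other entry stays 0 from the
     calloc, so rmap[i] may only be indexed with rows that actually appear in isrow[i]. */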

  /* Now receive the row values and assemble the rest of the matrix */
  PetscCall(PetscMalloc1(nrqs + 1, &r_status2));
  {
    PetscInt     is_max, tmp1, col, *sbuf1_i, is_sz;
    PetscScalar *rbuf2_i, *imat_v, *imat_vi;

    for (tmp1 = 0; tmp1 < nrqs; tmp1++) { /* For each message */
      PetscCallMPI(MPI_Waitany(nrqs, r_waits2, &i, r_status2 + tmp1));
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2 * is_max + 1;
      rbuf2_i = rbuf2[i];
      for (j = 1; j <= is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2 * j - 1];
        is_sz  = sbuf1_i[2 * j];
        mat    = (Mat_SeqDense *)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k = 0; k < is_sz; k++, rbuf2_i += N) { /* For each row */
          row = sbuf1_i[ct1];
          ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l = 0; l < ncol[is_no]; l++) { /* For each col */
            col            = icol[is_no][l];
            imat_vi[l * m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End Send-Recv of row_values */
  PetscCall(PetscFree(r_status2));
  PetscCall(PetscFree(r_waits2));
  PetscCall(PetscMalloc1(nrqr + 1, &s_status2));
  if (nrqr) PetscCallMPI(MPI_Waitall(nrqr, s_waits2, s_status2));
  PetscCall(PetscFree(s_status2));
  PetscCall(PetscFree(s_waits2));

  /* Restore the indices */
  for (i = 0; i < ismax; i++) {
    PetscCall(ISRestoreIndices(isrow[i], irow + i));
    PetscCall(ISRestoreIndices(iscol[i], icol + i));
  }

  PetscCall(PetscFree5(*(PetscInt ***)&irow, *(PetscInt ***)&icol, nrow, ncol, rtable));
  PetscCall(PetscFree3(w1, w3, w4));
  PetscCall(PetscFree(pa));

  for (i = 0; i < nrqs; ++i) PetscCall(PetscFree(rbuf2[i]));
  PetscCall(PetscFree(rbuf2));
  PetscCall(PetscFree4(sbuf1, ptr, tmp, ctr));
  PetscCall(PetscFree(rbuf1[0]));
  PetscCall(PetscFree(rbuf1));

  for (i = 0; i < nrqr; ++i) PetscCall(PetscFree(sbuf2[i]));
  PetscCall(PetscFree(sbuf2));

  PetscCall(PetscFree(rmap[0]));
  PetscCall(PetscFree(rmap));

  for (i = 0; i < ismax; i++) {
    PetscCall(MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}

PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat inA, PetscScalar alpha)
{
  Mat_MPIDense *A = (Mat_MPIDense *)inA->data;

  PetscFunctionBegin;
  PetscCall(MatScale(A->A, alpha));
  PetscFunctionReturn(PETSC_SUCCESS);
}
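
/* Usage note (an annotation, not from the source): an MPIDENSE matrix stores all of
   its locally owned rows in the sequential dense block A->A, so scaling requires no
   communication; PetscCall(MatScale(mat, alpha)) on the parallel matrix reduces to
   the local call above on every rank. */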