
/*
   Support for the parallel dense matrix-vector multiply
*/
#include <../src/mat/impls/dense/mpi/mpidense.h>
#include <petscblaslapack.h>

PetscErrorCode MatSetUpMultiply_MPIDense(Mat mat)
{
  Mat_MPIDense *mdn = (Mat_MPIDense *)mat->data;

  PetscFunctionBegin;
  if (!mdn->Mvctx) {
    /* Create the local vector that is scattered into */
    PetscCall(VecDestroy(&mdn->lvec));
    if (mdn->A) { PetscCall(MatCreateVecs(mdn->A, &mdn->lvec, NULL)); }
    PetscCall(PetscLayoutSetUp(mat->cmap));
    PetscCall(PetscSFCreate(PetscObjectComm((PetscObject)mat), &mdn->Mvctx));
    PetscCall(PetscSFSetGraphWithPattern(mdn->Mvctx, mat->cmap, PETSCSF_PATTERN_ALLGATHER));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
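
/* How the pieces set up above are typically consumed in a matrix-vector product
   y = C*x (a minimal sketch, not code from this file; xarray/larray are
   illustrative names): the ALLGATHER SF broadcasts the distributed input vector
   into the sequential lvec, after which the multiply is purely local.

     PetscCall(VecGetArrayRead(x, &xarray));
     PetscCall(VecGetArray(mdn->lvec, &larray));
     PetscCall(PetscSFBcastBegin(mdn->Mvctx, MPIU_SCALAR, xarray, larray, MPI_REPLACE));
     PetscCall(PetscSFBcastEnd(mdn->Mvctx, MPIU_SCALAR, xarray, larray, MPI_REPLACE));
     PetscCall(VecRestoreArray(mdn->lvec, &larray));
     PetscCall(VecRestoreArrayRead(x, &xarray));
     ...                      then apply the local block: y_local = mdn->A * lvec
*/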

static PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat, PetscInt, const IS[], const IS[], MatReuse, Mat *);

PetscErrorCode MatCreateSubMatrices_MPIDense(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submat[])
{
  PetscInt nmax, nstages_local, nstages, i, pos, max_no;

  PetscFunctionBegin;
  /* Allocate memory to hold all the submatrices */
  if (scall != MAT_REUSE_MATRIX) PetscCall(PetscCalloc1(ismax + 1, submat));
  /* Determine the number of stages through which the submatrices are extracted */
  nmax = 20 * 1000000 / (C->cmap->N * sizeof(PetscInt));
  if (!nmax) nmax = 1;
  nstages_local = ismax / nmax + ((ismax % nmax) ? 1 : 0);
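  /* Worked example (illustrative numbers): with C->cmap->N = 1,000,000 columns and
     4-byte PetscInt, nmax = 20e6 / 4e6 = 5, i.e. at most 5 index sets per stage so
     that each stage's index buffers stay near 20 MB; ismax = 12 then gives
     nstages_local = 12/5 + 1 = 3 stages. */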

  /* Make sure every processor loops through the nstages */
  PetscCall(MPIU_Allreduce(&nstages_local, &nstages, 1, MPIU_INT, MPI_MAX, PetscObjectComm((PetscObject)C)));

  for (i = 0, pos = 0; i < nstages; i++) {
    if (pos + nmax <= ismax) max_no = nmax;
    else if (pos == ismax) max_no = 0;
    else max_no = ismax - pos;
    PetscCall(MatCreateSubMatrices_MPIDense_Local(C, max_no, isrow + pos, iscol + pos, scall, *submat + pos));
    pos += max_no;
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
/* -------------------------------------------------------------------------*/
PetscErrorCode MatCreateSubMatrices_MPIDense_Local(Mat C, PetscInt ismax, const IS isrow[], const IS iscol[], MatReuse scall, Mat *submats)
{
  Mat_MPIDense    *c = (Mat_MPIDense *)C->data;
  Mat              A = c->A;
  Mat_SeqDense    *a = (Mat_SeqDense *)A->data, *mat;
  PetscMPIInt      rank, size, tag0, tag1, idex, end, i;
  PetscInt         N = C->cmap->N, rstart = C->rmap->rstart, count;
  const PetscInt **irow, **icol, *irow_i;
  PetscInt        *nrow, *ncol, *w1, *w3, *w4, *rtable, start;
  PetscInt       **sbuf1, m, j, k, l, ct1, **rbuf1, row, proc;
  PetscInt         nrqs, msz, **ptr, *ctr, *pa, *tmp, bsz, nrqr;
  PetscInt         is_no, jmax, **rmap, *rmap_i;
  PetscInt         ctr_j, *sbuf1_j, *rbuf1_i;
  MPI_Request     *s_waits1, *r_waits1, *s_waits2, *r_waits2;
  MPI_Status      *r_status1, *r_status2, *s_status1, *s_status2;
  MPI_Comm         comm;
  PetscScalar    **rbuf2, **sbuf2;
  PetscBool        sorted;

  PetscFunctionBegin;
  PetscCall(PetscObjectGetComm((PetscObject)C, &comm));
  tag0 = ((PetscObject)C)->tag;
  PetscCallMPI(MPI_Comm_rank(comm, &rank));
  PetscCallMPI(MPI_Comm_size(comm, &size));
  m = C->rmap->N;

  /* Get some new tags to keep the communication clean */
  PetscCall(PetscObjectGetNewTag((PetscObject)C, &tag1));

  /* Check that the row and column indices are sorted */
  for (i = 0; i < ismax; i++) {
    PetscCall(ISSorted(isrow[i], &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "ISrow is not sorted");
    PetscCall(ISSorted(iscol[i], &sorted));
    PetscCheck(sorted, PETSC_COMM_SELF, PETSC_ERR_ARG_WRONGSTATE, "IScol is not sorted");
  }

  PetscCall(PetscMalloc5(ismax, (PetscInt ***)&irow, ismax, (PetscInt ***)&icol, ismax, &nrow, ismax, &ncol, m, &rtable));
  for (i = 0; i < ismax; i++) {
    PetscCall(ISGetIndices(isrow[i], &irow[i]));
    PetscCall(ISGetIndices(iscol[i], &icol[i]));
    PetscCall(ISGetLocalSize(isrow[i], &nrow[i]));
    PetscCall(ISGetLocalSize(iscol[i], &ncol[i]));
  }

  /* Create a lookup table for the mapping: row -> proc */
  for (i = 0, j = 0; i < size; i++) {
    jmax = C->rmap->range[i + 1];
    for (; j < jmax; j++) rtable[j] = i;
  }
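  /* Illustration: with size = 2, rows 0..4 on process 0 and rows 5..9 on process 1,
     rtable = {0,0,0,0,0,1,1,1,1,1}, so the owner of any global row is found by a
     single lookup instead of a search through the ownership ranges. */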

  /* Evaluate the communication: to whom each message goes, the length of each
     message, and the buffer space required. Based on this, allocate the buffers
     and copy the data into them */
  PetscCall(PetscMalloc3(2 * size, &w1, size, &w3, size, &w4));
  PetscCall(PetscArrayzero(w1, size * 2)); /* initialize work vector */
  PetscCall(PetscArrayzero(w3, size));     /* initialize work vector */
  for (i = 0; i < ismax; i++) {
    PetscCall(PetscArrayzero(w4, size)); /* initialize work vector */
    jmax   = nrow[i];
    irow_i = irow[i];
    for (j = 0; j < jmax; j++) {
      row  = irow_i[j];
      proc = rtable[row];
      w4[proc]++;
    }
    for (j = 0; j < size; j++) {
      if (w4[j]) {
        w1[2 * j] += w4[j];
        w3[j]++;
      }
    }
  }
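  /* At this point, for each process j: w1[2*j] is the total number of rows this
     process must request from j (summed over all index sets), and w3[j] is the
     number of index sets that need at least one row from j; w4[] was the per-IS
     scratch count. */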

  nrqs         = 0; /* number of outgoing messages */
  msz          = 0; /* total message length (over all procs) */
  w1[2 * rank] = 0; /* no message sent to self */
  w3[rank]     = 0;
  for (i = 0; i < size; i++) {
    if (w1[2 * i]) {
      w1[2 * i + 1] = 1;
      nrqs++;
    } /* there exists a message to proc i */
  }
  PetscCall(PetscMalloc1(nrqs + 1, &pa)); /* proc array */
  for (i = 0, j = 0; i < size; i++) {
    if (w1[2 * i]) {
      pa[j] = i;
      j++;
    }
  }

  /* Each message has a header = 1 + 2*(number of IS) + data */
  for (i = 0; i < nrqs; i++) {
    j = pa[i];
    w1[2 * j] += w1[2 * j + 1] + 2 * w3[j];
    msz += w1[2 * j];
  }
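  /* Layout of an outgoing message to process j, as filled in below (a sketch
     reconstructed from the packing code):
       sbuf1[j][0]                      number of index sets with rows on process j
       sbuf1[j][2*k-1], sbuf1[j][2*k]   (IS number, row count) for the k-th such IS
       sbuf1[j][2*w3[j]+1 ...]          the requested global row numbers, IS by IS */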
  /* Do a global reduction to determine how many messages to expect */
  PetscCall(PetscMaxSum(comm, w1, &bsz, &nrqr));
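  /* PetscMaxSum leaves bsz = the maximum length of any message this process will
     receive and nrqr = the number of such messages, which is what allows the
     receives below to be posted with MPI_ANY_SOURCE before the matching sends. */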

  /* Allocate memory for the recv buffers. Make sure rbuf1[0] exists by adding 1 to the buffer length */
  PetscCall(PetscMalloc1(nrqr + 1, &rbuf1));
  PetscCall(PetscMalloc1(nrqr * bsz, &rbuf1[0]));
  for (i = 1; i < nrqr; ++i) rbuf1[i] = rbuf1[i - 1] + bsz;

  /* Post the receives */
  PetscCall(PetscMalloc1(nrqr + 1, &r_waits1));
  for (i = 0; i < nrqr; ++i) PetscCallMPI(MPI_Irecv(rbuf1[i], bsz, MPIU_INT, MPI_ANY_SOURCE, tag0, comm, r_waits1 + i));

  /* Allocate memory for the outgoing messages */
  PetscCall(PetscMalloc4(size, &sbuf1, size, &ptr, 2 * msz, &tmp, size, &ctr));
  PetscCall(PetscArrayzero(sbuf1, size));
  PetscCall(PetscArrayzero(ptr, size));
  {
    PetscInt *iptr = tmp, ict = 0;
    for (i = 0; i < nrqs; i++) {
      j = pa[i];
      iptr += ict;
      sbuf1[j] = iptr;
      ict      = w1[2 * j];
    }
  }

  /* Form the outgoing messages */
  /* Initialize the header space */
  for (i = 0; i < nrqs; i++) {
    j           = pa[i];
    sbuf1[j][0] = 0;
    PetscCall(PetscArrayzero(sbuf1[j] + 1, 2 * w3[j]));
    ptr[j] = sbuf1[j] + 2 * w3[j] + 1;
  }

  /* Parse the isrow and copy data into outbuf */
  for (i = 0; i < ismax; i++) {
    PetscCall(PetscArrayzero(ctr, size));
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j = 0; j < jmax; j++) { /* parse the indices of each IS */
      row  = irow_i[j];
      proc = rtable[row];
      if (proc != rank) { /* copy to the outgoing buffer */
        ctr[proc]++;
        *ptr[proc] = row;
        ptr[proc]++;
      }
    }
    /* Update the headers for the current IS */
    for (j = 0; j < size; j++) { /* Could optimize this loop too */
      if ((ctr_j = ctr[j])) {
        sbuf1_j            = sbuf1[j];
        k                  = ++sbuf1_j[0];
        sbuf1_j[2 * k]     = ctr_j;
        sbuf1_j[2 * k - 1] = i;
      }
    }
  }

  /* Now post the sends */
  PetscCall(PetscMalloc1(nrqs + 1, &s_waits1));
  for (i = 0; i < nrqs; ++i) {
    j = pa[i];
    PetscCallMPI(MPI_Isend(sbuf1[j], w1[2 * j], MPIU_INT, j, tag0, comm, s_waits1 + i));
  }

  /* Post receives to capture the row data from the other procs */
  PetscCall(PetscMalloc1(nrqs + 1, &r_waits2));
  PetscCall(PetscMalloc1(nrqs + 1, &rbuf2));
  for (i = 0; i < nrqs; i++) {
    j     = pa[i];
    count = (w1[2 * j] - (2 * sbuf1[j][0] + 1)) * N;
    PetscCall(PetscMalloc1(count + 1, &rbuf2[i]));
    PetscCallMPI(MPI_Irecv(rbuf2[i], count, MPIU_SCALAR, j, tag1, comm, r_waits2 + i));
  }
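  /* count is (number of rows requested from process j) * N: subtracting the header
     length 2*sbuf1[j][0] + 1 from the total message length w1[2*j] leaves the row
     count, and each row comes back with all N columns. */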

  /* Receive the messages (row numbers), then pack and send off the row values
     to the requesting processors */

  PetscCall(PetscMalloc1(nrqr + 1, &s_waits2));
  PetscCall(PetscMalloc1(nrqr + 1, &r_status1));
  PetscCall(PetscMalloc1(nrqr + 1, &sbuf2));

  {
    PetscScalar *sbuf2_i, *v_start;
    PetscInt     s_proc;
    for (i = 0; i < nrqr; ++i) {
      PetscCallMPI(MPI_Waitany(nrqr, r_waits1, &idex, r_status1 + i));
      s_proc  = r_status1[i].MPI_SOURCE; /* processor to send the row values back to */
      rbuf1_i = rbuf1[idex];             /* actual message from s_proc */
      /* number of rows = end - start; start is the offset at which the row
         numbers begin in the buffer, while end is the length of the buffer */
      start = 2 * rbuf1_i[0] + 1;
      PetscCallMPI(MPI_Get_count(r_status1 + i, MPIU_INT, &end));
      /* allocate memory sufficient to hold all the row values */
      PetscCall(PetscMalloc1((end - start) * N, &sbuf2[idex]));
      sbuf2_i = sbuf2[idex];
      /* Now pack the data */
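      /* SeqDense storage is column-major: entry (row, k) lives at a->v[row + k*a->lda],
         so starting at a->v + row and stepping by a->lda traverses one matrix row */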
      for (j = start; j < end; j++) {
        row     = rbuf1_i[j] - rstart;
        v_start = a->v + row;
        for (k = 0; k < N; k++) {
          sbuf2_i[0] = v_start[0];
          sbuf2_i++;
          v_start += a->lda;
        }
      }
      /* Now send off the data */
      PetscCallMPI(MPI_Isend(sbuf2[idex], (end - start) * N, MPIU_SCALAR, s_proc, tag1, comm, s_waits2 + i));
    }
  }
  /* End send-recv of IS + row numbers */
  PetscCall(PetscFree(r_status1));
  PetscCall(PetscFree(r_waits1));
  PetscCall(PetscMalloc1(nrqs + 1, &s_status1));
  if (nrqs) PetscCallMPI(MPI_Waitall(nrqs, s_waits1, s_status1));
  PetscCall(PetscFree(s_status1));
  PetscCall(PetscFree(s_waits1));

  /* Create the submatrices */
  if (scall == MAT_REUSE_MATRIX) {
    for (i = 0; i < ismax; i++) {
      mat = (Mat_SeqDense *)(submats[i]->data);
      PetscCheck(!(submats[i]->rmap->n != nrow[i]) && !(submats[i]->cmap->n != ncol[i]), PETSC_COMM_SELF, PETSC_ERR_ARG_SIZ, "Cannot reuse matrix, wrong size");
      PetscCall(PetscArrayzero(mat->v, submats[i]->rmap->n * submats[i]->cmap->n));

      submats[i]->factortype = C->factortype;
    }
  } else {
    for (i = 0; i < ismax; i++) {
      PetscCall(MatCreate(PETSC_COMM_SELF, submats + i));
      PetscCall(MatSetSizes(submats[i], nrow[i], ncol[i], nrow[i], ncol[i]));
      PetscCall(MatSetType(submats[i], ((PetscObject)A)->type_name));
      PetscCall(MatSeqDenseSetPreallocation(submats[i], NULL));
    }
  }

  /* Assemble the matrices: first copy over the locally owned rows */
  {
    PetscInt     col;
    PetscScalar *imat_v, *mat_v, *imat_vi, *mat_vi;

    for (i = 0; i < ismax; i++) {
      mat    = (Mat_SeqDense *)submats[i]->data;
      mat_v  = a->v;
      imat_v = mat->v;
      irow_i = irow[i];
      m      = nrow[i];
      for (j = 0; j < m; j++) {
        row  = irow_i[j];
        proc = rtable[row];
        if (proc == rank) {
          row     = row - rstart;
          mat_vi  = mat_v + row;
          imat_vi = imat_v + j;
          for (k = 0; k < ncol[i]; k++) {
            col            = icol[i][k];
            imat_vi[k * m] = mat_vi[col * a->lda];
          }
        }
      }
    }
  }

  /* Create the row map: for each submat, this maps the rows of C to the rows of that submat */
  /* this is a very expensive operation wrt memory usage */
  PetscCall(PetscMalloc1(ismax, &rmap));
  PetscCall(PetscCalloc1(ismax * C->rmap->N, &rmap[0]));
  for (i = 1; i < ismax; i++) rmap[i] = rmap[i - 1] + C->rmap->N;
  for (i = 0; i < ismax; i++) {
    rmap_i = rmap[i];
    irow_i = irow[i];
    jmax   = nrow[i];
    for (j = 0; j < jmax; j++) rmap_i[irow_i[j]] = j;
  }
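  /* A sense of the cost (illustrative numbers): with ismax = 8 index sets and
     C->rmap->N = 1,000,000 global rows, rmap holds 8,000,000 PetscInts, roughly
     32 MB per process (64 MB with 64-bit indices), independent of how few rows
     the submatrices actually extract. */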

  /* Now receive the row values and assemble the rest of each matrix */
  PetscCall(PetscMalloc1(nrqs + 1, &r_status2));
  {
    PetscInt     is_max, tmp1, col, *sbuf1_i, is_sz;
    PetscScalar *rbuf2_i, *imat_v, *imat_vi;

    for (tmp1 = 0; tmp1 < nrqs; tmp1++) { /* For each message */
      PetscCallMPI(MPI_Waitany(nrqs, r_waits2, &i, r_status2 + tmp1));
      /* Now dig out the corresponding sbuf1, which contains the IS data structure */
      sbuf1_i = sbuf1[pa[i]];
      is_max  = sbuf1_i[0];
      ct1     = 2 * is_max + 1;
      rbuf2_i = rbuf2[i];
      for (j = 1; j <= is_max; j++) { /* For each IS belonging to the message */
        is_no  = sbuf1_i[2 * j - 1];
        is_sz  = sbuf1_i[2 * j];
        mat    = (Mat_SeqDense *)submats[is_no]->data;
        imat_v = mat->v;
        rmap_i = rmap[is_no];
        m      = nrow[is_no];
        for (k = 0; k < is_sz; k++, rbuf2_i += N) { /* For each row */
          row = sbuf1_i[ct1];
          ct1++;
          row     = rmap_i[row];
          imat_vi = imat_v + row;
          for (l = 0; l < ncol[is_no]; l++) { /* For each col */
            col            = icol[is_no][l];
            imat_vi[l * m] = rbuf2_i[col];
          }
        }
      }
    }
  }
  /* End send-recv of row values */
  PetscCall(PetscFree(r_status2));
  PetscCall(PetscFree(r_waits2));
  PetscCall(PetscMalloc1(nrqr + 1, &s_status2));
  if (nrqr) PetscCallMPI(MPI_Waitall(nrqr, s_waits2, s_status2));
  PetscCall(PetscFree(s_status2));
  PetscCall(PetscFree(s_waits2));

  /* Restore the indices */
  for (i = 0; i < ismax; i++) {
    PetscCall(ISRestoreIndices(isrow[i], irow + i));
    PetscCall(ISRestoreIndices(iscol[i], icol + i));
  }

  PetscCall(PetscFree5(*(PetscInt ***)&irow, *(PetscInt ***)&icol, nrow, ncol, rtable));
  PetscCall(PetscFree3(w1, w3, w4));
  PetscCall(PetscFree(pa));

  for (i = 0; i < nrqs; ++i) PetscCall(PetscFree(rbuf2[i]));
  PetscCall(PetscFree(rbuf2));
  PetscCall(PetscFree4(sbuf1, ptr, tmp, ctr));
  PetscCall(PetscFree(rbuf1[0]));
  PetscCall(PetscFree(rbuf1));

  for (i = 0; i < nrqr; ++i) PetscCall(PetscFree(sbuf2[i]));

  PetscCall(PetscFree(sbuf2));
  PetscCall(PetscFree(rmap[0]));
  PetscCall(PetscFree(rmap));

  for (i = 0; i < ismax; i++) {
    PetscCall(MatAssemblyBegin(submats[i], MAT_FINAL_ASSEMBLY));
    PetscCall(MatAssemblyEnd(submats[i], MAT_FINAL_ASSEMBLY));
  }
  PetscFunctionReturn(PETSC_SUCCESS);
}
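
/* A usage sketch (not code from this file) of the user-facing entry point that
   dispatches to the routines above; the sizes and strides are illustrative.
   Both index sets must be sorted, as checked at the top of the local routine:

     IS  isrow, iscol;
     Mat *submat;
     PetscCall(ISCreateStride(PETSC_COMM_SELF, 10, 0, 1, &isrow));
     PetscCall(ISCreateStride(PETSC_COMM_SELF, 5, 0, 2, &iscol));
     PetscCall(MatCreateSubMatrices(C, 1, &isrow, &iscol, MAT_INITIAL_MATRIX, &submat));
     ...                                use submat[0], a sequential dense matrix
     PetscCall(MatDestroySubMatrices(1, &submat));
     PetscCall(ISDestroy(&isrow));
     PetscCall(ISDestroy(&iscol));
*/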
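
/* Scaling a distributed dense matrix is purely local: each process scales the
   sequential dense block it owns, so this simply delegates to MatScale on the
   local matrix */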
PETSC_INTERN PetscErrorCode MatScale_MPIDense(Mat inA, PetscScalar alpha)
{
  Mat_MPIDense *A = (Mat_MPIDense *)inA->data;

  PetscFunctionBegin;
  PetscCall(MatScale(A->A, alpha));
  PetscFunctionReturn(PETSC_SUCCESS);
}