static char help[] = "Tests the parallel case for MatIncreaseOverlap(). Input arguments are:\n\
  -f <input_file> : file to load. For example see $PETSC_DIR/share/petsc/datafiles/matrices\n\
  -nd <size>      : > 0  number of domains per processor\n\
  -ov <overlap>   : >=0  amount of overlap between domains\n\n";

#include <petscmat.h>

PetscErrorCode ISAllGatherDisjoint(IS iis, IS **ois)
{
  IS             *is2,is;
  const PetscInt *idxs;
  PetscInt       i,ls,*sizes;
  PetscMPIInt    size;

  PetscFunctionBeginUser;
  CHKERRMPI(MPI_Comm_size(PetscObjectComm((PetscObject)iis),&size));
  CHKERRQ(PetscMalloc1(size,&is2));
  CHKERRQ(PetscMalloc1(size,&sizes));
  CHKERRQ(ISGetLocalSize(iis,&ls));
  /* we don't have a public ISGetLayout */
  CHKERRMPI(MPI_Allgather(&ls,1,MPIU_INT,sizes,1,MPIU_INT,PetscObjectComm((PetscObject)iis)));
  CHKERRQ(ISAllGather(iis,&is));
  CHKERRQ(ISGetIndices(is,&idxs));
  for (i = 0, ls = 0; i < size; i++) {
    CHKERRQ(ISCreateGeneral(PETSC_COMM_SELF,sizes[i],idxs+ls,PETSC_COPY_VALUES,&is2[i]));
    ls += sizes[i];
  }
  CHKERRQ(ISRestoreIndices(is,&idxs));
  CHKERRQ(ISDestroy(&is));
  CHKERRQ(PetscFree(sizes));
  *ois = is2;
  PetscFunctionReturn(0);
}

int main(int argc,char **args)
{
  PetscErrorCode ierr;
  PetscInt       nd = 2,ov = 1,ndpar,i,start,m,n,end,lsize;
  PetscMPIInt    rank;
  PetscBool      flg, useND = PETSC_FALSE;
  Mat            A,B;
  char           file[PETSC_MAX_PATH_LEN];
  PetscViewer    fd;
  IS             *is1,*is2;
  PetscRandom    r;
  PetscScalar    rand;

  ierr = PetscInitialize(&argc,&args,(char*)0,help);if (ierr) return ierr;
  CHKERRMPI(MPI_Comm_rank(PETSC_COMM_WORLD,&rank));
  CHKERRQ(PetscOptionsGetString(NULL,NULL,"-f",file,sizeof(file),&flg));
  PetscCheck(flg,PETSC_COMM_WORLD,PETSC_ERR_USER,"Must use -f filename to indicate a file containing a PETSc binary matrix");
  CHKERRQ(PetscOptionsGetInt(NULL,NULL,"-nd",&nd,NULL));
  CHKERRQ(PetscOptionsGetInt(NULL,NULL,"-ov",&ov,NULL));
  CHKERRQ(PetscOptionsGetBool(NULL,NULL,"-nested_dissection",&useND,NULL));

  /* Read matrix */
  CHKERRQ(PetscViewerBinaryOpen(PETSC_COMM_WORLD,file,FILE_MODE_READ,&fd));
  CHKERRQ(MatCreate(PETSC_COMM_WORLD,&A));
  CHKERRQ(MatSetType(A,MATMPIAIJ));
  CHKERRQ(MatLoad(A,fd));
  CHKERRQ(MatSetFromOptions(A));
  CHKERRQ(PetscViewerDestroy(&fd));

  /* Read the matrix again as a sequential matrix */
  CHKERRQ(PetscViewerBinaryOpen(PETSC_COMM_SELF,file,FILE_MODE_READ,&fd));
  CHKERRQ(MatCreate(PETSC_COMM_SELF,&B));
  CHKERRQ(MatSetType(B,MATSEQAIJ));
  CHKERRQ(MatLoad(B,fd));
  CHKERRQ(MatSetFromOptions(B));
  CHKERRQ(PetscViewerDestroy(&fd));

  /* Create the IS corresponding to subdomains */
  if (useND) {
    MatPartitioning part;
    IS              ndmap;
    PetscMPIInt     size;

    ndpar = 1;
    CHKERRMPI(MPI_Comm_size(PETSC_COMM_WORLD,&size));
    nd   = (PetscInt)size;
    CHKERRQ(PetscMalloc1(ndpar,&is1));
    CHKERRQ(MatPartitioningCreate(PETSC_COMM_WORLD,&part));
    CHKERRQ(MatPartitioningSetAdjacency(part,A));
    CHKERRQ(MatPartitioningSetFromOptions(part));
    CHKERRQ(MatPartitioningApplyND(part,&ndmap));
    CHKERRQ(MatPartitioningDestroy(&part));
    CHKERRQ(ISBuildTwoSided(ndmap,NULL,&is1[0]));
    CHKERRQ(ISDestroy(&ndmap));
    CHKERRQ(ISAllGatherDisjoint(is1[0],&is2));
  } else {
    /* Create the random Index Sets */
    CHKERRQ(PetscMalloc1(nd,&is1));
    CHKERRQ(PetscMalloc1(nd,&is2));
    CHKERRQ(MatGetSize(A,&m,&n));
    CHKERRQ(PetscRandomCreate(PETSC_COMM_SELF,&r));
    CHKERRQ(PetscRandomSetFromOptions(r));
    for (i=0; i<nd; i++) {
      CHKERRQ(PetscRandomGetValue(r,&rand));
      start = (PetscInt)(rand*m);
      CHKERRQ(PetscRandomGetValue(r,&rand));
      end   = (PetscInt)(rand*m);
      lsize = end - start;
      if (start > end) { start = end; lsize = -lsize;}
      CHKERRQ(ISCreateStride(PETSC_COMM_SELF,lsize,start,1,is1+i));
      CHKERRQ(ISCreateStride(PETSC_COMM_SELF,lsize,start,1,is2+i));
    }
    ndpar = nd;
    CHKERRQ(PetscRandomDestroy(&r));
  }
  CHKERRQ(MatIncreaseOverlap(A,ndpar,is1,ov));
  CHKERRQ(MatIncreaseOverlap(B,nd,is2,ov));

  if (useND) {
    IS *is;
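    /* In the ND branch the parallel overlap was computed on a single
       distributed IS (ndpar == 1); gather the overlapped pieces from all
       ranks so every process holds nd sequential index sets that can be
       compared one-to-one with the sequential results in is2. */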
    CHKERRQ(ISAllGatherDisjoint(is1[0],&is));
    CHKERRQ(ISDestroy(&is1[0]));
    CHKERRQ(PetscFree(is1));
    is1 = is;
  }

  /* Now see if the serial and parallel case have the same answers */
  for (i=0; i<nd; ++i) {
    PetscInt sz1,sz2;

    CHKERRQ(ISEqual(is1[i],is2[i],&flg));
    CHKERRQ(ISGetSize(is1[i],&sz1));
    CHKERRQ(ISGetSize(is2[i],&sz2));
    if (!flg) {
      CHKERRQ(PetscPrintf(PETSC_COMM_SELF,"proc:[%d], i=%" PetscInt_FMT ", flg =%d  sz1 = %" PetscInt_FMT " sz2 = %" PetscInt_FMT "\n",rank,i,(int)flg,sz1,sz2));
    }
  }

  /* Free allocated memory */
  for (i=0; i<nd; ++i) {
    CHKERRQ(ISDestroy(&is1[i]));
    CHKERRQ(ISDestroy(&is2[i]));
  }
  CHKERRQ(PetscFree(is1));
  CHKERRQ(PetscFree(is2));
  CHKERRQ(MatDestroy(&A));
  CHKERRQ(MatDestroy(&B));
  ierr = PetscFinalize();
  return ierr;
}
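/*
   Example invocation (a sketch; the executable name and matrix file are
   placeholders, any matrix stored in PETSc binary format works, see the
   help string above for where sample matrices live):

     mpiexec -n 2 ./extest -f <petsc_binary_matrix_file> -nd 4 -ov 2
     mpiexec -n 2 ./extest -f <petsc_binary_matrix_file> -nested_dissection -ov 1

   A rank prints a diagnostic line only when the sequential and parallel
   overlaps disagree, so silent output means the test passed.
*/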