static char help[] = "Demonstrates calling Trilinos and then PETSc in the same program.\n\n";

/*
   Example obtained from: http://trilinos.org/docs/dev/packages/tpetra/doc/html/Tpetra_Lesson01.html
*/

#include <petscsys.h>
#include <Tpetra_DefaultPlatform.hpp>
#include <Tpetra_Version.hpp>
#include <Teuchos_GlobalMPISession.hpp> // used if Trilinos is the one that starts up MPI

// Do something with the given communicator.  In this case, we just
// print Tpetra's version to stdout on Process 0 in the given
// communicator.
void exampleRoutine(const Teuchos::RCP<const Teuchos::Comm<int>> &comm)
{
  if (comm->getRank() == 0) {
    // On (MPI) Process 0, print out the Tpetra software version.
    std::cout << Tpetra::version() << std::endl << std::endl;
  }
}

int main(int argc, char **argv)
{
  // These "using" declarations make the code more concise, in that
  // you don't have to write the namespace along with the class or
  // object name.  This is especially helpful with commonly used
  // things like std::endl.
  using std::cout;
  using std::endl;
  // Start up MPI, if using MPI.  Trilinos doesn't have to be built
  // with MPI; a build without MPI is called a "serial" build.
  // GlobalMPISession hides this implementation detail.
  //
  // Note the third argument.  If you pass GlobalMPISession the
  // address of an std::ostream, it will print a one-line status
  // message with the rank on each MPI process.  This may be
  // undesirable when running with a large number of MPI ranks.
  // You can avoid printing anything here by passing in either
  // NULL or the address of a Teuchos::oblackholestream, as sketched
  // below.
  Teuchos::GlobalMPISession mpiSession(&argc, &argv, NULL);
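  // A minimal sketch of the quiet alternative mentioned above (assumes
  // the Teuchos_oblackholestream.hpp header; not exercised by this test):
  //
  //   Teuchos::oblackholestream blackHole;
  //   Teuchos::GlobalMPISession mpiSession(&argc, &argv, &blackHole);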
  // Get a pointer to the communicator object representing
  // MPI_COMM_WORLD.  getDefaultPlatform().getComm() doesn't create a
  // new object every time you call it; it just returns the same
  // communicator each time.  Thus, you can call it anywhere and get
  // the same communicator.  (This is handy if you don't want to pass
  // a communicator around everywhere, though it's always better to
  // parameterize your algorithms on the communicator.)
  //
  // "Tpetra::DefaultPlatform" knows whether or not we built with MPI
  // support.  If we didn't build with MPI, we'll get a "communicator"
  // with size 1, whose only process has rank 0.
  Teuchos::RCP<const Teuchos::Comm<int>> comm = Tpetra::DefaultPlatform::getDefaultPlatform().getComm();
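  // If PETSc should run on this same communicator, one could (in an MPI
  // build) unwrap the raw MPI_Comm before PetscInitialize() -- a sketch,
  // assuming the comm is really a Teuchos::MpiComm (from
  // Teuchos_DefaultMpiComm.hpp) so the dynamic cast succeeds:
  //
  //   Teuchos::RCP<const Teuchos::MpiComm<int>> mpiComm =
  //     Teuchos::rcp_dynamic_cast<const Teuchos::MpiComm<int>>(comm);
  //   if (!mpiComm.is_null()) PETSC_COMM_WORLD = *mpiComm->getRawMpiComm();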

  PetscFunctionBeginUser;
  PetscCall(PetscInitialize(&argc, &argv, NULL, help));

  // Get my process' rank, and the total number of processes.
  // Equivalent to MPI_Comm_rank and MPI_Comm_size, respectively.
  const int myRank = comm->getRank();
  const int size   = comm->getSize();
  if (myRank == 0) cout << "Total number of processes: " << size << endl;
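  // For reference, the direct MPI calls would look like this (a sketch,
  // querying MPI_COMM_WORLD rather than the Teuchos comm):
  //
  //   int rank, nproc;
  //   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  //   MPI_Comm_size(MPI_COMM_WORLD, &nproc);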
  // Do something with the new communicator.
  exampleRoutine(comm);
  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) cout << "End Result: TEST PASSED" << endl;
  // GlobalMPISession calls MPI_Finalize() in its destructor, if
  // appropriate, so no explicit MPI_Finalize() is needed before
  // returning from main().  PETSc, however, must still be shut down
  // explicitly with PetscFinalize().
  PetscCall(PetscFinalize());
  return 0;
}

/*TEST

   build:
     requires: trilinos

   test:
      nsize: 3
      filter: grep -v "Tpetra in Trilinos"

TEST*/