static char help[] = "Demonstrates calling Trilinos and then PETSc in the same program.\n\n";

/*
   Example obtained from: http://trilinos.org/docs/dev/packages/tpetra/doc/html/Tpetra_Lesson01.html
*/

#include <petscsys.h>
#include <Tpetra_DefaultPlatform.hpp>
#include <Tpetra_Version.hpp>
#include <Teuchos_GlobalMPISession.hpp>    // used if Trilinos is the one that starts up MPI

// Do something with the given communicator.  In this case, we just
// print Tpetra's version to stdout on Process 0 in the given
// communicator.
void
exampleRoutine (const Teuchos::RCP<const Teuchos::Comm<int> >& comm)
{
  if (comm->getRank () == 0) {
    // On (MPI) Process 0, print out the Tpetra software version.
    std::cout << Tpetra::version () << std::endl << std::endl;
  }
}

int main(int argc,char **argv)
{
  // These "using" declarations make the code more concise, in that
  // you don't have to write the namespace along with the class or
  // object name.  This is especially helpful with commonly used
  // things like std::endl.
  using std::cout;
  using std::endl;
  // Start up MPI, if using MPI.  Trilinos doesn't have to be built
  // with MPI; it's called a "serial" build if you build without MPI.
  // GlobalMPISession hides this implementation detail.
  //
  // Note the third argument.  If you pass GlobalMPISession the
  // address of an std::ostream, it will print a one-line status
  // message with the rank on each MPI process.  This may be
  // undesirable if running with a large number of MPI processes.
  // You can avoid printing anything here by passing in either
  // NULL or the address of a Teuchos::oblackholestream.
  Teuchos::GlobalMPISession mpiSession (&argc, &argv, NULL);
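  // A minimal sketch of the quiet alternative mentioned above (this is an
  // illustration, not used by this example; it assumes
  // <Teuchos_oblackholestream.hpp> is included).  The black-hole stream
  // swallows the per-rank status line:
  //
  //   Teuchos::oblackholestream blackHole;
  //   Teuchos::GlobalMPISession mpiSession (&argc, &argv, &blackHole);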
  // Get a pointer to the communicator object representing
  // MPI_COMM_WORLD.  getDefaultPlatform().getComm() doesn't create a
  // new object every time you call it; it just returns the same
  // communicator each time.  Thus, you can call it anywhere and get
  // the same communicator.  (This is handy if you don't want to pass
  // a communicator around everywhere, though it's always better to
  // parameterize your algorithms on the communicator.)
  //
  // "Tpetra::DefaultPlatform" knows whether or not we built with MPI
  // support.  If we didn't build with MPI, we'll get a "communicator"
  // with size 1, whose only process has rank 0.
  Teuchos::RCP<const Teuchos::Comm<int> > comm = Tpetra::DefaultPlatform::getDefaultPlatform ().getComm ();
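
  // A hedged sketch (valid only in an MPI build, and not used below): the
  // raw MPI_Comm can be recovered from the Teuchos communicator, e.g. to
  // hand it to PETSc objects.  Teuchos::MpiComm and getRawMpiComm() live in
  // <Teuchos_DefaultMpiComm.hpp>:
  //
  //   Teuchos::RCP<const Teuchos::MpiComm<int> > mpiComm =
  //     Teuchos::rcp_dynamic_cast<const Teuchos::MpiComm<int> >(comm);
  //   MPI_Comm rawComm = *(mpiComm->getRawMpiComm ());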

  // PetscInitialize() sees that MPI is already running (GlobalMPISession
  // started it above), so it will not call MPI_Init() again.
  PetscCall(PetscInitialize(&argc,&argv,NULL,help));
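
  // A hedged aside: after PetscInitialize(), the same rank is visible
  // through PETSc's world communicator (PETSC_COMM_WORLD defaults to
  // MPI_COMM_WORLD, which GlobalMPISession already started):
  //
  //   PetscMPIInt prank;
  //   PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &prank));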

  // Get my process's rank and the total number of processes.
  // Equivalent to MPI_Comm_rank and MPI_Comm_size, respectively.
  const int myRank = comm->getRank ();
  const int size = comm->getSize ();
  if (myRank == 0) {
    cout << "Total number of processes: " << size << endl;
  }
  // Do something with the communicator.
  exampleRoutine (comm);
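  // The same Teuchos::Comm also supports collectives; a hedged sketch for
  // illustration only (Teuchos::reduceAll() comes from
  // <Teuchos_CommHelpers.hpp>, which this example does not include):
  //
  //   int localValue = myRank, globalSum = 0;
  //   Teuchos::reduceAll (*comm, Teuchos::REDUCE_SUM, localValue,
  //                       Teuchos::outArg (globalSum));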
  // This tells the Trilinos test framework that the test passed.
  if (myRank == 0) {
    cout << "End Result: TEST PASSED" << endl;
  }
  // PetscFinalize() must still be called, but it will not finalize MPI,
  // since PETSc did not initialize it.  GlobalMPISession calls
  // MPI_Finalize() in its destructor, if appropriate, so no explicit
  // MPI_Finalize() is needed here.
  PetscCall(PetscFinalize());
  return 0;
}

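/*
   Usage sketch (an assumption; the exact build command depends on the local
   PETSc configuration):
     $ make ex46
     $ mpiexec -n 3 ./ex46
   The TEST block below runs the same program through PETSc's test harness on
   3 MPI ranks, filtering out the Tpetra version banner.
*/
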
/*TEST

   build:
     requires: trilinos

   test:
      nsize: 3
      filter: grep -v "Tpetra in Trilinos"

TEST*/
89