MFEM v4.6.0
Finite element discretization library
ex1p.cpp
// MFEM Example 1 - Parallel Version
// PETSc Modification
//
// Compile with: make ex1p
//
// Sample runs:
//    mpirun -np 4 ex1p -m ../../data/amr-quad.mesh
//    mpirun -np 4 ex1p -m ../../data/amr-quad.mesh --petscopts rc_ex1p
//
// Device sample runs:
//    mpirun -np 4 ex1p -pa -d cuda --petscopts rc_ex1p_device
//
// Description:  This example code demonstrates the use of MFEM to define a
//               simple finite element discretization of the Laplace problem
//               -Delta u = 1 with homogeneous Dirichlet boundary conditions.
//               Specifically, we discretize using a FE space of the specified
//               order, or if order < 1 using an isoparametric/isogeometric
//               space (i.e. quadratic for quadratic curvilinear mesh, NURBS
//               for NURBS mesh, etc.)
//
//               The example highlights the use of mesh refinement, finite
//               element grid functions, as well as linear and bilinear forms
//               corresponding to the left-hand side and right-hand side of the
//               discrete linear system. We also cover the explicit elimination
//               of essential boundary conditions, static condensation, and the
//               optional connection to the GLVis tool for visualization.
//               The example also shows how PETSc Krylov solvers can be used by
//               wrapping a HypreParMatrix (or not) and a Solver, together with
//               customization using an options file (see rc_ex1p). We also
//               provide an example of how to visualize the iterative solution
//               inside a PETSc solver.
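//
//               For illustration only: a PETSc options file is a plain text
//               file listing standard PETSc options, so a minimal one might
//               contain, e.g.,
//
//                  -ksp_type cg
//                  -pc_type gamg
//                  -ksp_monitor
//
//               (generic PETSc options shown as a sketch, not the contents
//               of the rc_ex1p file shipped with MFEM).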

#include "mfem.hpp"
#include <fstream>
#include <iostream>

#ifndef MFEM_USE_PETSC
#error This example requires that MFEM is built with MFEM_USE_PETSC=YES
#endif

using namespace std;
using namespace mfem;

class UserMonitor : public PetscSolverMonitor
{
private:
   ParBilinearForm *a;
   ParLinearForm   *b;

public:
   UserMonitor(ParBilinearForm *a_, ParLinearForm *b_)
      : PetscSolverMonitor(true,false), a(a_), b(b_) {}

   void MonitorSolution(PetscInt it, PetscReal norm, const Vector &X)
   {
      // plot the first 5 iterates, skipping the initial guess (it == 0)
      if (!it || it > 5) { return; }
      ParFiniteElementSpace *fespace = a->ParFESpace();
      ParMesh *mesh = fespace->GetParMesh();
      ParGridFunction x(fespace);
      a->RecoverFEMSolution(X, *b, x);

      char vishost[] = "localhost";
      int  visport   = 19916;
      int  num_procs, myid;

      MPI_Comm_size(mesh->GetComm(),&num_procs);
      MPI_Comm_rank(mesh->GetComm(),&myid);
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << *mesh << x
               << "window_title 'Iteration no " << it << "'" << flush;
   }
};
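
// A monitor of this kind is attached to a PETSc solver via
// PetscSolver::SetMonitor(), after which PETSc calls MonitorSolution()
// once per Krylov iteration. Usage sketch (this is done in step 13 below):
//
//    UserMonitor mymon(a, b);
//    pcg->SetMonitor(&mymon);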

int main(int argc, char *argv[])
{
   // 1. Initialize MPI and HYPRE.
   Mpi::Init(argc, argv);
   int num_procs = Mpi::WorldSize();
   int myid = Mpi::WorldRank();
   Hypre::Init();

   // 2. Parse command-line options.
   const char *mesh_file = "../../data/star.mesh";
   int order = 1;
   bool static_cond = false;
   bool pa = false;
   bool visualization = false;
   const char *device_config = "cpu";
   bool use_petsc = true;
   const char *petscrc_file = "";
   bool petscmonitor = false;
   bool forcewrap = false;
   bool useh2 = false;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc",
                  "--no-static-condensation", "Enable static condensation.");
   args.AddOption(&pa, "-pa", "--partial-assembly", "-no-pa",
                  "--no-partial-assembly", "Enable Partial Assembly.");
   args.AddOption(&device_config, "-d", "--device",
                  "Device configuration string, see Device::Configure().");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&use_petsc, "-usepetsc", "--usepetsc", "-no-petsc",
                  "--no-petsc",
                  "Use or not PETSc to solve the linear system.");
   args.AddOption(&petscrc_file, "-petscopts", "--petscopts",
                  "PetscOptions file to use.");
   args.AddOption(&petscmonitor, "-petscmonitor", "--petscmonitor",
                  "-no-petscmonitor", "--no-petscmonitor",
                  "Enable or disable GLVis visualization of residual.");
   args.AddOption(&forcewrap, "-forcewrap", "--forcewrap",
                  "-noforce-wrap", "--noforce-wrap",
                  "Force matrix-free.");
   args.AddOption(&useh2, "-useh2", "--useh2", "-no-h2",
                  "--no-h2",
                  "Use or not the H2 matrix solver.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Enable hardware devices such as GPUs, and programming models such as
   //    CUDA, OCCA, RAJA and OpenMP based on command line options.
   Device device(device_config);
   if (myid == 0) { device.Print(); }

   // 3b. We initialize PETSc
   MFEMInitializePetsc(NULL,NULL,petscrc_file,NULL);
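   // Note: passing NULL for the first two arguments means PETSc does not
   // parse argc/argv here; options come only from the rc file (if given).
   // Passing &argc and &argv instead would also let PETSc read options from
   // the command line (a usage note, not something this example does).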

   // 4. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   // 5. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 10,000 elements.
   {
      int ref_levels =
         (int)floor(log(10000./mesh->GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
      {
         mesh->UniformRefinement();
      }
   }
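   // Rationale for the formula: each uniform refinement multiplies the
   // element count by 2^dim, so r levels yield GetNE() * 2^(r*dim) elements;
   // keeping that below 10,000 gives r = floor(log(10000/GetNE())/(dim*log 2)).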

   // 6. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh->UniformRefinement();
      }
   }

   // 7. Define a parallel finite element space on the parallel mesh. Here we
   //    use continuous Lagrange finite elements of the specified order. If
   //    order < 1, we instead use an isoparametric/isogeometric space.
   FiniteElementCollection *fec;
   bool delete_fec;
   if (order > 0)
   {
      fec = new H1_FECollection(order, dim);
      delete_fec = true;
   }
   else if (pmesh->GetNodes())
   {
      fec = pmesh->GetNodes()->OwnFEC();
      delete_fec = false;
      if (myid == 0)
      {
         cout << "Using isoparametric FEs: " << fec->Name() << endl;
      }
   }
   else
   {
      fec = new H1_FECollection(order = 1, dim);
      delete_fec = true;
   }
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_BigInt size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of finite element unknowns: " << size << endl;
   }

   // 8. Determine the list of true (i.e. parallel conforming) essential
   //    boundary dofs. In this example, the boundary conditions are defined
   //    by marking all the boundary attributes from the mesh as essential
   //    (Dirichlet) and converting them to a list of true dofs.
   Array<int> ess_tdof_list;
   if (pmesh->bdr_attributes.Size())
   {
      Array<int> ess_bdr(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
      fespace->GetEssentialTrueDofs(ess_bdr, ess_tdof_list);
   }
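   // The marker array is indexed by boundary attribute: "ess_bdr = 1" marks
   // every attribute as essential (Dirichlet). To constrain only attribute 1,
   // for example, one could write instead (illustrative sketch):
   //
   //    ess_bdr = 0;
   //    ess_bdr[0] = 1; // entry i marks attribute i+1 (attributes are 1-based)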

   // 9. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (1,phi_i) where phi_i are the basis functions in fespace.
   ParLinearForm *b = new ParLinearForm(fespace);
   ConstantCoefficient one(1.0);
   b->AddDomainIntegrator(new DomainLFIntegrator(one));
   b->Assemble();

   // 10. Define the solution vector x as a parallel finite element grid function
   //     corresponding to fespace. Initialize x with initial guess of zero,
   //     which satisfies the boundary conditions.
   ParGridFunction x(fespace);
   x = 0.0;

   // 11. Set up the parallel bilinear form a(.,.) on the finite element space
   //     corresponding to the Laplacian operator -Delta, by adding the Diffusion
   //     domain integrator.
   ParBilinearForm *a = new ParBilinearForm(fespace);
   if (pa) { a->SetAssemblyLevel(AssemblyLevel::PARTIAL); }
   a->AddDomainIntegrator(new DiffusionIntegrator(one));
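   // With unit coefficient, DiffusionIntegrator contributes the bilinear form
   // a(u,v) = (grad u, grad v); together with the linear form b(v) = (1, v)
   // from step 9, this is the weak statement of -Delta u = 1.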

   // 12. Assemble the parallel bilinear form and the corresponding linear
   //     system, applying any necessary transformations such as: parallel
   //     assembly, eliminating boundary conditions, applying conforming
   //     constraints for non-conforming AMR, static condensation, etc.
   if (static_cond) { a->EnableStaticCondensation(); }
   a->Assemble();

   OperatorPtr A;
   Vector B, X;
   a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B);
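   // After FormLinearSystem, A acts on the true (parallel conforming) dofs
   // with the essential dofs eliminated, and X, B are the matching true-dof
   // vectors. With -pa, A is applied matrix-free rather than stored as a
   // HypreParMatrix, which is why the PETSc branch below must wrap it as a
   // shell matrix in that case.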

   // 13. Solve the linear system A X = B.
   //     If using MFEM with HYPRE
   //       * With full assembly, use the BoomerAMG preconditioner from hypre.
   //       * With partial assembly, use Jacobi smoothing, for now.
   //     If using MFEM with PETSc
   //       * With full assembly, use command line options or H2 matrix solver
   //       * With partial assembly, wrap Jacobi smoothing, for now.
   Solver *prec = NULL;
   if (pa)
   {
      if (UsesTensorBasis(*fespace))
      {
         prec = new OperatorJacobiSmoother(*a, ess_tdof_list);
      }
   }
   else
   {
      prec = new HypreBoomerAMG;
   }
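
   // Note: with -pa on a mesh that has no tensor-product basis, prec remains
   // NULL and the solvers below simply run without a preconditioner.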

   if (!use_petsc)
   {
      CGSolver *pcg = new CGSolver(MPI_COMM_WORLD);
      if (prec) { pcg->SetPreconditioner(*prec); }
      pcg->SetOperator(*A);
      pcg->SetRelTol(1e-12);
      pcg->SetMaxIter(200);
      pcg->SetPrintLevel(1);
      pcg->Mult(B, X);
      delete pcg;
   }
   else
   {
      PetscPCGSolver *pcg;
      // If petscrc_file has been given, we convert the HypreParMatrix to a
      // PetscParMatrix; the user can then experiment with PETSc command line
      // options unless forcewrap is true.
      bool wrap = forcewrap ? true : (pa ? true : !strlen(petscrc_file));
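      // In other words (sketch of the two paths): wrap == true makes PETSc
      // see a shell matrix whose action calls back into MFEM's Mult();
      // wrap == false converts the assembled HypreParMatrix to PETSc's
      // native format, so matrix-based options from the rc file can act on
      // the actual matrix entries.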
      if (wrap)
      {
         pcg = new PetscPCGSolver(MPI_COMM_WORLD);
         pcg->SetOperator(*A);
         if (useh2)
         {
            delete prec;
            prec = new PetscH2Solver(*A.Ptr(),fespace);
         }
         else if (!pa) // We need to pass the preconditioner constructed from the HypreParMatrix
         {
            delete prec;
            HypreParMatrix *hA = A.As<HypreParMatrix>();
            prec = new HypreBoomerAMG(*hA);
         }
         if (prec) { pcg->SetPreconditioner(*prec); }
      }
      else // Not wrapping, pass the HypreParMatrix so that users can experiment with command line
      {
         HypreParMatrix *hA = A.As<HypreParMatrix>();
         pcg = new PetscPCGSolver(*hA, false);
         if (useh2)
         {
            delete prec;
            prec = new PetscH2Solver(*hA,fespace);
         }
      }
      pcg->iterative_mode = true; // iterative_mode is true by default with CGSolver
      pcg->SetRelTol(1e-12);
      pcg->SetAbsTol(1e-12);
      pcg->SetMaxIter(200);
      pcg->SetPrintLevel(1);

      UserMonitor mymon(a,b);
      if (visualization && petscmonitor)
      {
         pcg->SetMonitor(&mymon);
         pcg->iterative_mode = true;
         X.Randomize();
      }
      pcg->Mult(B, X);
      delete pcg;
   }

   // 14. Recover the parallel grid function corresponding to X. This is the
   //     local finite element solution on each processor.
   a->RecoverFEMSolution(X, *b, x);
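   // RecoverFEMSolution completes the true-dof solution X to the full grid
   // function x, filling in the eliminated essential boundary values and,
   // when static condensation is enabled, the condensed interior dofs.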

   // 15. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 16. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << *pmesh << x << flush;
   }

   // 17. Free the used memory.
   if (delete_fec)
   {
      delete fec;
   }
   delete a;
   delete b;
   delete fespace;
   delete pmesh;
   delete prec;

   // We finalize PETSc
   MFEMFinalizePetsc();

   return 0;
}