// MFEM Example 14 - Parallel Version
//
// Compile with: make ex14p
//
// Sample runs: mpirun -np 4 ex14p -m ../data/inline-quad.mesh -o 0
// mpirun -np 4 ex14p -m ../data/star.mesh -o 2
// mpirun -np 4 ex14p -m ../data/star-mixed.mesh -o 2
// mpirun -np 4 ex14p -m ../data/star-mixed.mesh -o 2 -k 0 -e 1
// mpirun -np 4 ex14p -m ../data/escher.mesh -s 1
// mpirun -np 4 ex14p -m ../data/fichera.mesh -s 1 -k 1
// mpirun -np 4 ex14p -m ../data/fichera-mixed.mesh -s 1 -k 1
// mpirun -np 4 ex14p -m ../data/square-disc-p2.vtk -o 2
// mpirun -np 4 ex14p -m ../data/square-disc-p3.mesh -o 3
// mpirun -np 4 ex14p -m ../data/square-disc-nurbs.mesh -o 1
// mpirun -np 4 ex14p -m ../data/disc-nurbs.mesh -rs 4 -o 2 -s 1 -k 0
// mpirun -np 4 ex14p -m ../data/pipe-nurbs.mesh -o 1
// mpirun -np 4 ex14p -m ../data/inline-segment.mesh -rs 5
// mpirun -np 4 ex14p -m ../data/amr-quad.mesh -rs 3
// mpirun -np 4 ex14p -m ../data/amr-hex.mesh
//
// Description: This example code demonstrates the use of MFEM to define a
// discontinuous Galerkin (DG) finite element discretization of
// the Laplace problem -Delta u = 1 with homogeneous Dirichlet
// boundary conditions. Finite element spaces of any order,
// including zero on regular grids, are supported. The example
// highlights the use of discontinuous spaces and DG-specific face
// integrators.
//
// We recommend viewing examples 1 and 9 before viewing this
// example.
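//
// For reference, the interior and boundary face terms added by
// DGDiffusionIntegrator in step 9 below have (paraphrasing the class
// documentation in MFEM) roughly the form
//
//    - < {(Q grad(u)).n}, [v] > + sigma < [u], {(Q grad(v)).n} >
//    + kappa < {h^{-1} Q} [u], [v] >,
//
// with diffusion coefficient Q; sigma = -1 gives the symmetric interior
// penalty (SIPG) method and sigma = +1 a non-symmetric variant, while
// kappa weights the jump penalty.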
#include "mfem.hpp"
#include <fstream>
#include <iostream>
using namespace std;
using namespace mfem;
class CustomSolverMonitor : public IterativeSolverMonitor
{
public:
CustomSolverMonitor(const ParMesh *m,
ParGridFunction *f) :
pmesh(m),
pgf(f) {}
void MonitorSolution(int i, double norm, const Vector &x, bool final)
{
char vishost[] = "localhost";
int visport = 19916;
int num_procs, myid;
MPI_Comm_size(pmesh->GetComm(),&num_procs);
MPI_Comm_rank(pmesh->GetComm(),&myid);
pgf->SetFromTrueDofs(x);
socketstream sol_sock(vishost, visport);
sol_sock << "parallel " << num_procs << " " << myid << "\n";
sol_sock.precision(8);
sol_sock << "solution\n" << *pmesh << *pgf
<< "window_title 'Iteration no " << i << "'"
<< "keys rRjlc\n" << flush;
}
private:
const ParMesh *pmesh;
ParGridFunction *pgf;
};
int main(int argc, char *argv[])
{
// 1. Initialize MPI and HYPRE.
Mpi::Init(argc, argv);
int num_procs = Mpi::WorldSize();
int myid = Mpi::WorldRank();
Hypre::Init();
// 2. Parse command-line options.
const char *mesh_file = "../data/star.mesh";
int ser_ref_levels = -1;
int par_ref_levels = 2;
int order = 1;
double sigma = -1.0;
double kappa = -1.0;
double eta = 0.0;
bool visualization = true;
OptionsParser args(argc, argv);
args.AddOption(&mesh_file, "-m", "--mesh",
"Mesh file to use.");
args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
"Number of times to refine the mesh uniformly in serial,"
" -1 for auto.");
args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
"Number of times to refine the mesh uniformly in parallel.");
args.AddOption(&order, "-o", "--order",
"Finite element order (polynomial degree) >= 0.");
args.AddOption(&sigma, "-s", "--sigma",
"One of the three DG penalty parameters, typically +1/-1."
" See the documentation of class DGDiffusionIntegrator.");
args.AddOption(&kappa, "-k", "--kappa",
"One of the three DG penalty parameters, should be positive."
" Negative values are replaced with (order+1)^2.");
args.AddOption(&eta, "-e", "--eta", "BR2 penalty parameter.");
args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
"--no-visualization",
"Enable or disable GLVis visualization.");
args.Parse();
if (!args.Good())
{
if (myid == 0)
{
args.PrintUsage(cout);
}
return 1;
}
if (kappa < 0)
{
kappa = (order+1)*(order+1);
}
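// Note: together with the h^{-1} factor built into DGDiffusionIntegrator,
// a penalty of roughly (order+1)^2 is the customary interior-penalty choice
// and is typically large enough to keep the SIPG form coercive.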
if (myid == 0)
{
args.PrintOptions(cout);
}
// 3. Read the (serial) mesh from the given mesh file on all processors. We
// can handle triangular, quadrilateral, tetrahedral and hexahedral meshes
// with the same code. NURBS meshes are projected to curved meshes of order
// max(order, 1) below.
Mesh *mesh = new Mesh(mesh_file, 1, 1);
int dim = mesh->Dimension();
// 4. Refine the serial mesh on all processors to increase the resolution. In
// this example we do 'ser_ref_levels' of uniform refinement. By default,
// or if ser_ref_levels < 0, we choose it to be the largest number that
// gives a final mesh with no more than 10,000 elements.
{
if (ser_ref_levels < 0)
{
ser_ref_levels = (int)floor(log(10000./mesh->GetNE())/log(2.)/dim);
}
for (int l = 0; l < ser_ref_levels; l++)
{
mesh->UniformRefinement();
}
}
if (mesh->NURBSext)
{
mesh->SetCurvature(max(order, 1));
}
// 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
// this mesh further in parallel to increase the resolution. Once the
// parallel mesh is defined, the serial mesh can be deleted.
ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
delete mesh;
{
for (int l = 0; l < par_ref_levels; l++)
{
pmesh->UniformRefinement();
}
}
// 6. Define a parallel finite element space on the parallel mesh. Here we
// use discontinuous finite elements of the specified order >= 0.
FiniteElementCollection *fec = new DG_FECollection(order, dim);
ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
HYPRE_BigInt size = fespace->GlobalTrueVSize();
if (myid == 0)
{
cout << "Number of unknowns: " << size << endl;
}
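// Since the space is discontinuous, every element owns its degrees of
// freedom; for example, at order p on quadrilaterals each element
// contributes (p+1)^2 unknowns, so the count printed above is simply
// (dofs per element) times the global number of elements when all elements
// are of the same type.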
// 7. Set up the parallel linear form b(.) which corresponds to the
// right-hand side of the FEM linear system.
ParLinearForm *b = new ParLinearForm(fespace);
ConstantCoefficient one(1.0);
ConstantCoefficient zero(0.0);
b->AddDomainIntegrator(new DomainLFIntegrator(one));
b->AddBdrFaceIntegrator(
new DGDirichletLFIntegrator(zero, one, sigma, kappa));
b->Assemble();
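// DGDirichletLFIntegrator supplies the boundary terms consistent with the DG
// diffusion form, roughly
//    sigma < u_D, (Q grad(v)).n > + kappa < {h^{-1} Q} u_D, v >,
// for Dirichlet data u_D (here the zero coefficient). With u_D = 0 these
// terms vanish, but keeping them lets the same setup handle nonzero data.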
// 8. Define the solution vector x as a parallel finite element grid function
// corresponding to fespace. Initialize x with initial guess of zero.
ParGridFunction x(fespace);
x = 0.0;
// 9. Set up the bilinear form a(.,.) on the finite element space
// corresponding to the Laplacian operator -Delta, by adding the Diffusion
// domain integrator and the interior and boundary DG face integrators.
// Note that boundary conditions are imposed weakly in the form, so there
// is no need for dof elimination. After serial and parallel assembly we
// extract the corresponding parallel matrix A.
ParBilinearForm *a = new ParBilinearForm(fespace);
a->AddDomainIntegrator(new DiffusionIntegrator(one));
a->AddInteriorFaceIntegrator(new DGDiffusionIntegrator(one, sigma, kappa));
a->AddBdrFaceIntegrator(new DGDiffusionIntegrator(one, sigma, kappa));
if (eta > 0)
{
a->AddInteriorFaceIntegrator(new DGDiffusionBR2Integrator(*fespace, eta));
a->AddBdrFaceIntegrator(new DGDiffusionBR2Integrator(*fespace, eta));
}
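// When eta > 0, DGDiffusionBR2Integrator adds the lifting-based stabilization
// of Bassi and Rebay (BR2) on top of the interior penalty terms above; see its
// documentation in MFEM for the exact form it assembles.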
a->Assemble();
a->Finalize();
// 10. Define the parallel (hypre) matrix and vectors representing a(.,.),
// b(.) and the finite element approximation.
HypreParMatrix *A = a->ParallelAssemble();
HypreParVector *B = b->ParallelAssemble();
HypreParVector *X = x.ParallelProject();
delete a;
delete b;
// 11. Depending on the symmetry of A, define and apply a parallel PCG or
// GMRES solver for AX=B using the BoomerAMG preconditioner from hypre.
HypreSolver *amg = new HypreBoomerAMG(*A);
if (sigma == -1.0)
{
HyprePCG pcg(*A);
pcg.SetTol(1e-12);
pcg.SetMaxIter(500);
pcg.SetPrintLevel(2);
pcg.SetPreconditioner(*amg);
pcg.Mult(*B, *X);
}
else
{
CustomSolverMonitor monitor(pmesh, &x);
GMRESSolver gmres(MPI_COMM_WORLD);
gmres.SetAbsTol(0.0);
gmres.SetRelTol(1e-12);
gmres.SetMaxIter(500);
gmres.SetKDim(10);
gmres.SetPrintLevel(1);
gmres.SetOperator(*A);
gmres.SetPreconditioner(*amg);
gmres.SetMonitor(monitor);
gmres.Mult(*B, *X);
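// Optionally, the convergence status and iteration count could be reported
// on the root rank; a minimal sketch using the IterativeSolver base-class
// accessors:
if (myid == 0)
{
   cout << "GMRES " << (gmres.GetConverged() ? "converged" : "did not converge")
        << " in " << gmres.GetNumIterations() << " iterations." << endl;
}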
}
delete amg;
// 12. Extract the parallel grid function corresponding to the finite element
// approximation X. This is the local solution on each processor.
x = *X;
// 13. Save the refined mesh and the solution in parallel. This output can
// be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
{
ostringstream mesh_name, sol_name;
mesh_name << "mesh." << setfill('0') << setw(6) << myid;
sol_name << "sol." << setfill('0') << setw(6) << myid;
ofstream mesh_ofs(mesh_name.str().c_str());
mesh_ofs.precision(8);
pmesh->Print(mesh_ofs);
ofstream sol_ofs(sol_name.str().c_str());
sol_ofs.precision(8);
x.Save(sol_ofs);
}
// 14. Send the solution by socket to a GLVis server.
if (visualization)
{
char vishost[] = "localhost";
int visport = 19916;
socketstream sol_sock(vishost, visport);
sol_sock << "parallel " << num_procs << " " << myid << "\n";
sol_sock.precision(8);
sol_sock << "solution\n" << *pmesh << x << flush;
}
// 15. Free the used memory.
delete X;
delete B;
delete A;
delete fespace;
delete fec;
delete pmesh;
return 0;
}