Merge pull request #119 from awslabs/sjg/quadrature-order-dev
Organize the default quadrature rule order
sebastiangrimberg authored Dec 13, 2023
2 parents da5056e + 187c940, commit cb15f1f
Showing 55 changed files with 461 additions and 341 deletions.
5 changes: 5 additions & 0 deletions docs/src/config/solver.md
@@ -89,6 +89,11 @@ Thus, this object is only relevant for [`config["Problem"]["Type"]: "Magnetostat
`"Linear"` : Top-level object for configuring the linear solver employed by all simulation
types.

### Advanced solver options

- `"QuadratureOrderJacobian" [false]`
- `"ExtraQuadratureOrder" [0]`
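
The sketch below is an illustration only (not part of the diff and not Palace source): one plausible way the two options above could enter the default quadrature order selection, following the `p_trial + p_test + w + q_extra` rule described in the comment removed from `palace/fem/bilinearform.hpp` later in this commit. The helper name, signature, and exact formula are assumptions.

```cpp
// Hypothetical helper illustrating the two options documented above:
// order_jacobian ~ "QuadratureOrderJacobian", extra_order ~ "ExtraQuadratureOrder".
int DefaultQuadratureOrder(int p_trial, int p_test, int geom_order,
                           bool order_jacobian = false, int extra_order = 0)
{
  // Integrate products of trial and test basis functions exactly, plus any extra
  // user-requested order.
  int q = p_trial + p_test + extra_order;
  if (order_jacobian)
  {
    // Optionally account for the element geometry (Jacobian) order of curved elements.
    q += geom_order;
  }
  return q;
}
```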

## `solver["Eigenmode"]`

```json
12 changes: 6 additions & 6 deletions docs/src/examples/rings.md
@@ -57,9 +57,9 @@ The computed inductance matrix is saved to disk as `postpro/terminal-M.csv`, and
show its contents:

```
i, M[i][1] (H), M[i][2] (H)
1.000000e+00, +4.272291158e-11, +1.959927760e-12
2.000000e+00, +1.959927760e-12, +7.131293160e-10
i, M[i][1] (H), M[i][2] (H)
1.000000000e+00, +4.271874130e-11, +1.959879250e-12
2.000000000e+00, +1.959879250e-12, +7.131301882e-10
```

According to the analytic expressions above, for this geometry we should have
@@ -89,9 +89,9 @@ under [`config["Boundaries"]["Postprocessing"]["Inductance"]`]
file. The resulting postprocessed values are written to `postpro/surface-M.csv`:

```
i, M[1] (H), M[2] (H)
1.000000e+00, +4.260888637e-11, +1.890068391e-12
2.000000e+00, +1.955578068e-12, +7.130510941e-10
i, M[1] (H), M[2] (H)
1.000000000e+00, +4.260888932e-11, +1.890068508e-12
2.000000000e+00, +1.955578103e-12, +7.130510940e-10
```

The values computed using the flux integral method are in close agreement to those above, as
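
To make the close agreement between the two tables concrete, the standalone snippet below (values copied from the CSV output shown above; not part of the example itself) computes the entrywise relative differences between the terminal-based and flux-integral inductances.

```cpp
#include <cmath>
#include <cstdio>

int main()
{
  // postpro/terminal-M.csv (updated values above), in H.
  const double M_term[2][2] = {{4.271874130e-11, 1.959879250e-12},
                               {1.959879250e-12, 7.131301882e-10}};
  // postpro/surface-M.csv (updated values above), in H.
  const double M_surf[2][2] = {{4.260888932e-11, 1.890068508e-12},
                               {1.955578103e-12, 7.130510940e-10}};
  for (int i = 0; i < 2; i++)
  {
    for (int j = 0; j < 2; j++)
    {
      // Differences are at the few-percent level or below, largest for the small
      // off-diagonal mutual term.
      const double rel = std::abs(M_surf[i][j] - M_term[i][j]) / std::abs(M_term[i][j]);
      std::printf("M[%d][%d]: relative difference = %.2e\n", i + 1, j + 1, rel);
    }
  }
  return 0;
}
```
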
18 changes: 9 additions & 9 deletions docs/src/examples/spheres.md
@@ -57,9 +57,9 @@ The resulting extracted Maxwell capacitance matrix is saved to disk in the CSV f
`postpro/terminal-C.csv`:

```
i, C[i][1] (F), C[i][2] (F)
1.000000e+00, +1.237470440e-12, -4.771229193e-13
2.000000e+00, -4.771229193e-13, +2.478512278e-12
i, C[i][1] (F), C[i][2] (F)
1.000000000e+00, +1.237470370e-12, -4.771228724e-13
2.000000000e+00, -4.771228724e-13, +2.478512148e-12
```

In this case, the analytic solution yields
@@ -80,9 +80,9 @@ The mutual capacitance matrix can be computed from its Maxwell counterpart, and
`postpro/terminal-Cm.csv`:

```
i, C_m[i][1] (F), C_m[i][2] (F)
1.000000e+00, +7.603475205e-13, +4.771229193e-13
2.000000e+00, +4.771229193e-13, +2.001389358e-12
i, C_m[i][1] (F), C_m[i][2] (F)
1.000000000e+00, +7.603474979e-13, +4.771228724e-13
2.000000000e+00, +4.771228724e-13, +2.001389275e-12
```
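
The relation between the two matrices is the standard one: off-diagonal mutual capacitances are the negated Maxwell off-diagonal entries, and each diagonal entry is the corresponding row sum. A quick standalone check against the numbers in the tables (not part of the example itself):

```cpp
#include <cstdio>

int main()
{
  // Maxwell capacitance matrix from postpro/terminal-C.csv above, in F.
  const double C[2][2] = {{1.237470370e-12, -4.771228724e-13},
                          {-4.771228724e-13, 2.478512148e-12}};
  double Cm[2][2];
  for (int i = 0; i < 2; i++)
  {
    for (int j = 0; j < 2; j++)
    {
      // Diagonal: row sum (capacitance to ground); off-diagonal: sign flip.
      Cm[i][j] = (i == j) ? C[i][0] + C[i][1] : -C[i][j];
    }
  }
  // Reproduces postpro/terminal-Cm.csv above to the printed precision.
  std::printf("%.9e %.9e\n%.9e %.9e\n", Cm[0][0], Cm[0][1], Cm[1][0], Cm[1][1]);
  return 0;
}
```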

Additionally, while the typical approach used by *Palace* for lumped parameter extraction
@@ -94,9 +94,9 @@ configuration file for this example contains this information under
capacitances are written to `postpro/surface-C.csv`:

```
i, C[1] (F), C[2] (F)
1.000000e+00, +1.219433080e-12, -4.711763113e-13
2.000000e+00, -4.701805852e-13, +2.443652512e-12
i, C[1] (F), C[2] (F)
1.000000000e+00, +1.219442420e-12, -4.711796343e-13
2.000000000e+00, -4.701844045e-13, +2.443672442e-12
```

and agree closely with the values computed using the default method above, as expected.
4 changes: 2 additions & 2 deletions palace/drivers/drivensolver.cpp
@@ -140,7 +140,7 @@ ErrorIndicator DrivenSolver::SweepUniform(SpaceOperator &spaceop, PostOperator &
// Initialize structures for storing and reducing the results of error estimation.
CurlFluxErrorEstimator<ComplexVector> estimator(
spaceop.GetMaterialOp(), spaceop.GetNDSpaces(), iodata.solver.linear.estimator_tol,
iodata.solver.linear.estimator_max_it, 0, iodata.solver.pa_order_threshold);
iodata.solver.linear.estimator_max_it, 0);
ErrorIndicator indicator;

// Main frequency sweep loop.
@@ -255,7 +255,7 @@ ErrorIndicator DrivenSolver::SweepAdaptive(SpaceOperator &spaceop, PostOperator
// Initialize structures for storing and reducing the results of error estimation.
CurlFluxErrorEstimator<ComplexVector> estimator(
spaceop.GetMaterialOp(), spaceop.GetNDSpaces(), iodata.solver.linear.estimator_tol,
iodata.solver.linear.estimator_max_it, 0, iodata.solver.pa_order_threshold);
iodata.solver.linear.estimator_max_it, 0);
ErrorIndicator indicator;

// Configure the PROM operator which performs the parameter space sampling and basis
5 changes: 2 additions & 3 deletions palace/drivers/eigensolver.cpp
@@ -165,8 +165,7 @@ EigenSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &mesh) cons
divfree = std::make_unique<DivFreeSolver>(
spaceop.GetMaterialOp(), spaceop.GetNDSpace(), spaceop.GetH1Spaces(),
spaceop.GetAuxBdrTDofLists(), iodata.solver.linear.divfree_tol,
iodata.solver.linear.divfree_max_it, divfree_verbose,
iodata.solver.pa_order_threshold);
iodata.solver.linear.divfree_max_it, divfree_verbose);
eigen->SetDivFreeProjector(*divfree);
}

@@ -270,7 +269,7 @@ EigenSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &mesh) cons
Mpi::Print("Computing solution error estimates\n\n");
CurlFluxErrorEstimator<ComplexVector> estimator(
spaceop.GetMaterialOp(), spaceop.GetNDSpaces(), iodata.solver.linear.estimator_tol,
iodata.solver.linear.estimator_max_it, 0, iodata.solver.pa_order_threshold);
iodata.solver.linear.estimator_max_it, 0);
ErrorIndicator indicator;
for (int i = 0; i < iodata.solver.eigenmode.n; i++)
{
3 changes: 1 addition & 2 deletions palace/drivers/electrostaticsolver.cpp
@@ -100,8 +100,7 @@ ErrorIndicator ElectrostaticSolver::Postprocess(LaplaceOperator &laplaceop,
Mpi::Print("Computing solution error estimates\n\n");
GradFluxErrorEstimator estimator(laplaceop.GetMaterialOp(), laplaceop.GetH1Spaces(),
iodata.solver.linear.estimator_tol,
iodata.solver.linear.estimator_max_it, 0,
iodata.solver.pa_order_threshold);
iodata.solver.linear.estimator_max_it, 0);
ErrorIndicator indicator;
for (int i = 0; i < nstep; i++)
{
3 changes: 1 addition & 2 deletions palace/drivers/magnetostaticsolver.cpp
@@ -103,8 +103,7 @@ ErrorIndicator MagnetostaticSolver::Postprocess(CurlCurlOperator &curlcurlop,
Mpi::Print("Computing solution error estimates\n\n");
CurlFluxErrorEstimator<Vector> estimator(
curlcurlop.GetMaterialOp(), curlcurlop.GetNDSpaces(),
iodata.solver.linear.estimator_tol, iodata.solver.linear.estimator_max_it, 0,
iodata.solver.pa_order_threshold);
iodata.solver.linear.estimator_tol, iodata.solver.linear.estimator_max_it, 0);
ErrorIndicator indicator;
for (int i = 0; i < nstep; i++)
{
6 changes: 3 additions & 3 deletions palace/drivers/transientsolver.cpp
@@ -78,9 +78,9 @@ TransientSolver::Solve(const std::vector<std::unique_ptr<mfem::ParMesh>> &mesh)
Mpi::Print("\n");

// Initialize structures for storing and reducing the results of error estimation.
CurlFluxErrorEstimator<Vector> estimator(
spaceop.GetMaterialOp(), spaceop.GetNDSpaces(), iodata.solver.linear.estimator_tol,
iodata.solver.linear.estimator_max_it, 0, iodata.solver.pa_order_threshold);
CurlFluxErrorEstimator<Vector> estimator(spaceop.GetMaterialOp(), spaceop.GetNDSpaces(),
iodata.solver.linear.estimator_tol,
iodata.solver.linear.estimator_max_it, 0);
ErrorIndicator indicator;

// Main time integration loop.
14 changes: 7 additions & 7 deletions palace/fem/bilinearform.cpp
@@ -74,7 +74,7 @@ GetElementIndices(const mfem::ParFiniteElementSpace &trial_fespace,

} // namespace

std::unique_ptr<ceed::Operator> BilinearForm::Assemble() const
std::unique_ptr<ceed::Operator> BilinearForm::PartialAssemble() const
{
MFEM_VERIFY(trial_fespace.GetParMesh() == test_fespace.GetParMesh(),
"Trial and test finite element spaces must correspond to the same mesh!");
@@ -145,8 +145,8 @@ std::unique_ptr<ceed::Operator> BilinearForm::Assemble() const
for (const auto &value : element_indices)
{
const std::vector<int> &indices = value.second;
const int q_order = fem::GetDefaultIntegrationOrder(
trial_fespace, test_fespace, indices, use_bdr, q_extra_pk, q_extra_qk);
const int q_order = fem::DefaultIntegrationOrder::Get(trial_fespace, test_fespace,
indices, use_bdr);
const mfem::IntegrationRule &ir =
mfem::IntRules.Get(mesh.GetElementGeometry(indices[0]), q_order);

@@ -182,8 +182,8 @@ std::unique_ptr<ceed::Operator> BilinearForm::Assemble() const
for (const auto &value : element_indices)
{
const std::vector<int> &indices = value.second;
const int q_order = fem::GetDefaultIntegrationOrder(
trial_fespace, test_fespace, indices, use_bdr, q_extra_pk, q_extra_qk);
const int q_order = fem::DefaultIntegrationOrder::Get(trial_fespace, test_fespace,
indices, use_bdr);
const mfem::IntegrationRule &ir =
mfem::IntRules.Get(mesh.GetBdrElementGeometry(indices[0]), q_order);

Expand Down Expand Up @@ -218,7 +218,7 @@ std::unique_ptr<mfem::SparseMatrix> BilinearForm::FullAssemble(const ceed::Opera
return ceed::CeedOperatorFullAssemble(op, skip_zeros, false);
}

std::unique_ptr<ceed::Operator> DiscreteLinearOperator::Assemble() const
std::unique_ptr<ceed::Operator> DiscreteLinearOperator::PartialAssemble() const
{
// Construct dof multiplicity vector for scaling to account for dofs shared between
// elements (on host, then copy to device).
Expand All @@ -239,7 +239,7 @@ std::unique_ptr<ceed::Operator> DiscreteLinearOperator::Assemble() const
test_multiplicity.UseDevice(true);
test_multiplicity.Reciprocal();

auto op = a.Assemble();
auto op = a.PartialAssemble();
op->SetDofMultiplicity(std::move(test_multiplicity));
return op;
}
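
For readers unfamiliar with the `mfem::IntRules.Get(geometry, order)` lookup appearing in the hunks above, here is a minimal standalone illustration (assuming only a working MFEM installation): it requests a rule of a given order on the reference tetrahedron and verifies that the quadrature weights sum to the reference volume of 1/6.

```cpp
#include <cstdio>
#include "mfem.hpp"

int main()
{
  // Look up a quadrature rule of the requested order for a reference tetrahedron, as
  // BilinearForm::PartialAssemble() does per batch of elements of the same geometry.
  const int q_order = 5;
  const mfem::IntegrationRule &ir =
      mfem::IntRules.Get(mfem::Geometry::TETRAHEDRON, q_order);
  double sum = 0.0;
  for (int i = 0; i < ir.GetNPoints(); i++)
  {
    sum += ir.IntPoint(i).weight;
  }
  // Any rule that integrates constants exactly returns the reference volume, 1/6.
  std::printf("points = %d, sum of weights = %.6f\n", ir.GetNPoints(), sum);
  return 0;
}
```
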
55 changes: 21 additions & 34 deletions palace/fem/bilinearform.hpp
@@ -27,46 +27,33 @@ class BilinearForm
// List of domain and boundary integrators making up the bilinear form.
std::vector<std::unique_ptr<BilinearFormIntegrator>> domain_integs, boundary_integs;

// Integration order for quadrature rules is calculated as p_trial + p_test + w + q_extra,
// where p_test and p_trial are the test and trial space basis function orders and w is
// the geometry order.
int q_extra_pk, q_extra_qk;
public:
// Order above which to use partial assembly vs. full.
inline static int pa_order_threshold = 1;

public:
BilinearForm(const mfem::ParFiniteElementSpace &trial_fespace,
const mfem::ParFiniteElementSpace &test_fespace, int q_extra_pk,
int q_extra_qk)
: trial_fespace(trial_fespace), test_fespace(test_fespace), q_extra_pk(q_extra_pk),
q_extra_qk(q_extra_qk)
{
}
BilinearForm(const mfem::ParFiniteElementSpace &trial_fespace,
const mfem::ParFiniteElementSpace &test_fespace, int q_extra = 0)
: BilinearForm(trial_fespace, test_fespace, q_extra, q_extra)
{
}
BilinearForm(const mfem::ParFiniteElementSpace &fespace, int q_extra_pk, int q_extra_qk)
: BilinearForm(fespace, fespace, q_extra_pk, q_extra_qk)
const mfem::ParFiniteElementSpace &test_fespace)
: trial_fespace(trial_fespace), test_fespace(test_fespace)
{
}
BilinearForm(const mfem::ParFiniteElementSpace &fespace, int q_extra = 0)
: BilinearForm(fespace, fespace, q_extra, q_extra)
BilinearForm(const mfem::ParFiniteElementSpace &fespace) : BilinearForm(fespace, fespace)
{
}

const auto &GetTrialSpace() const { return trial_fespace; }
const auto &GetTestSpace() const { return test_fespace; }

// MFEM's RT_FECollection actually returns order + 1 for GetOrder() for historical
// reasons.
// Returns order such that the minimum for all element types is 1. MFEM's RT_FECollection
// actually already returns order + 1 for GetOrder() for historical reasons.
auto GetMaxElementOrder() const
{
const auto &trial_fec = *trial_fespace.FEColl();
const auto &test_fec = *test_fespace.FEColl();
return std::max(
dynamic_cast<const mfem::RT_FECollection *>(&trial_fec) ? trial_fec.GetOrder() - 1
dynamic_cast<const mfem::L2_FECollection *>(&trial_fec) ? trial_fec.GetOrder() + 1
: trial_fec.GetOrder(),
dynamic_cast<const mfem::RT_FECollection *>(&test_fec) ? test_fec.GetOrder() - 1
dynamic_cast<const mfem::L2_FECollection *>(&test_fec) ? test_fec.GetOrder() + 1
: test_fec.GetOrder());
}

@@ -82,25 +69,25 @@ class BilinearForm
boundary_integs.push_back(std::make_unique<T>(std::forward<U>(args)...));
}

std::unique_ptr<Operator> Assemble(int pa_order_threshold, bool skip_zeros) const
std::unique_ptr<Operator> Assemble(bool skip_zeros) const
{
if (GetMaxElementOrder() >= pa_order_threshold)
{
return Assemble();
return PartialAssemble();
}
else
{
return FullAssemble(skip_zeros);
}
}

std::unique_ptr<ceed::Operator> PartialAssemble() const;

std::unique_ptr<mfem::SparseMatrix> FullAssemble(bool skip_zeros) const
{
return FullAssemble(*Assemble(), skip_zeros);
return FullAssemble(*PartialAssemble(), skip_zeros);
}

std::unique_ptr<ceed::Operator> Assemble() const;

static std::unique_ptr<mfem::SparseMatrix> FullAssemble(const ceed::Operator &op,
bool skip_zeros);
};
@@ -128,25 +115,25 @@ class DiscreteLinearOperator
a.AddDomainIntegrator<T>(std::forward<U>(args)...);
}

std::unique_ptr<Operator> Assemble(int pa_order_threshold, bool skip_zeros) const
std::unique_ptr<Operator> Assemble(bool skip_zeros) const
{
if (a.GetMaxElementOrder() >= pa_order_threshold)
if (a.GetMaxElementOrder() >= a.pa_order_threshold)
{
return Assemble();
return PartialAssemble();
}
else
{
return FullAssemble(skip_zeros);
}
}

std::unique_ptr<ceed::Operator> PartialAssemble() const;

std::unique_ptr<mfem::SparseMatrix> FullAssemble(bool skip_zeros) const
{
return FullAssemble(*Assemble(), skip_zeros);
return FullAssemble(*a.PartialAssemble(), skip_zeros);
}

std::unique_ptr<ceed::Operator> Assemble() const;

static std::unique_ptr<mfem::SparseMatrix> FullAssemble(const ceed::Operator &op,
bool skip_zeros);
};
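
The `Assemble(bool skip_zeros)` dispatch above is now driven by the static `pa_order_threshold` member (default 1) rather than a threaded-through argument. A minimal self-contained sketch of that decision, useful for seeing what the default implies:

```cpp
#include <cstdio>

enum class AssemblyKind { Full, Partial };

// Mirrors the branch in BilinearForm::Assemble(bool): orders at or above the threshold
// keep a partially assembled (matrix-free) operator, lower orders get a fully assembled
// sparse matrix.
AssemblyKind ChooseAssembly(int max_elem_order, int pa_order_threshold = 1)
{
  return (max_elem_order >= pa_order_threshold) ? AssemblyKind::Partial
                                                : AssemblyKind::Full;
}

int main()
{
  // With the default threshold of 1, every order uses partial assembly; raising the
  // threshold to 3 would fully assemble the p = 1 and p = 2 operators instead.
  std::printf("p = 2, threshold 1 -> partial? %d\n",
              ChooseAssembly(2) == AssemblyKind::Partial);
  std::printf("p = 2, threshold 3 -> partial? %d\n",
              ChooseAssembly(2, 3) == AssemblyKind::Partial);
  return 0;
}
```
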
22 changes: 14 additions & 8 deletions palace/fem/fespace.cpp
@@ -39,47 +39,53 @@ const Operator &AuxiliaryFiniteElementSpace::BuildDiscreteInterpolator() const
// Discrete gradient interpolator
DiscreteLinearOperator interp(*this, primal_fespace);
interp.AddDomainInterpolator<GradientInterpolator>();
G = std::make_unique<ParOperator>(interp.Assemble(), *this, primal_fespace, true);
G = std::make_unique<ParOperator>(interp.PartialAssemble(), *this, primal_fespace,
true);
}
else if (primal_map_type == mfem::FiniteElement::VALUE &&
aux_map_type == mfem::FiniteElement::H_CURL)
{
// Discrete gradient interpolator (spaces reversed)
DiscreteLinearOperator interp(primal_fespace, *this);
interp.AddDomainInterpolator<GradientInterpolator>();
G = std::make_unique<ParOperator>(interp.Assemble(), primal_fespace, *this, true);
G = std::make_unique<ParOperator>(interp.PartialAssemble(), primal_fespace, *this,
true);
}
else if (aux_map_type == mfem::FiniteElement::H_CURL &&
primal_map_type == mfem::FiniteElement::H_DIV)
{
// Discrete curl interpolator
DiscreteLinearOperator interp(*this, primal_fespace);
interp.AddDomainInterpolator<CurlInterpolator>();
G = std::make_unique<ParOperator>(interp.Assemble(), *this, primal_fespace, true);
G = std::make_unique<ParOperator>(interp.PartialAssemble(), *this, primal_fespace,
true);
}
else if (primal_map_type == mfem::FiniteElement::H_CURL &&
aux_map_type == mfem::FiniteElement::H_DIV)
{
// Discrete curl interpolator (spaces reversed)
DiscreteLinearOperator interp(primal_fespace, *this);
interp.AddDomainInterpolator<CurlInterpolator>();
G = std::make_unique<ParOperator>(interp.Assemble(), primal_fespace, *this, true);
G = std::make_unique<ParOperator>(interp.PartialAssemble(), primal_fespace, *this,
true);
}
else if (aux_map_type == mfem::FiniteElement::H_DIV &&
primal_map_type == mfem::FiniteElement::INTEGRAL)
{
// Discrete divergence interpolator
DiscreteLinearOperator interp(*this, primal_fespace);
interp.AddDomainInterpolator<DivergenceInterpolator>();
G = std::make_unique<ParOperator>(interp.Assemble(), *this, primal_fespace, true);
G = std::make_unique<ParOperator>(interp.PartialAssemble(), *this, primal_fespace,
true);
}
else if (primal_map_type == mfem::FiniteElement::H_DIV &&
aux_map_type == mfem::FiniteElement::INTEGRAL)
{
// Discrete divergence interpolator (spaces reversed)
DiscreteLinearOperator interp(primal_fespace, *this);
interp.AddDomainInterpolator<DivergenceInterpolator>();
G = std::make_unique<ParOperator>(interp.Assemble(), primal_fespace, *this, true);
G = std::make_unique<ParOperator>(interp.PartialAssemble(), primal_fespace, *this,
true);
}
else
{
@@ -108,8 +114,8 @@ BaseFiniteElementSpaceHierarchy<FESpace>::BuildProlongationAtLevel(std::size_t l
{
DiscreteLinearOperator p(*fespaces[l], *fespaces[l + 1]);
p.AddDomainInterpolator<IdentityInterpolator>();
P[l] =
std::make_unique<ParOperator>(p.Assemble(), *fespaces[l], *fespaces[l + 1], true);
P[l] = std::make_unique<ParOperator>(p.PartialAssemble(), *fespaces[l],
*fespaces[l + 1], true);
}

return *P[l];
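
As a compact reference for the branches above (all of which now call `PartialAssemble()`), the pairing of MFEM map types to discrete interpolators is summarized by the trivial program below; the same interpolator is reused when the primal and auxiliary roles are reversed. This only restates what the diff already shows.

```cpp
#include <cstdio>

int main()
{
  // Map-type pairs handled in BuildDiscreteInterpolator() and the interpolator built
  // for each (the spaces may appear in either order in the actual branches).
  std::puts("VALUE    (H1)      -> H_CURL   (H(curl)) : GradientInterpolator");
  std::puts("H_CURL   (H(curl)) -> H_DIV    (H(div))  : CurlInterpolator");
  std::puts("H_DIV    (H(div))  -> INTEGRAL (L2)      : DivergenceInterpolator");
  return 0;
}
```
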