#ifndef DUNE_ISTL_FASTAMG_HH
#define DUNE_ISTL_FASTAMG_HH

#include <dune/common/exceptions.hh>
#include <dune/common/typetraits.hh>
/**
 * \brief A fast (sequential) algebraic multigrid based on agglomeration
 *        that saves memory bandwidth.
 */
template<class M, class X, class PI=SequentialInformation,
         class A=std::allocator<X> >
class FastAMG : public Preconditioner<X,X>
{
public:
  typedef M Operator;              //!< The matrix operator type.
  typedef PI ParallelInformation;  //!< The type of the parallel information.
  typedef MatrixHierarchy<M,ParallelInformation,A> OperatorHierarchy; //!< The operator hierarchy type.
  typedef typename OperatorHierarchy::ParallelInformationHierarchy ParallelInformationHierarchy;
  typedef X Domain;                //!< The domain type.
  typedef X Range;                 //!< The range type.
  typedef InverseOperator<X,X> CoarseSolver; //!< The type of the coarse solver.

  // (constructors and further public interface omitted in this excerpt)
  /** \brief Recalculate the matrix hierarchy. */
  void recalculateHierarchy()
  {
    matrices_->recalculateGalerkin(NegateSet<typename PI::OwnerSet>());
  }
private:
  /** \brief Build the matrix and parallel-information hierarchies. */
  template<class C>
  void createHierarchies(C& criterion,
                         const std::shared_ptr<const Operator>& matrixptr,
                         const PI& pinfo);
  /** \brief Context bundling the per-level iterators used during one cycle. */
  struct LevelContext
  {
    //! The iterator over the matrices.
    typename OperatorHierarchy::ParallelMatrixHierarchy::ConstIterator matrix;
    //! The iterator over the parallel information.
    typename ParallelInformationHierarchy::Iterator pinfo;
    //! The iterator over the redistribution information.
    typename OperatorHierarchy::RedistributeInfoList::const_iterator redist;
    //! The iterator over the aggregates maps.
    typename OperatorHierarchy::AggregatesMapList::const_iterator aggregates;
    //! The iterator over the left hand side.
    typename Hierarchy<Domain,A>::Iterator lhs;
    //! The iterator over the residuals.
    typename Hierarchy<Domain,A>::Iterator residual;
    //! The iterator over the right hand side.
    typename Hierarchy<Range,A>::Iterator rhs;
    //! The level index.
    std::size_t level;
  };
  /** \brief Multigrid cycle on one level. */
  void mgc(LevelContext& levelContext, Domain& x, const Range& b);

  /** \brief Apply presmoothing on the current level. */
  void presmooth(LevelContext& levelContext, Domain& x, const Range& b);

  /** \brief Apply postsmoothing on the current level. */
  void postsmooth(LevelContext& levelContext, Domain& x, const Range& b);

  /** \brief Move the iterators to the finer level and prolongate the coarse correction. */
  void moveToFineLevel(LevelContext& levelContext, bool processedFineLevel,
                       Domain& fineX);

  /** \brief Move the iterators to the coarser level. */
  bool moveToCoarseLevel(LevelContext& levelContext);

  /** \brief Initialize the iterators of a level context with the finest level. */
  void initIteratorsWithFineLevel(LevelContext& levelContext);
  std::shared_ptr<OperatorHierarchy> matrices_;   //!< The matrix hierarchy built by coarsening.
  std::shared_ptr<CoarseSolver> solver_;          //!< The solver used on the coarsest level.
  std::shared_ptr<Hierarchy<Range,A>> rhs_;       //!< The right hand side on each level.
  std::shared_ptr<Hierarchy<Domain,A>> lhs_;      //!< The approximate solution on each level.
  std::shared_ptr<Hierarchy<Domain,A>> residual_; //!< The residual on each level.
  std::shared_ptr<ScalarProduct> scalarProduct_;  //!< The scalar product on the coarsest level.
  std::size_t gamma_;                             //!< 1 yields a V-cycle, 2 a W-cycle.
  std::size_t preSteps_;                          //!< The number of presmoothing steps.
  std::size_t postSteps_;                         //!< The number of postsmoothing steps.
  bool buildHierarchy_;                           //!< Whether this instance built the hierarchy.
  bool symmetric;                                 //!< Whether the operator is assumed symmetric.
  bool coarsesolverconverged;                     //!< Whether the coarse solver converged.
  //! The smoother used on the coarsest level, an SSOR sweep.
  typedef SeqSSOR<typename M::matrix_type,X,X> Smoother;
  typedef typename SmootherTraits<Smoother>::Arguments SmootherArgs;
  typedef std::shared_ptr<Smoother> SmootherPointer;
  SmootherPointer coarseSmoother_;
  std::size_t verbosity_;                         //!< The verbosity level.
};
template<class M, class X, class PI, class A>
FastAMG<M,X,PI,A>::FastAMG(const FastAMG& amg)
  : matrices_(amg.matrices_), solver_(amg.solver_),
    rhs_(), lhs_(), residual_(), scalarProduct_(amg.scalarProduct_),
    gamma_(amg.gamma_), preSteps_(amg.preSteps_), postSteps_(amg.postSteps_),
    symmetric(amg.symmetric), coarsesolverconverged(amg.coarsesolverconverged),
    coarseSmoother_(amg.coarseSmoother_), verbosity_(amg.verbosity_)
{}
template<class M, class X, class PI, class A>
FastAMG<M,X,PI,A>::FastAMG(OperatorHierarchy& matrices, CoarseSolver& coarseSolver,
                           const Parameters& parms, bool symmetric_)
  : matrices_(stackobject_to_shared_ptr(matrices)),
    solver_(stackobject_to_shared_ptr(coarseSolver)), // non-owning: the caller keeps ownership
    rhs_(), lhs_(), residual_(), scalarProduct_(),
    gamma_(parms.getGamma()), preSteps_(parms.getNoPreSmoothSteps()),
    postSteps_(parms.getNoPostSmoothSteps()), buildHierarchy_(false),
    symmetric(symmetric_), coarsesolverconverged(true),
    coarseSmoother_(), verbosity_(parms.debugLevel())
{
  if(preSteps_>1||postSteps_>1)
  {
    std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
    preSteps_=postSteps_=1;
  }
  assert(matrices_->isBuilt());
  static_assert(std::is_same<PI,SequentialInformation>::value,
                "Currently only sequential runs are supported");
}
template<class M, class X, class PI, class A>
template<class C>
FastAMG<M,X,PI,A>::FastAMG(const Operator& matrix, const C& criterion,
                           const Parameters& parms, bool symmetric_,
                           const PI& pinfo)
  : solver_(), rhs_(), lhs_(), residual_(), scalarProduct_(), gamma_(parms.getGamma()),
    preSteps_(parms.getNoPreSmoothSteps()), postSteps_(parms.getNoPostSmoothSteps()),
    buildHierarchy_(true),
    symmetric(symmetric_), coarsesolverconverged(true),
    coarseSmoother_(), verbosity_(criterion.debugLevel())
{
  if(preSteps_>1||postSteps_>1)
  {
    std::cerr<<"WARNING only one step of smoothing is supported!"<<std::endl;
    preSteps_=postSteps_=1;
  }
  static_assert(std::is_same<PI,SequentialInformation>::value,
                "Currently only sequential runs are supported");

  auto matrixptr = stackobject_to_shared_ptr(matrix);
  createHierarchies(criterion, matrixptr, pinfo);
}
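/**
 * \par Usage sketch
 * How a FastAMG instance might be set up for an assembled scalar
 * BCRSMatrix. The criterion and parameter values below are illustrative
 * assumptions, not values prescribed by this header:
 * \code
 * typedef Dune::BCRSMatrix<Dune::FieldMatrix<double,1,1> > Matrix;
 * typedef Dune::BlockVector<Dune::FieldVector<double,1> > Vector;
 * typedef Dune::MatrixAdapter<Matrix,Vector,Vector> Operator;
 * typedef Dune::Amg::CoarsenCriterion<
 *   Dune::Amg::UnSymmetricCriterion<Matrix,Dune::Amg::FirstDiagonal> > Criterion;
 *
 * Operator fop(mat);             // mat: an assembled Matrix
 * Criterion criterion(15,2000);  // at most 15 levels, coarsen target 2000
 * Dune::Amg::Parameters parms;
 * Dune::Amg::FastAMG<Operator,Vector> amg(fop, criterion, parms);
 * \endcode
 */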
template<class M, class X, class PI, class A>
template<class C>
void FastAMG<M,X,PI,A>::createHierarchies(C& criterion,
                                          const std::shared_ptr<const Operator>& matrixptr,
                                          const PI& pinfo)
{
  Timer watch;
  matrices_ = std::make_shared<OperatorHierarchy>(
    std::const_pointer_cast<Operator>(matrixptr),
    stackobject_to_shared_ptr(const_cast<PI&>(pinfo)));

  matrices_->template build<NegateSet<typename PI::OwnerSet> >(criterion);

  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
    std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "
             <<watch.elapsed()<<" seconds."<<std::endl;

  if(buildHierarchy_ && matrices_->levels()==matrices_->maxlevels()) {
    // We have reached the coarsest level. Create the coarse smoother and solver.
    SmootherArgs sargs;
    sargs.iterations = 1;

    typename ConstructionTraits<Smoother>::Arguments cargs;
    cargs.setArgs(sargs);
    if(matrices_->redistributeInformation().back().isSetup()) {
      // Solve on the redistributed partitioning
      cargs.setMatrix(matrices_->matrices().coarsest().getRedistributed().getmat());
      cargs.setComm(matrices_->parallelInformation().coarsest().getRedistributed());
    }else{
      cargs.setMatrix(matrices_->matrices().coarsest()->getmat());
      cargs.setComm(*matrices_->parallelInformation().coarsest());
    }

    coarseSmoother_ = ConstructionTraits<Smoother>::construct(cargs);
    scalarProduct_ = createScalarProduct<X>(cargs.getComm(),category());

#if HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
#if HAVE_SUITESPARSE_UMFPACK
#define DIRECTSOLVER UMFPack
#else
#define DIRECTSOLVER SuperLU
#endif
    // Use a direct solver if we run sequentially or if only one process
    // remains on the coarsest level.
    if(std::is_same<ParallelInformation,SequentialInformation>::value
       || matrices_->parallelInformation().coarsest()->communicator().size()==1
       || (matrices_->parallelInformation().coarsest().isRedistributed()
           && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()==1
           && matrices_->parallelInformation().coarsest().getRedistributed().communicator().size()>0)) {
      if(verbosity_>0 && matrices_->parallelInformation().coarsest()->communicator().rank()==0)
        std::cout<<"Using superlu"<<std::endl;
      if(matrices_->parallelInformation().coarsest().isRedistributed())
      {
        if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
          // We are still participating on this level
          solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(
            matrices_->matrices().coarsest().getRedistributed().getmat(), false, false));
      }else
        solver_.reset(new DIRECTSOLVER<typename M::matrix_type>(
          matrices_->matrices().coarsest()->getmat(), false, false));
    }else
#endif // HAVE_SUPERLU || HAVE_SUITESPARSE_UMFPACK
    {
      // Fall back to a Krylov solver preconditioned with the coarse smoother
      if(matrices_->parallelInformation().coarsest().isRedistributed())
      {
        if(matrices_->matrices().coarsest().getRedistributed().getmat().N()>0)
          // We are still participating on this level
          solver_.reset(new BiCGSTABSolver<X>(
            const_cast<M&>(matrices_->matrices().coarsest().getRedistributed()),
            *coarseSmoother_, 1E-2, 1000, 0));
      }else
        solver_.reset(new BiCGSTABSolver<X>(
          const_cast<M&>(*matrices_->matrices().coarsest()),
          *coarseSmoother_, 1E-2, 1000, 0));
    }
  }

  if(verbosity_>0 && matrices_->parallelInformation().finest()->communicator().rank()==0)
    std::cout<<"Building Hierarchy of "<<matrices_->maxlevels()<<" levels took "
             <<watch.elapsed()<<" seconds."<<std::endl;
}
template<class M, class X, class PI, class A>
void FastAMG<M,X,PI,A>::pre(Domain& x, Range& b)
{
  Timer watch1;
  // Detect matrix rows where all off-diagonal entries are zero (Dirichlet
  // rows) and set x such that A_ii * x_i = b_i there, so users can be more
  // careless when setting up their linear systems.
  typedef typename M::matrix_type Matrix;
  typedef typename Matrix::ConstRowIterator RowIter;
  typedef typename Matrix::ConstColIterator ColIter;
  typedef typename Matrix::block_type Block;
  Block zero=0;

  const Matrix& mat=matrices_->matrices().finest()->getmat();
  for(RowIter row=mat.begin(); row!=mat.end(); ++row) {
    bool isDirichlet = true;
    bool hasDiagonal = false;
    ColIter diag;
    for(ColIter col=row->begin(); col!=row->end(); ++col) {
      if(row.index()==col.index()) {
        diag = col;
        hasDiagonal = (*col != zero);
      }else if(*col != zero)
        isDirichlet = false;
    }
    if(isDirichlet && hasDiagonal)
      diag->solve(x[row.index()], b[row.index()]);
  }

  std::cout<<" Preprocessing Dirichlet took "<<watch1.elapsed()<<std::endl;

  // There is no smoother to make x consistent, so do it by hand.
  matrices_->parallelInformation().coarsest()->copyOwnerToAll(x,x);
  // Hierarchies of temporary vectors used during the cycle
  rhs_ = std::make_shared<Hierarchy<Range,A>>(std::make_shared<Range>(b));
  lhs_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
  residual_ = std::make_shared<Hierarchy<Domain,A>>(std::make_shared<Domain>(x));
  matrices_->coarsenVector(*rhs_);
  matrices_->coarsenVector(*lhs_);
  matrices_->coarsenVector(*residual_);
}
/** \brief Return the number of levels in the hierarchy. */
template<class M, class X, class PI, class A>
std::size_t FastAMG<M,X,PI,A>::levels()
{
  return matrices_->levels();
}

/** \brief Return the maximum number of levels allowed. */
template<class M, class X, class PI, class A>
std::size_t FastAMG<M,X,PI,A>::maxlevels()
{
  return matrices_->maxlevels();
}
/** \brief Apply one step of the preconditioner to the system A(v)=d. */
template<class M, class X, class PI, class A>
void FastAMG<M,X,PI,A>::apply(Domain& v, const Range& d)
{
  LevelContext levelContext;
  // Init all iterators for the current level
  initIteratorsWithFineLevel(levelContext);

  assert(v.two_norm()==0);

  if(matrices_->maxlevels()==1){
    // The coarse solver might modify d, so work on a copy.
    Range b(d);
    mgc(levelContext, v, b);
  }else
    mgc(levelContext, v, d);
  if(postSteps_==0||matrices_->maxlevels()==1)
    levelContext.pinfo->copyOwnerToAll(v, v);
}
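/**
 * \par Usage sketch (continued)
 * pre(), apply() and post() are normally driven by an ISTL Krylov solver
 * rather than called by hand; the solver choice and tolerances below are
 * illustrative assumptions, continuing the setup sketch above:
 * \code
 * Dune::GeneralizedPCGSolver<Vector> solver(fop, amg, 1e-8, 80, 2);
 * Vector x(mat.N()), b(mat.N());
 * // ... fill b, set x to zero ...
 * Dune::InverseOperatorResult res;
 * solver.apply(x, b, res);  // calls amg.pre(), amg.apply() repeatedly, amg.post()
 * \endcode
 */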
template<class M, class X, class PI, class A>
void FastAMG<M,X,PI,A>::initIteratorsWithFineLevel(LevelContext& levelContext)
{
  levelContext.matrix = matrices_->matrices().finest();
  levelContext.pinfo = matrices_->parallelInformation().finest();
  levelContext.redist = matrices_->redistributeInformation().begin();
  levelContext.aggregates = matrices_->aggregatesMaps().begin();
  levelContext.lhs = lhs_->finest();
  levelContext.residual = residual_->finest();
  levelContext.rhs = rhs_->finest();
  levelContext.level=0;
}
template<class M, class X, class PI, class A>
bool FastAMG<M,X,PI,A>::moveToCoarseLevel(LevelContext& levelContext)
{
  bool processNextLevel=true;

  if(levelContext.redist->isSetup()) {
    // Restrict the defect to the coarse-level right hand side on the
    // redistributed data layout.
    levelContext.redist->redistribute(static_cast<const Range&>(*levelContext.residual),
                                      levelContext.residual.getRedistributed());
    processNextLevel = levelContext.residual.getRedistributed().size()>0;
    if(processNextLevel) {
      ++levelContext.rhs;
      ++levelContext.pinfo;
      Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
      ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                       static_cast<const Range&>(levelContext.residual.getRedistributed()),
                       *levelContext.pinfo);
    }
  }else{
    // Restrict the defect to the coarse-level right hand side.
    ++levelContext.rhs;
    ++levelContext.pinfo;
    Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
    ::restrictVector(*(*levelContext.aggregates), *levelContext.rhs,
                     static_cast<const Range&>(*levelContext.residual), *levelContext.pinfo);
  }

  if(processNextLevel) {
    // Advance the remaining iterators to the next coarser level
    ++levelContext.residual;
    ++levelContext.lhs;
    ++levelContext.matrix;
    ++levelContext.level;
    ++levelContext.redist;

    if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
      // Not on the coarsest level yet
      ++levelContext.aggregates;
    }
  }

  // Prepare the residual for the coarse level
  *levelContext.residual=0;
  return processNextLevel;
}
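/**
 * \par What restrictVector and prolongateVector do here
 * For aggregation-based AMG the transfer operators are piecewise constant.
 * Conceptually (hypothetical names, coarse vector assumed zero-initialized;
 * the real code additionally handles the parallel data distribution):
 * \code
 * // restriction: sum the fine residual over each aggregate
 * for(std::size_t i=0; i<fine.size(); ++i)
 *   coarse[aggregates[i]] += fine[i];
 *
 * // prolongation: add the damped coarse correction to each fine unknown
 * for(std::size_t i=0; i<fine.size(); ++i)
 *   x[i] += damping * correction[aggregates[i]];
 * \endcode
 */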
template<class M, class X, class PI, class A>
void FastAMG<M,X,PI,A>::moveToFineLevel(LevelContext& levelContext,
                                        bool processNextLevel, Domain& x)
{
  if(processNextLevel) {
    if(levelContext.matrix != matrices_->matrices().coarsest() || matrices_->levels()<matrices_->maxlevels()) {
      // The previous level was not the coarsest one
      --levelContext.aggregates;
    }
    --levelContext.redist;
    --levelContext.level;
    // Move to the finer level and prolongate the coarse correction
    --levelContext.matrix;
    --levelContext.residual;
  }

  typename Hierarchy<Domain,A>::Iterator coarseLhs = levelContext.lhs--;
  if(levelContext.redist->isSetup()) {
    // Get the original data distribution back during prolongation.
    Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
    ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
                       levelContext.lhs.getRedistributed(),
                       matrices_->getProlongationDampingFactor(),
                       *levelContext.pinfo, *levelContext.redist);
  }else{
    Transfer<typename OperatorHierarchy::AggregatesMap::AggregateDescriptor,Range,ParallelInformation>
    ::prolongateVector(*(*levelContext.aggregates), *coarseLhs, x,
                       matrices_->getProlongationDampingFactor(), *levelContext.pinfo);
  }

  if(processNextLevel) {
    // Undo the remaining iterator advances of moveToCoarseLevel
    --levelContext.rhs;
    --levelContext.pinfo;
  }
}
template<class M, class X, class PI, class A>
void FastAMG<M,X,PI,A>::presmooth(LevelContext& levelContext, Domain& x,
                                  const Range& b)
{
  constexpr auto bl = blockLevel<typename M::matrix_type>();
  GaussSeidelPresmoothDefect<bl>::apply(levelContext.matrix->getmat(),
                                        x,
                                        *levelContext.residual,
                                        b);
}

template<class M, class X, class PI, class A>
void FastAMG<M,X,PI,A>::postsmooth(LevelContext& levelContext, Domain& x,
                                   const Range& b)
{
  constexpr auto bl = blockLevel<typename M::matrix_type>();
  GaussSeidelPostsmoothDefect<bl>
  ::apply(levelContext.matrix->getmat(), x, *levelContext.residual, b);
}
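/**
 * \par Why fused smoothing kernels
 * A classical implementation smooths and then computes the defect in a
 * second sweep, reading the matrix twice. The kernels used above fuse both
 * into a single sweep over the matrix, which is where FastAMG saves memory
 * bandwidth. A damped-Jacobi version of the idea is the easiest to show
 * (all names hypothetical; the Gauss-Seidel variants need more bookkeeping):
 * \code
 * // one pass over A yields the defect of the incoming iterate ...
 * for(std::size_t i=0; i<n; ++i) {
 *   double r = b[i];
 *   for(const auto& e : row(A,i))   // entries e with members col, value
 *     r -= e.value * x[e.col];
 *   d[i] = r;
 * }
 * // ... and the smoothing update then needs only vector traffic
 * for(std::size_t i=0; i<n; ++i)
 *   x[i] += omega * d[i] / diag(A,i);
 * \endcode
 */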
/** \brief Check whether the coarse solver used is a direct solver. */
template<class M, class X, class PI, class A>
bool FastAMG<M,X,PI,A>::usesDirectCoarseLevelSolver() const
{
  return IsDirectSolver<CoarseSolver>::value;
}
template<class M, class X, class PI, class A>
void FastAMG<M,X,PI,A>::mgc(LevelContext& levelContext, Domain& v, const Range& b)
{
  if(levelContext.matrix == matrices_->matrices().coarsest() && levels()==maxlevels()) {
    // Solve directly on the coarsest level
    InverseOperatorResult res;
    res.converged=true; // If we do not compute, the flag will not get updated
    if(levelContext.redist->isSetup()) {
      levelContext.redist->redistribute(b, levelContext.rhs.getRedistributed());
      if(levelContext.rhs.getRedistributed().size()>0) {
        // We are still participating in the computation
        levelContext.pinfo.getRedistributed().copyOwnerToAll(levelContext.rhs.getRedistributed(),
                                                             levelContext.rhs.getRedistributed());
        solver_->apply(levelContext.lhs.getRedistributed(), levelContext.rhs.getRedistributed(), res);
      }
      levelContext.redist->redistributeBackward(v, levelContext.lhs.getRedistributed());
      levelContext.pinfo->copyOwnerToAll(v, v);
    }else{
      levelContext.pinfo->copyOwnerToAll(b, b);
      solver_->apply(v, const_cast<Range&>(b), res);
    }

    if(!res.converged)
      coarsesolverconverged = false;
  }else{
    // Presmoothing with simultaneous defect computation
    presmooth(levelContext, v, b);
#ifndef DUNE_AMG_NO_COARSEGRIDCORRECTION
    bool processNextLevel = moveToCoarseLevel(levelContext);

    if(processNextLevel) {
      // gamma_==1 yields a V-cycle, gamma_==2 a W-cycle
      for(std::size_t i=0; i<gamma_; i++)
        mgc(levelContext, *levelContext.lhs, *levelContext.rhs);
    }

    moveToFineLevel(levelContext, processNextLevel, v);
#endif

    if(levelContext.matrix == matrices_->matrices().finest()) {
      coarsesolverconverged = matrices_->parallelInformation().finest()->communicator().prod(coarsesolverconverged);
      if(!coarsesolverconverged)
        DUNE_THROW(MathError, "Coarse solver did not converge");
    }

    // Postsmoothing
    postsmooth(levelContext, v, b);
  }
}
/** \brief Clean up after the preconditioner has been used. */
template<class M, class X, class PI, class A>
void FastAMG<M,X,PI,A>::post([[maybe_unused]] Domain& x)
{
  // Release the temporary vector hierarchies allocated in pre()
  lhs_=nullptr;
  rhs_=nullptr;
  residual_=nullptr;
}
/** \brief Get the aggregate number of each unknown on the coarsest level. */
template<class M, class X, class PI, class A>
template<class A1>
void FastAMG<M,X,PI,A>::getCoarsestAggregateNumbers(std::vector<std::size_t,A1>& cont)
{
  matrices_->getCoarsestAggregatesOnFinest(cont);
}

#endif // DUNE_ISTL_FASTAMG_HH