Nektar++
GlobalLinSysPETScStaticCond.cpp
1 ///////////////////////////////////////////////////////////////////////////////
2 //
3 // File: GlobalLinSysPETScStaticCond.cpp
4 //
5 // For more information, please see: http://www.nektar.info
6 //
7 // The MIT License
8 //
9 // Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10 // Department of Aeronautics, Imperial College London (UK), and Scientific
11 // Computing and Imaging Institute, University of Utah (USA).
12 //
13 // Permission is hereby granted, free of charge, to any person obtaining a
14 // copy of this software and associated documentation files (the "Software"),
15 // to deal in the Software without restriction, including without limitation
16 // the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 // and/or sell copies of the Software, and to permit persons to whom the
18 // Software is furnished to do so, subject to the following conditions:
19 //
20 // The above copyright notice and this permission notice shall be included
21 // in all copies or substantial portions of the Software.
22 //
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
24 // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 // DEALINGS IN THE SOFTWARE.
30 //
31 // Description: GlobalLinSysPETScStaticCond definition
32 //
33 ///////////////////////////////////////////////////////////////////////////////
34 
35 #include <boost/core/ignore_unused.hpp>
36 
37 #include <MultiRegions/GlobalLinSysPETScStaticCond.h>
38 
39 #include <petscksp.h>
40 #include <petscmat.h>
41 #include <petscsys.h>
42 
43 using namespace std;
44 
45 namespace Nektar
46 {
47 namespace MultiRegions
48 {
49 /**
50  * @class GlobalLinSysPETScStaticCond
51  *
52  * Solves a linear system using single- or multi-level static
53  * condensation.
54  */
55 
56 /**
57  * Registers the class with the Factory.
58  */
59 string GlobalLinSysPETScStaticCond::className =
61  "PETScStaticCond", GlobalLinSysPETScStaticCond::create,
62  "PETSc static condensation.");
63 
64 string GlobalLinSysPETScStaticCond::className2 =
66  "PETScMultiLevelStaticCond", GlobalLinSysPETScStaticCond::create,
67  "PETSc multi-level static condensation.");
68 
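// Editorial sketch (not part of the original file): once registered above, the
// solver is obtained from the GlobalLinSys factory by its string key. The
// names linSysKey, expList and asmMap below are placeholders rather than
// identifiers from this file.
//
//     GlobalLinSysSharedPtr linSys = GetGlobalLinSysFactory().CreateInstance(
//         "PETScStaticCond", linSysKey, expList, asmMap);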
69 /**
70  * For a matrix system of the form @f[
71  * \left[ \begin{array}{cc}
72  * \boldsymbol{A} & \boldsymbol{B}\\
73  * \boldsymbol{C} & \boldsymbol{D}
74  * \end{array} \right]
75  * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
76  * \end{array}\right]
77  * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
78  * \end{array}\right],
79  * @f]
80  * where @f$\boldsymbol{D}@f$ and
81  * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
82  * a static condensation system, according to a given local to global
83  * mapping. #m_linSys is constructed by AssembleSchurComplement().
84  * @param pKey Associated matrix key.
85  * @param pExpList Expansion list.
86  * @param pLocToGloMap Local to global mapping.
87  */
88 GlobalLinSysPETScStaticCond::GlobalLinSysPETScStaticCond(
89  const GlobalLinSysKey &pKey, const std::weak_ptr<ExpList> &pExpList,
90  const std::shared_ptr<AssemblyMap> &pLocToGloMap)
91  : GlobalLinSys(pKey, pExpList, pLocToGloMap),
92  GlobalLinSysPETSc(pKey, pExpList, pLocToGloMap),
93  GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
94 {
97  "This constructor is only valid when using static "
98  "condensation");
100  pLocToGloMap->GetGlobalSysSolnType(),
101  "The local to global map is not set up for the requested "
102  "solution type");
103 }
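/*
 * Editorial note (not part of the original file): the static condensation
 * referred to above eliminates the interior unknowns. From the second block
 * row, @f$ \boldsymbol{x_2} = \boldsymbol{D}^{-1}(\boldsymbol{y_2} -
 * \boldsymbol{C}\boldsymbol{x_1}) @f$, and substitution into the first row
 * yields the boundary (Schur complement) system
 * @f[
 *   (\boldsymbol{A} - \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{C})
 *   \boldsymbol{x_1} = \boldsymbol{y_1} -
 *   \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{y_2}.
 * @f]
 * The stored blocks correspond to m_schurCompl @f$= \boldsymbol{A} -
 * \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{C}@f$, m_BinvD @f$=
 * \boldsymbol{B}\boldsymbol{D}^{-1}@f$, m_C @f$= \boldsymbol{C}@f$ and
 * m_invD @f$= \boldsymbol{D}^{-1}@f$.
 */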
104 
105 /**
106  *
107  */
108 GlobalLinSysPETScStaticCond::GlobalLinSysPETScStaticCond(
109  const GlobalLinSysKey &pKey, const std::weak_ptr<ExpList> &pExpList,
110  const DNekScalBlkMatSharedPtr pSchurCompl,
111  const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC,
112  const DNekScalBlkMatSharedPtr pInvD,
113  const std::shared_ptr<AssemblyMap> &pLocToGloMap,
114  const PreconditionerSharedPtr pPrecon)
115  : GlobalLinSys(pKey, pExpList, pLocToGloMap),
116  GlobalLinSysPETSc(pKey, pExpList, pLocToGloMap),
117  GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
118 {
119  m_schurCompl = pSchurCompl;
120  m_BinvD = pBinvD;
121  m_C = pC;
122  m_invD = pInvD;
123  m_precon = pPrecon;
124 }
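// Editorial note (not part of the original file): this constructor is the one
// used by v_Recurse() further down, which passes the already-assembled Schur
// complement blocks and the existing preconditioner on to the next level of
// multi-level static condensation.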
125 
126 /**
127  *
128  */
129 GlobalLinSysPETScStaticCond::~GlobalLinSysPETScStaticCond()
130 {
131 }
132 
133 void GlobalLinSysPETScStaticCond::v_InitObject()
134 {
135  auto asmMap = m_locToGloMap.lock();
136 
137  m_precon = CreatePrecon(asmMap);
138 
139  // Allocate memory for top-level structure
140  SetupTopLevel(asmMap);
141 
142  // Setup Block Matrix systems
143  int n, n_exp = m_expList.lock()->GetNumElmts();
144 
145  // Build preconditioner
146  m_precon->BuildPreconditioner();
147 
148  // Do transform of Schur complement matrix
149  int cnt = 0;
150  for (n = 0; n < n_exp; ++n)
151  {
152  if (m_linSysKey.GetMatrixType() != StdRegions::eHybridDGHelmBndLam)
153  {
154  DNekScalMatSharedPtr mat = m_schurCompl->GetBlock(n, n);
155  DNekScalMatSharedPtr t =
156  m_precon->TransformedSchurCompl(n, cnt, mat);
157  m_schurCompl->SetBlock(n, n, t);
158  cnt += mat->GetRows();
159  }
160  }
161 
162  // Construct this level
163  Initialise(asmMap);
164 }
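/*
 * Editorial note (not part of the original file): for preconditioners that
 * define a change of basis (for example the low-energy preconditioner),
 * TransformedSchurCompl is expected to return each element block expressed in
 * that basis, i.e. something of the form @f$ \boldsymbol{R}_e \boldsymbol{S}_e
 * \boldsymbol{R}_e^T @f$; preconditioners without such a transformation return
 * the block unchanged, so the loop above then leaves m_schurCompl untouched.
 */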
165 
166 /**
167  * Assemble the schur complement matrix from the block matrices stored
168  * in #m_blkMatrices and the given local to global mapping information.
169  * @param locToGloMap Local to global mapping information.
170  */
171 void GlobalLinSysPETScStaticCond::v_AssembleSchurComplement(
172  AssemblyMapSharedPtr pLocToGloMap)
173 {
174  int i, j, n, cnt, gid1, gid2, loc_lda;
175  NekDouble sign1, sign2, value;
176 
177  const int nDirDofs = pLocToGloMap->GetNumGlobalDirBndCoeffs();
178 
179  DNekScalBlkMatSharedPtr SchurCompl = m_schurCompl;
180  DNekScalBlkMatSharedPtr BinvD = m_BinvD;
181  DNekScalBlkMatSharedPtr C = m_C;
182  DNekScalBlkMatSharedPtr invD = m_invD;
183  DNekScalMatSharedPtr loc_mat;
184 
185  // Build precon again if we are in multi-level static condensation (a
186  // bit of a hack)
187  if (m_linSysKey.GetGlobalSysSolnType() == ePETScMultiLevelStaticCond)
188  {
189  m_precon = CreatePrecon(pLocToGloMap);
190  m_precon->BuildPreconditioner();
191  }
192 
193  // CALCULATE REORDERING MAPPING
194  CalculateReordering(pLocToGloMap->GetGlobalToUniversalBndMap(),
195  pLocToGloMap->GetGlobalToUniversalBndMapUnique(),
196  pLocToGloMap);
197 
198  // SET UP VECTORS AND MATRIX
199  SetUpMatVec(pLocToGloMap->GetNumGlobalBndCoeffs(), nDirDofs);
200 
201  // SET UP SCATTER OBJECTS
202  SetUpScatter();
203 
204  // CONSTRUCT KSP OBJECT
205  SetUpSolver(pLocToGloMap->GetIterativeTolerance());
206 
207  // If we are using the matrix multiplication shell don't try to
208  // populate the matrix.
209  if (m_matMult == ePETScMatMultShell)
210  {
211  return;
212  }
213 
214  // POPULATE MATRIX
215  for (n = cnt = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
216  {
217  loc_mat = m_schurCompl->GetBlock(n, n);
218  loc_lda = loc_mat->GetRows();
219 
220  for (i = 0; i < loc_lda; ++i)
221  {
222  gid1 = pLocToGloMap->GetLocalToGlobalBndMap(cnt + i) - nDirDofs;
223  sign1 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + i);
224  if (gid1 >= 0)
225  {
226  int gid1ro = m_reorderedMap[gid1];
227  for (j = 0; j < loc_lda; ++j)
228  {
229  gid2 = pLocToGloMap->GetLocalToGlobalBndMap(cnt + j) -
230  nDirDofs;
231  sign2 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + j);
232  if (gid2 >= 0)
233  {
234  int gid2ro = m_reorderedMap[gid2];
235  value = sign1 * sign2 * (*loc_mat)(i, j);
236  MatSetValue(m_matrix, gid1ro, gid2ro, value,
237  ADD_VALUES);
238  }
239  }
240  }
241  }
242  cnt += loc_lda;
243  }
244 
245  // ASSEMBLE MATRIX
246  MatAssemblyBegin(m_matrix, MAT_FINAL_ASSEMBLY);
247  MatAssemblyEnd(m_matrix, MAT_FINAL_ASSEMBLY);
248 }
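/*
 * Editorial sketch (not part of the original file): the population loop above
 * uses the standard PETSc idiom of inserting entries with ADD_VALUES (so that
 * contributions from elements sharing a degree of freedom are summed) and then
 * closing the matrix with a MatAssemblyBegin/MatAssemblyEnd pair. Below is a
 * minimal standalone illustration of that idiom, using only routines from the
 * PETSc headers included at the top of this file and assuming
 * PetscInitialize() has already been called; the function name and nGlobal are
 * illustrative only.
 */
static void ExampleAssemblePattern(PetscInt nGlobal)
{
    Mat A;
    MatCreate(PETSC_COMM_WORLD, &A);
    MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, nGlobal, nGlobal);
    MatSetFromOptions(A);
    MatSetUp(A);
    for (PetscInt row = 0; row < nGlobal; ++row)
    {
        // Repeated insertions at the same (row, col) location are summed.
        MatSetValue(A, row, row, 1.0, ADD_VALUES);
    }
    MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
    MatDestroy(&A);
}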
249 
250 DNekScalBlkMatSharedPtr GlobalLinSysPETScStaticCond::v_GetStaticCondBlock(
251  unsigned int n)
252 {
253  DNekScalBlkMatSharedPtr schurComplBlock;
254  DNekScalMatSharedPtr localMat = m_schurCompl->GetBlock(n, n);
255  unsigned int nbdry = localMat->GetRows();
256  unsigned int nblks = 1;
257  unsigned int esize[1] = {nbdry};
258 
259  schurComplBlock = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
260  nblks, nblks, esize, esize);
261  schurComplBlock->SetBlock(0, 0, localMat);
262 
263  return schurComplBlock;
264 }
265 
266 void GlobalLinSysPETScStaticCond::v_PreSolve(int scLevel,
267  Array<OneD, NekDouble> &F_bnd)
268 {
269  boost::ignore_unused(F_bnd);
270 
271  if (scLevel == 0)
272  {
273  // When matrices are supplied to the constructor at the top
274  // level, the preconditioner is never set up.
275  if (!m_precon)
276  {
277  m_precon = CreatePrecon(m_locToGloMap.lock());
278  m_precon->BuildPreconditioner();
279  }
280  }
281 }
282 
283 void GlobalLinSysPETScStaticCond::v_BasisFwdTransform(
284  Array<OneD, NekDouble> &pInOut)
285 {
286  m_precon->DoTransformBasisToLowEnergy(pInOut);
287 }
288 
289 void GlobalLinSysPETScStaticCond::v_CoeffsBwdTransform(
290  Array<OneD, NekDouble> &pInOut)
291 {
292  m_precon->DoTransformCoeffsFromLowEnergy(pInOut);
293 }
294 
295 void GlobalLinSysPETScStaticCond::v_CoeffsFwdTransform(
296  const Array<OneD, NekDouble> &pInput, Array<OneD, NekDouble> &pOutput)
297 {
298  m_precon->DoTransformCoeffsToLowEnergy(pInput, pOutput);
299 }
300 
301 /**
302  * @brief Apply matrix-vector multiplication using local approach and
303  * the assembly map.
304  *
305  * @param input Vector input.
306  * @param output Result of multiplication.
307  *
308  * @todo This can possibly be made faster by using the sparse
309  * block-matrix multiplication code from the iterative elastic
310  * systems.
311  */
312 void GlobalLinSysPETScStaticCond::v_DoMatrixMultiply(
313  const Array<OneD, const NekDouble> &input, Array<OneD, NekDouble> &output)
314 {
315  auto asmMap = m_locToGloMap.lock();
316 
317  int nLocBndDofs = asmMap->GetNumLocalBndCoeffs();
318  int nDirDofs = asmMap->GetNumGlobalDirBndCoeffs();
319 
320  NekVector<NekDouble> in(nLocBndDofs), out(nLocBndDofs);
321  asmMap->GlobalToLocalBnd(input, in.GetPtr(), nDirDofs);
322  out = (*m_schurCompl) * in;
323  asmMap->AssembleBnd(out.GetPtr(), output, nDirDofs);
324 }
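/*
 * Editorial note (not part of the original file): writing the signed
 * global-to-local scatter for the boundary degrees of freedom of element
 * @f$e@f$ as @f$\boldsymbol{A}_e@f$, the routine above evaluates the assembled
 * Schur complement product without forming the global matrix:
 * @f[
 *   \hat{\boldsymbol{S}}\,\boldsymbol{x} = \sum_e \boldsymbol{A}_e^T
 *   \boldsymbol{S}_e (\boldsymbol{A}_e \boldsymbol{x}),
 * @f]
 * where GlobalToLocalBnd applies @f$\boldsymbol{A}_e@f$, the block multiply
 * applies each local @f$\boldsymbol{S}_e@f$, and AssembleBnd applies
 * @f$\boldsymbol{A}_e^T@f$ and sums the element contributions.
 */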
325 
326 GlobalLinSysStaticCondSharedPtr GlobalLinSysPETScStaticCond::v_Recurse(
327  const GlobalLinSysKey &mkey, const std::weak_ptr<ExpList> &pExpList,
328  const DNekScalBlkMatSharedPtr pSchurCompl,
329  const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC,
330  const DNekScalBlkMatSharedPtr pInvD,
331  const std::shared_ptr<AssemblyMap> &l2gMap)
332 {
333  GlobalLinSysPETScStaticCondSharedPtr sys =
334  MemoryManager<GlobalLinSysPETScStaticCond>::AllocateSharedPtr(
335  mkey, pExpList, pSchurCompl, pBinvD, pC, pInvD, l2gMap, m_precon);
336  sys->Initialise(l2gMap);
337  return sys;
338 }
339 } // namespace MultiRegions
340 } // namespace Nektar