Nektar++
GlobalLinSysPETScStaticCond.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File GlobalLinSysPETScStaticCond.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: GlobalLinSysPETScStaticCond definition
//
///////////////////////////////////////////////////////////////////////////////

#include <boost/core/ignore_unused.hpp>

#include <MultiRegions/GlobalLinSysPETScStaticCond.h>

#include <petscsys.h>
#include <petscksp.h>
#include <petscmat.h>

using namespace std;

namespace Nektar
{
    namespace MultiRegions
    {
        /**
         * @class GlobalLinSysPETScStaticCond
         *
         * Solves a linear system using single- or multi-level static
         * condensation.
         */

        /**
         * Registers the class with the Factory.
         */
        string GlobalLinSysPETScStaticCond::className
                = GetGlobalLinSysFactory().RegisterCreatorFunction(
                    "PETScStaticCond",
                    GlobalLinSysPETScStaticCond::create,
                    "PETSc static condensation.");

        string GlobalLinSysPETScStaticCond::className2
                = GetGlobalLinSysFactory().RegisterCreatorFunction(
                    "PETScMultiLevelStaticCond",
                    GlobalLinSysPETScStaticCond::create,
                    "PETSc multi-level static condensation.");

        /**
         * For a matrix system of the form @f[
         * \left[ \begin{array}{cc}
         * \boldsymbol{A} & \boldsymbol{B}\\
         * \boldsymbol{C} & \boldsymbol{D}
         * \end{array} \right]
         * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
         * \end{array}\right]
         * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
         * \end{array}\right],
         * @f]
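         * Eliminating @f$\boldsymbol{x_2}@f$ gives the condensed boundary
         * problem @f[
         * (\boldsymbol{A-BD^{-1}C})\,\boldsymbol{x_1} =
         * \boldsymbol{y_1} - \boldsymbol{BD^{-1}y_2},
         * @f]
         * whose left-hand side blocks form the Schur complement that this
         * class assembles into a global PETSc matrix.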
         * where @f$\boldsymbol{D}@f$ and
         * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
         * a static condensation system, according to a given local to global
         * mapping. #m_linSys is constructed by AssembleSchurComplement().
         * @param pKey         Associated matrix key.
         * @param pExpList     Expansion list.
         * @param pLocToGloMap Local to global mapping.
         */
        GlobalLinSysPETScStaticCond::GlobalLinSysPETScStaticCond(
            const GlobalLinSysKey              &pKey,
            const std::weak_ptr<ExpList>       &pExpList,
            const std::shared_ptr<AssemblyMap> &pLocToGloMap)
            : GlobalLinSys          (pKey, pExpList, pLocToGloMap),
              GlobalLinSysPETSc     (pKey, pExpList, pLocToGloMap),
              GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
        {
            ASSERTL1((pKey.GetGlobalSysSolnType() == ePETScStaticCond) ||
                     (pKey.GetGlobalSysSolnType() == ePETScMultiLevelStaticCond),
                     "This constructor is only valid when using static "
                     "condensation");
            ASSERTL1(pKey.GetGlobalSysSolnType()
                         == pLocToGloMap->GetGlobalSysSolnType(),
                     "The local to global map is not set up for the requested "
                     "solution type");
        }

        /**
         * Constructor for substructured systems, e.g. multi-level static
         * condensation, where the Schur complement and related block
         * matrices are already available.
         */
        GlobalLinSysPETScStaticCond::GlobalLinSysPETScStaticCond(
            const GlobalLinSysKey              &pKey,
            const std::weak_ptr<ExpList>       &pExpList,
            const DNekScalBlkMatSharedPtr       pSchurCompl,
            const DNekScalBlkMatSharedPtr       pBinvD,
            const DNekScalBlkMatSharedPtr       pC,
            const DNekScalBlkMatSharedPtr       pInvD,
            const std::shared_ptr<AssemblyMap> &pLocToGloMap,
            const PreconditionerSharedPtr       pPrecon)
            : GlobalLinSys          (pKey, pExpList, pLocToGloMap),
              GlobalLinSysPETSc     (pKey, pExpList, pLocToGloMap),
              GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
        {
            m_schurCompl = pSchurCompl;
            m_BinvD      = pBinvD;
            m_C          = pC;
            m_invD       = pInvD;
            m_precon     = pPrecon;
        }

        /**
         * Default destructor.
         */
        GlobalLinSysPETScStaticCond::~GlobalLinSysPETScStaticCond()
        {
        }

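        /**
         * @brief Initialise this object: set up the top-level Schur
         * complement storage, build the preconditioner and apply its
         * transformation to each block of the Schur complement matrix.
         */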
        void GlobalLinSysPETScStaticCond::v_InitObject()
        {
            auto asmMap = m_locToGloMap.lock();

            m_precon = CreatePrecon(asmMap);

            // Allocate memory for top-level structure
            SetupTopLevel(asmMap);

            // Set up block matrix systems
            int n, n_exp = m_expList.lock()->GetNumElmts();

            MatrixStorage blkmatStorage = eDIAGONAL;
            const Array<OneD, const unsigned int> &nbdry_size
                    = asmMap->GetNumLocalBndCoeffsPerPatch();

            m_S1Blk = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nbdry_size, nbdry_size, blkmatStorage);

            // Preserve original matrix in m_S1Blk
            for (n = 0; n < n_exp; ++n)
            {
                DNekScalMatSharedPtr mat = m_schurCompl->GetBlock(n, n);
                m_S1Blk->SetBlock(n, n, mat);
            }

            // Build preconditioner
            m_precon->BuildPreconditioner();

            // Do transform of Schur complement matrix
            int cnt = 0;
            for (n = 0; n < n_exp; ++n)
            {
                if (m_linSysKey.GetMatrixType() !=
                        StdRegions::eHybridDGHelmBndLam)
                {
                    DNekScalMatSharedPtr mat = m_S1Blk->GetBlock(n, n);
                    DNekScalMatSharedPtr t = m_precon->TransformedSchurCompl(
                        n, cnt, mat);
                    m_schurCompl->SetBlock(n, n, t);
                    cnt += mat->GetRows();
                }
            }

            // Construct this level
            Initialise(asmMap);
        }

        /**
         * Assemble the Schur complement matrix from the block matrices stored
         * in #m_blkMatrices and the given local to global mapping information.
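         * Each boundary-boundary block @f$S_e@f$ of #m_schurCompl is
         * scattered into the global PETSc matrix @f$M@f$ as @f[
         * M_{g(i),g(j)} \leftarrow M_{g(i),g(j)}
         *     + \sigma_i\sigma_j (S_e)_{ij},
         * @f]
         * where @f$g@f$ maps local boundary degrees of freedom to reordered
         * global rows and @f$\sigma@f$ are the corresponding signs.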
         * @param pLocToGloMap Local to global mapping information.
         */
        void GlobalLinSysPETScStaticCond::v_AssembleSchurComplement(
            AssemblyMapSharedPtr pLocToGloMap)
        {
            int i, j, n, cnt, gid1, gid2, loc_lda;
            NekDouble sign1, sign2, value;

            const int nDirDofs = pLocToGloMap->GetNumGlobalDirBndCoeffs();

            DNekScalBlkMatSharedPtr SchurCompl = m_schurCompl;
            DNekScalBlkMatSharedPtr BinvD      = m_BinvD;
            DNekScalBlkMatSharedPtr C          = m_C;
            DNekScalBlkMatSharedPtr invD       = m_invD;
            DNekScalMatSharedPtr    loc_mat;

            // Build precon again if we are in multi-level static condensation
            // (a bit of a hack)
            if (m_linSysKey.GetGlobalSysSolnType()
                    == ePETScMultiLevelStaticCond)
            {
                m_precon = CreatePrecon(pLocToGloMap);
                m_precon->BuildPreconditioner();
            }

            // CALCULATE REORDERING MAPPING
            CalculateReordering(pLocToGloMap->GetGlobalToUniversalBndMap(),
                                pLocToGloMap->GetGlobalToUniversalBndMapUnique(),
                                pLocToGloMap);

            // SET UP VECTORS AND MATRIX
            SetUpMatVec(pLocToGloMap->GetNumGlobalBndCoeffs(), nDirDofs);

            // SET UP SCATTER OBJECTS
            SetUpScatter();

            // CONSTRUCT KSP OBJECT
            SetUpSolver(pLocToGloMap->GetIterativeTolerance());

            // If we are using the matrix multiplication shell don't try to
            // populate the matrix.
            if (m_matMult == ePETScMatMultShell)
            {
                return;
            }

            // POPULATE MATRIX
            for (n = cnt = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
            {
                loc_mat = m_schurCompl->GetBlock(n, n);
                loc_lda = loc_mat->GetRows();

                for (i = 0; i < loc_lda; ++i)
                {
                    gid1  = pLocToGloMap->GetLocalToGlobalBndMap(cnt + i)
                                - nDirDofs;
                    sign1 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + i);
                    if (gid1 >= 0)
                    {
                        int gid1ro = m_reorderedMap[gid1];
                        for (j = 0; j < loc_lda; ++j)
                        {
                            gid2  = pLocToGloMap->GetLocalToGlobalBndMap(cnt + j)
                                        - nDirDofs;
                            sign2 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + j);
                            if (gid2 >= 0)
                            {
                                int gid2ro = m_reorderedMap[gid2];
                                value = sign1 * sign2 * (*loc_mat)(i, j);
                                MatSetValue(
                                    m_matrix, gid1ro, gid2ro, value, ADD_VALUES);
                            }
                        }
                    }
                }
                cnt += loc_lda;
            }

            // ASSEMBLE MATRIX
            MatAssemblyBegin(m_matrix, MAT_FINAL_ASSEMBLY);
            MatAssemblyEnd  (m_matrix, MAT_FINAL_ASSEMBLY);
        }

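        /**
         * @brief Retrieve the static condensation block matrix for the n-th
         * expansion, wrapped as a single-block matrix.
         */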
        DNekScalBlkMatSharedPtr GlobalLinSysPETScStaticCond::
            v_GetStaticCondBlock(unsigned int n)
        {
            DNekScalBlkMatSharedPtr schurComplBlock;
            int scLevel = m_locToGloMap.lock()->GetStaticCondLevel();
            DNekScalBlkMatSharedPtr sc = scLevel == 0 ? m_S1Blk : m_schurCompl;
            DNekScalMatSharedPtr localMat = sc->GetBlock(n, n);
            unsigned int nbdry = localMat->GetRows();
            unsigned int nblks = 1;
            unsigned int esize[1] = {nbdry};

            schurComplBlock = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nblks, nblks, esize, esize);
            schurComplBlock->SetBlock(0, 0, localMat);

            return schurComplBlock;
        }

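        /**
         * @brief Select the Schur complement blocks to use in the solve and,
         * at the top level, ensure the preconditioner has been built.
         */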
        DNekScalBlkMatSharedPtr GlobalLinSysPETScStaticCond::v_PreSolve(
            int                   scLevel,
            NekVector<NekDouble> &F_GlobBnd)
        {
            boost::ignore_unused(F_GlobBnd);

            if (scLevel == 0)
            {
                // When matrices are supplied to the constructor at the top
                // level, the preconditioner is never set up.
                if (!m_precon)
                {
                    m_precon = CreatePrecon(m_locToGloMap.lock());
                    m_precon->BuildPreconditioner();
                }

                return m_S1Blk;
            }
            else
            {
                return m_schurCompl;
            }
        }

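        /**
         * @brief Transform the solution vector to low-energy space using the
         * preconditioner.
         */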
        void GlobalLinSysPETScStaticCond::v_BasisFwdTransform(
            Array<OneD, NekDouble> &pInOut,
            int                     offset)
        {
            m_precon->DoTransformToLowEnergy(pInOut, offset);
        }

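        /**
         * @brief Transform the solution vector back from low-energy space
         * using the preconditioner.
         */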
        void GlobalLinSysPETScStaticCond::v_BasisBwdTransform(
            Array<OneD, NekDouble> &pInOut)
        {
            m_precon->DoTransformFromLowEnergy(pInOut);
        }

        /**
         * @brief Apply matrix-vector multiplication using local approach and
         * the assembly map.
         *
         * @param input  Vector input.
         * @param output Result of multiplication.
         *
         * @todo This can possibly be made faster by using the sparse
         * block-matrix multiplication code from the iterative elastic
         * systems.
         */
        void GlobalLinSysPETScStaticCond::v_DoMatrixMultiply(
            const Array<OneD, const NekDouble> &input,
                  Array<OneD,       NekDouble> &output)
        {
            auto asmMap = m_locToGloMap.lock();

            int nLocBndDofs = asmMap->GetNumLocalBndCoeffs();
            int nDirDofs    = asmMap->GetNumGlobalDirBndCoeffs();

            // Gather boundary degrees of freedom into local storage, apply
            // the Schur complement block matrix and assemble the result.
            NekVector<NekDouble> in(nLocBndDofs), out(nLocBndDofs);
            asmMap->GlobalToLocalBnd(input, in.GetPtr(), nDirDofs);
            out = (*m_schurCompl) * in;
            asmMap->AssembleBnd(out.GetPtr(), output, nDirDofs);
        }

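        /**
         * @brief Solve the next level of the multi-level static condensation
         * system by constructing a new PETSc static condensation solver from
         * the supplied block matrices.
         */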
        GlobalLinSysStaticCondSharedPtr GlobalLinSysPETScStaticCond::v_Recurse(
            const GlobalLinSysKey              &mkey,
            const std::weak_ptr<ExpList>       &pExpList,
            const DNekScalBlkMatSharedPtr       pSchurCompl,
            const DNekScalBlkMatSharedPtr       pBinvD,
            const DNekScalBlkMatSharedPtr       pC,
            const DNekScalBlkMatSharedPtr       pInvD,
            const std::shared_ptr<AssemblyMap> &l2gMap)
        {
            GlobalLinSysPETScStaticCondSharedPtr sys = MemoryManager<
                GlobalLinSysPETScStaticCond>::AllocateSharedPtr(
                    mkey, pExpList, pSchurCompl, pBinvD, pC, pInvD, l2gMap,
                    m_precon);
            sys->Initialise(l2gMap);
            return sys;
        }
    }
}