Nektar++
GlobalLinSysPETScStaticCond.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File GlobalLinSysPETScStaticCond.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: GlobalLinSysPETScStaticCond definition
//
///////////////////////////////////////////////////////////////////////////////

#include <boost/core/ignore_unused.hpp>

#include <MultiRegions/GlobalLinSysPETScStaticCond.h>

#include <petscsys.h>
#include <petscksp.h>
#include <petscmat.h>

using namespace std;

namespace Nektar
{
    namespace MultiRegions
    {
        /**
         * @class GlobalLinSysPETScStaticCond
         *
         * Solves a linear system using single- or multi-level static
         * condensation.
         */

        /**
         * Registers the class with the Factory.
         */
        string GlobalLinSysPETScStaticCond::className
                = GetGlobalLinSysFactory().RegisterCreatorFunction(
                    "PETScStaticCond",
                    GlobalLinSysPETScStaticCond::create,
                    "PETSc static condensation.");

        string GlobalLinSysPETScStaticCond::className2
                = GetGlobalLinSysFactory().RegisterCreatorFunction(
                    "PETScMultiLevelStaticCond",
                    GlobalLinSysPETScStaticCond::create,
                    "PETSc multi-level static condensation.");
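
        // Note: the keys registered above ("PETScStaticCond" and
        // "PETScMultiLevelStaticCond") are the values a user supplies for the
        // GlobalSysSoln solver-info property to select this linear solver,
        // e.g. in the SOLVERINFO section of a session file or, typically, as a
        // command-line override of the form -I GlobalSysSoln=PETScStaticCond.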

        /**
         * For a matrix system of the form @f[
         * \left[ \begin{array}{cc}
         * \boldsymbol{A} & \boldsymbol{B}\\
         * \boldsymbol{C} & \boldsymbol{D}
         * \end{array} \right]
         * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
         * \end{array}\right]
         * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
         * \end{array}\right],
         * @f]
         * where @f$\boldsymbol{D}@f$ and
         * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
         * a static condensation system, according to a given local to global
         * mapping. #m_linSys is constructed by AssembleSchurComplement().
         *
         * @param pKey         Associated matrix key.
         * @param pExpList     Expansion list.
         * @param pLocToGloMap Local to global mapping.
         */
        GlobalLinSysPETScStaticCond::GlobalLinSysPETScStaticCond(
            const GlobalLinSysKey                &pKey,
            const std::weak_ptr<ExpList>         &pExpList,
            const std::shared_ptr<AssemblyMap>   &pLocToGloMap)
            : GlobalLinSys          (pKey, pExpList, pLocToGloMap),
              GlobalLinSysPETSc     (pKey, pExpList, pLocToGloMap),
              GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
        {
            ASSERTL1((pKey.GetGlobalSysSolnType() == ePETScStaticCond) ||
                     (pKey.GetGlobalSysSolnType() ==
                          ePETScMultiLevelStaticCond),
                     "This constructor is only valid when using static "
                     "condensation");
            ASSERTL1(pKey.GetGlobalSysSolnType()
                         == pLocToGloMap->GetGlobalSysSolnType(),
                     "The local to global map is not set up for the requested "
                     "solution type");
        }
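
        /*
         * A brief sketch of the algebra behind the class (standard block
         * elimination; notation as in the comment above): the boundary
         * unknowns satisfy the Schur complement system
         * @f[
         *   (\boldsymbol{A} - \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{C})
         *       \boldsymbol{x_1}
         *     = \boldsymbol{y_1}
         *       - \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{y_2},
         * @f]
         * which is the (assembled) system handed to PETSc, while the interior
         * unknowns are recovered element-by-element from
         * @f[
         *   \boldsymbol{x_2} = \boldsymbol{D}^{-1}
         *       (\boldsymbol{y_2} - \boldsymbol{C}\boldsymbol{x_1}).
         * @f]
         * In this naming, the stored blocks #m_schurCompl, #m_BinvD, #m_C and
         * #m_invD hold
         * @f$\boldsymbol{A}-\boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{C}@f$,
         * @f$\boldsymbol{B}\boldsymbol{D}^{-1}@f$, @f$\boldsymbol{C}@f$ and
         * @f$\boldsymbol{D}^{-1}@f$ respectively.
         */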

        /**
         *
         */
        GlobalLinSysPETScStaticCond::GlobalLinSysPETScStaticCond(
            const GlobalLinSysKey                &pKey,
            const std::weak_ptr<ExpList>         &pExpList,
            const DNekScalBlkMatSharedPtr         pSchurCompl,
            const DNekScalBlkMatSharedPtr         pBinvD,
            const DNekScalBlkMatSharedPtr         pC,
            const DNekScalBlkMatSharedPtr         pInvD,
            const std::shared_ptr<AssemblyMap>   &pLocToGloMap,
            const PreconditionerSharedPtr         pPrecon)
            : GlobalLinSys          (pKey, pExpList, pLocToGloMap),
              GlobalLinSysPETSc     (pKey, pExpList, pLocToGloMap),
              GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
        {
            m_schurCompl = pSchurCompl;
            m_BinvD      = pBinvD;
            m_C          = pC;
            m_invD       = pInvD;
            m_precon     = pPrecon;
        }

        /**
         *
         */
        GlobalLinSysPETScStaticCond::~GlobalLinSysPETScStaticCond()
        {
        }

        void GlobalLinSysPETScStaticCond::v_InitObject()
        {
            auto asmMap = m_locToGloMap.lock();

            m_precon = CreatePrecon(asmMap);

            // Allocate memory for top-level structure
            SetupTopLevel(asmMap);

            // Set up block matrix systems
            int n, n_exp = m_expList.lock()->GetNumElmts();

            // Build preconditioner
            m_precon->BuildPreconditioner();

            // Do transform of Schur complement matrix
            int cnt = 0;
            for (n = 0; n < n_exp; ++n)
            {
                if (m_linSysKey.GetMatrixType() !=
                        StdRegions::eHybridDGHelmBndLam)
                {
                    DNekScalMatSharedPtr mat = m_schurCompl->GetBlock(n, n);
                    DNekScalMatSharedPtr t   = m_precon->TransformedSchurCompl(
                        n, cnt, mat);
                    m_schurCompl->SetBlock(n, n, t);
                    cnt += mat->GetRows();
                }
            }

            // Construct this level
            Initialise(asmMap);
        }
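
        /*
         * The transform step in v_InitObject() above replaces each elemental
         * boundary block of the Schur complement by the preconditioner's
         * transformed counterpart. For a preconditioner that defines a basis
         * transformation (e.g. the low-energy preconditioner) this is,
         * loosely, an elemental congruence transform of the form
         * @f[ \tilde{S}^{e} = R^{e} S^{e} (R^{e})^{T}, @f]
         * with @f$R^{e}@f$ the elemental transformation matrix supplied by the
         * Preconditioner implementation; preconditioners that define no such
         * transformation are expected to return the block unchanged. The exact
         * form of @f$R^{e}@f$ lives in the Preconditioner classes, not here.
         */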

        /**
         * Assemble the Schur complement matrix from the block matrices stored
         * in #m_schurCompl and the given local to global mapping information.
         * @param pLocToGloMap Local to global mapping information.
         */
        void GlobalLinSysPETScStaticCond::v_AssembleSchurComplement(
            AssemblyMapSharedPtr pLocToGloMap)
        {
            int i, j, n, cnt, gid1, gid2, loc_lda;
            NekDouble sign1, sign2, value;

            const int nDirDofs = pLocToGloMap->GetNumGlobalDirBndCoeffs();

            DNekScalBlkMatSharedPtr SchurCompl = m_schurCompl;
            DNekScalBlkMatSharedPtr BinvD      = m_BinvD;
            DNekScalBlkMatSharedPtr C          = m_C;
            DNekScalBlkMatSharedPtr invD       = m_invD;
            DNekScalMatSharedPtr    loc_mat;

            // Build precon again if we are in multi-level static condensation
            // (a bit of a hack)
            if (m_linSysKey.GetGlobalSysSolnType() ==
                    ePETScMultiLevelStaticCond && m_precon)
            {
                m_precon = CreatePrecon(pLocToGloMap);
                m_precon->BuildPreconditioner();
            }

            // CALCULATE REORDERING MAPPING
            CalculateReordering(pLocToGloMap->GetGlobalToUniversalBndMap(),
                                pLocToGloMap->GetGlobalToUniversalBndMapUnique(),
                                pLocToGloMap);

            // SET UP VECTORS AND MATRIX
            SetUpMatVec(pLocToGloMap->GetNumGlobalBndCoeffs(), nDirDofs);

            // SET UP SCATTER OBJECTS
            SetUpScatter();

            // CONSTRUCT KSP OBJECT
            SetUpSolver(pLocToGloMap->GetIterativeTolerance());

            // If we are using the matrix multiplication shell don't try to
            // populate the matrix.
            if (m_matMult == ePETScMatMultShell)
            {
                return;
            }

            // POPULATE MATRIX
            for (n = cnt = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
            {
                loc_mat = m_schurCompl->GetBlock(n, n);
                loc_lda = loc_mat->GetRows();

                for (i = 0; i < loc_lda; ++i)
                {
                    gid1  = pLocToGloMap->GetLocalToGlobalBndMap(cnt + i)
                                - nDirDofs;
                    sign1 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + i);
                    if (gid1 >= 0)
                    {
                        int gid1ro = m_reorderedMap[gid1];
                        for (j = 0; j < loc_lda; ++j)
                        {
                            gid2  = pLocToGloMap->GetLocalToGlobalBndMap(cnt + j)
                                        - nDirDofs;
                            sign2 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + j);
                            if (gid2 >= 0)
                            {
                                int gid2ro = m_reorderedMap[gid2];
                                value      = sign1 * sign2 * (*loc_mat)(i, j);
                                MatSetValue(
                                    m_matrix, gid1ro, gid2ro, value, ADD_VALUES);
                            }
                        }
                    }
                }
                cnt += loc_lda;
            }

            // ASSEMBLE MATRIX
            MatAssemblyBegin(m_matrix, MAT_FINAL_ASSEMBLY);
            MatAssemblyEnd  (m_matrix, MAT_FINAL_ASSEMBLY);
        }
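
        /*
         * The PETSc calls above follow the library's standard insert/assemble
         * workflow. As a point of reference, the self-contained sketch below
         * (plain PETSc, independent of Nektar++; error checking omitted for
         * brevity) assembles a small matrix with MatSetValue/ADD_VALUES,
         * finalises it with MatAssemblyBegin/MatAssemblyEnd and solves with a
         * KSP object, mirroring the pattern used by this class together with
         * SetUpMatVec()/SetUpSolver().
         *
         * @code
         * #include <petscksp.h>
         *
         * int main(int argc, char **argv)
         * {
         *     PetscInitialize(&argc, &argv, NULL, NULL);
         *
         *     // Assemble a 4x4 tridiagonal SPD matrix with ADD_VALUES.
         *     Mat A;
         *     MatCreate(PETSC_COMM_WORLD, &A);
         *     MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, 4, 4);
         *     MatSetFromOptions(A);
         *     MatSetUp(A);
         *     for (PetscInt i = 0; i < 4; ++i)
         *     {
         *         MatSetValue(A, i, i, 2.0, ADD_VALUES);
         *         if (i > 0) MatSetValue(A, i, i - 1, -1.0, ADD_VALUES);
         *         if (i < 3) MatSetValue(A, i, i + 1, -1.0, ADD_VALUES);
         *     }
         *     MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
         *     MatAssemblyEnd  (A, MAT_FINAL_ASSEMBLY);
         *
         *     // Right-hand side b = 1 and solution vector x.
         *     Vec b, x;
         *     VecCreate(PETSC_COMM_WORLD, &b);
         *     VecSetSizes(b, PETSC_DECIDE, 4);
         *     VecSetFromOptions(b);
         *     VecSet(b, 1.0);
         *     VecDuplicate(b, &x);
         *
         *     // Krylov solve; solver and preconditioner are configurable at
         *     // run time through the PETSc options database.
         *     KSP ksp;
         *     KSPCreate(PETSC_COMM_WORLD, &ksp);
         *     KSPSetOperators(ksp, A, A);
         *     KSPSetFromOptions(ksp);
         *     KSPSolve(ksp, b, x);
         *
         *     KSPDestroy(&ksp);
         *     VecDestroy(&x);
         *     VecDestroy(&b);
         *     MatDestroy(&A);
         *     PetscFinalize();
         *     return 0;
         * }
         * @endcode
         */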

        DNekScalBlkMatSharedPtr GlobalLinSysPETScStaticCond::
            v_GetStaticCondBlock(unsigned int n)
        {
            DNekScalBlkMatSharedPtr schurComplBlock;
            DNekScalMatSharedPtr    localMat = m_schurCompl->GetBlock(n, n);
            unsigned int nbdry    = localMat->GetRows();
            unsigned int nblks    = 1;
            unsigned int esize[1] = {nbdry};

            schurComplBlock = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nblks, nblks, esize, esize);
            schurComplBlock->SetBlock(0, 0, localMat);

            return schurComplBlock;
        }

        void GlobalLinSysPETScStaticCond::v_PreSolve(
            int                     scLevel,
            Array<OneD, NekDouble> &F_bnd)
        {
            boost::ignore_unused(F_bnd);

            if (scLevel == 0)
            {
                // When matrices are supplied to the constructor at the top
                // level, the preconditioner is never set up.
                if (!m_precon)
                {
                    m_precon = CreatePrecon(m_locToGloMap.lock());
                    m_precon->BuildPreconditioner();
                }
            }
        }

        void GlobalLinSysPETScStaticCond::v_BasisFwdTransform(
            Array<OneD, NekDouble> &pInOut)
        {
            m_precon->DoTransformBasisToLowEnergy(pInOut);
        }

        void GlobalLinSysPETScStaticCond::v_CoeffsBwdTransform(
            Array<OneD, NekDouble> &pInOut)
        {
            m_precon->DoTransformCoeffsFromLowEnergy(pInOut);
        }

        void GlobalLinSysPETScStaticCond::v_CoeffsFwdTransform(
            const Array<OneD, NekDouble> &pInput,
            Array<OneD, NekDouble>       &pOutput)
        {
            m_precon->DoTransformCoeffsToLowEnergy(pInput, pOutput);
        }

        /**
         * @brief Apply matrix-vector multiplication using local approach and
         * the assembly map.
         *
         * @param input   Vector input.
         * @param output  Result of multiplication.
         *
         * @todo This can possibly be made faster by using the sparse
         *       block-matrix multiplication code from the iterative elastic
         *       systems.
         */
        void GlobalLinSysPETScStaticCond::v_DoMatrixMultiply(
            const Array<OneD, const NekDouble> &input,
                  Array<OneD, NekDouble>       &output)
        {
            auto asmMap = m_locToGloMap.lock();

            int nLocBndDofs = asmMap->GetNumLocalBndCoeffs();
            int nDirDofs    = asmMap->GetNumGlobalDirBndCoeffs();

            NekVector<NekDouble> in(nLocBndDofs), out(nLocBndDofs);
            asmMap->GlobalToLocalBnd(input, in.GetPtr(), nDirDofs);
            out = (*m_schurCompl) * in;
            asmMap->AssembleBnd(out.GetPtr(), output, nDirDofs);
        }
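
        /*
         * In operator form, the multiplication above applies the assembled
         * Schur complement without ever forming it globally: writing
         * @f$\mathcal{A}@f$ for the (signed) local-to-global boundary assembly
         * operator used by GlobalToLocalBnd()/AssembleBnd(), the result is,
         * in effect,
         * @f[
         *   \hat{S}\,\boldsymbol{v}
         *     = \mathcal{A}^{T}\,
         *       \mathrm{diag}(S^{1},\dots,S^{N_{el}})\,
         *       \mathcal{A}\,\boldsymbol{v},
         * @f]
         * i.e. scatter to elemental boundary coefficients, multiply by each
         * elemental Schur block, then assemble (sum) back to global boundary
         * coefficients with Dirichlet degrees of freedom excluded.
         */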

        GlobalLinSysStaticCondSharedPtr GlobalLinSysPETScStaticCond::v_Recurse(
            const GlobalLinSysKey              &mkey,
            const std::weak_ptr<ExpList>       &pExpList,
            const DNekScalBlkMatSharedPtr       pSchurCompl,
            const DNekScalBlkMatSharedPtr       pBinvD,
            const DNekScalBlkMatSharedPtr       pC,
            const DNekScalBlkMatSharedPtr       pInvD,
            const std::shared_ptr<AssemblyMap> &l2gMap)
        {
            GlobalLinSysPETScStaticCondSharedPtr sys = MemoryManager<
                GlobalLinSysPETScStaticCond>::AllocateSharedPtr(
                    mkey, pExpList, pSchurCompl, pBinvD, pC, pInvD, l2gMap,
                    m_precon);
            sys->Initialise(l2gMap);
            return sys;
        }
    }
}