GlobalLinSysIterativeStaticCond.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File: GlobalLinSysIterativeStaticCond.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: Implementation of a linear solver using single-
//              or multi-level static condensation
//
///////////////////////////////////////////////////////////////////////////////

#include <MultiRegions/GlobalLinSysIterativeStaticCond.h>
#include <LibUtilities/LinearAlgebra/SparseUtils.hpp>

using namespace std;

namespace Nektar
{
    namespace MultiRegions
    {
        /**
         * @class GlobalLinSysIterativeStaticCond
         *
         * Solves a linear system iteratively using single- or multi-level
         * static condensation.
         */

        /**
         * Registers the class with the Factory.
         */
        string GlobalLinSysIterativeStaticCond::className
                = GetGlobalLinSysFactory().RegisterCreatorFunction(
                    "IterativeStaticCond",
                    GlobalLinSysIterativeStaticCond::create,
                    "Iterative static condensation.");

        string GlobalLinSysIterativeStaticCond::className2
                = GetGlobalLinSysFactory().RegisterCreatorFunction(
                    "IterativeMultiLevelStaticCond",
                    GlobalLinSysIterativeStaticCond::create,
                    "Iterative multi-level static condensation.");

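        // Either registered variant is selected at run time through the
        // "GlobalSysSoln" solver-info property. An illustrative session-file
        // fragment (XML element names assumed from the standard Nektar++
        // session format):
        //
        //   <SOLVERINFO>
        //     <I PROPERTY="GlobalSysSoln" VALUE="IterativeStaticCond" />
        //   </SOLVERINFO>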

        std::string GlobalLinSysIterativeStaticCond::storagedef =
            LibUtilities::SessionReader::RegisterDefaultSolverInfo(
                "LocalMatrixStorageStrategy",
                "Sparse");
        std::string GlobalLinSysIterativeStaticCond::storagelookupIds[3] = {
            LibUtilities::SessionReader::RegisterEnumValue(
                "LocalMatrixStorageStrategy",
                "Contiguous",
                MultiRegions::eContiguous),
            LibUtilities::SessionReader::RegisterEnumValue(
                "LocalMatrixStorageStrategy",
                "Non-contiguous",
                MultiRegions::eNonContiguous),
            LibUtilities::SessionReader::RegisterEnumValue(
                "LocalMatrixStorageStrategy",
                "Sparse",
                MultiRegions::eSparse),
        };
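
        // The storage strategy registered above is likewise read from the
        // session's solver-info; an illustrative fragment selecting the
        // sparse representation (syntax assumed from the standard Nektar++
        // session format):
        //
        //   <SOLVERINFO>
        //     <I PROPERTY="LocalMatrixStorageStrategy" VALUE="Sparse" />
        //   </SOLVERINFO>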

        /**
         * For a matrix system of the form @f[
         * \left[ \begin{array}{cc}
         * \boldsymbol{A} & \boldsymbol{B}\\
         * \boldsymbol{C} & \boldsymbol{D}
         * \end{array} \right]
         * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
         * \end{array}\right]
         * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
         * \end{array}\right],
         * @f]
         * where @f$\boldsymbol{D}@f$ and
         * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
         * a static condensation system, according to a given local to global
         * mapping. #m_linSys is constructed by AssembleSchurComplement().
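         *
         * Eliminating @f$\boldsymbol{x_2}@f$ yields the statically condensed
         * (Schur complement) system for the boundary unknowns,
         * @f[
         * (\boldsymbol{A-BD^{-1}C})\,\boldsymbol{x_1} =
         * \boldsymbol{y_1} - \boldsymbol{BD^{-1}y_2},
         * @f]
         * after which the interior unknowns follow from
         * @f$\boldsymbol{x_2} = \boldsymbol{D^{-1}}
         * (\boldsymbol{y_2} - \boldsymbol{Cx_1})@f$.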
         * @param   mKey        Associated matrix key.
         * @param   pLocMatSys  LocalMatrixSystem
         * @param   locToGloMap Local to global mapping.
         */
        GlobalLinSysIterativeStaticCond::GlobalLinSysIterativeStaticCond(
            const GlobalLinSysKey              &pKey,
            const std::weak_ptr<ExpList>       &pExpList,
            const std::shared_ptr<AssemblyMap> &pLocToGloMap)
            : GlobalLinSys          (pKey, pExpList, pLocToGloMap),
              GlobalLinSysIterative (pKey, pExpList, pLocToGloMap),
              GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
        {
            ASSERTL1((pKey.GetGlobalSysSolnType() == eIterativeStaticCond) ||
                     (pKey.GetGlobalSysSolnType() ==
                          eIterativeMultiLevelStaticCond),
                     "This constructor is only valid when using static "
                     "condensation");
            ASSERTL1(pKey.GetGlobalSysSolnType()
                        == pLocToGloMap->GetGlobalSysSolnType(),
                     "The local to global map is not set up for the requested "
                     "solution type");
        }

        /**
         *
         */
        GlobalLinSysIterativeStaticCond::GlobalLinSysIterativeStaticCond(
            const GlobalLinSysKey              &pKey,
            const std::weak_ptr<ExpList>       &pExpList,
            const DNekScalBlkMatSharedPtr       pSchurCompl,
            const DNekScalBlkMatSharedPtr       pBinvD,
            const DNekScalBlkMatSharedPtr       pC,
            const DNekScalBlkMatSharedPtr       pInvD,
            const std::shared_ptr<AssemblyMap> &pLocToGloMap,
            const PreconditionerSharedPtr       pPrecon)
            : GlobalLinSys          (pKey, pExpList, pLocToGloMap),
              GlobalLinSysIterative (pKey, pExpList, pLocToGloMap),
              GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
        {
            m_schurCompl = pSchurCompl;
            m_S1Blk      = pSchurCompl;
            m_BinvD      = pBinvD;
            m_C          = pC;
            m_invD       = pInvD;
            m_precon     = pPrecon;
        }

        void GlobalLinSysIterativeStaticCond::v_InitObject()
        {
            auto asmMap = m_locToGloMap.lock();

            m_precon = CreatePrecon(asmMap);

            // Allocate memory for top-level structure
            SetupTopLevel(asmMap);

            // Setup Block Matrix systems
            int n, n_exp = m_expList.lock()->GetNumElmts();

            MatrixStorage blkmatStorage = eDIAGONAL;
            const Array<OneD, const unsigned int> &nbdry_size
                    = asmMap->GetNumLocalBndCoeffsPerPatch();

            m_S1Blk = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nbdry_size, nbdry_size, blkmatStorage);

            // Preserve original matrix in m_S1Blk
            for (n = 0; n < n_exp; ++n)
            {
                DNekScalMatSharedPtr mat = m_schurCompl->GetBlock(n, n);
                m_S1Blk->SetBlock(n, n, mat);
            }

            // Build preconditioner
            m_precon->BuildPreconditioner();

            // Do transform of Schur complement matrix
            int cnt = 0;
            for (n = 0; n < n_exp; ++n)
            {
                if (m_linSysKey.GetMatrixType() !=
                        StdRegions::eHybridDGHelmBndLam)
                {
                    DNekScalMatSharedPtr mat = m_S1Blk->GetBlock(n, n);
                    DNekScalMatSharedPtr t = m_precon->TransformedSchurCompl(
                        n, cnt, mat);
                    m_schurCompl->SetBlock(n, n, t);
                    cnt += mat->GetRows();
                }
            }

            // Construct this level
            Initialise(asmMap);
        }

        /**
         *
         */
        GlobalLinSysIterativeStaticCond::~GlobalLinSysIterativeStaticCond()
        {

        }

        DNekScalBlkMatSharedPtr GlobalLinSysIterativeStaticCond::
            v_GetStaticCondBlock(unsigned int n)
        {
            DNekScalBlkMatSharedPtr schurComplBlock;
            int scLevel = m_locToGloMap.lock()->GetStaticCondLevel();
            DNekScalBlkMatSharedPtr sc = scLevel == 0 ? m_S1Blk : m_schurCompl;
            DNekScalMatSharedPtr localMat = sc->GetBlock(n,n);
            unsigned int nbdry    = localMat->GetRows();
            unsigned int nblks    = 1;
            unsigned int esize[1] = {nbdry};

            schurComplBlock = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nblks, nblks, esize, esize);
            schurComplBlock->SetBlock(0, 0, localMat);

            return schurComplBlock;
        }

        /**
         * Assemble the Schur complement matrix from the block matrices stored
         * in #m_blkMatrices and the given local to global mapping information.
         * @param   locToGloMap Local to global mapping information.
         */
        void GlobalLinSysIterativeStaticCond::v_AssembleSchurComplement(
            const AssemblyMapSharedPtr pLocToGloMap)
        {
            int i, j, n, cnt, gid1, gid2;
            NekDouble sign1, sign2;

            bool doGlobalOp = m_expList.lock()->GetGlobalOptParam()->
                DoGlobalMatOp(m_linSysKey.GetMatrixType());

            // Set up unique map
            v_UniqueMap();

            // Build precon again if we are in multi-level static condensation
            // (a bit of a hack)
            if (m_linSysKey.GetGlobalSysSolnType() ==
                    eIterativeMultiLevelStaticCond)
            {
                m_precon = CreatePrecon(pLocToGloMap);
                m_precon->BuildPreconditioner();
            }

            if (!doGlobalOp)
            {
                PrepareLocalSchurComplement();
                return;
            }

            int nBndDofs  = pLocToGloMap->GetNumGlobalBndCoeffs();
            int NumDirBCs = pLocToGloMap->GetNumGlobalDirBndCoeffs();
            unsigned int rows = nBndDofs - NumDirBCs;
            unsigned int cols = nBndDofs - NumDirBCs;

            // COO sparse storage to assist in assembly
            COOMatType gmat_coo;

            // Get the matrix storage structure
            // (whether to store only one triangular part, if symmetric)
            MatrixStorage matStorage = eFULL;

            // assemble globally
            DNekScalMatSharedPtr loc_mat;
            int loc_lda;
            for (n = cnt = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
            {
                loc_mat = m_schurCompl->GetBlock(n,n);
                loc_lda = loc_mat->GetRows();

                // Set up Matrix;
                for (i = 0; i < loc_lda; ++i)
                {
                    gid1  = pLocToGloMap->GetLocalToGlobalBndMap (cnt + i)
                                                                  - NumDirBCs;
                    sign1 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + i);

                    if (gid1 >= 0)
                    {
                        for (j = 0; j < loc_lda; ++j)
                        {
                            gid2  = pLocToGloMap->GetLocalToGlobalBndMap(cnt+j)
                                                                  - NumDirBCs;
                            sign2 = pLocToGloMap->GetLocalToGlobalBndSign(cnt+j);

                            if (gid2 >= 0)
                            {
                                gmat_coo[std::make_pair(gid1,gid2)] +=
                                    sign1*sign2*(*loc_mat)(i,j);
                            }
                        }
                    }
                }
                cnt += loc_lda;
            }

            DNekSmvBsrDiagBlkMat::SparseStorageSharedPtrVector
                sparseStorage (1);

            BCOMatType partMat;
            convertCooToBco(1, gmat_coo, partMat);

            sparseStorage[0] =
                MemoryManager<DNekSmvBsrDiagBlkMat::StorageType>::
                    AllocateSharedPtr(rows, cols, 1, partMat, matStorage);

            // Create block diagonal matrix
            m_sparseSchurCompl = MemoryManager<DNekSmvBsrDiagBlkMat>::
                AllocateSharedPtr(sparseStorage);
        }

        /**
         * Populates the sparse block-diagonal Schur complement matrix from
         * the block matrices stored in #m_blkMatrices.
         */
        void GlobalLinSysIterativeStaticCond::PrepareLocalSchurComplement()
        {
            LocalMatrixStorageStrategy storageStrategy =
                m_expList.lock()->GetSession()->
                    GetSolverInfoAsEnum<LocalMatrixStorageStrategy>(
                        "LocalMatrixStorageStrategy");

            switch (storageStrategy)
            {
                case MultiRegions::eContiguous:
                case MultiRegions::eNonContiguous:
                {
                    size_t storageSize = 0;
                    int nBlk = m_schurCompl->GetNumberOfBlockRows();

                    m_scale = Array<OneD, NekDouble>    (nBlk, 1.0);
                    m_rows  = Array<OneD, unsigned int> (nBlk, 0U);

                    // Determine storage requirements for dense blocks.
                    for (int i = 0; i < nBlk; ++i)
                    {
                        m_rows[i]    = m_schurCompl->GetBlock(i,i)->GetRows();
                        m_scale[i]   = m_schurCompl->GetBlock(i,i)->Scale();
                        storageSize += m_rows[i] * m_rows[i];
                    }

                    // Assemble dense storage blocks.
                    DNekScalMatSharedPtr loc_mat;
                    m_denseBlocks.resize(nBlk);
                    double *ptr = 0;

                    if (MultiRegions::eContiguous == storageStrategy)
                    {
                        m_storage.resize(storageSize);
                        ptr = &m_storage[0];
                    }

                    for (unsigned int n = 0; n < nBlk; ++n)
                    {
                        loc_mat = m_schurCompl->GetBlock(n,n);

                        if (MultiRegions::eContiguous == storageStrategy)
                        {
                            int loc_lda   = loc_mat->GetRows();
                            int blockSize = loc_lda * loc_lda;
                            m_denseBlocks[n] = ptr;
                            for (int i = 0; i < loc_lda; ++i)
                            {
                                for (int j = 0; j < loc_lda; ++j)
                                {
                                    ptr[j*loc_lda+i] = (*loc_mat)(i,j);
                                }
                            }
                            ptr += blockSize;
                        }
                        else
                        {
                            m_denseBlocks[n] = loc_mat->GetRawPtr();
                        }
                    }
                    break;
                }
                case MultiRegions::eSparse:
                {
                    DNekScalMatSharedPtr loc_mat;
                    int loc_lda;
                    int blockSize = 0;

                    // First run through to split the set of local matrices
                    // into partitions of fixed block size, and count the
                    // number of local matrices that belong to each partition.
                    std::vector<std::pair<int,int> > partitions;
                    for (int n = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
                    {
                        loc_mat = m_schurCompl->GetBlock(n,n);
                        loc_lda = loc_mat->GetRows();

                        ASSERTL1(loc_lda >= 0,
                                 boost::lexical_cast<std::string>(n) + "-th "
                                 "matrix block in Schur complement has "
                                 "rank 0!");

                        if (blockSize == loc_lda)
                        {
                            partitions[partitions.size()-1].first++;
                        }
                        else
                        {
                            blockSize = loc_lda;
                            partitions.push_back(make_pair(1,loc_lda));
                        }
                    }

                    MatrixStorage matStorage = eFULL;

                    // Create a vector of sparse storage holders
                    DNekSmvBsrDiagBlkMat::SparseStorageSharedPtrVector
                        sparseStorage (partitions.size());

                    for (int part = 0, n = 0; part < partitions.size(); ++part)
                    {
                        BCOMatType partMat;

                        for (int k = 0; k < partitions[part].first; ++k, ++n)
                        {
                            loc_mat = m_schurCompl->GetBlock(n,n);
                            loc_lda = loc_mat->GetRows();

                            ASSERTL1(loc_lda == partitions[part].second,
                                     boost::lexical_cast<std::string>(n) + "-th"
                                     " matrix block in Schur complement has "
                                     "unexpected rank");

                            NekDouble scale = loc_mat->Scale();
                            if (fabs(scale-1.0) > NekConstants::kNekZeroTol)
                            {
                                Array<OneD, NekDouble> matarray(loc_lda*loc_lda);
                                Vmath::Smul(loc_lda*loc_lda, scale,
                                            loc_mat->GetRawPtr(), 1,
                                            &matarray[0], 1);
                                partMat[make_pair(k,k)] = BCOEntryType(matarray);
                            }
                            else // scale factor is 1.0
                            {
                                partMat[make_pair(k,k)] = BCOEntryType(
                                    loc_lda*loc_lda, loc_mat->GetRawPtr());
                            }

                            GlobalLinSys::v_DropStaticCondBlock(n);
                        }

                        sparseStorage[part] =
                            MemoryManager<DNekSmvBsrDiagBlkMat::StorageType>::
                                AllocateSharedPtr(
                                    partitions[part].first,
                                    partitions[part].first,
                                    partitions[part].second,
                                    partMat, matStorage);
                    }

                    // Create block diagonal matrix
                    m_sparseSchurCompl = MemoryManager<DNekSmvBsrDiagBlkMat>::
                        AllocateSharedPtr(sparseStorage);

                    break;
                }
                default:
                    ErrorUtil::NekError("Solver info property "
                        "LocalMatrixStorageStrategy takes values "
                        "Contiguous, Non-contiguous and Sparse");
            }
        }

        /**
         *
         */
        void GlobalLinSysIterativeStaticCond::v_DoMatrixMultiply(
            const Array<OneD, NekDouble> &pInput,
                  Array<OneD, NekDouble> &pOutput)
        {
            auto asmMap = m_locToGloMap.lock();

            int nLocal = asmMap->GetNumLocalBndCoeffs();
            int nDir   = asmMap->GetNumGlobalDirBndCoeffs();
            bool doGlobalOp = m_expList.lock()->GetGlobalOptParam()->
                DoGlobalMatOp(m_linSysKey.GetMatrixType());

            if (doGlobalOp)
            {
                // Do matrix multiply globally
                Array<OneD, NekDouble> in  = pInput  + nDir;
                Array<OneD, NekDouble> out = pOutput + nDir;

                m_sparseSchurCompl->Multiply(in, out);
                asmMap->UniversalAssembleBnd(pOutput, nDir);
            }
            else if (m_sparseSchurCompl)
            {
                // Do matrix multiply locally using block-diagonal sparse matrix
                Array<OneD, NekDouble> tmp = m_wsp + nLocal;

                asmMap->GlobalToLocalBnd(pInput, m_wsp);
                m_sparseSchurCompl->Multiply(m_wsp, tmp);
                asmMap->AssembleBnd(tmp, pOutput);
            }
            else
            {
                // Do matrix multiply locally, using direct BLAS calls
                asmMap->GlobalToLocalBnd(pInput, m_wsp);
                int i, cnt;
                Array<OneD, NekDouble> tmpout = m_wsp + nLocal;
                for (i = cnt = 0; i < m_denseBlocks.size(); cnt += m_rows[i], ++i)
                {
                    const int rows = m_rows[i];
                    Blas::Dgemv('N', rows, rows,
                                m_scale[i], m_denseBlocks[i], rows,
                                m_wsp.get()+cnt, 1,
                                0.0, tmpout.get()+cnt, 1);
                }
                asmMap->AssembleBnd(tmpout, pOutput);
            }
        }

        void GlobalLinSysIterativeStaticCond::v_UniqueMap()
        {
            m_map = m_locToGloMap.lock()->GetGlobalToUniversalBndMapUnique();
        }

        DNekScalBlkMatSharedPtr GlobalLinSysIterativeStaticCond::v_PreSolve(
            int                   scLevel,
            NekVector<NekDouble> &F_GlobBnd)
        {
            if (scLevel == 0)
            {
                // When matrices are supplied to the constructor at the top
                // level, the preconditioner is never set up.
                if (!m_precon)
                {
                    m_precon = CreatePrecon(m_locToGloMap.lock());
                    m_precon->BuildPreconditioner();
                }

                Set_Rhs_Magnitude(F_GlobBnd);

                return m_S1Blk;
            }
            else
            {
                // For the multi-level iterative solver, always use the rhs
                // vector value with no weighting.
                m_rhs_magnitude = NekConstants::kNekUnsetDouble;

                return m_schurCompl;
            }
        }

        void GlobalLinSysIterativeStaticCond::v_BasisFwdTransform(
            Array<OneD, NekDouble> &pInOut,
            int                     offset)
        {
            m_precon->DoTransformToLowEnergy(pInOut, offset);
        }

        void GlobalLinSysIterativeStaticCond::v_BasisBwdTransform(
            Array<OneD, NekDouble> &pInOut)
        {
            m_precon->DoTransformFromLowEnergy(pInOut);
        }

        GlobalLinSysStaticCondSharedPtr GlobalLinSysIterativeStaticCond::
            v_Recurse(
                const GlobalLinSysKey              &mkey,
                const std::weak_ptr<ExpList>       &pExpList,
                const DNekScalBlkMatSharedPtr       pSchurCompl,
                const DNekScalBlkMatSharedPtr       pBinvD,
                const DNekScalBlkMatSharedPtr       pC,
                const DNekScalBlkMatSharedPtr       pInvD,
                const std::shared_ptr<AssemblyMap> &l2gMap)
        {
            GlobalLinSysIterativeStaticCondSharedPtr sys = MemoryManager<
                GlobalLinSysIterativeStaticCond>::AllocateSharedPtr(
                    mkey, pExpList, pSchurCompl, pBinvD, pC, pInvD, l2gMap,
                    m_precon);
            sys->Initialise(l2gMap);
            return sys;
        }
    }
}