GlobalLinSysIterativeStaticCond.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File: GlobalLinSysIterativeStaticCond.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: Implementation to linear solver using single-
//              or multi-level static condensation
//
///////////////////////////////////////////////////////////////////////////////

#include <MultiRegions/GlobalLinSysIterativeStaticCond.h>
#include <LibUtilities/LinearAlgebra/StorageSmvBsr.hpp>
#include <LibUtilities/LinearAlgebra/SparseDiagBlkMatrix.hpp>
#include <LibUtilities/LinearAlgebra/SparseUtils.hpp>

using namespace std;

namespace Nektar
{
    namespace MultiRegions
    {
        /**
         * @class GlobalLinSysIterativeStaticCond
         *
         * Solves a linear system iteratively using single- or multi-level
         * static condensation.
         */

        /**
         * Registers the class with the Factory.
         */
        string GlobalLinSysIterativeStaticCond::className
                = GetGlobalLinSysFactory().RegisterCreatorFunction(
                    "IterativeStaticCond",
                    GlobalLinSysIterativeStaticCond::create,
                    "Iterative static condensation.");
        string GlobalLinSysIterativeStaticCond::className2
                = GetGlobalLinSysFactory().RegisterCreatorFunction(
                    "IterativeMultiLevelStaticCond",
                    GlobalLinSysIterativeStaticCond::create,
                    "Iterative multi-level static condensation.");

        std::string GlobalLinSysIterativeStaticCond::storagedef =
            LibUtilities::SessionReader::RegisterDefaultSolverInfo(
                "LocalMatrixStorageStrategy",
                "Sparse");
        std::string GlobalLinSysIterativeStaticCond::storagelookupIds[3] = {
            LibUtilities::SessionReader::RegisterEnumValue(
                "LocalMatrixStorageStrategy",
                "Contiguous",
                MultiRegions::eContiguous),
            LibUtilities::SessionReader::RegisterEnumValue(
                "LocalMatrixStorageStrategy",
                "Non-contiguous",
                MultiRegions::eNonContiguous),
            LibUtilities::SessionReader::RegisterEnumValue(
                "LocalMatrixStorageStrategy",
                "Sparse",
                MultiRegions::eSparse),
        };
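
        // Illustrative usage sketch, assuming a standard Nektar++ session
        // file: the registrations above make this solver and its local
        // storage strategy selectable through SOLVERINFO properties, e.g.
        //
        //   <CONDITIONS>
        //     <SOLVERINFO>
        //       <I PROPERTY="GlobalSysSoln"              VALUE="IterativeStaticCond" />
        //       <I PROPERTY="LocalMatrixStorageStrategy" VALUE="Sparse" />
        //     </SOLVERINFO>
        //   </CONDITIONS>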

        /**
         * For a matrix system of the form
         * @f[
         * \left[ \begin{array}{cc}
         * \boldsymbol{A} & \boldsymbol{B}\\
         * \boldsymbol{C} & \boldsymbol{D}
         * \end{array} \right]
         * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
         * \end{array}\right]
         * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
         * \end{array}\right],
         * @f]
         * where @f$\boldsymbol{D}@f$ and
         * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
         * a static condensation system, according to a given local to global
         * mapping. Eliminating @f$\boldsymbol{x_2}@f$ reduces this to the
         * boundary Schur complement problem
         * @f[
         * (\boldsymbol{A-BD^{-1}C})\boldsymbol{x_1}
         * = \boldsymbol{y_1}-\boldsymbol{BD^{-1}y_2},
         * @f]
         * which is solved iteratively; the interior unknowns then follow
         * from @f$\boldsymbol{x_2}=\boldsymbol{D^{-1}}
         * (\boldsymbol{y_2}-\boldsymbol{Cx_1})@f$.
         * #m_linSys is constructed by AssembleSchurComplement().
         * @param pKey         Associated matrix key.
         * @param pExpList     Expansion list.
         * @param pLocToGloMap Local to global mapping.
         */
        GlobalLinSysIterativeStaticCond::GlobalLinSysIterativeStaticCond(
                     const GlobalLinSysKey                &pKey,
                     const boost::weak_ptr<ExpList>       &pExpList,
                     const boost::shared_ptr<AssemblyMap> &pLocToGloMap)
                : GlobalLinSys          (pKey, pExpList, pLocToGloMap),
                  GlobalLinSysIterative (pKey, pExpList, pLocToGloMap),
                  GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
        {
            ASSERTL1((pKey.GetGlobalSysSolnType() == eIterativeStaticCond) ||
                     (pKey.GetGlobalSysSolnType() ==
                          eIterativeMultiLevelStaticCond),
                     "This constructor is only valid when using static "
                     "condensation");
            ASSERTL1(pKey.GetGlobalSysSolnType()
                        == pLocToGloMap->GetGlobalSysSolnType(),
                     "The local to global map is not set up for the requested "
                     "solution type");
        }


        /**
         *
         */
        GlobalLinSysIterativeStaticCond::GlobalLinSysIterativeStaticCond(
                     const GlobalLinSysKey                &pKey,
                     const boost::weak_ptr<ExpList>       &pExpList,
                     const DNekScalBlkMatSharedPtr         pSchurCompl,
                     const DNekScalBlkMatSharedPtr         pBinvD,
                     const DNekScalBlkMatSharedPtr         pC,
                     const DNekScalBlkMatSharedPtr         pInvD,
                     const boost::shared_ptr<AssemblyMap> &pLocToGloMap,
                     const PreconditionerSharedPtr         pPrecon)
                : GlobalLinSys          (pKey, pExpList, pLocToGloMap),
                  GlobalLinSysIterative (pKey, pExpList, pLocToGloMap),
                  GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
        {
            m_schurCompl = pSchurCompl;
            m_S1Blk      = pSchurCompl;
            m_BinvD      = pBinvD;
            m_C          = pC;
            m_invD       = pInvD;
            m_precon     = pPrecon;
        }


        void GlobalLinSysIterativeStaticCond::v_InitObject()
        {
            m_precon = CreatePrecon(m_locToGloMap);

            // Allocate memory for top-level structure
            SetupTopLevel(m_locToGloMap);

            // Setup Block Matrix systems
            int n, n_exp = m_expList.lock()->GetNumElmts();

            MatrixStorage blkmatStorage = eDIAGONAL;
            const Array<OneD,const unsigned int>& nbdry_size
                    = m_locToGloMap->GetNumLocalBndCoeffsPerPatch();

            m_S1Blk = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nbdry_size, nbdry_size, blkmatStorage);

            // Preserve original matrix in m_S1Blk
            for (n = 0; n < n_exp; ++n)
            {
                DNekScalMatSharedPtr mat = m_schurCompl->GetBlock(n, n);
                m_S1Blk->SetBlock(n, n, mat);
            }

            // Build preconditioner
            m_precon->BuildPreconditioner();

            // Do transform of Schur complement matrix
            for (n = 0; n < n_exp; ++n)
            {
                if (m_linSysKey.GetMatrixType() !=
                        StdRegions::eHybridDGHelmBndLam)
                {
                    DNekScalMatSharedPtr mat = m_S1Blk->GetBlock(n, n);
                    DNekScalMatSharedPtr t = m_precon->TransformedSchurCompl(
                        m_expList.lock()->GetOffset_Elmt_Id(n), mat);
                    m_schurCompl->SetBlock(n, n, t);
                }
            }

            // Construct this level
            Initialise(m_locToGloMap);
        }

        /**
         *
         */
        GlobalLinSysIterativeStaticCond::~GlobalLinSysIterativeStaticCond()
        {

        }

        DNekScalBlkMatSharedPtr GlobalLinSysIterativeStaticCond::
            v_GetStaticCondBlock(unsigned int n)
        {
            DNekScalBlkMatSharedPtr schurComplBlock;
            int scLevel = m_locToGloMap->GetStaticCondLevel();
            DNekScalBlkMatSharedPtr sc = scLevel == 0 ? m_S1Blk : m_schurCompl;
            DNekScalMatSharedPtr localMat = sc->GetBlock(n,n);
            unsigned int nbdry    = localMat->GetRows();
            unsigned int nblks    = 1;
            unsigned int esize[1] = {nbdry};

            schurComplBlock = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nblks, nblks, esize, esize);
            schurComplBlock->SetBlock(0, 0, localMat);

            return schurComplBlock;
        }

        /**
         * Assemble the Schur complement matrix from the block matrices stored
         * in #m_blkMatrices and the given local to global mapping information.
         * Each local entry is scattered to the global matrix as
         * @f$ M(g_1,g_2) \mathrel{+}= s_1 s_2 \, S_e(i,j) @f$, where
         * @f$g_1, g_2@f$ are the global ids and @f$s_1, s_2@f$ the signs of
         * the local boundary degrees of freedom @f$i, j@f$ of element
         * @f$e@f$.
         * @param locToGloMap Local to global mapping information.
         */
        void GlobalLinSysIterativeStaticCond::v_AssembleSchurComplement(
            const AssemblyMapSharedPtr pLocToGloMap)
        {
            int i, j, n, cnt, gid1, gid2;
            NekDouble sign1, sign2;

            bool doGlobalOp = m_expList.lock()->GetGlobalOptParam()->
                    DoGlobalMatOp(m_linSysKey.GetMatrixType());

            // Set up unique map
            v_UniqueMap();

            // Build precon again if we are in multi-level static condensation
            // (a bit of a hack)
            if (m_linSysKey.GetGlobalSysSolnType() ==
                    eIterativeMultiLevelStaticCond)
            {
                m_precon = CreatePrecon(m_locToGloMap);
                m_precon->BuildPreconditioner();
            }

            if (!doGlobalOp)
            {
                PrepareLocalSchurComplement();
                return;
            }

            int nBndDofs  = pLocToGloMap->GetNumGlobalBndCoeffs();
            int NumDirBCs = pLocToGloMap->GetNumGlobalDirBndCoeffs();
            unsigned int rows = nBndDofs - NumDirBCs;
            unsigned int cols = nBndDofs - NumDirBCs;

            // COO sparse storage to assist in assembly
            COOMatType gmat_coo;

            // Get the matrix storage structure
            // (whether to store only one triangular part, if symmetric)
            MatrixStorage matStorage = eFULL;

            // Assemble globally
            DNekScalMatSharedPtr loc_mat;
            int loc_lda;
            for(n = cnt = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
            {
                loc_mat = m_schurCompl->GetBlock(n,n);
                loc_lda = loc_mat->GetRows();

                // Set up matrix
                for(i = 0; i < loc_lda; ++i)
                {
                    gid1  = pLocToGloMap->GetLocalToGlobalBndMap (cnt + i)
                                                                 - NumDirBCs;
                    sign1 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + i);

                    if(gid1 >= 0)
                    {
                        for(j = 0; j < loc_lda; ++j)
                        {
                            gid2  = pLocToGloMap->GetLocalToGlobalBndMap(cnt+j)
                                                                 - NumDirBCs;
                            sign2 = pLocToGloMap->GetLocalToGlobalBndSign(cnt+j);

                            if (gid2 >= 0)
                            {
                                gmat_coo[std::make_pair(gid1,gid2)] +=
                                    sign1*sign2*(*loc_mat)(i,j);
                            }
                        }
                    }
                }
                cnt += loc_lda;
            }

            DNekSmvBsrDiagBlkMat::SparseStorageSharedPtrVector
                sparseStorage (1);

            BCOMatType partMat;
            convertCooToBco(rows, cols, 1, gmat_coo, partMat);

            sparseStorage[0] =
                MemoryManager<DNekSmvBsrDiagBlkMat::StorageType>::
                    AllocateSharedPtr(rows, cols, 1, partMat, matStorage);

            // Create block diagonal matrix
            m_sparseSchurCompl = MemoryManager<DNekSmvBsrDiagBlkMat>::
                AllocateSharedPtr(sparseStorage);
        }


        /**
         * Populates the sparse block-diagonal Schur complement matrix from
         * the block matrices stored in #m_blkMatrices.
         */
        void GlobalLinSysIterativeStaticCond::PrepareLocalSchurComplement()
        {
            LocalMatrixStorageStrategy storageStrategy =
                m_expList.lock()->GetSession()->
                    GetSolverInfoAsEnum<LocalMatrixStorageStrategy>(
                        "LocalMatrixStorageStrategy");

            switch(storageStrategy)
            {
                case MultiRegions::eContiguous:
                case MultiRegions::eNonContiguous:
                {
                    size_t storageSize = 0;
                    int nBlk = m_schurCompl->GetNumberOfBlockRows();

                    m_scale = Array<OneD, NekDouble>    (nBlk, 1.0);
                    m_rows  = Array<OneD, unsigned int> (nBlk, 0U);

                    // Determine storage requirements for dense blocks.
                    for (int i = 0; i < nBlk; ++i)
                    {
                        m_rows[i]    = m_schurCompl->GetBlock(i,i)->GetRows();
                        m_scale[i]   = m_schurCompl->GetBlock(i,i)->Scale();
                        storageSize += m_rows[i] * m_rows[i];
                    }

                    // Assemble dense storage blocks.
                    DNekScalMatSharedPtr loc_mat;
                    m_denseBlocks.resize(nBlk);
                    double *ptr = 0;

                    if (MultiRegions::eContiguous == storageStrategy)
                    {
                        m_storage.resize(storageSize);
                        ptr = &m_storage[0];
                    }

                    for (unsigned int n = 0; n < nBlk; ++n)
                    {
                        loc_mat = m_schurCompl->GetBlock(n,n);

                        if (MultiRegions::eContiguous == storageStrategy)
                        {
                            int loc_lda      = loc_mat->GetRows();
                            int blockSize    = loc_lda * loc_lda;
                            m_denseBlocks[n] = ptr;
                            for(int i = 0; i < loc_lda; ++i)
                            {
                                for(int j = 0; j < loc_lda; ++j)
                                {
                                    ptr[j*loc_lda+i] = (*loc_mat)(i,j);
                                }
                            }
                            ptr += blockSize;
                            GlobalLinSys::v_DropStaticCondBlock(
                                m_expList.lock()->GetOffset_Elmt_Id(n));
                        }
                        else
                        {
                            m_denseBlocks[n] = loc_mat->GetRawPtr();
                        }
                    }
                    break;
                }
                case MultiRegions::eSparse:
                {
                    DNekScalMatSharedPtr loc_mat;
                    int loc_lda;
                    int blockSize = 0;

                    // First run through to split the set of local matrices
                    // into partitions of fixed block size, and count number
                    // of local matrices that belong to each partition.
                    std::vector<std::pair<int,int> > partitions;
                    for(int n = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
                    {
                        loc_mat = m_schurCompl->GetBlock(n,n);
                        loc_lda = loc_mat->GetRows();

                        ASSERTL1(loc_lda > 0,
                                 boost::lexical_cast<std::string>(n) + "-th "
                                 "matrix block in Schur complement has "
                                 "rank 0!");

                        if (blockSize == loc_lda)
                        {
                            partitions[partitions.size()-1].first++;
                        }
                        else
                        {
                            blockSize = loc_lda;
                            partitions.push_back(make_pair(1,loc_lda));
                        }
                    }
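
                    // Illustration: block ranks {5, 5, 5, 7, 7} would give
                    // partitions = {(3,5), (2,7)}, i.e. a run of three rank-5
                    // blocks followed by a run of two rank-7 blocks.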

                    MatrixStorage matStorage = eFULL;

                    // Create a vector of sparse storage holders
                    DNekSmvBsrDiagBlkMat::SparseStorageSharedPtrVector
                        sparseStorage (partitions.size());

                    for (int part = 0, n = 0; part < partitions.size(); ++part)
                    {
                        BCOMatType partMat;

                        for(int k = 0; k < partitions[part].first; ++k, ++n)
                        {
                            loc_mat = m_schurCompl->GetBlock(n,n);
                            loc_lda = loc_mat->GetRows();

                            ASSERTL1(loc_lda == partitions[part].second,
                                     boost::lexical_cast<std::string>(n) + "-th"
                                     " matrix block in Schur complement has "
                                     "unexpected rank");

                            NekDouble scale = loc_mat->Scale();
                            if(fabs(scale-1.0) > NekConstants::kNekZeroTol)
                            {
                                Array<OneD, NekDouble> matarray(loc_lda*loc_lda);
                                Vmath::Smul(loc_lda*loc_lda, scale,
                                            loc_mat->GetRawPtr(), 1,
                                            &matarray[0], 1);
                                partMat[make_pair(k,k)] = BCOEntryType(matarray);
                            }
                            else // scale factor is 1.0
                            {
                                partMat[make_pair(k,k)] = BCOEntryType(
                                    loc_lda*loc_lda, loc_mat->GetRawPtr());
                            }

                            GlobalLinSys::v_DropStaticCondBlock(
                                m_expList.lock()->GetOffset_Elmt_Id(n));
                        }

                        sparseStorage[part] =
                        MemoryManager<DNekSmvBsrDiagBlkMat::StorageType>::
                            AllocateSharedPtr(
                                partitions[part].first, partitions[part].first,
                                partitions[part].second, partMat, matStorage );
                    }

                    // Create block diagonal matrix
                    m_sparseSchurCompl = MemoryManager<DNekSmvBsrDiagBlkMat>::
                        AllocateSharedPtr(sparseStorage);

                    break;
                }
                default:
                    NEKERROR(ErrorUtil::efatal,
                             "Solver info property LocalMatrixStorageStrategy "
                             "takes values Contiguous, Non-contiguous and "
                             "Sparse");
            }
        }

        /**
         *
         */
        void GlobalLinSysIterativeStaticCond::v_DoMatrixMultiply(
                const Array<OneD, NekDouble>& pInput,
                      Array<OneD, NekDouble>& pOutput)
        {
            int nLocal = m_locToGloMap->GetNumLocalBndCoeffs();
            int nDir   = m_locToGloMap->GetNumGlobalDirBndCoeffs();
            bool doGlobalOp = m_expList.lock()->GetGlobalOptParam()->
                    DoGlobalMatOp(m_linSysKey.GetMatrixType());

            if(doGlobalOp)
            {
                // Do matrix multiply globally
                Array<OneD, NekDouble> in  = pInput  + nDir;
                Array<OneD, NekDouble> out = pOutput + nDir;

                m_sparseSchurCompl->Multiply(in,out);
                m_locToGloMap->UniversalAssembleBnd(pOutput, nDir);
            }
            else if (m_sparseSchurCompl)
            {
                // Do matrix multiply locally using block-diagonal sparse
                // matrix
                Array<OneD, NekDouble> tmp = m_wsp + nLocal;

                m_locToGloMap->GlobalToLocalBnd(pInput, m_wsp);
                m_sparseSchurCompl->Multiply(m_wsp,tmp);
                m_locToGloMap->AssembleBnd(tmp, pOutput);
            }
            else
            {
                // Do matrix multiply locally, using direct BLAS calls
                m_locToGloMap->GlobalToLocalBnd(pInput, m_wsp);
                int i, cnt;
                Array<OneD, NekDouble> tmpout = m_wsp + nLocal;
                for (i = cnt = 0; i < m_denseBlocks.size();
                     cnt += m_rows[i], ++i)
                {
                    const int rows = m_rows[i];
                    Blas::Dgemv('N', rows, rows,
                                m_scale[i], m_denseBlocks[i], rows,
                                m_wsp.get()+cnt, 1,
                                0.0, tmpout.get()+cnt, 1);
                }
                m_locToGloMap->AssembleBnd(tmpout, pOutput);
            }
        }
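
        // In all three branches above, the effective operation is
        //   pOutput = A^T * diag(S_1, ..., S_n) * A * pInput,
        // where A denotes the signed global-to-local scatter
        // (GlobalToLocalBnd, with AssembleBnd as its transpose) and S_i are
        // the elemental Schur complement blocks; the branches differ only in
        // how the blocks are stored and multiplied (pre-assembled global
        // sparse matrix, block-diagonal sparse matrix, or per-block dense
        // BLAS Dgemv).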

        void GlobalLinSysIterativeStaticCond::v_UniqueMap()
        {
            m_map = m_locToGloMap->GetGlobalToUniversalBndMapUnique();
        }

        DNekScalBlkMatSharedPtr GlobalLinSysIterativeStaticCond::v_PreSolve(
                int                   scLevel,
                NekVector<NekDouble> &F_GlobBnd)
        {
            if (scLevel == 0)
            {
                // When matrices are supplied to the constructor at the top
                // level, the preconditioner is never set up.
                if (!m_precon)
                {
                    m_precon = CreatePrecon(m_locToGloMap);
                    m_precon->BuildPreconditioner();
                }

                Set_Rhs_Magnitude(F_GlobBnd);

                return m_S1Blk;
            }
            else
            {
                // For the multi-level iterative solver, always use the rhs
                // vector value with no weighting
                m_rhs_magnitude = NekConstants::kNekUnsetDouble;

                return m_schurCompl;
            }
        }

        void GlobalLinSysIterativeStaticCond::v_BasisTransform(
                Array<OneD, NekDouble>& pInOut,
                int                     offset)
        {
            m_precon->DoTransformToLowEnergy(pInOut, offset);
        }

        void GlobalLinSysIterativeStaticCond::v_BasisInvTransform(
                Array<OneD, NekDouble>& pInOut)
        {
            m_precon->DoTransformFromLowEnergy(pInOut);
        }

        GlobalLinSysStaticCondSharedPtr GlobalLinSysIterativeStaticCond::
            v_Recurse(
                const GlobalLinSysKey                &mkey,
                const boost::weak_ptr<ExpList>       &pExpList,
                const DNekScalBlkMatSharedPtr         pSchurCompl,
                const DNekScalBlkMatSharedPtr         pBinvD,
                const DNekScalBlkMatSharedPtr         pC,
                const DNekScalBlkMatSharedPtr         pInvD,
                const boost::shared_ptr<AssemblyMap> &l2gMap)
        {
            GlobalLinSysIterativeStaticCondSharedPtr sys = MemoryManager<
                GlobalLinSysIterativeStaticCond>::AllocateSharedPtr(
                    mkey, pExpList, pSchurCompl, pBinvD, pC, pInvD, l2gMap,
                    m_precon);
            sys->Initialise(l2gMap);
            return sys;
        }
    }
}
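
// Illustrative sketch, assuming the usual factory pattern in the library:
// callers obtain this solver through the factory under the names registered
// above, along the lines of
//
//   GlobalLinSysSharedPtr linSys = GetGlobalLinSysFactory().CreateInstance(
//       "IterativeStaticCond", key, expList, locToGloMap);
//
// where key, expList and locToGloMap are assumed to be a suitably
// constructed GlobalLinSysKey, boost::weak_ptr<ExpList> and
// AssemblyMapSharedPtr.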