Nektar++
GlobalLinSysIterativeStaticCond.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File: GlobalLinSysIterativeStaticCond.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: Implementation of a linear solver using single- or
// multi-level static condensation.
//
///////////////////////////////////////////////////////////////////////////////

#include <MultiRegions/GlobalLinSysIterativeStaticCond.h>
#include <LibUtilities/LinearAlgebra/SparseUtils.hpp>

namespace Nektar
{
    namespace MultiRegions
    {
        /**
         * @class GlobalLinSysIterativeStaticCond
         *
         * Solves a linear system iteratively using single- or multi-level
         * static condensation.
         */
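
        /*
         * In outline: the system is condensed onto the element-boundary
         * degrees of freedom, the resulting Schur-complement system is
         * solved with the iterative (conjugate gradient) solver inherited
         * from GlobalLinSysIterative, and the interior unknowns are then
         * recovered element-locally. The algebra is sketched before the
         * first constructor below.
         */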

        /**
         * Registers the class with the Factory.
         */
        std::string GlobalLinSysIterativeStaticCond::className
                = GetGlobalLinSysFactory().RegisterCreatorFunction(
                    "IterativeStaticCond",
                    GlobalLinSysIterativeStaticCond::create,
                    "Iterative static condensation.");

        std::string GlobalLinSysIterativeStaticCond::className2
                = GetGlobalLinSysFactory().RegisterCreatorFunction(
                    "IterativeMultiLevelStaticCond",
                    GlobalLinSysIterativeStaticCond::create,
                    "Iterative multi-level static condensation.");

        std::string GlobalLinSysIterativeStaticCond::storagedef =
            LibUtilities::SessionReader::RegisterDefaultSolverInfo(
                "LocalMatrixStorageStrategy",
                "Sparse");
        std::string GlobalLinSysIterativeStaticCond::storagelookupIds[3] = {
            LibUtilities::SessionReader::RegisterEnumValue(
                "LocalMatrixStorageStrategy",
                "Contiguous",
                MultiRegions::eContiguous),
            LibUtilities::SessionReader::RegisterEnumValue(
                "LocalMatrixStorageStrategy",
                "Non-contiguous",
                MultiRegions::eNonContiguous),
            LibUtilities::SessionReader::RegisterEnumValue(
                "LocalMatrixStorageStrategy",
                "Sparse",
                MultiRegions::eSparse),
        };
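
        // For reference, a session-file fragment selecting this solver and
        // its storage strategy might read as follows (an illustrative
        // sketch; LocalMatrixStorageStrategy is registered above, and the
        // other property follows the usual Nektar++ session conventions):
        //
        //   <SOLVERINFO>
        //     <I PROPERTY="GlobalSysSoln"              VALUE="IterativeStaticCond" />
        //     <I PROPERTY="LocalMatrixStorageStrategy" VALUE="Sparse" />
        //   </SOLVERINFO>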

        /**
         * For a matrix system of the form @f[
         * \left[ \begin{array}{cc}
         * \boldsymbol{A} & \boldsymbol{B}\\
         * \boldsymbol{C} & \boldsymbol{D}
         * \end{array} \right]
         * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
         * \end{array}\right]
         * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
         * \end{array}\right],
         * @f]
         * where @f$\boldsymbol{D}@f$ and
         * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
         * a static condensation system, according to a given local to global
         * mapping. #m_linSys is constructed by AssembleSchurComplement().
         * @param pKey         Associated matrix key.
         * @param pExpList     Expansion list.
         * @param pLocToGloMap Local to global mapping.
         */
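
        /*
         * For reference, a sketch of the standard static-condensation
         * algebra (not code from this file): eliminating
         * @f$\boldsymbol{x_2}@f$ yields the condensed boundary system
         * @f[
         * (\boldsymbol{A} - \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{C})
         * \boldsymbol{x_1} =
         * \boldsymbol{y_1} - \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{y_2},
         * @f]
         * which is the system solved iteratively here; the interior unknowns
         * then follow element-locally from
         * @f[
         * \boldsymbol{x_2} = \boldsymbol{D}^{-1}
         * (\boldsymbol{y_2} - \boldsymbol{C}\boldsymbol{x_1}).
         * @f]
         */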
        GlobalLinSysIterativeStaticCond::GlobalLinSysIterativeStaticCond(
            const GlobalLinSysKey                &pKey,
            const boost::weak_ptr<ExpList>       &pExpList,
            const boost::shared_ptr<AssemblyMap> &pLocToGloMap)
            : GlobalLinSys          (pKey, pExpList, pLocToGloMap),
              GlobalLinSysIterative (pKey, pExpList, pLocToGloMap),
              GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
        {
            ASSERTL1((pKey.GetGlobalSysSolnType() == eIterativeStaticCond) ||
                     (pKey.GetGlobalSysSolnType() ==
                          eIterativeMultiLevelStaticCond),
                     "This constructor is only valid when using static "
                     "condensation");
            ASSERTL1(pKey.GetGlobalSysSolnType()
                        == pLocToGloMap->GetGlobalSysSolnType(),
                     "The local to global map is not set up for the requested "
                     "solution type");
        }

        /**
         * Constructor used when recursing to a coarser level of the
         * multi-level solver, for which the Schur complement and associated
         * block matrices have already been assembled.
         */
        GlobalLinSysIterativeStaticCond::GlobalLinSysIterativeStaticCond(
            const GlobalLinSysKey                &pKey,
            const boost::weak_ptr<ExpList>       &pExpList,
            const DNekScalBlkMatSharedPtr         pSchurCompl,
            const DNekScalBlkMatSharedPtr         pBinvD,
            const DNekScalBlkMatSharedPtr         pC,
            const DNekScalBlkMatSharedPtr         pInvD,
            const boost::shared_ptr<AssemblyMap> &pLocToGloMap,
            const PreconditionerSharedPtr         pPrecon)
            : GlobalLinSys          (pKey, pExpList, pLocToGloMap),
              GlobalLinSysIterative (pKey, pExpList, pLocToGloMap),
              GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
        {
            m_schurCompl = pSchurCompl;
            m_S1Blk      = pSchurCompl;
            m_BinvD      = pBinvD;
            m_C          = pC;
            m_invD       = pInvD;
            m_precon     = pPrecon;
        }


        void GlobalLinSysIterativeStaticCond::v_InitObject()
        {
            MultiRegions::PreconditionerType pType
                = m_locToGloMap->GetPreconType();
            std::string PreconType
                = MultiRegions::PreconditionerTypeMap[pType];
            m_precon = GetPreconFactory().CreateInstance(
                PreconType, GetSharedThisPtr(), m_locToGloMap);

            // Allocate memory for top-level structure
            SetupTopLevel(m_locToGloMap);

            // Set up block matrix systems
            int n, n_exp = m_expList.lock()->GetNumElmts();

            MatrixStorage blkmatStorage = eDIAGONAL;
            const Array<OneD, const unsigned int> &nbdry_size
                    = m_locToGloMap->GetNumLocalBndCoeffsPerPatch();

            m_S1Blk = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nbdry_size, nbdry_size, blkmatStorage);

            // Preserve original matrix in m_S1Blk
            for (n = 0; n < n_exp; ++n)
            {
                DNekScalMatSharedPtr mat = m_schurCompl->GetBlock(n, n);
                m_S1Blk->SetBlock(n, n, mat);
            }

            // Build preconditioner
            m_precon->BuildPreconditioner();

            // Do transform of Schur complement matrix
            for (n = 0; n < n_exp; ++n)
            {
                if (m_linSysKey.GetMatrixType() !=
                        StdRegions::eHybridDGHelmBndLam)
                {
                    DNekScalMatSharedPtr mat = m_S1Blk->GetBlock(n, n);
                    DNekScalMatSharedPtr t = m_precon->TransformedSchurCompl(
                        m_expList.lock()->GetOffset_Elmt_Id(n), mat);
                    m_schurCompl->SetBlock(n, n, t);
                }
            }

            // Construct this level
            Initialise(m_locToGloMap);
        }

        /**
         * Destructor.
         */
        GlobalLinSysIterativeStaticCond::~GlobalLinSysIterativeStaticCond()
        {
        }

        DNekScalBlkMatSharedPtr GlobalLinSysIterativeStaticCond::
            v_GetStaticCondBlock(unsigned int n)
        {
            DNekScalBlkMatSharedPtr schurComplBlock;
            int scLevel = m_locToGloMap->GetStaticCondLevel();
            DNekScalBlkMatSharedPtr sc = scLevel == 0 ? m_S1Blk : m_schurCompl;
            DNekScalMatSharedPtr localMat = sc->GetBlock(n, n);
            unsigned int nbdry = localMat->GetRows();
            unsigned int nblks = 1;
            unsigned int esize[1] = {nbdry};

            schurComplBlock = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nblks, nblks, esize, esize);
            schurComplBlock->SetBlock(0, 0, localMat);

            return schurComplBlock;
        }

        /**
         * Assemble the Schur complement matrix from the block matrices stored
         * in #m_blkMatrices and the given local to global mapping information.
         * @param pLocToGloMap Local to global mapping information.
         */
        void GlobalLinSysIterativeStaticCond::v_AssembleSchurComplement(
            const AssemblyMapSharedPtr pLocToGloMap)
        {
            int i, j, n, cnt, gid1, gid2;
            NekDouble sign1, sign2;

            bool doGlobalOp = m_expList.lock()->GetGlobalOptParam()->
                DoGlobalMatOp(m_linSysKey.GetMatrixType());

            // Set up unique map
            v_UniqueMap();

            // Build the preconditioner again if we are in multi-level static
            // condensation (a bit of a hack)
            if (m_linSysKey.GetGlobalSysSolnType() ==
                    eIterativeMultiLevelStaticCond)
            {
                MultiRegions::PreconditionerType pType
                    = m_locToGloMap->GetPreconType();
                std::string PreconType
                    = MultiRegions::PreconditionerTypeMap[pType];
                m_precon = GetPreconFactory().CreateInstance(
                    PreconType, GetSharedThisPtr(), m_locToGloMap);
                m_precon->BuildPreconditioner();
            }

            if (!doGlobalOp)
            {
                PrepareLocalSchurComplement();
                return;
            }

            int nBndDofs  = pLocToGloMap->GetNumGlobalBndCoeffs();
            int NumDirBCs = pLocToGloMap->GetNumGlobalDirBndCoeffs();
            unsigned int rows = nBndDofs - NumDirBCs;
            unsigned int cols = nBndDofs - NumDirBCs;

            // COO sparse storage to assist in assembly
            COOMatType gmat_coo;

            // Get the matrix storage structure
            // (whether to store only one triangular part, if symmetric)
            MatrixStorage matStorage = eFULL;

            // Assemble globally
            DNekScalMatSharedPtr loc_mat;
            int loc_lda;
            for (n = cnt = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
            {
                loc_mat = m_schurCompl->GetBlock(n, n);
                loc_lda = loc_mat->GetRows();

                // Set up matrix
                for (i = 0; i < loc_lda; ++i)
                {
                    gid1  = pLocToGloMap->GetLocalToGlobalBndMap (cnt + i)
                                - NumDirBCs;
                    sign1 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + i);

                    if (gid1 >= 0)
                    {
                        for (j = 0; j < loc_lda; ++j)
                        {
                            gid2  = pLocToGloMap->GetLocalToGlobalBndMap(cnt+j)
                                        - NumDirBCs;
                            sign2 = pLocToGloMap->GetLocalToGlobalBndSign(cnt+j);

                            if (gid2 >= 0)
                            {
                                gmat_coo[std::make_pair(gid1, gid2)] +=
                                    sign1*sign2*(*loc_mat)(i, j);
                            }
                        }
                    }
                }
                cnt += loc_lda;
            }
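
            // At this point gmat_coo holds the assembled boundary system in
            // coordinate (COO) form: each elemental Schur block S_e has been
            // scattered as M(gid1, gid2) += sign1*sign2*S_e(i, j), with
            // Dirichlet rows and columns excluded. Convert it to
            // block-coordinate (BCO) storage with unit block size before
            // wrapping it in the sparse block-diagonal matrix class.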
            DNekSmvBsrDiagBlkMat::SparseStorageSharedPtrVector
                sparseStorage (1);

            BCOMatType partMat;
            convertCooToBco(rows, cols, 1, gmat_coo, partMat);

            sparseStorage[0] =
                MemoryManager<DNekSmvBsrDiagBlkMat::StorageType>::
                    AllocateSharedPtr(rows, cols, 1, partMat, matStorage);

            // Create block diagonal matrix
            m_sparseSchurCompl = MemoryManager<DNekSmvBsrDiagBlkMat>::
                AllocateSharedPtr(sparseStorage);
        }


        /**
         * Populates the sparse block-diagonal Schur complement matrix from
         * the block matrices stored in #m_blkMatrices.
         */
        void GlobalLinSysIterativeStaticCond::PrepareLocalSchurComplement()
        {
            LocalMatrixStorageStrategy storageStrategy =
                m_expList.lock()->GetSession()->
                    GetSolverInfoAsEnum<LocalMatrixStorageStrategy>(
                        "LocalMatrixStorageStrategy");

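            // The three strategies trade copying against memory layout:
            // Contiguous copies every dense block into one contiguous buffer
            // (m_storage); Non-contiguous keeps pointers into each block's
            // own storage, avoiding the copy; Sparse repackages the blocks
            // as a block-diagonal BSR sparse matrix (m_sparseSchurCompl).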
            switch (storageStrategy)
            {
                case MultiRegions::eContiguous:
                case MultiRegions::eNonContiguous:
                {
                    size_t storageSize = 0;
                    int nBlk = m_schurCompl->GetNumberOfBlockRows();

                    m_scale = Array<OneD, NekDouble>    (nBlk, 1.0);
                    m_rows  = Array<OneD, unsigned int> (nBlk, 0U);

                    // Determine storage requirements for dense blocks.
                    for (int i = 0; i < nBlk; ++i)
                    {
                        m_rows[i]    = m_schurCompl->GetBlock(i, i)->GetRows();
                        m_scale[i]   = m_schurCompl->GetBlock(i, i)->Scale();
                        storageSize += m_rows[i] * m_rows[i];
                    }

                    // Assemble dense storage blocks.
                    DNekScalMatSharedPtr loc_mat;
                    m_denseBlocks.resize(nBlk);
                    double *ptr = 0;

                    if (MultiRegions::eContiguous == storageStrategy)
                    {
                        m_storage.resize(storageSize);
                        ptr = &m_storage[0];
                    }

                    for (int n = 0; n < nBlk; ++n)
                    {
                        loc_mat = m_schurCompl->GetBlock(n, n);

                        if (MultiRegions::eContiguous == storageStrategy)
                        {
                            int loc_lda   = loc_mat->GetRows();
                            int blockSize = loc_lda * loc_lda;
                            m_denseBlocks[n] = ptr;

                            // Copy the block into the contiguous buffer in
                            // column-major order, ready for BLAS.
                            for (int i = 0; i < loc_lda; ++i)
                            {
                                for (int j = 0; j < loc_lda; ++j)
                                {
                                    ptr[j*loc_lda+i] = (*loc_mat)(i, j);
                                }
                            }
                            ptr += blockSize;

                            // The copy is owned here, so release the
                            // original block from the manager.
                            v_DropStaticCondBlock(
                                m_expList.lock()->GetOffset_Elmt_Id(n));
                        }
                        else
                        {
                            m_denseBlocks[n] = loc_mat->GetRawPtr();
                        }
                    }
                    break;
                }
                case MultiRegions::eSparse:
                {
                    DNekScalMatSharedPtr loc_mat;
                    int loc_lda;
                    int blockSize = 0;

                    // First run through to split the set of local matrices
                    // into partitions of fixed block size, and count the
                    // number of local matrices that belong to each partition.
                    std::vector<std::pair<int,int> > partitions;
                    for (int n = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
                    {
                        loc_mat = m_schurCompl->GetBlock(n, n);
                        loc_lda = loc_mat->GetRows();

                        ASSERTL1(loc_lda > 0,
                                 boost::lexical_cast<std::string>(n) + "-th "
                                 "matrix block in Schur complement has "
                                 "rank 0!");

                        if (blockSize == loc_lda)
                        {
                            partitions[partitions.size()-1].first++;
                        }
                        else
                        {
                            blockSize = loc_lda;
                            partitions.push_back(make_pair(1, loc_lda));
                        }
                    }
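
                    // For example (illustrative): consecutive block ranks
                    // {5, 5, 5, 7, 7} produce partitions {(3, 5), (2, 7)},
                    // i.e. a (count, rank) pair for each run of equal-rank
                    // blocks.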

                    MatrixStorage matStorage = eFULL;

                    // Create a vector of sparse storage holders
                    DNekSmvBsrDiagBlkMat::SparseStorageSharedPtrVector
                        sparseStorage (partitions.size());

                    for (int part = 0, n = 0; part < partitions.size(); ++part)
                    {
                        BCOMatType partMat;

                        for (int k = 0; k < partitions[part].first; ++k, ++n)
                        {
                            loc_mat = m_schurCompl->GetBlock(n, n);
                            loc_lda = loc_mat->GetRows();

                            ASSERTL1(loc_lda == partitions[part].second,
                                     boost::lexical_cast<std::string>(n) + "-th"
                                     " matrix block in Schur complement has "
                                     "unexpected rank");

                            NekDouble scale = loc_mat->Scale();
                            if (fabs(scale-1.0) > NekConstants::kNekZeroTol)
                            {
                                Array<OneD, NekDouble> matarray(loc_lda*loc_lda);
                                Vmath::Smul(loc_lda*loc_lda, scale,
                                            loc_mat->GetRawPtr(), 1,
                                            &matarray[0], 1);
                                partMat[make_pair(k, k)] = BCOEntryType(matarray);
                            }
                            else // scale factor is 1.0
                            {
                                partMat[make_pair(k, k)] = BCOEntryType(
                                    loc_lda*loc_lda, loc_mat->GetRawPtr());
                            }

                            v_DropStaticCondBlock(
                                m_expList.lock()->GetOffset_Elmt_Id(n));
                        }

                        sparseStorage[part] =
                            MemoryManager<DNekSmvBsrDiagBlkMat::StorageType>::
                                AllocateSharedPtr(
                                    partitions[part].first,
                                    partitions[part].first,
                                    partitions[part].second,
                                    partMat, matStorage);
                    }

                    // Create block diagonal matrix
                    m_sparseSchurCompl = MemoryManager<DNekSmvBsrDiagBlkMat>::
                        AllocateSharedPtr(sparseStorage);

                    break;
                }
                default:
                    NEKERROR(ErrorUtil::efatal,
                             "Solver info property LocalMatrixStorageStrategy "
                             "takes values Contiguous, Non-contiguous and "
                             "Sparse");
            }
        }

        /**
         * Perform a Schur-complement matrix-vector multiplication.
         */
        void GlobalLinSysIterativeStaticCond::v_DoMatrixMultiply(
            const Array<OneD, NekDouble> &pInput,
                  Array<OneD, NekDouble> &pOutput)
        {
            int nLocal = m_locToGloMap->GetNumLocalBndCoeffs();
            int nDir   = m_locToGloMap->GetNumGlobalDirBndCoeffs();
            bool doGlobalOp = m_expList.lock()->GetGlobalOptParam()->
                DoGlobalMatOp(m_linSysKey.GetMatrixType());

            if (doGlobalOp)
            {
                // Do matrix multiply globally
                Array<OneD, NekDouble> in  = pInput  + nDir;
                Array<OneD, NekDouble> out = pOutput + nDir;

                m_sparseSchurCompl->Multiply(in, out);
                m_locToGloMap->UniversalAssembleBnd(pOutput, nDir);
            }
            else if (m_sparseSchurCompl)
            {
                // Do matrix multiply locally using block-diagonal sparse
                // matrix
                Array<OneD, NekDouble> tmp = m_wsp + nLocal;

                m_locToGloMap->GlobalToLocalBnd(pInput, m_wsp);
                m_sparseSchurCompl->Multiply(m_wsp, tmp);
                m_locToGloMap->AssembleBnd(tmp, pOutput);
            }
            else
            {
                // Do matrix multiply locally, using direct BLAS calls
                m_locToGloMap->GlobalToLocalBnd(pInput, m_wsp);
                int i, cnt;
                Array<OneD, NekDouble> tmpout = m_wsp + nLocal;
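
                // Each elemental dense block is applied with DGEMV:
                //   tmpout[cnt : cnt+rows] = m_scale[i] * A_i *
                //                            m_wsp[cnt : cnt+rows]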
                for (i = cnt = 0; i < m_denseBlocks.size();
                     cnt += m_rows[i], ++i)
                {
                    const int rows = m_rows[i];
                    Blas::Dgemv('N', rows, rows,
                                m_scale[i], m_denseBlocks[i], rows,
                                m_wsp.get()+cnt, 1,
                                0.0, tmpout.get()+cnt, 1);
                }
                m_locToGloMap->AssembleBnd(tmpout, pOutput);
            }
        }

        void GlobalLinSysIterativeStaticCond::v_UniqueMap()
        {
            m_map = m_locToGloMap->GetGlobalToUniversalBndMapUnique();
        }

        DNekScalBlkMatSharedPtr GlobalLinSysIterativeStaticCond::v_PreSolve(
            int                   scLevel,
            NekVector<NekDouble> &F_GlobBnd)
        {
            if (scLevel == 0)
            {
                // When matrices are supplied to the constructor at the top
                // level, the preconditioner is never set up.
                if (!m_precon)
                {
                    MultiRegions::PreconditionerType pType
                        = m_locToGloMap->GetPreconType();
                    std::string PreconType
                        = MultiRegions::PreconditionerTypeMap[pType];
                    m_precon = GetPreconFactory().CreateInstance(
                        PreconType, GetSharedThisPtr(), m_locToGloMap);
                    m_precon->BuildPreconditioner();
                }

                Set_Rhs_Magnitude(F_GlobBnd);
                return m_S1Blk;
            }
            else
            {
                return m_schurCompl;
            }
        }

        void GlobalLinSysIterativeStaticCond::v_BasisTransform(
            Array<OneD, NekDouble> &pInOut,
            int                     offset)
        {
            m_precon->DoTransformToLowEnergy(pInOut, offset);
        }

        void GlobalLinSysIterativeStaticCond::v_BasisInvTransform(
            Array<OneD, NekDouble> &pInOut)
        {
            m_precon->DoTransformFromLowEnergy(pInOut);
        }

        GlobalLinSysStaticCondSharedPtr GlobalLinSysIterativeStaticCond::
            v_Recurse(
                const GlobalLinSysKey                &mkey,
                const boost::weak_ptr<ExpList>       &pExpList,
                const DNekScalBlkMatSharedPtr         pSchurCompl,
                const DNekScalBlkMatSharedPtr         pBinvD,
                const DNekScalBlkMatSharedPtr         pC,
                const DNekScalBlkMatSharedPtr         pInvD,
                const boost::shared_ptr<AssemblyMap> &l2gMap)
        {
            GlobalLinSysIterativeStaticCondSharedPtr sys = MemoryManager<
                GlobalLinSysIterativeStaticCond>::AllocateSharedPtr(
                    mkey, pExpList, pSchurCompl, pBinvD, pC, pInvD, l2gMap,
                    m_precon);
            sys->Initialise(l2gMap);
            return sys;
        }
    }
}