Nektar++
Nektar::MultiRegions::GlobalLinSysStaticCond Class Reference (abstract)

A global linear system. More...

#include <GlobalLinSysStaticCond.h>

Inheritance diagram for Nektar::MultiRegions::GlobalLinSysStaticCond:

Public Member Functions

 GlobalLinSysStaticCond (const GlobalLinSysKey &mkey, const std::weak_ptr< ExpList > &pExpList, const std::shared_ptr< AssemblyMap > &locToGloMap)
 Constructor for full direct matrix solve. More...
 
 ~GlobalLinSysStaticCond () override
 
- Public Member Functions inherited from Nektar::MultiRegions::GlobalLinSys
 GlobalLinSys (const GlobalLinSysKey &pKey, const std::weak_ptr< ExpList > &pExpList, const std::shared_ptr< AssemblyMap > &pLocToGloMap)
 Constructor for full direct matrix solve. More...
 
virtual ~GlobalLinSys ()
 
const GlobalLinSysKey & GetKey (void) const
 Returns the key associated with the system. More...
 
const std::weak_ptr< ExpList > & GetLocMat (void) const
 
void InitObject ()
 
void Initialise (const std::shared_ptr< AssemblyMap > &pLocToGloMap)
 
void Solve (const Array< OneD, const NekDouble > &in, Array< OneD, NekDouble > &out, const AssemblyMapSharedPtr &locToGloMap, const Array< OneD, const NekDouble > &dirForcing=NullNekDouble1DArray)
 Solve the linear system for given input and output vectors using a specified local to global map. More...
 
std::shared_ptr< GlobalLinSys > GetSharedThisPtr ()
 Returns a shared pointer to the current object. More...
 
int GetNumBlocks ()
 
DNekScalMatSharedPtr GetBlock (unsigned int n)
 
void DropBlock (unsigned int n)
 
DNekScalBlkMatSharedPtr GetStaticCondBlock (unsigned int n)
 
void DropStaticCondBlock (unsigned int n)
 
void SolveLinearSystem (const int pNumRows, const Array< OneD, const NekDouble > &pInput, Array< OneD, NekDouble > &pOutput, const AssemblyMapSharedPtr &locToGloMap, const int pNumDir=0)
 Solve the linear system for given input and output vectors. More...
 

Protected Member Functions

virtual void v_PreSolve (int scLevel, Array< OneD, NekDouble > &F_bnd)
 
virtual void v_BasisFwdTransform (Array< OneD, NekDouble > &pInOut)
 
virtual void v_CoeffsBwdTransform (Array< OneD, NekDouble > &pInOut)
 
virtual void v_CoeffsFwdTransform (const Array< OneD, NekDouble > &pInput, Array< OneD, NekDouble > &pOutput)
 
virtual void v_AssembleSchurComplement (std::shared_ptr< AssemblyMap > pLoctoGloMap)
 
int v_GetNumBlocks () override
 Get the number of blocks in this system. More...
 
virtual GlobalLinSysStaticCondSharedPtr v_Recurse (const GlobalLinSysKey &mkey, const std::weak_ptr< ExpList > &pExpList, const DNekScalBlkMatSharedPtr pSchurCompl, const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC, const DNekScalBlkMatSharedPtr pInvD, const std::shared_ptr< AssemblyMap > &locToGloMap)=0
 
void v_Solve (const Array< OneD, const NekDouble > &in, Array< OneD, NekDouble > &out, const AssemblyMapSharedPtr &locToGloMap, const Array< OneD, const NekDouble > &dirForcing=NullNekDouble1DArray) override
 Solve the linear system for given input and output vectors using a specified local to global map. More...
 
void v_InitObject () override
 
void v_Initialise (const std::shared_ptr< AssemblyMap > &locToGloMap) override
 Initialise this object. More...
 
void SetupTopLevel (const std::shared_ptr< AssemblyMap > &locToGloMap)
 Set up the storage for the Schur complement or the top level of the multi-level Schur complement. More...
 
void ConstructNextLevelCondensedSystem (const std::shared_ptr< AssemblyMap > &locToGloMap)
 
- Protected Member Functions inherited from Nektar::MultiRegions::GlobalLinSys
virtual void v_Solve (const Array< OneD, const NekDouble > &in, Array< OneD, NekDouble > &out, const AssemblyMapSharedPtr &locToGloMap, const Array< OneD, const NekDouble > &dirForcing=NullNekDouble1DArray)=0
 Solve a linear system based on mapping. More...
 
virtual void v_SolveLinearSystem (const int pNumRows, const Array< OneD, const NekDouble > &pInput, Array< OneD, NekDouble > &pOutput, const AssemblyMapSharedPtr &locToGloMap, const int pNumDir)=0
 Solve a basic matrix system. More...
 
virtual void v_InitObject ()
 
virtual void v_Initialise (const std::shared_ptr< AssemblyMap > &pLocToGloMap)
 
virtual int v_GetNumBlocks ()
 Get the number of blocks in this system. More...
 
virtual DNekScalMatSharedPtr v_GetBlock (unsigned int n)
 Retrieves the block matrix from the n-th expansion using the matrix key provided by m_linSysKey. More...
 
virtual void v_DropBlock (unsigned int n)
 Releases the local block matrix of the n-th expansion from the NekManager, using the matrix key provided by m_linSysKey. More...
 
virtual DNekScalBlkMatSharedPtr v_GetStaticCondBlock (unsigned int n)
 Retrieves the static condensation block matrices from the n-th expansion using the matrix key provided by m_linSysKey. More...
 
virtual void v_DropStaticCondBlock (unsigned int n)
 Releases the static condensation block matrices of the n-th expansion from the NekManager, using the matrix key provided by m_linSysKey. More...
 
PreconditionerSharedPtr CreatePrecon (AssemblyMapSharedPtr asmMap)
 Create a preconditioner object from the parameters defined in the supplied assembly map. More...
 

Protected Attributes

GlobalLinSysStaticCondSharedPtr m_recursiveSchurCompl
 Schur complement for Direct Static Condensation. More...
 
DNekScalBlkMatSharedPtr m_schurCompl
 Block Schur complement matrix. More...
 
DNekScalBlkMatSharedPtr m_BinvD
 Block \( BD^{-1} \) matrix. More...
 
DNekScalBlkMatSharedPtr m_C
 Block \( C \) matrix. More...
 
DNekScalBlkMatSharedPtr m_invD
 Block \( D^{-1} \) matrix. More...
 
std::weak_ptr< AssemblyMap > m_locToGloMap
 Local to global map. More...
 
Array< OneD, NekDouble > m_wsp
 Workspace array for matrix multiplication. More...
 
Array< OneD, const NekDouble > m_sign
 
- Protected Attributes inherited from Nektar::MultiRegions::GlobalLinSys
const GlobalLinSysKey m_linSysKey
 Key associated with this linear system. More...
 
const std::weak_ptr< ExpList > m_expList
 Local Matrix System. More...
 
const std::map< int, RobinBCInfoSharedPtr > m_robinBCInfo
 Robin boundary info. More...
 
bool m_verbose
 

Detailed Description

A global linear system.

Solves a linear system using single- or multi-level static condensation.
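
In outline: the unknowns are split into boundary and interior degrees of freedom, the interior unknowns are eliminated to leave a smaller Schur complement system for the boundary unknowns, and the interior values are then recovered by back-substitution. As a minimal illustration of that idea (this is not the Nektar++ API; the scalar "blocks" and all names below are invented for the sketch), a single condensation level amounts to:

#include <cstdio>

int main()
{
    // Block system [A B; C D][x1; x2] = [y1; y2] with scalar "blocks";
    // x1 stands in for the boundary unknowns, x2 for the interior ones.
    double A = 4.0, B = 1.0, C = 2.0, D = 5.0;
    double y1 = 6.0, y2 = 7.0;

    // Condense: form D^{-1}, BD^{-1}, the Schur complement A - BD^{-1}C
    // and the condensed right-hand side y1 - BD^{-1}y2.
    double invD  = 1.0 / D;
    double BinvD = B * invD;
    double schur = A - BinvD * C;
    double f     = y1 - BinvD * y2;

    // Solve the (smaller) boundary system, then back-substitute for
    // the interior unknowns.
    double x1 = f / schur;
    double x2 = invD * (y2 - C * x1);

    printf("x1 = %g, x2 = %g\n", x1, x2); // x1 = 1.27778, x2 = 0.888889
    return 0;
}

The multi-level variant applies the same elimination recursively to the boundary system itself, which is what ConstructNextLevelCondensedSystem() and v_Recurse() implement below.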

Definition at line 51 of file GlobalLinSysStaticCond.h.

Constructor & Destructor Documentation

◆ GlobalLinSysStaticCond()

Nektar::MultiRegions::GlobalLinSysStaticCond::GlobalLinSysStaticCond ( const GlobalLinSysKey &  pKey,
const std::weak_ptr< ExpList > &  pExpList,
const std::shared_ptr< AssemblyMap > &  pLocToGloMap 
)

Constructor for full direct matrix solve.

For a matrix system of the form

\[ \left[ \begin{array}{cc} \boldsymbol{A} & \boldsymbol{B}\\ \boldsymbol{C} & \boldsymbol{D} \end{array} \right] \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2} \end{array}\right] = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2} \end{array}\right], \]

where \(\boldsymbol{D}\) and \((\boldsymbol{A-BD^{-1}C})\) are invertible, store and assemble a static condensation system, according to a given local to global mapping. #m_linSys is constructed by AssembleSchurComplement().
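
Eliminating \(\boldsymbol{x_2}\) makes the condensation explicit: the stored blocks m_schurCompl, m_BinvD, m_C and m_invD correspond to the condensed boundary system and the interior back-substitution

\[ \left(\boldsymbol{A}-\boldsymbol{BD^{-1}C}\right)\boldsymbol{x_1} = \boldsymbol{y_1} - \boldsymbol{BD^{-1}}\boldsymbol{y_2}, \qquad \boldsymbol{x_2} = \boldsymbol{D^{-1}}\left(\boldsymbol{y_2} - \boldsymbol{C}\boldsymbol{x_1}\right). \]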

Parameters
    pKey	Associated matrix key.
    pExpList	Local matrix system (expansion list).
    pLocToGloMap	Local to global mapping.

Definition at line 72 of file GlobalLinSysStaticCond.cpp.

    : GlobalLinSys(pKey, pExpList, pLocToGloMap), m_locToGloMap(pLocToGloMap)
{
}

◆ ~GlobalLinSysStaticCond()

Nektar::MultiRegions::GlobalLinSysStaticCond::~GlobalLinSysStaticCond ( )
override

Definition at line 91 of file GlobalLinSysStaticCond.cpp.

{
}

Member Function Documentation

◆ ConstructNextLevelCondensedSystem()

void Nektar::MultiRegions::GlobalLinSysStaticCond::ConstructNextLevelCondensedSystem ( const std::shared_ptr< AssemblyMap > &  pLocToGloMap)
protected

Definition at line 311 of file GlobalLinSysStaticCond.cpp.

{
    int i, j, n, cnt;
    DNekScalBlkMatSharedPtr blkMatrices[4];

    // Create temporary matrices within an inner-local scope to ensure
    // any references to the intermediate storage are lost before
    // the recursive step, rather than at the end of the routine.
    // This allows the Schur complement matrix from this level to be
    // disposed of in the next level after use without being retained
    // due to lingering shared pointers.
    {
        const Array<OneD, const unsigned int> &nBndDofsPerPatch =
            pLocToGloMap->GetNumLocalBndCoeffsPerPatch();
        const Array<OneD, const unsigned int> &nIntDofsPerPatch =
            pLocToGloMap->GetNumLocalIntCoeffsPerPatch();

        // STEP 1:
        // Based upon the Schur complement of the current level we
        // will substructure this matrix in the form
        //      --     --
        //     | A     B |
        //     | C     D |
        //      --     --
        // All matrices A, B, C and D are (diagonal) block matrices.
        // However, as a start we will use an array of DNekMatrices as
        // it is too hard to change the individual entries of a
        // DNekScalBlkMatSharedPtr.

        // In addition, we will also try to ensure that the memory of
        // the block matrices will be contiguous. This will probably
        // enhance the efficiency.
        // - Calculate the total number of entries in the block matrices
        int nPatches = pLocToGloMap->GetNumPatches();
        int nEntriesA = 0;
        int nEntriesB = 0;
        int nEntriesC = 0;
        int nEntriesD = 0;

        for (i = 0; i < nPatches; i++)
        {
            nEntriesA += nBndDofsPerPatch[i] * nBndDofsPerPatch[i];
            nEntriesB += nBndDofsPerPatch[i] * nIntDofsPerPatch[i];
            nEntriesC += nIntDofsPerPatch[i] * nBndDofsPerPatch[i];
            nEntriesD += nIntDofsPerPatch[i] * nIntDofsPerPatch[i];
        }

        // Now create the DNekMatrices and link them to the memory
        // allocated above
        Array<OneD, DNekMatSharedPtr> substructuredMat[4] = {
            Array<OneD, DNekMatSharedPtr>(nPatches),  // Matrix A
            Array<OneD, DNekMatSharedPtr>(nPatches),  // Matrix B
            Array<OneD, DNekMatSharedPtr>(nPatches),  // Matrix C
            Array<OneD, DNekMatSharedPtr>(nPatches)}; // Matrix D

        // Initialise storage for the matrices. We do this separately
        // for each matrix so the matrices may be independently
        // deallocated when no longer required.
        Array<OneD, NekDouble> storageA(nEntriesA, 0.0);
        Array<OneD, NekDouble> storageB(nEntriesB, 0.0);
        Array<OneD, NekDouble> storageC(nEntriesC, 0.0);
        Array<OneD, NekDouble> storageD(nEntriesD, 0.0);

        Array<OneD, NekDouble> tmparray;
        PointerWrapper wType = eWrapper;
        int cntA = 0;
        int cntB = 0;
        int cntC = 0;
        int cntD = 0;

        // Use symmetric storage for invD if possible
        MatrixStorage storageTypeD = eFULL;
        if ((m_linSysKey.GetMatrixType() == StdRegions::eMass) ||
            (m_linSysKey.GetMatrixType() == StdRegions::eLaplacian) ||
            (m_linSysKey.GetMatrixType() == StdRegions::eHelmholtz))
        {
            storageTypeD = eSYMMETRIC;
        }

        for (i = 0; i < nPatches; i++)
        {
            // Matrix A
            tmparray = storageA + cntA;
            substructuredMat[0][i] = MemoryManager<DNekMat>::AllocateSharedPtr(
                nBndDofsPerPatch[i], nBndDofsPerPatch[i], tmparray, wType);
            // Matrix B
            tmparray = storageB + cntB;
            substructuredMat[1][i] = MemoryManager<DNekMat>::AllocateSharedPtr(
                nBndDofsPerPatch[i], nIntDofsPerPatch[i], tmparray, wType);
            // Matrix C
            tmparray = storageC + cntC;
            substructuredMat[2][i] = MemoryManager<DNekMat>::AllocateSharedPtr(
                nIntDofsPerPatch[i], nBndDofsPerPatch[i], tmparray, wType);
            // Matrix D
            tmparray = storageD + cntD;
            substructuredMat[3][i] = MemoryManager<DNekMat>::AllocateSharedPtr(
                nIntDofsPerPatch[i], nIntDofsPerPatch[i], tmparray, wType,
                storageTypeD);

            cntA += nBndDofsPerPatch[i] * nBndDofsPerPatch[i];
            cntB += nBndDofsPerPatch[i] * nIntDofsPerPatch[i];
            cntC += nIntDofsPerPatch[i] * nBndDofsPerPatch[i];
            cntD += nIntDofsPerPatch[i] * nIntDofsPerPatch[i];
        }

        // Then, project the Schur complement onto
        // the substructured matrices of the next level
        DNekScalBlkMatSharedPtr SchurCompl = m_schurCompl;
        DNekScalMatSharedPtr schurComplSubMat;
        int schurComplSubMatnRows;
        Array<OneD, const int> patchId, dofId;
        Array<OneD, const unsigned int> isBndDof;
        Array<OneD, const NekDouble> sign;

        for (n = cnt = 0; n < SchurCompl->GetNumberOfBlockRows(); ++n)
        {
            schurComplSubMat = SchurCompl->GetBlock(n, n);
            schurComplSubMatnRows = schurComplSubMat->GetRows();

            patchId =
                pLocToGloMap->GetPatchMapFromPrevLevel()->GetPatchId() + cnt;
            dofId = pLocToGloMap->GetPatchMapFromPrevLevel()->GetDofId() + cnt;
            isBndDof =
                pLocToGloMap->GetPatchMapFromPrevLevel()->IsBndDof() + cnt;
            sign = pLocToGloMap->GetPatchMapFromPrevLevel()->GetSign() + cnt;

            // Set up matrix
            for (i = 0; i < schurComplSubMatnRows; ++i)
            {
                int pId = patchId[i];
                Array<OneD, NekDouble> subMat0 =
                    substructuredMat[0][pId]->GetPtr();
                Array<OneD, NekDouble> subMat1 =
                    substructuredMat[1][patchId[i]]->GetPtr();
                Array<OneD, NekDouble> subMat2 =
                    substructuredMat[2][patchId[i]]->GetPtr();
                DNekMatSharedPtr subMat3 = substructuredMat[3][patchId[i]];
                int subMat0rows = substructuredMat[0][pId]->GetRows();
                int subMat1rows = substructuredMat[1][pId]->GetRows();
                int subMat2rows = substructuredMat[2][pId]->GetRows();

                if (isBndDof[i])
                {
                    for (j = 0; j < schurComplSubMatnRows; ++j)
                    {
                        ASSERTL0(patchId[i] == patchId[j],
                                 "These values should be equal");

                        if (isBndDof[j])
                        {
                            subMat0[dofId[i] + dofId[j] * subMat0rows] +=
                                sign[i] * sign[j] * (*schurComplSubMat)(i, j);
                        }
                        else
                        {
                            subMat1[dofId[i] + dofId[j] * subMat1rows] +=
                                sign[i] * sign[j] * (*schurComplSubMat)(i, j);
                        }
                    }
                }
                else
                {
                    for (j = 0; j < schurComplSubMatnRows; ++j)
                    {
                        ASSERTL0(patchId[i] == patchId[j],
                                 "These values should be equal");

                        if (isBndDof[j])
                        {
                            subMat2[dofId[i] + dofId[j] * subMat2rows] +=
                                sign[i] * sign[j] * (*schurComplSubMat)(i, j);
                        }
                        else
                        {
                            if (storageTypeD == eSYMMETRIC)
                            {
                                if (dofId[i] <= dofId[j])
                                {
                                    (*subMat3)(dofId[i], dofId[j]) +=
                                        sign[i] * sign[j] *
                                        (*schurComplSubMat)(i, j);
                                }
                            }
                            else
                            {
                                (*subMat3)(dofId[i], dofId[j]) +=
                                    sign[i] * sign[j] *
                                    (*schurComplSubMat)(i, j);
                            }
                        }
                    }
                }
            }
            cnt += schurComplSubMatnRows;
        }

        // STEP 2: condense the system
        // This can be done elementally (i.e. patch per patch)
        for (i = 0; i < nPatches; i++)
        {
            if (nIntDofsPerPatch[i])
            {
                Array<OneD, NekDouble> subMat0 =
                    substructuredMat[0][i]->GetPtr();
                Array<OneD, NekDouble> subMat1 =
                    substructuredMat[1][i]->GetPtr();
                Array<OneD, NekDouble> subMat2 =
                    substructuredMat[2][i]->GetPtr();
                int subMat0rows = substructuredMat[0][i]->GetRows();
                int subMat1rows = substructuredMat[1][i]->GetRows();
                int subMat2rows = substructuredMat[2][i]->GetRows();
                int subMat2cols = substructuredMat[2][i]->GetColumns();

                // 1. D -> InvD
                substructuredMat[3][i]->Invert();
                // 2. B -> BInvD
                (*substructuredMat[1][i]) =
                    (*substructuredMat[1][i]) * (*substructuredMat[3][i]);
                // 3. A -> A - BInvD*C (= Schur complement)
                // (*substructuredMat[0][i]) = (*substructuredMat[0][i]) -
                //     (*substructuredMat[1][i]) * (*substructuredMat[2][i]);
                // Note: faster to use BLAS directly
                if (subMat1rows && subMat2cols)
                {
                    Blas::Dgemm('N', 'N', subMat1rows, subMat2cols, subMat2rows,
                                -1.0, &subMat1[0], subMat1rows, &subMat2[0],
                                subMat2rows, 1.0, &subMat0[0], subMat0rows);
                }
            }
        }

        // STEP 3: fill the block matrices. However, do note that we
        // first have to convert them to a DNekScalMat in order to be
        // compatible with the first level of static condensation
        const Array<OneD, const unsigned int> &nbdry_size =
            pLocToGloMap->GetNumLocalBndCoeffsPerPatch();
        const Array<OneD, const unsigned int> &nint_size =
            pLocToGloMap->GetNumLocalIntCoeffsPerPatch();
        MatrixStorage blkmatStorage = eDIAGONAL;

        blkMatrices[0] = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
            nbdry_size, nbdry_size, blkmatStorage);
        blkMatrices[1] = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
            nbdry_size, nint_size, blkmatStorage);
        blkMatrices[2] = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
            nint_size, nbdry_size, blkmatStorage);
        blkMatrices[3] = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
            nint_size, nint_size, blkmatStorage);

        DNekScalMatSharedPtr tmpscalmat;
        for (i = 0; i < nPatches; i++)
        {
            for (j = 0; j < 4; j++)
            {
                tmpscalmat = MemoryManager<DNekScalMat>::AllocateSharedPtr(
                    1.0, substructuredMat[j][i]);
                blkMatrices[j]->SetBlock(i, i, tmpscalmat);
            }
        }
    }

    // We've finished with the Schur complement matrix passed to this
    // level, so return the memory to the system. The Schur complement
    // matrix need only be retained at the last level. Save the other
    // matrices at this level though.
    m_schurCompl.reset();

    m_recursiveSchurCompl =
        v_Recurse(m_linSysKey, m_expList, blkMatrices[0], blkMatrices[1],
                  blkMatrices[2], blkMatrices[3], pLocToGloMap);
}

References Nektar::MemoryManager< DataType >::AllocateSharedPtr(), ASSERTL0, Blas::Dgemm(), Nektar::eDIAGONAL, Nektar::eFULL, Nektar::StdRegions::eHelmholtz, Nektar::StdRegions::eLaplacian, Nektar::StdRegions::eMass, Nektar::eSYMMETRIC, Nektar::eWrapper, Nektar::MultiRegions::GlobalMatrixKey::GetMatrixType(), Nektar::MultiRegions::GlobalLinSys::m_expList, Nektar::MultiRegions::GlobalLinSys::m_linSysKey, m_recursiveSchurCompl, m_schurCompl, sign, and v_Recurse().

Referenced by v_Initialise().

◆ SetupTopLevel()

void Nektar::MultiRegions::GlobalLinSysStaticCond::SetupTopLevel ( const std::shared_ptr< AssemblyMap > &  pLocToGloMap)
protected

Set up the storage for the Schur complement or the top level of the multi-level Schur complement.

For the first level in multi-level static condensation, or the only level in the case of single-level static condensation, allocate the condensed matrices and populate them with the local matrices retrieved from the expansion list.

Parameters
    pLocToGloMap	Local to global mapping.
Definition at line 266 of file GlobalLinSysStaticCond.cpp.

{
    int n;
    int n_exp = m_expList.lock()->GetNumElmts();

    const Array<OneD, const unsigned int> &nbdry_size =
        pLocToGloMap->GetNumLocalBndCoeffsPerPatch();
    const Array<OneD, const unsigned int> &nint_size =
        pLocToGloMap->GetNumLocalIntCoeffsPerPatch();

    // Setup Block Matrix systems
    MatrixStorage blkmatStorage = eDIAGONAL;
    m_schurCompl = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
        nbdry_size, nbdry_size, blkmatStorage);
    m_BinvD = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
        nbdry_size, nint_size, blkmatStorage);
    m_C = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
        nint_size, nbdry_size, blkmatStorage);
    m_invD = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
        nint_size, nint_size, blkmatStorage);

    for (n = 0; n < n_exp; ++n)
    {
        if (m_linSysKey.GetMatrixType() == StdRegions::eHybridDGHelmBndLam)
        {
            DNekScalMatSharedPtr loc_mat = GlobalLinSys::v_GetBlock(n);
            m_schurCompl->SetBlock(n, n, loc_mat);
        }
        else
        {
            DNekScalBlkMatSharedPtr loc_schur =
                GlobalLinSys::v_GetStaticCondBlock(n);
            DNekScalMatSharedPtr t;
            m_schurCompl->SetBlock(n, n, t = loc_schur->GetBlock(0, 0));
            m_BinvD->SetBlock(n, n, t = loc_schur->GetBlock(0, 1));
            m_C->SetBlock(n, n, t = loc_schur->GetBlock(1, 0));
            m_invD->SetBlock(n, n, t = loc_schur->GetBlock(1, 1));
        }
    }
}

References Nektar::MemoryManager< DataType >::AllocateSharedPtr(), Nektar::eDIAGONAL, Nektar::StdRegions::eHybridDGHelmBndLam, Nektar::MultiRegions::GlobalMatrixKey::GetMatrixType(), m_BinvD, m_C, Nektar::MultiRegions::GlobalLinSys::m_expList, m_invD, Nektar::MultiRegions::GlobalLinSys::m_linSysKey, m_schurCompl, Nektar::MultiRegions::GlobalLinSys::v_GetBlock(), and Nektar::MultiRegions::GlobalLinSys::v_GetStaticCondBlock().

Referenced by Nektar::MultiRegions::GlobalLinSysIterativeStaticCond::v_InitObject(), Nektar::MultiRegions::GlobalLinSysPETScStaticCond::v_InitObject(), and v_InitObject().

◆ v_AssembleSchurComplement()

virtual void Nektar::MultiRegions::GlobalLinSysStaticCond::v_AssembleSchurComplement ( std::shared_ptr< AssemblyMap >  pLoctoGloMap)
inline protected virtual

◆ v_BasisFwdTransform()

virtual void Nektar::MultiRegions::GlobalLinSysStaticCond::v_BasisFwdTransform ( Array< OneD, NekDouble > &  pInOut)
inline protected virtual

◆ v_CoeffsBwdTransform()

virtual void Nektar::MultiRegions::GlobalLinSysStaticCond::v_CoeffsBwdTransform ( Array< OneD, NekDouble > &  pInOut)
inline protected virtual

◆ v_CoeffsFwdTransform()

virtual void Nektar::MultiRegions::GlobalLinSysStaticCond::v_CoeffsFwdTransform ( const Array< OneD, NekDouble > &  pInput,
Array< OneD, NekDouble > &  pOutput 
)
inline protected virtual

◆ v_GetNumBlocks()

int Nektar::MultiRegions::GlobalLinSysStaticCond::v_GetNumBlocks ( )
override protected virtual

Get the number of blocks in this system.

At the top level this corresponds to the number of elements in the expansion list.

Reimplemented from Nektar::MultiRegions::GlobalLinSys.

Definition at line 254 of file GlobalLinSysStaticCond.cpp.

{
    return m_schurCompl->GetNumberOfBlockRows();
}

References m_schurCompl.

◆ v_Initialise()

void Nektar::MultiRegions::GlobalLinSysStaticCond::v_Initialise ( const std::shared_ptr< AssemblyMap > &  pLocToGloMap)
override protected virtual

Initialise this object.

If at the last level of recursion (or the only level in the case of single-level static condensation), assemble the Schur complement. For other levels, in the case of multi-level static condensation, the next level of the condensed system is computed.

Parameters
    pLocToGloMap	Local to global mapping.

Reimplemented from Nektar::MultiRegions::GlobalLinSys.

Definition at line 236 of file GlobalLinSysStaticCond.cpp.

{
    int nLocalBnd = m_locToGloMap.lock()->GetNumLocalBndCoeffs();
    int nIntDofs = m_locToGloMap.lock()->GetNumLocalCoeffs() - nLocalBnd;
    m_wsp = Array<OneD, NekDouble>(4 * nLocalBnd + nIntDofs, 0.0);

    if (pLocToGloMap->AtLastLevel())
    {
        v_AssembleSchurComplement(pLocToGloMap);
    }
    else
    {
        ConstructNextLevelCondensedSystem(
            pLocToGloMap->GetNextLevelLocalToGlobalMap());
    }
}

References ConstructNextLevelCondensedSystem(), m_locToGloMap, m_wsp, and v_AssembleSchurComplement().

◆ v_InitObject()

void Nektar::MultiRegions::GlobalLinSysStaticCond::v_InitObject ( )
override protected virtual

Reimplemented from Nektar::MultiRegions::GlobalLinSys.

Definition at line 79 of file GlobalLinSysStaticCond.cpp.

{
    // Allocate memory for top-level structure
    SetupTopLevel(m_locToGloMap.lock());

    // Construct this level
    Initialise(m_locToGloMap.lock());
}

References Nektar::MultiRegions::GlobalLinSys::Initialise(), m_locToGloMap, and SetupTopLevel().

◆ v_PreSolve()

virtual void Nektar::MultiRegions::GlobalLinSysStaticCond::v_PreSolve ( int  scLevel,
Array< OneD, NekDouble > &  F_bnd 
)
inline protected virtual

◆ v_Recurse()

virtual GlobalLinSysStaticCondSharedPtr Nektar::MultiRegions::GlobalLinSysStaticCond::v_Recurse ( const GlobalLinSysKey &  mkey,
const std::weak_ptr< ExpList > &  pExpList,
const DNekScalBlkMatSharedPtr  pSchurCompl,
const DNekScalBlkMatSharedPtr  pBinvD,
const DNekScalBlkMatSharedPtr  pC,
const DNekScalBlkMatSharedPtr  pInvD,
const std::shared_ptr< AssemblyMap > &  locToGloMap 
)
protected pure virtual

◆ v_Solve()

void Nektar::MultiRegions::GlobalLinSysStaticCond::v_Solve ( const Array< OneD, const NekDouble > &  pLocInput,
Array< OneD, NekDouble > &  pLocOutput,
const AssemblyMapSharedPtr &  pLocToGloMap,
const Array< OneD, const NekDouble > &  dirForcing = NullNekDouble1DArray 
)
override protected virtual

Solve the linear system for given input and output vectors using a specified local to global map.

Implements Nektar::MultiRegions::GlobalLinSys.

Definition at line 98 of file GlobalLinSysStaticCond.cpp.

{
    ASSERTL1(dirForcing.size() == 0,
             "GlobalLinSysStaticCond: Not setup for dirForcing");

    bool atLastLevel = pLocToGloMap->AtLastLevel();
    int scLevel = pLocToGloMap->GetStaticCondLevel();

    int nGlobDofs = pLocToGloMap->GetNumGlobalCoeffs();
    int nLocBndDofs = pLocToGloMap->GetNumLocalBndCoeffs();
    int nGlobBndDofs = pLocToGloMap->GetNumGlobalBndCoeffs();
    int nDirBndDofs = pLocToGloMap->GetNumGlobalDirBndCoeffs();
    int nIntDofs = nGlobDofs - nGlobBndDofs;

    if ((nGlobDofs - nDirBndDofs) == 0)
    {
        return; // nothing to solve
    }

    Array<OneD, NekDouble> F_bnd, F_bnd1, F_int, V_bnd;
    Array<OneD, NekDouble> tmp, unused;

    F_bnd = m_wsp;
    // unused = temporary storage required for DoMatrixMultiply, not used here
    unused = m_wsp + 1 * nLocBndDofs;
    F_bnd1 = m_wsp + 2 * nLocBndDofs;
    V_bnd = m_wsp + 3 * nLocBndDofs;
    F_int = m_wsp + 4 * nLocBndDofs;

    pLocToGloMap->LocalToLocalBnd(pLocOutput, V_bnd);

    NekVector<NekDouble> F_Int(nIntDofs, F_int, eWrapper);
    NekVector<NekDouble> V_Bnd(nLocBndDofs, V_bnd, eWrapper);

    if (nIntDofs)
    {
        m_locToGloMap.lock()->LocalToLocalInt(pLocInput, F_int);
    }

    // Boundary system solution
    if (nGlobBndDofs - nDirBndDofs)
    {
        pLocToGloMap->LocalToLocalBnd(pLocInput, F_bnd);

        // set up normalisation factor for right hand side on first SC level
        v_PreSolve(scLevel, F_bnd);

        // Gather boundary expansion into locbnd
        NekVector<NekDouble> F_Bnd(nLocBndDofs, F_bnd1, eWrapper);

        // construct boundary forcing
        if (nIntDofs)
        {
            DNekScalBlkMat &BinvD = *m_BinvD;

            F_Bnd = BinvD * F_Int;

            Vmath::Vsub(nLocBndDofs, F_bnd, 1, F_bnd1, 1, F_bnd, 1);
        }

        if (atLastLevel)
        {
            // Transform to new basis if required
            v_BasisFwdTransform(F_bnd);

            DNekScalBlkMat &SchurCompl = *m_schurCompl;

            v_CoeffsFwdTransform(V_bnd, V_bnd);

            // subtract Dirichlet boundary forcing
            F_Bnd = SchurCompl * V_Bnd;

            Vmath::Vsub(nLocBndDofs, F_bnd, 1, F_bnd1, 1, F_bnd, 1);

            Array<OneD, NekDouble> F_hom;

            // Solve for difference from initial solution given inout
            SolveLinearSystem(nGlobBndDofs, F_bnd, F_bnd1, pLocToGloMap,
                              nDirBndDofs);

            // Add back initial conditions onto difference
            Vmath::Vadd(nLocBndDofs, V_bnd, 1, F_bnd1, 1, V_bnd, 1);

            // Transform back to original basis
            v_CoeffsBwdTransform(V_bnd);

            // put final bnd solution back in output array
            m_locToGloMap.lock()->LocalBndToLocal(V_bnd, pLocOutput);
        }
        else // Process next level of recursion for multi level SC
        {
            AssemblyMapSharedPtr nextLevLocToGloMap =
                pLocToGloMap->GetNextLevelLocalToGlobalMap();

            // partially assemble F for next level and
            // reshuffle V_bnd
            nextLevLocToGloMap->PatchAssemble(F_bnd, F_bnd);
            nextLevLocToGloMap->PatchLocalToGlobal(V_bnd, V_bnd);

            m_recursiveSchurCompl->Solve(F_bnd, V_bnd, nextLevLocToGloMap);

            // unpack V_bnd
            nextLevLocToGloMap->PatchGlobalToLocal(V_bnd, V_bnd);

            // place V_bnd in output array
            m_locToGloMap.lock()->LocalBndToLocal(V_bnd, pLocOutput);
        }
    }

    // solve interior system
    if (nIntDofs)
    {
        Array<OneD, NekDouble> V_int(nIntDofs);
        NekVector<NekDouble> V_Int(nIntDofs, V_int, eWrapper);

        // get array of local solutions
        DNekScalBlkMat &invD = *m_invD;
        DNekScalBlkMat &C = *m_C;

        F_Int = F_Int - C * V_Bnd;

        Multiply(V_Int, invD, F_Int);

        m_locToGloMap.lock()->LocalIntToLocal(V_int, pLocOutput);
    }
}

References ASSERTL1, Nektar::eWrapper, m_BinvD, m_C, m_invD, m_locToGloMap, m_recursiveSchurCompl, m_schurCompl, m_wsp, Nektar::Multiply(), Nektar::MultiRegions::GlobalLinSys::SolveLinearSystem(), v_BasisFwdTransform(), v_CoeffsBwdTransform(), v_CoeffsFwdTransform(), v_PreSolve(), Vmath::Vadd(), and Vmath::Vsub().

Member Data Documentation

◆ m_BinvD

DNekScalBlkMatSharedPtr Nektar::MultiRegions::GlobalLinSysStaticCond::m_BinvD
protected

Block \( BD^{-1} \) matrix.

◆ m_C

DNekScalBlkMatSharedPtr Nektar::MultiRegions::GlobalLinSysStaticCond::m_C
protected

Block \( C \) matrix.

◆ m_invD

DNekScalBlkMatSharedPtr Nektar::MultiRegions::GlobalLinSysStaticCond::m_invD
protected

Block \( D^{-1} \) matrix.

◆ m_locToGloMap

std::weak_ptr<AssemblyMap> Nektar::MultiRegions::GlobalLinSysStaticCond::m_locToGloMap
protected

Local to global map.

◆ m_recursiveSchurCompl

GlobalLinSysStaticCondSharedPtr Nektar::MultiRegions::GlobalLinSysStaticCond::m_recursiveSchurCompl
protected

Schur complement for Direct Static Condensation.

Definition at line 98 of file GlobalLinSysStaticCond.h.

Referenced by ConstructNextLevelCondensedSystem(), and v_Solve().

◆ m_schurCompl

DNekScalBlkMatSharedPtr Nektar::MultiRegions::GlobalLinSysStaticCond::m_schurCompl
protected

Block Schur complement matrix.

◆ m_sign

Array<OneD, const NekDouble> Nektar::MultiRegions::GlobalLinSysStaticCond::m_sign
protected

Definition at line 112 of file GlobalLinSysStaticCond.h.

◆ m_wsp

Array<OneD, NekDouble> Nektar::MultiRegions::GlobalLinSysStaticCond::m_wsp
protected

Workspace array for matrix multiplication.

Definition at line 110 of file GlobalLinSysStaticCond.h.

Referenced by Nektar::MultiRegions::GlobalLinSysIterativeStaticCond::v_DoMatrixMultiply(), v_Initialise(), and v_Solve().