Nektar++
GlobalLinSysIterativeStaticCond.cpp
1///////////////////////////////////////////////////////////////////////////////
2//
3// File: GlobalLinSysIterativeStaticCond.cpp
4//
5// For more information, please see: http://www.nektar.info
6//
7// The MIT License
8//
9// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10// Department of Aeronautics, Imperial College London (UK), and Scientific
11// Computing and Imaging Institute, University of Utah (USA).
12//
13// Permission is hereby granted, free of charge, to any person obtaining a
14// copy of this software and associated documentation files (the "Software"),
15// to deal in the Software without restriction, including without limitation
16// the rights to use, copy, modify, merge, publish, distribute, sublicense,
17// and/or sell copies of the Software, and to permit persons to whom the
18// Software is furnished to do so, subject to the following conditions:
19//
20// The above copyright notice and this permission notice shall be included
21// in all copies or substantial portions of the Software.
22//
23// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
24// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29// DEALINGS IN THE SOFTWARE.
30//
31// Description: Implementation of a linear solver using single-
32// or multi-level static condensation
33//
34///////////////////////////////////////////////////////////////////////////////
35
41
42using namespace std;
43
44namespace Nektar::MultiRegions
45{
46/**
47 * @class GlobalLinSysIterativeStaticCond
48 *
49 * Solves a linear system iteratively using single- or multi-level
50 * static condensation.
51 */
52
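// Note: this solver is normally selected through the session file rather
// than constructed directly, using the keys registered below. A minimal
// illustrative sketch, assuming a session variable "u" (property names not
// appearing in this file are taken from the user guide):
//
//   <GLOBALSYSSOLNINFO>
//     <V VAR="u">
//       <I PROPERTY="GlobalSysSoln"            VALUE="IterativeStaticCond" />
//       <I PROPERTY="IterativeSolverTolerance" VALUE="1e-9" />
//     </V>
//   </GLOBALSYSSOLNINFO>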
53/**
54 * Registers the class with the Factory.
55 */
58 "IterativeStaticCond", GlobalLinSysIterativeStaticCond::create,
59 "Iterative static condensation.");
60
63 "IterativeMultiLevelStaticCond",
65 "Iterative multi-level static condensation.");
66
69 "LocalMatrixStorageStrategy", "Sparse");
72 "LocalMatrixStorageStrategy", "Contiguous", MultiRegions::eContiguous),
74 "LocalMatrixStorageStrategy", "Non-contiguous",
77 "LocalMatrixStorageStrategy", "Sparse", MultiRegions::eSparse),
78};
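// Note: the registrations above expose the factory keys "IterativeStaticCond"
// and "IterativeMultiLevelStaticCond", and map the session property
// "LocalMatrixStorageStrategy" onto the storage back-ends handled in
// PrepareLocalSchurComplement() below, with "Sparse" as the registered
// default.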
79
80/**
81 * For a matrix system of the form @f[
82 * \left[ \begin{array}{cc}
83 * \boldsymbol{A} & \boldsymbol{B}\\
84 * \boldsymbol{C} & \boldsymbol{D}
85 * \end{array} \right]
86 * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
87 * \end{array}\right]
88 * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
89 * \end{array}\right],
90 * @f]
91 * where @f$\boldsymbol{D}@f$ and
92 * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
93 * a static condensation system, according to a given local to global
94 * mapping. #m_linSys is constructed by AssembleSchurComplement().
95 * @param mKey Associated matrix key.
96 * @param pLocMatSys LocalMatrixSystem
97 * @param locToGloMap Local to global mapping.
98 */
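/*
 * For reference, the condensation eliminates @f$\boldsymbol{x_2}@f$ via
 * @f$\boldsymbol{x_2} = \boldsymbol{D}^{-1}(\boldsymbol{y_2}
 * - \boldsymbol{C}\boldsymbol{x_1})@f$ and substitutes into the first block
 * row, giving the boundary (Schur complement) system
 * @f[
 * (\boldsymbol{A} - \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{C})
 * \boldsymbol{x_1} = \boldsymbol{y_1}
 * - \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{y_2},
 * @f]
 * which is the system treated iteratively here; @f$\boldsymbol{x_2}@f$ is
 * then recovered by back-substitution.
 */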
99GlobalLinSysIterativeStaticCond::GlobalLinSysIterativeStaticCond(
100    const GlobalLinSysKey &pKey, const std::weak_ptr<ExpList> &pExpList,
101 const std::shared_ptr<AssemblyMap> &pLocToGloMap)
102 : GlobalLinSys(pKey, pExpList, pLocToGloMap),
103 GlobalLinSysIterative(pKey, pExpList, pLocToGloMap),
104 GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
105{
106 ASSERTL1(
109 "This constructor is only valid when using static "
110 "condensation");
111    ASSERTL1(m_linSysKey.GetGlobalSysSolnType() ==
112                 pLocToGloMap->GetGlobalSysSolnType(),
113 "The local to global map is not set up for the requested "
114 "solution type");
115}
116
117/**
118 *
119 */
120GlobalLinSysIterativeStaticCond::GlobalLinSysIterativeStaticCond(
121    const GlobalLinSysKey &pKey, const std::weak_ptr<ExpList> &pExpList,
122    const DNekScalBlkMatSharedPtr pSchurCompl,
123    const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC,
124    const DNekScalBlkMatSharedPtr pInvD,
125 const std::shared_ptr<AssemblyMap> &pLocToGloMap,
126 const PreconditionerSharedPtr pPrecon)
127 : GlobalLinSys(pKey, pExpList, pLocToGloMap),
128 GlobalLinSysIterative(pKey, pExpList, pLocToGloMap),
129 GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
130{
131 m_schurCompl = pSchurCompl;
132 m_BinvD = pBinvD;
133 m_C = pC;
134 m_invD = pInvD;
135 m_precon = pPrecon;
136}
137
138void GlobalLinSysIterativeStaticCond::v_InitObject()
139{
140 auto asmMap = m_locToGloMap.lock();
141
142 m_precon = CreatePrecon(asmMap);
143
144 // Allocate memory for top-level structure
145 SetupTopLevel(asmMap);
146
147 // Setup Block Matrix systems
148 int n, n_exp = m_expList.lock()->GetNumElmts();
149
150 // Build preconditioner
151 m_precon->BuildPreconditioner();
152
153 // Do transform of Schur complement matrix
154 int cnt = 0;
155 for (n = 0; n < n_exp; ++n)
156 {
157        if (m_linSysKey.GetMatrixType() != StdRegions::eHybridDGHelmBndLam)
158        {
159 DNekScalMatSharedPtr mat = m_schurCompl->GetBlock(n, n);
160            DNekScalMatSharedPtr t =
161                m_precon->TransformedSchurCompl(n, cnt, mat);
162 m_schurCompl->SetBlock(n, n, t);
163 cnt += mat->GetRows();
164 }
165 }
166
167 // Construct this level
168 Initialise(asmMap);
169}
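// Note: the initialisation above therefore runs as: create the
// preconditioner, allocate the top-level Schur complement storage
// (SetupTopLevel), build the preconditioner, transform each elemental Schur
// block through the preconditioner, and finally Initialise() this level.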
170
171/**
172 *
173 */
174GlobalLinSysIterativeStaticCond::~GlobalLinSysIterativeStaticCond()
175{
176}
177
178DNekScalBlkMatSharedPtr GlobalLinSysIterativeStaticCond::v_GetStaticCondBlock(
179    unsigned int n)
180{
181 DNekScalBlkMatSharedPtr schurComplBlock;
182 DNekScalMatSharedPtr localMat = m_schurCompl->GetBlock(n, n);
183 unsigned int nbdry = localMat->GetRows();
184 unsigned int nblks = 1;
185 unsigned int esize[1] = {nbdry};
186
187    schurComplBlock = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
188        nblks, nblks, esize, esize);
189 schurComplBlock->SetBlock(0, 0, localMat);
190
191 return schurComplBlock;
192}
193
194/**
195 * Assemble the Schur complement matrix from the block matrices stored
196 * in #m_blkMatrices and the given local to global mapping information.
197 * @param locToGloMap Local to global mapping information.
198 */
199void GlobalLinSysIterativeStaticCond::v_AssembleSchurComplement(
200    [[maybe_unused]] const AssemblyMapSharedPtr pLocToGloMap)
201{
202 // Set up unique map
203 v_UniqueMap();
204
205    // Build the preconditioner again if we are in multi-level static
206    // condensation (a bit of a hack)
207    if (m_linSysKey.GetGlobalSysSolnType() == eIterativeMultiLevelStaticCond)
208    {
209        m_precon = CreatePrecon(m_locToGloMap.lock());
210        m_precon->BuildPreconditioner();
211    }
212
213    PrepareLocalSchurComplement();
214}
215
216/**
217 * Populates the sparse block-diagonal Schur complement matrix from
218 * the block matrices stored in #m_blkMatrices.
219 */
220void GlobalLinSysIterativeStaticCond::PrepareLocalSchurComplement()
221{
222 LocalMatrixStorageStrategy storageStrategy =
223 m_expList.lock()
224 ->GetSession()
225 ->GetSolverInfoAsEnum<LocalMatrixStorageStrategy>(
226 "LocalMatrixStorageStrategy");
227
228 switch (storageStrategy)
229 {
230        case MultiRegions::eContiguous:
231        case MultiRegions::eNonContiguous:
232        {
233 size_t storageSize = 0;
234 int nBlk = m_schurCompl->GetNumberOfBlockRows();
235
236 m_scale = Array<OneD, NekDouble>(nBlk, 1.0);
237            m_rows = Array<OneD, unsigned int>(nBlk);
238
239 // Determine storage requirements for dense blocks.
240 for (int i = 0; i < nBlk; ++i)
241 {
242 m_rows[i] = m_schurCompl->GetBlock(i, i)->GetRows();
243 m_scale[i] = m_schurCompl->GetBlock(i, i)->Scale();
244 storageSize += m_rows[i] * m_rows[i];
245 }
246
247 // Assemble dense storage blocks.
248 DNekScalMatSharedPtr loc_mat;
249 m_denseBlocks.resize(nBlk);
250 double *ptr = nullptr;
251
252 if (MultiRegions::eContiguous == storageStrategy)
253 {
254 m_storage.resize(storageSize);
255 ptr = &m_storage[0];
256 }
257
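            // Note: in the loop below each dense block is stored (or, for the
            // non-contiguous strategy, referenced in place) in column-major
            // order with leading dimension equal to the block rank, which is
            // the layout consumed by Blas::Dgemv('N', ...) in
            // v_DoMatrixMultiply().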
258 for (unsigned int n = 0; n < nBlk; ++n)
259 {
260 loc_mat = m_schurCompl->GetBlock(n, n);
261
262 if (MultiRegions::eContiguous == storageStrategy)
263 {
264 int loc_lda = loc_mat->GetRows();
265 int blockSize = loc_lda * loc_lda;
266 m_denseBlocks[n] = ptr;
267 for (int i = 0; i < loc_lda; ++i)
268 {
269 for (int j = 0; j < loc_lda; ++j)
270 {
271 ptr[j * loc_lda + i] = (*loc_mat)(i, j);
272 }
273 }
274 ptr += blockSize;
275                    GlobalLinSys::v_DropStaticCondBlock(n);
276                }
277 else
278 {
279 m_denseBlocks[n] = loc_mat->GetRawPtr();
280 }
281 }
282 break;
283 }
284        case MultiRegions::eSparse:
285        {
286 DNekScalMatSharedPtr loc_mat;
287 int loc_lda;
288 int blockSize = 0;
289
290 // First run through to split the set of local matrices into
291 // partitions of fixed block size, and count number of local
292 // matrices that belong to each partition.
293 std::vector<std::pair<int, int>> partitions;
294 for (int n = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
295 {
296 loc_mat = m_schurCompl->GetBlock(n, n);
297 loc_lda = loc_mat->GetRows();
298
299 ASSERTL1(loc_lda >= 0,
300 std::to_string(n) +
301 "-th "
302 "matrix block in Schur complement has "
303 "rank 0!");
304
305 if (blockSize == loc_lda)
306 {
307 partitions[partitions.size() - 1].first++;
308 }
309 else
310 {
311 blockSize = loc_lda;
312 partitions.push_back(make_pair(1, loc_lda));
313 }
314 }
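            // Note: consecutive blocks of equal rank are grouped, so block
            // ranks {5, 5, 5, 8, 8, 5} would yield the partitions
            // {(3, 5), (2, 8), (1, 5)}, stored as (count, block size) pairs.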
315
316 MatrixStorage matStorage = eFULL;
317
318 // Create a vector of sparse storage holders
320 partitions.size());
321
322 for (int part = 0, n = 0; part < partitions.size(); ++part)
323 {
324 BCOMatType partMat;
325
326 for (int k = 0; k < partitions[part].first; ++k, ++n)
327 {
328 loc_mat = m_schurCompl->GetBlock(n, n);
329 loc_lda = loc_mat->GetRows();
330
331 ASSERTL1(loc_lda == partitions[part].second,
332 std::to_string(n) +
333 "-th"
334 " matrix block in Schur complement has "
335 "unexpected rank");
336
337 NekDouble scale = loc_mat->Scale();
338 if (fabs(scale - 1.0) > NekConstants::kNekZeroTol)
339 {
340 Array<OneD, NekDouble> matarray(loc_lda * loc_lda);
341 Vmath::Smul(loc_lda * loc_lda, scale,
342 loc_mat->GetRawPtr(), 1, &matarray[0], 1);
343 partMat[make_pair(k, k)] = BCOEntryType(matarray);
344 }
345 else // scale factor is 1.0
346 {
347 partMat[make_pair(k, k)] = BCOEntryType(
348 loc_lda * loc_lda, loc_mat->GetRawPtr());
349 }
350
351                    GlobalLinSys::v_DropStaticCondBlock(n);
352                }
353
354 sparseStorage[part] =
357 partitions[part].first, partitions[part].first,
358 partitions[part].second, partMat, matStorage);
359 }
360
361 // Create block diagonal matrix
362            m_sparseSchurCompl =
363                MemoryManager<DNekSmvBsrDiagBlkMat>::AllocateSharedPtr(
364                    sparseStorage);
365
366 break;
367 }
368 default:
369            NEKERROR(ErrorUtil::efatal,
370                     "Solver info property LocalMatrixStorageStrategy takes "
371                     "values Contiguous, Non-contiguous and Sparse");
372 }
373}
374
375/**
376 *
377 */
378void GlobalLinSysIterativeStaticCond::v_DoMatrixMultiply(
379    const Array<OneD, NekDouble> &pInput, Array<OneD, NekDouble> &pOutput)
380{
381 if (m_linsol->IsLocal())
382 {
383        if (m_sparseSchurCompl)
384        {
385 m_sparseSchurCompl->Multiply(pInput, pOutput);
386 }
387 else
388 {
389 // Do matrix multiply locally, using direct BLAS calls
390 int i, cnt;
391 for (i = cnt = 0; i < m_denseBlocks.size(); cnt += m_rows[i], ++i)
392 {
393 const int rows = m_rows[i];
394 Blas::Dgemv('N', rows, rows, m_scale[i], m_denseBlocks[i], rows,
395 pInput.get() + cnt, 1, 0.0, pOutput.get() + cnt, 1);
396 }
397 }
398 }
399 else
400 {
401 int nLocal = m_locToGloMap.lock()->GetNumLocalBndCoeffs();
402 AssemblyMapSharedPtr asmMap = m_locToGloMap.lock();
403
404        if (m_sparseSchurCompl)
405        {
406 // Do matrix multiply locally using block-diagonal sparse matrix
407 Array<OneD, NekDouble> tmp = m_wsp + nLocal;
408
409 asmMap->GlobalToLocalBnd(pInput, m_wsp);
410 m_sparseSchurCompl->Multiply(m_wsp, tmp);
411 asmMap->AssembleBnd(tmp, pOutput);
412 }
413 else
414 {
415 // Do matrix multiply locally, using direct BLAS calls
416 asmMap->GlobalToLocalBnd(pInput, m_wsp);
417 int i, cnt;
418 Array<OneD, NekDouble> tmpout = m_wsp + nLocal;
419 for (i = cnt = 0; i < m_denseBlocks.size(); cnt += m_rows[i], ++i)
420 {
421 const int rows = m_rows[i];
422 Blas::Dgemv('N', rows, rows, m_scale[i], m_denseBlocks[i], rows,
423 m_wsp.get() + cnt, 1, 0.0, tmpout.get() + cnt, 1);
424 }
425 asmMap->AssembleBnd(tmpout, pOutput);
426 }
427 }
428}
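// Note: both branches above apply the block-diagonal local Schur complement,
// i.e. y_loc = S_loc x_loc; when the solver operates on assembled (global)
// vectors the input is first scattered with GlobalToLocalBnd and the result
// gathered back with AssembleBnd.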
429
430void GlobalLinSysIterativeStaticCond::v_UniqueMap()
431{
432 m_map = m_locToGloMap.lock()->GetGlobalToUniversalBndMapUnique();
433}
434
435void GlobalLinSysIterativeStaticCond::v_PreSolve(
436    int scLevel, Array<OneD, NekDouble> &F_bnd)
437{
438 if (scLevel == 0)
439 {
440 // When matrices are supplied to the constructor at the top
441 // level, the preconditioner is never set up.
442 if (!m_precon)
443 {
444            m_precon = CreatePrecon(m_locToGloMap.lock());
445            m_precon->BuildPreconditioner();
446 }
447
449 {
450 m_rhs_magnitude = 1.0;
451 return;
452 }
453
454 int nGloBndDofs = m_locToGloMap.lock()->GetNumGlobalBndCoeffs();
455 Array<OneD, NekDouble> F_gloBnd(nGloBndDofs);
456 NekVector<NekDouble> F_GloBnd(nGloBndDofs, F_gloBnd, eWrapper);
457 m_locToGloMap.lock()->AssembleBnd(F_bnd, F_gloBnd);
458
459 NekDouble vExchange(0.0);
460 if (m_map.size() > 0)
461 {
462 vExchange = Vmath::Dot2(F_GloBnd.GetDimension(), &F_GloBnd[0],
463 &F_GloBnd[0], &m_map[0]);
464 }
465
466 m_expList.lock()->GetComm()->GetRowComm()->AllReduce(
467            vExchange, LibUtilities::ReduceSum);
468
469        // Guard against very different rhs magnitudes being carried over
470        // into subsequent solves, e.g. the velocity solve in incompressible
471        // Navier-Stokes. If this works we still need to find a better way
472        // to control it.
473 NekDouble new_rhs_mag = (vExchange > 1e-6) ? vExchange : 1.0;
474
475        if (m_rhs_magnitude == NekConstants::kNekUnsetDouble)
476        {
477 m_rhs_magnitude = new_rhs_mag;
478 }
479 else
480 {
481            m_rhs_magnitude = (m_rhs_mag_sm * (m_rhs_magnitude) +
482                               (1.0 - m_rhs_mag_sm) * new_rhs_mag);
483 }
484 }
485 else
486 {
487        // For the multi-level iterative solver, always use the rhs
488        // vector value with no weighting
490 }
491}
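// Note: once set, the rhs magnitude is relaxed rather than replaced:
// m_rhs_magnitude <- w * m_rhs_magnitude + (1 - w) * new_rhs_mag with
// w = m_rhs_mag_sm, so the normalisation of the stopping criterion varies
// smoothly between successive solves.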
492
493void GlobalLinSysIterativeStaticCond::v_BasisFwdTransform(
494    Array<OneD, NekDouble> &pInOut)
495{
496 m_precon->DoTransformBasisToLowEnergy(pInOut);
497}
498
499void GlobalLinSysIterativeStaticCond::v_CoeffsBwdTransform(
500    Array<OneD, NekDouble> &pInOut)
501{
502 m_precon->DoTransformCoeffsFromLowEnergy(pInOut);
503}
504
505void GlobalLinSysIterativeStaticCond::v_CoeffsFwdTransform(
506    const Array<OneD, NekDouble> &pInput, Array<OneD, NekDouble> &pOutput)
507{
508 m_precon->DoTransformCoeffsToLowEnergy(pInput, pOutput);
509}
510
511GlobalLinSysStaticCondSharedPtr GlobalLinSysIterativeStaticCond::v_Recurse(
512    const GlobalLinSysKey &mkey, const std::weak_ptr<ExpList> &pExpList,
513    const DNekScalBlkMatSharedPtr pSchurCompl,
514    const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC,
515    const DNekScalBlkMatSharedPtr pInvD,
516 const std::shared_ptr<AssemblyMap> &l2gMap)
517{
518    GlobalLinSysIterativeStaticCondSharedPtr sys =
519        MemoryManager<GlobalLinSysIterativeStaticCond>::AllocateSharedPtr(
520            mkey, pExpList, pSchurCompl, pBinvD, pC, pInvD, l2gMap, m_precon);
521 sys->Initialise(l2gMap);
522 return sys;
523}
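// Note: when recursing to the next static-condensation level, the
// preconditioner already built at this level (m_precon) is handed to the
// matrix-supplied constructor above, so it is not rebuilt for the new level.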
524
525/**
526 *
527 */
528void GlobalLinSysIterativeStaticCond::v_SolveLinearSystem(
529    const int nGlobal, const Array<OneD, const NekDouble> &pInput,
530 Array<OneD, NekDouble> &pOutput, const AssemblyMapSharedPtr &plocToGloMap,
531 const int nDir)
532{
533
534 if (!m_linsol)
535 {
536        LibUtilities::CommSharedPtr vRowComm =
537            m_expList.lock()->GetComm()->GetRowComm();
538        LibUtilities::SessionReaderSharedPtr pSession =
539            m_expList.lock()->GetSession();
540
541 // Check such a module exists for this equation.
542        ASSERTL0(LibUtilities::GetNekLinSysIterFactory().ModuleExists(
543                     m_linSysIterSolver),
544                 "NekLinSysIter '" + m_linSysIterSolver +
545 "' is not defined.\n");
546
547 // Create the key to hold solver settings
548 auto sysKey = LibUtilities::NekSysKey();
549 string variable = plocToGloMap->GetVariable();
550
551 // Either get the solnInfo from <GlobalSysSolInfo> or from
552 // <Parameters>
553 if (pSession->DefinesGlobalSysSolnInfo(variable,
554 "IterativeSolverTolerance"))
555 {
556 sysKey.m_NekLinSysTolerance = boost::lexical_cast<double>(
557 pSession
558 ->GetGlobalSysSolnInfo(variable, "IterativeSolverTolerance")
559 .c_str());
560 }
561 else
562 {
563 pSession->LoadParameter("IterativeSolverTolerance",
564 sysKey.m_NekLinSysTolerance,
565                                    NekConstants::kNekIterativeTol);
566        }
567
568 if (pSession->DefinesGlobalSysSolnInfo(variable,
569 "NekLinSysMaxIterations"))
570 {
571 sysKey.m_NekLinSysMaxIterations = boost::lexical_cast<int>(
572 pSession
573 ->GetGlobalSysSolnInfo(variable, "NekLinSysMaxIterations")
574 .c_str());
575 }
576 else
577 {
578 pSession->LoadParameter("NekLinSysMaxIterations",
579 sysKey.m_NekLinSysMaxIterations, 5000);
580 }
581
582 if (pSession->DefinesGlobalSysSolnInfo(variable, "LinSysMaxStorage"))
583 {
584 sysKey.m_LinSysMaxStorage = boost::lexical_cast<int>(
585 pSession->GetGlobalSysSolnInfo(variable, "LinSysMaxStorage")
586 .c_str());
587 }
588 else
589 {
590 pSession->LoadParameter("LinSysMaxStorage",
591 sysKey.m_LinSysMaxStorage, 100);
592 }
593
594 if (pSession->DefinesGlobalSysSolnInfo(variable, "GMRESMaxHessMatBand"))
595 {
596 sysKey.m_KrylovMaxHessMatBand = boost::lexical_cast<int>(
597 pSession->GetGlobalSysSolnInfo(variable, "GMRESMaxHessMatBand")
598 .c_str());
599 }
600 else
601 {
602 pSession->LoadParameter("GMRESMaxHessMatBand",
603 sysKey.m_KrylovMaxHessMatBand,
604 sysKey.m_LinSysMaxStorage + 1);
605 }
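        // Note: these settings can equally be supplied through the session
        // <PARAMETERS> block; the names below are those queried by
        // LoadParameter above, the values are illustrative only:
        //
        //   <PARAMETERS>
        //     <P> IterativeSolverTolerance = 1e-9 </P>
        //     <P> NekLinSysMaxIterations   = 5000 </P>
        //     <P> LinSysMaxStorage         = 100  </P>
        //     <P> GMRESMaxHessMatBand      = 101  </P>
        //   </PARAMETERS>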
606
607        // The following settings have no corresponding tests and are
608        // rarely used.
609 pSession->MatchSolverInfo("GMRESLeftPrecon", "True",
610 sysKey.m_NekLinSysLeftPrecon, false);
611 pSession->MatchSolverInfo("GMRESRightPrecon", "True",
612 sysKey.m_NekLinSysRightPrecon, true);
613
614        m_linsol = LibUtilities::GetNekLinSysIterFactory().CreateInstance(
615            m_linSysIterSolver, pSession, vRowComm, nGlobal - nDir, sysKey);
616
617 m_linsol->SetSysOperators(m_NekSysOp);
618 v_UniqueMap();
619 m_linsol->SetUniversalUniqueMap(m_map);
620 }
621
622 if (!m_precon)
623 {
624 m_precon = CreatePrecon(plocToGloMap);
625 m_precon->BuildPreconditioner();
626 }
627
628 m_linsol->SetRhsMagnitude(m_rhs_magnitude);
629
630 if (m_useProjection)
631 {
632 Array<OneD, NekDouble> gloIn(nGlobal);
633 Array<OneD, NekDouble> gloOut(nGlobal, 0.0);
634 plocToGloMap->AssembleBnd(pInput, gloIn);
635 DoProjection(nGlobal, gloIn, gloOut, nDir, m_isAconjugate);
636 plocToGloMap->GlobalToLocalBnd(gloOut, pOutput);
637 }
638 else
639 {
640 if (m_linsol->IsLocal())
641 {
642 int nLocDofs = plocToGloMap->GetNumLocalBndCoeffs();
643 Vmath::Zero(nLocDofs, pOutput, 1);
644 m_linsol->SolveSystem(nLocDofs, pInput, pOutput, nDir);
645 }
646 else
647 {
648 Array<OneD, NekDouble> gloIn(nGlobal);
649 Array<OneD, NekDouble> gloOut(nGlobal, 0.0);
650 plocToGloMap->AssembleBnd(pInput, gloIn);
651 m_linsol->SolveSystem(nGlobal, gloIn, gloOut, nDir);
652 plocToGloMap->GlobalToLocalBnd(gloOut, pOutput);
653 }
654 }
655}
656
657} // namespace Nektar::MultiRegions
#define ASSERTL0(condition, msg)
Definition: ErrorUtil.hpp:208
#define ASSERTL1(condition, msg)
Assert Level 1 - Debugging assertions, enabled in both FULLDEBUG and DEBUG compilation modes.
Definition: ErrorUtil.hpp:242
Nektar::ErrorUtil::NekError NekError
tKey RegisterCreatorFunction(tKey idKey, CreatorFunction classCreator, std::string pDesc="")
Register a class with the factory.
tBaseSharedPtr CreateInstance(tKey idKey, tParam... args)
Create an instance of the class referred to by idKey.
static std::string RegisterEnumValue(std::string pEnum, std::string pString, int pEnumValue)
Registers an enumeration value.
static std::string RegisterDefaultSolverInfo(const std::string &pName, const std::string &pValue)
Registers the default string value of a solver info property.
static std::shared_ptr< DataType > AllocateSharedPtr(const Args &...args)
Allocate a shared pointer from the memory pool.
A global linear system.
Definition: GlobalLinSys.h:70
const std::weak_ptr< ExpList > m_expList
Local Matrix System.
Definition: GlobalLinSys.h:122
const GlobalLinSysKey m_linSysKey
Key associated with this linear system.
Definition: GlobalLinSys.h:120
PreconditionerSharedPtr CreatePrecon(AssemblyMapSharedPtr asmMap)
Create a preconditioner object from the parameters defined in the supplied assembly map.
virtual void v_DropStaticCondBlock(unsigned int n)
Releases the static condensation block matrices of the n-th expansion from the NekManager, using the matrix key provided.
void Initialise(const std::shared_ptr< AssemblyMap > &pLocToGloMap)
Definition: GlobalLinSys.h:203
LibUtilities::NekLinSysIterSharedPtr m_linsol
void DoProjection(const int pNumRows, const Array< OneD, const NekDouble > &pInput, Array< OneD, NekDouble > &pOutput, const int pNumDir, const bool isAconjugate)
projection technique
bool m_useProjection
Whether to apply projection technique.
Array< OneD, int > m_map
Global to universal unique map.
NekDouble m_rhs_magnitude
dot product of rhs to normalise stopping criterion
std::string m_linSysIterSolver
Iterative solver: Conjugate Gradient, GMRES.
NekDouble m_rhs_mag_sm
Smoothing weight applied when updating rhs_magnitude between successive solves.
std::vector< const double * > m_denseBlocks
Vector of pointers to local matrix data.
static GlobalLinSysSharedPtr create(const GlobalLinSysKey &pLinSysKey, const std::weak_ptr< ExpList > &pExpList, const std::shared_ptr< AssemblyMap > &pLocToGloMap)
Creates an instance of this class.
DNekScalBlkMatSharedPtr v_GetStaticCondBlock(unsigned int n) override
Retrieves the static condensation block matrices of the n-th expansion using the matrix key provided.
void v_CoeffsBwdTransform(Array< OneD, NekDouble > &pInOut) override
std::vector< double > m_storage
Dense storage for block Schur complement matrix.
GlobalLinSysIterativeStaticCond(const GlobalLinSysKey &mkey, const std::weak_ptr< ExpList > &pExpList, const std::shared_ptr< AssemblyMap > &locToGloMap)
Constructor for full direct matrix solve.
void PrepareLocalSchurComplement()
Prepares local representation of Schur complement stored as a sparse block-diagonal matrix.
Array< OneD, unsigned int > m_rows
Ranks of local matrices.
GlobalLinSysStaticCondSharedPtr v_Recurse(const GlobalLinSysKey &mkey, const std::weak_ptr< ExpList > &pExpList, const DNekScalBlkMatSharedPtr pSchurCompl, const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC, const DNekScalBlkMatSharedPtr pInvD, const std::shared_ptr< AssemblyMap > &locToGloMap) override
void v_BasisFwdTransform(Array< OneD, NekDouble > &pInOut) override
void v_AssembleSchurComplement(const std::shared_ptr< AssemblyMap > locToGloMap) override
Assemble the Schur complement matrix.
void v_SolveLinearSystem(const int pNumRows, const Array< OneD, const NekDouble > &pInput, Array< OneD, NekDouble > &pOutput, const AssemblyMapSharedPtr &locToGloMap, const int pNumDir) override
Solve the matrix system.
Array< OneD, NekDouble > m_scale
Scaling factors for local matrices.
DNekSmvBsrDiagBlkMatSharedPtr m_sparseSchurCompl
Sparse representation of Schur complement matrix at this level.
void v_PreSolve(int scLevel, Array< OneD, NekDouble > &F_bnd) override
void v_CoeffsFwdTransform(const Array< OneD, NekDouble > &pInput, Array< OneD, NekDouble > &pOutput) override
void v_DoMatrixMultiply(const Array< OneD, NekDouble > &pInput, Array< OneD, NekDouble > &pOutput) override
Perform a Schur-complement matrix multiply operation.
GlobalSysSolnType GetGlobalSysSolnType() const
Return the associated solution type.
DNekScalBlkMatSharedPtr m_schurCompl
Block Schur complement matrix.
std::weak_ptr< AssemblyMap > m_locToGloMap
Local to global map.
void SetupTopLevel(const std::shared_ptr< AssemblyMap > &locToGloMap)
Set up the storage for the Schur complement or the top level of the multi-level Schur complement.
Array< OneD, NekDouble > m_wsp
Workspace array for matrix multiplication.
DNekScalBlkMatSharedPtr m_BinvD
Block matrix.
DNekScalBlkMatSharedPtr m_C
Block matrix.
DNekScalBlkMatSharedPtr m_invD
Block matrix.
StdRegions::MatrixType GetMatrixType() const
Return the matrix type.
unsigned int GetDimension() const
Returns the number of dimensions for the point.
Definition: NekVector.cpp:201
static void Dgemv(const char &trans, const int &m, const int &n, const double &alpha, const double *a, const int &lda, const double *x, const int &incx, const double &beta, double *y, const int &incy)
BLAS level 2: Matrix vector multiply y = alpha A x plus beta y where A[m x n].
Definition: Blas.hpp:211
std::shared_ptr< SessionReader > SessionReaderSharedPtr
NekLinSysIterFactory & GetNekLinSysIterFactory()
std::shared_ptr< Comm > CommSharedPtr
Pointer to a Communicator object.
Definition: Comm.h:55
std::shared_ptr< GlobalLinSysStaticCond > GlobalLinSysStaticCondSharedPtr
GlobalLinSysFactory & GetGlobalLinSysFactory()
std::shared_ptr< GlobalLinSysIterativeStaticCond > GlobalLinSysIterativeStaticCondSharedPtr
std::shared_ptr< Preconditioner > PreconditionerSharedPtr
Definition: GlobalLinSys.h:58
std::shared_ptr< AssemblyMap > AssemblyMapSharedPtr
Definition: AssemblyMap.h:50
static const NekDouble kNekIterativeTol
static const NekDouble kNekUnsetDouble
static const NekDouble kNekZeroTol
std::shared_ptr< DNekScalMat > DNekScalMatSharedPtr
std::map< CoordType, BCOEntryType > BCOMatType
Array< OneD, NekDouble > BCOEntryType
std::shared_ptr< DNekScalBlkMat > DNekScalBlkMatSharedPtr
Definition: NekTypeDefs.hpp:79
double NekDouble
T Dot2(int n, const T *w, const T *x, const int *y)
dot product
Definition: Vmath.hpp:790
void Smul(int n, const T alpha, const T *x, const int incx, T *y, const int incy)
Scalar multiply y = alpha*x.
Definition: Vmath.hpp:100
void Zero(int n, T *x, const int incx)
Zero vector.
Definition: Vmath.hpp:273
STL namespace.