Nektar++
GlobalLinSysIterativeStaticCond.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File: GlobalLinSysIterativeStaticCond.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: Implementation of linear solver using single-
//              or multi-level static condensation
//
///////////////////////////////////////////////////////////////////////////////

#include <MultiRegions/GlobalLinSysIterativeStaticCond.h>

#include <boost/core/ignore_unused.hpp>
#include <boost/lexical_cast.hpp>

using namespace std;

namespace Nektar
{
namespace MultiRegions
{
/**
 * @class GlobalLinSysIterativeStaticCond
 *
 * Solves a linear system iteratively using single- or multi-level
 * static condensation.
 */

/**
 * Registers the class with the Factory.
 */
string GlobalLinSysIterativeStaticCond::className =
    GetGlobalLinSysFactory().RegisterCreatorFunction(
        "IterativeStaticCond", GlobalLinSysIterativeStaticCond::create,
        "Iterative static condensation.");

string GlobalLinSysIterativeStaticCond::className2 =
    GetGlobalLinSysFactory().RegisterCreatorFunction(
        "IterativeMultiLevelStaticCond",
        GlobalLinSysIterativeStaticCond::create,
        "Iterative multi-level static condensation.");

std::string GlobalLinSysIterativeStaticCond::storagedef =
    LibUtilities::SessionReader::RegisterDefaultSolverInfo(
        "LocalMatrixStorageStrategy", "Sparse");
std::string GlobalLinSysIterativeStaticCond::storagelookupIds[3] = {
    LibUtilities::SessionReader::RegisterEnumValue(
        "LocalMatrixStorageStrategy", "Contiguous", MultiRegions::eContiguous),
    LibUtilities::SessionReader::RegisterEnumValue(
        "LocalMatrixStorageStrategy", "Non-contiguous",
        MultiRegions::eNonContiguous),
    LibUtilities::SessionReader::RegisterEnumValue(
        "LocalMatrixStorageStrategy", "Sparse", MultiRegions::eSparse),
};
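
// For illustration only (typical Nektar++ session-file usage, not code from
// this file): the names registered above are what a user selects through
// SolverInfo properties, e.g.
//
//   <SOLVERINFO>
//     <I PROPERTY="GlobalSysSoln"              VALUE="IterativeStaticCond" />
//     <I PROPERTY="LocalMatrixStorageStrategy" VALUE="Contiguous"          />
//   </SOLVERINFO>
//
// with "Sparse" registered above as the default storage strategy.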

/**
 * For a matrix system of the form @f[
 * \left[ \begin{array}{cc}
 * \boldsymbol{A} & \boldsymbol{B}\\
 * \boldsymbol{C} & \boldsymbol{D}
 * \end{array} \right]
 * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
 * \end{array}\right]
 * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
 * \end{array}\right],
 * @f]
 * where @f$\boldsymbol{D}@f$ and
 * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
 * a static condensation system, according to a given local to global
 * mapping. #m_linSys is constructed by AssembleSchurComplement().
 * @param mKey Associated matrix key.
 * @param pLocMatSys LocalMatrixSystem
 * @param locToGloMap Local to global mapping.
 */
GlobalLinSysIterativeStaticCond::GlobalLinSysIterativeStaticCond(
    const GlobalLinSysKey &pKey, const std::weak_ptr<ExpList> &pExpList,
    const std::shared_ptr<AssemblyMap> &pLocToGloMap)
    : GlobalLinSys(pKey, pExpList, pLocToGloMap),
      GlobalLinSysIterative(pKey, pExpList, pLocToGloMap),
      GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
{
    ASSERTL1(
        pKey.GetGlobalSysSolnType() == eIterativeStaticCond ||
            pKey.GetGlobalSysSolnType() == eIterativeMultiLevelStaticCond,
        "This constructor is only valid when using static "
        "condensation");
    ASSERTL1(pKey.GetGlobalSysSolnType() ==
                 pLocToGloMap->GetGlobalSysSolnType(),
             "The local to global map is not set up for the requested "
             "solution type");
}
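
/*
 * For reference, the standard static-condensation identities behind this
 * class (a general derivation, stated here for clarity): eliminating
 * @f$\boldsymbol{x_2}@f$ from the block system documented above gives the
 * condensed boundary problem
 * @f[
 * (\boldsymbol{A-BD^{-1}C})\,\boldsymbol{x_1} =
 *     \boldsymbol{y_1} - \boldsymbol{B}\boldsymbol{D^{-1}}\boldsymbol{y_2},
 * @f]
 * after which the interior unknowns are recovered from
 * @f[
 * \boldsymbol{x_2} = \boldsymbol{D^{-1}}(\boldsymbol{y_2} -
 *     \boldsymbol{C}\boldsymbol{x_1}).
 * @f]
 * The Schur complement @f$(\boldsymbol{A-BD^{-1}C})@f$ is the block matrix
 * held in #m_schurCompl and is the operator the iterative solver applies.
 */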

/**
 *
 */
GlobalLinSysIterativeStaticCond::GlobalLinSysIterativeStaticCond(
    const GlobalLinSysKey &pKey, const std::weak_ptr<ExpList> &pExpList,
    const DNekScalBlkMatSharedPtr pSchurCompl,
    const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC,
    const DNekScalBlkMatSharedPtr pInvD,
    const std::shared_ptr<AssemblyMap> &pLocToGloMap,
    const PreconditionerSharedPtr pPrecon)
    : GlobalLinSys(pKey, pExpList, pLocToGloMap),
      GlobalLinSysIterative(pKey, pExpList, pLocToGloMap),
      GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
{
    m_schurCompl = pSchurCompl;
    m_BinvD = pBinvD;
    m_C = pC;
    m_invD = pInvD;
    m_precon = pPrecon;
}

void GlobalLinSysIterativeStaticCond::v_InitObject()
{
    auto asmMap = m_locToGloMap.lock();

    m_precon = CreatePrecon(asmMap);

    // Allocate memory for top-level structure
    SetupTopLevel(asmMap);

    // Setup Block Matrix systems
    int n, n_exp = m_expList.lock()->GetNumElmts();

    // Build preconditioner
    m_precon->BuildPreconditioner();

    // Do transform of Schur complement matrix
    int cnt = 0;
    for (n = 0; n < n_exp; ++n)
    {
        if (m_linSysKey.GetMatrixType() != StdRegions::eHybridDGHelmBndLam)
        {
            DNekScalMatSharedPtr mat = m_schurCompl->GetBlock(n, n);
            DNekScalMatSharedPtr t =
                m_precon->TransformedSchurCompl(n, cnt, mat);
            m_schurCompl->SetBlock(n, n, t);
            cnt += mat->GetRows();
        }
    }

    // Construct this level
    Initialise(asmMap);
}

/**
 *
 */
GlobalLinSysIterativeStaticCond::~GlobalLinSysIterativeStaticCond()
{
}

DNekScalBlkMatSharedPtr GlobalLinSysIterativeStaticCond::v_GetStaticCondBlock(
    unsigned int n)
{
    DNekScalBlkMatSharedPtr schurComplBlock;
    DNekScalMatSharedPtr localMat = m_schurCompl->GetBlock(n, n);
    unsigned int nbdry = localMat->GetRows();
    unsigned int nblks = 1;
    unsigned int esize[1] = {nbdry};

    schurComplBlock = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
        nblks, nblks, esize, esize);
    schurComplBlock->SetBlock(0, 0, localMat);

    return schurComplBlock;
}

/**
 * Assemble the Schur complement matrix from the block matrices stored
 * in #m_blkMatrices and the given local to global mapping information.
 * @param locToGloMap Local to global mapping information.
 */
void GlobalLinSysIterativeStaticCond::v_AssembleSchurComplement(
    const AssemblyMapSharedPtr pLocToGloMap)
{
    boost::ignore_unused(pLocToGloMap);
    // Set up unique map
    v_UniqueMap();

    // Build precon again if we are in multi-level static condensation (a
    // bit of a hack)
    if (m_linSysKey.GetGlobalSysSolnType() == eIterativeMultiLevelStaticCond)
    {
        m_precon = CreatePrecon(m_locToGloMap.lock());
        m_precon->BuildPreconditioner();
    }

    PrepareLocalSchurComplement();
}

/**
 * Populates sparse block-diagonal Schur complement matrix from
 * the block matrices stored in #m_blkMatrices.
 */
void GlobalLinSysIterativeStaticCond::PrepareLocalSchurComplement()
{
    LocalMatrixStorageStrategy storageStrategy =
        m_expList.lock()
            ->GetSession()
            ->GetSolverInfoAsEnum<LocalMatrixStorageStrategy>(
                "LocalMatrixStorageStrategy");

    switch (storageStrategy)
    {
        case MultiRegions::eContiguous:
        case MultiRegions::eNonContiguous:
        {
            size_t storageSize = 0;
            int nBlk = m_schurCompl->GetNumberOfBlockRows();

            m_scale = Array<OneD, NekDouble>(nBlk, 1.0);
            m_rows = Array<OneD, unsigned int>(nBlk);

            // Determine storage requirements for dense blocks.
            for (int i = 0; i < nBlk; ++i)
            {
                m_rows[i] = m_schurCompl->GetBlock(i, i)->GetRows();
                m_scale[i] = m_schurCompl->GetBlock(i, i)->Scale();
                storageSize += m_rows[i] * m_rows[i];
            }

            // Assemble dense storage blocks.
            DNekScalMatSharedPtr loc_mat;
            m_denseBlocks.resize(nBlk);
            double *ptr = 0;

            if (MultiRegions::eContiguous == storageStrategy)
            {
                m_storage.resize(storageSize);
                ptr = &m_storage[0];
            }

            for (unsigned int n = 0; n < nBlk; ++n)
            {
                loc_mat = m_schurCompl->GetBlock(n, n);

                if (MultiRegions::eContiguous == storageStrategy)
                {
                    int loc_lda = loc_mat->GetRows();
                    int blockSize = loc_lda * loc_lda;
                    m_denseBlocks[n] = ptr;
                    for (int i = 0; i < loc_lda; ++i)
                    {
                        for (int j = 0; j < loc_lda; ++j)
                        {
                            ptr[j * loc_lda + i] = (*loc_mat)(i, j);
                        }
                    }
                    ptr += blockSize;
                    GlobalLinSys::v_DropStaticCondBlock(n);
                }
                else
                {
                    m_denseBlocks[n] = loc_mat->GetRawPtr();
                }
            }
            break;
        }
        case MultiRegions::eSparse:
        {
            DNekScalMatSharedPtr loc_mat;
            int loc_lda;
            int blockSize = 0;

            // First run through to split the set of local matrices into
            // partitions of fixed block size, and count number of local
            // matrices that belong to each partition.
            std::vector<std::pair<int, int>> partitions;
            for (int n = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
            {
                loc_mat = m_schurCompl->GetBlock(n, n);
                loc_lda = loc_mat->GetRows();

                ASSERTL1(loc_lda >= 0,
                         boost::lexical_cast<std::string>(n) +
                             "-th "
                             "matrix block in Schur complement has "
                             "rank 0!");

                if (blockSize == loc_lda)
                {
                    partitions[partitions.size() - 1].first++;
                }
                else
                {
                    blockSize = loc_lda;
                    partitions.push_back(make_pair(1, loc_lda));
                }
            }

            MatrixStorage matStorage = eFULL;

            // Create a vector of sparse storage holders
            DNekSmvBsrDiagBlkMat::SparseStorageSharedPtrVector sparseStorage(
                partitions.size());

            for (int part = 0, n = 0; part < partitions.size(); ++part)
            {
                BCOMatType partMat;

                for (int k = 0; k < partitions[part].first; ++k, ++n)
                {
                    loc_mat = m_schurCompl->GetBlock(n, n);
                    loc_lda = loc_mat->GetRows();

                    ASSERTL1(loc_lda == partitions[part].second,
                             boost::lexical_cast<std::string>(n) +
                                 "-th"
                                 " matrix block in Schur complement has "
                                 "unexpected rank");

                    NekDouble scale = loc_mat->Scale();
                    if (fabs(scale - 1.0) > NekConstants::kNekZeroTol)
                    {
                        Array<OneD, NekDouble> matarray(loc_lda * loc_lda);
                        Vmath::Smul(loc_lda * loc_lda, scale,
                                    loc_mat->GetRawPtr(), 1, &matarray[0], 1);
                        partMat[make_pair(k, k)] = BCOEntryType(matarray);
                    }
                    else // scale factor is 1.0
                    {
                        partMat[make_pair(k, k)] = BCOEntryType(
                            loc_lda * loc_lda, loc_mat->GetRawPtr());
                    }

                    GlobalLinSys::v_DropStaticCondBlock(n);
                }

                sparseStorage[part] =
                    MemoryManager<DNekSmvBsrDiagBlkMat::StorageType>::
                        AllocateSharedPtr(
                            partitions[part].first, partitions[part].first,
                            partitions[part].second, partMat, matStorage);
            }

            // Create block diagonal matrix
            m_sparseSchurCompl =
                MemoryManager<DNekSmvBsrDiagBlkMat>::AllocateSharedPtr(
                    sparseStorage);

            break;
        }
        default:
            ErrorUtil::NekError("Solver info property \
                LocalMatrixStorageStrategy takes values \
                Contiguous, Non-contiguous and Sparse");
    }
}
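
// Note on the three strategies above: "Contiguous" copies every dense Schur
// block into the single m_storage vector and points m_denseBlocks at slices
// of it; "Non-contiguous" points m_denseBlocks directly at each block's raw
// storage; both keep the per-block scale factors in m_scale. "Sparse" instead
// converts the block-diagonal system into the BSR matrix m_sparseSchurCompl.
// These are the data structures v_DoMatrixMultiply() operates on below.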

/**
 *
 */
void GlobalLinSysIterativeStaticCond::v_DoMatrixMultiply(
    const Array<OneD, NekDouble> &pInput, Array<OneD, NekDouble> &pOutput)
{
    if (m_linsol->IsLocal())
    {
        if (m_sparseSchurCompl)
        {
            m_sparseSchurCompl->Multiply(pInput, pOutput);
        }
        else
        {
            // Do matrix multiply locally, using direct BLAS calls
            int i, cnt;
            for (i = cnt = 0; i < m_denseBlocks.size(); cnt += m_rows[i], ++i)
            {
                const int rows = m_rows[i];
                Blas::Dgemv('N', rows, rows, m_scale[i], m_denseBlocks[i], rows,
                            pInput.get() + cnt, 1, 0.0, pOutput.get() + cnt, 1);
            }
        }
    }
    else
    {
        int nLocal = m_locToGloMap.lock()->GetNumLocalBndCoeffs();
        AssemblyMapSharedPtr asmMap = m_locToGloMap.lock();

        if (m_sparseSchurCompl)
        {
            // Do matrix multiply locally using block-diagonal sparse matrix
            Array<OneD, NekDouble> tmp = m_wsp + nLocal;

            asmMap->GlobalToLocalBnd(pInput, m_wsp);
            m_sparseSchurCompl->Multiply(m_wsp, tmp);
            asmMap->AssembleBnd(tmp, pOutput);
        }
        else
        {
            // Do matrix multiply locally, using direct BLAS calls
            asmMap->GlobalToLocalBnd(pInput, m_wsp);
            int i, cnt;
            Array<OneD, NekDouble> tmpout = m_wsp + nLocal;
            for (i = cnt = 0; i < m_denseBlocks.size(); cnt += m_rows[i], ++i)
            {
                const int rows = m_rows[i];
                Blas::Dgemv('N', rows, rows, m_scale[i], m_denseBlocks[i], rows,
                            m_wsp.get() + cnt, 1, 0.0, tmpout.get() + cnt, 1);
            }
            asmMap->AssembleBnd(tmpout, pOutput);
        }
    }
}

void GlobalLinSysIterativeStaticCond::v_UniqueMap()
{
    m_map = m_locToGloMap.lock()->GetGlobalToUniversalBndMapUnique();
}

void GlobalLinSysIterativeStaticCond::v_PreSolve(int scLevel,
                                                 Array<OneD, NekDouble> &F_bnd)
{
    if (scLevel == 0)
    {
        // When matrices are supplied to the constructor at the top
        // level, the preconditioner is never set up.
        if (!m_precon)
        {
            m_precon = CreatePrecon(m_locToGloMap.lock());
            m_precon->BuildPreconditioner();
        }

#if 1 // to be consistent with original

        int nGloBndDofs = m_locToGloMap.lock()->GetNumGlobalBndCoeffs();
        Array<OneD, NekDouble> F_gloBnd(nGloBndDofs);
        NekVector<NekDouble> F_GloBnd(nGloBndDofs, F_gloBnd, eWrapper);

        m_locToGloMap.lock()->AssembleBnd(F_bnd, F_gloBnd);
        Set_Rhs_Magnitude(F_GloBnd);

#else
        int nLocBndDofs = m_locToGloMap.lock()->GetNumLocalBndCoeffs();

        // Set_Rhs_Magnitude - version using local array

        Array<OneD, NekDouble> vExchange(1, 0.0);

        vExchange[0] += Blas::Ddot(nLocBndDofs, F_bnd, 1, F_bnd, 1);
        m_expList.lock()->GetComm()->GetRowComm()->AllReduce(
            vExchange, LibUtilities::ReduceSum);

        // To ensure that very different rhs values are not being
        // used in subsequent solvers such as the velocity solve in
        // INC NS. If this works we then need to work out a better
        // way to control this.
        NekDouble new_rhs_mag = (vExchange[0] > 1e-6) ? vExchange[0] : 1.0;

        if (m_rhs_magnitude == NekConstants::kNekUnsetDouble)
        {
            m_rhs_magnitude = new_rhs_mag;
        }
        else
        {
            m_rhs_magnitude = (m_rhs_mag_sm * (m_rhs_magnitude) +
                               (1.0 - m_rhs_mag_sm) * new_rhs_mag);
        }
#endif
    }
    else
    {
        // for multilevel iterative solver always use rhs
        // vector value with no weighting
        m_rhs_magnitude = 1.0;
    }
}

void GlobalLinSysIterativeStaticCond::v_BasisFwdTransform(
    Array<OneD, NekDouble> &pInOut)
{
    m_precon->DoTransformBasisToLowEnergy(pInOut);
}

void GlobalLinSysIterativeStaticCond::v_CoeffsBwdTransform(
    Array<OneD, NekDouble> &pInOut)
{
    m_precon->DoTransformCoeffsFromLowEnergy(pInOut);
}

void GlobalLinSysIterativeStaticCond::v_CoeffsFwdTransform(
    const Array<OneD, NekDouble> &pInput, Array<OneD, NekDouble> &pOutput)
{
    m_precon->DoTransformCoeffsToLowEnergy(pInput, pOutput);
}

GlobalLinSysStaticCondSharedPtr GlobalLinSysIterativeStaticCond::v_Recurse(
    const GlobalLinSysKey &mkey, const std::weak_ptr<ExpList> &pExpList,
    const DNekScalBlkMatSharedPtr pSchurCompl,
    const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC,
    const DNekScalBlkMatSharedPtr pInvD,
    const std::shared_ptr<AssemblyMap> &l2gMap)
{
    GlobalLinSysIterativeStaticCondSharedPtr sys =
        MemoryManager<GlobalLinSysIterativeStaticCond>::AllocateSharedPtr(
            mkey, pExpList, pSchurCompl, pBinvD, pC, pInvD, l2gMap, m_precon);
    sys->Initialise(l2gMap);
    return sys;
}
/**
 *
 */
void GlobalLinSysIterativeStaticCond::v_SolveLinearSystem(
    const int nGlobal, const Array<OneD, const NekDouble> &pInput,
    Array<OneD, NekDouble> &pOutput, const AssemblyMapSharedPtr &plocToGloMap,
    const int nDir)
{

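    // Overview (descriptive note): on the first call this routine lazily
    // constructs the NekLinSysIter solver named by m_linSysIterSolver and,
    // if necessary, the preconditioner. It then either applies the projection
    // technique in global space (m_useProjection) or hands the problem to
    // m_linsol->SolveSystem() in local or global storage, depending on
    // m_linsol->IsLocal().
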
    if (!m_linsol)
    {
        LibUtilities::CommSharedPtr vRowComm =
            m_expList.lock()->GetComm()->GetRowComm();
        LibUtilities::SessionReaderSharedPtr pSession =
            m_expList.lock()->GetSession();

        // Check such a module exists for this equation.
        ASSERTL0(LibUtilities::GetNekLinSysIterFactory().ModuleExists(
                     m_linSysIterSolver),
                 "NekLinSysIter '" + m_linSysIterSolver +
                     "' is not defined.\n");
        m_linsol = LibUtilities::GetNekLinSysIterFactory().CreateInstance(
            m_linSysIterSolver, pSession, vRowComm, nGlobal - nDir,
            LibUtilities::NekSysKey());

        m_linsol->SetSysOperators(m_NekSysOp);
        v_UniqueMap();
        m_linsol->setUniversalUniqueMap(m_map);
    }

    if (!m_precon)
    {
        m_precon = CreatePrecon(plocToGloMap);
        m_precon->BuildPreconditioner();
    }

    m_linsol->setRhsMagnitude(m_rhs_magnitude);

    if (m_useProjection)
    {
        Array<OneD, NekDouble> gloIn(nGlobal);
        Array<OneD, NekDouble> gloOut(nGlobal, 0.0);
        plocToGloMap->AssembleBnd(pInput, gloIn);
        DoProjection(nGlobal, gloIn, gloOut, nDir, m_tolerance, m_isAconjugate);
        plocToGloMap->GlobalToLocalBnd(gloOut, pOutput);
    }
    else
    {
        if (m_linsol->IsLocal())
        {
            int nLocDofs = plocToGloMap->GetNumLocalBndCoeffs();
            Vmath::Zero(nLocDofs, pOutput, 1);
            m_linsol->SolveSystem(nLocDofs, pInput, pOutput, nDir, m_tolerance);
        }
        else
        {
            Array<OneD, NekDouble> gloIn(nGlobal);
            Array<OneD, NekDouble> gloOut(nGlobal, 0.0);
            plocToGloMap->AssembleBnd(pInput, gloIn);
            m_linsol->SolveSystem(nGlobal, gloIn, gloOut, nDir, m_tolerance);
            plocToGloMap->GlobalToLocalBnd(gloOut, pOutput);
        }
    }
}
} // namespace MultiRegions
} // namespace Nektar