Nektar++
GlobalLinSysPETScStaticCond.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File: GlobalLinSysPETScStaticCond.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: GlobalLinSys definition
//
///////////////////////////////////////////////////////////////////////////////

#include <boost/core/ignore_unused.hpp>

#include <MultiRegions/GlobalLinSysPETScStaticCond.h>

#include <petscksp.h>
#include <petscmat.h>
#include <petscsys.h>

using namespace std;

namespace Nektar
{
namespace MultiRegions
{
/**
 * @class GlobalLinSysPETSc
 *
 * Solves a linear system using single- or multi-level static
 * condensation.
 */

/**
 * Registers the class with the Factory.
 */
string GlobalLinSysPETScStaticCond::className =
    GetGlobalLinSysFactory().RegisterCreatorFunction(
        "PETScStaticCond", GlobalLinSysPETScStaticCond::create,
        "PETSc static condensation.");

string GlobalLinSysPETScStaticCond::className2 =
    GetGlobalLinSysFactory().RegisterCreatorFunction(
        "PETScMultiLevelStaticCond", GlobalLinSysPETScStaticCond::create,
        "PETSc multi-level static condensation.");

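// The names registered here ("PETScStaticCond" and
// "PETScMultiLevelStaticCond") are the values one would typically supply
// for the GlobalSysSoln solver-info property in a session file in order to
// select this solver, e.g. (illustrative snippet only):
//
//   <I PROPERTY="GlobalSysSoln" VALUE="PETScStaticCond" />
//
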
/**
 * For a matrix system of the form @f[
 * \left[ \begin{array}{cc}
 * \boldsymbol{A} & \boldsymbol{B}\\
 * \boldsymbol{C} & \boldsymbol{D}
 * \end{array} \right]
 * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
 * \end{array}\right]
 * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
 * \end{array}\right],
 * @f]
 * where @f$\boldsymbol{D}@f$ and
 * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
 * a static condensation system, according to a given local to global
 * mapping. #m_linSys is constructed by AssembleSchurComplement().
 * @param mKey Associated matrix key.
 * @param pLocMatSys LocalMatrixSystem
 * @param locToGloMap Local to global mapping.
 */
GlobalLinSysPETScStaticCond::GlobalLinSysPETScStaticCond(
    const GlobalLinSysKey &pKey, const std::weak_ptr<ExpList> &pExpList,
    const std::shared_ptr<AssemblyMap> &pLocToGloMap)
    : GlobalLinSys(pKey, pExpList, pLocToGloMap),
      GlobalLinSysPETSc(pKey, pExpList, pLocToGloMap),
      GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
{
    ASSERTL1((pKey.GetGlobalSysSolnType() == ePETScStaticCond) ||
                 (pKey.GetGlobalSysSolnType() == ePETScMultiLevelStaticCond),
             "This constructor is only valid when using static "
             "condensation");
    ASSERTL1(pKey.GetGlobalSysSolnType() ==
                 pLocToGloMap->GetGlobalSysSolnType(),
             "The local to global map is not set up for the requested "
             "solution type");
}
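
/*
 * For reference, eliminating @f$\boldsymbol{x_2}@f$ from the block system
 * documented above gives the condensed (Schur complement) boundary problem
 * @f[
 * (\boldsymbol{A} - \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{C})
 * \boldsymbol{x_1} = \boldsymbol{y_1} -
 * \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{y_2},
 * @f]
 * followed by the back-substitution
 * @f[
 * \boldsymbol{x_2} = \boldsymbol{D}^{-1}(\boldsymbol{y_2} -
 * \boldsymbol{C}\boldsymbol{x_1}).
 * @f]
 * The blocks @f$\boldsymbol{A-BD^{-1}C}@f$, @f$\boldsymbol{BD^{-1}}@f$,
 * @f$\boldsymbol{C}@f$ and @f$\boldsymbol{D^{-1}}@f$ correspond to the
 * #m_schurCompl, #m_BinvD, #m_C and #m_invD block matrices used below.
 */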

/**
 *
 */
GlobalLinSysPETScStaticCond::GlobalLinSysPETScStaticCond(
    const GlobalLinSysKey &pKey, const std::weak_ptr<ExpList> &pExpList,
    const DNekScalBlkMatSharedPtr pSchurCompl,
    const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC,
    const DNekScalBlkMatSharedPtr pInvD,
    const std::shared_ptr<AssemblyMap> &pLocToGloMap,
    const PreconditionerSharedPtr pPrecon)
    : GlobalLinSys(pKey, pExpList, pLocToGloMap),
      GlobalLinSysPETSc(pKey, pExpList, pLocToGloMap),
      GlobalLinSysStaticCond(pKey, pExpList, pLocToGloMap)
{
    m_schurCompl = pSchurCompl;
    m_BinvD = pBinvD;
    m_C = pC;
    m_invD = pInvD;
    m_precon = pPrecon;
}

/**
 *
 */
GlobalLinSysPETScStaticCond::~GlobalLinSysPETScStaticCond()
{
}

void GlobalLinSysPETScStaticCond::v_InitObject()
{
    auto asmMap = m_locToGloMap.lock();

    m_precon = CreatePrecon(asmMap);

    // Allocate memory for top-level structure
    SetupTopLevel(asmMap);

    // Setup Block Matrix systems
    int n, n_exp = m_expList.lock()->GetNumElmts();

    // Build preconditioner
    m_precon->BuildPreconditioner();

    // Do transform of Schur complement matrix
    int cnt = 0;
    for (n = 0; n < n_exp; ++n)
    {
        if (m_linSysKey.GetMatrixType() != StdRegions::eHybridDGHelmBndLam)
        {
            DNekScalMatSharedPtr mat = m_schurCompl->GetBlock(n, n);
            DNekScalMatSharedPtr t =
                m_precon->TransformedSchurCompl(n, cnt, mat);
            m_schurCompl->SetBlock(n, n, t);
            cnt += mat->GetRows();
        }
    }

    // Construct this level
    Initialise(asmMap);
}

/**
 * Assemble the Schur complement matrix from the block matrices stored
 * in #m_blkMatrices and the given local to global mapping information.
 * @param locToGloMap Local to global mapping information.
 */
void GlobalLinSysPETScStaticCond::v_AssembleSchurComplement(
    AssemblyMapSharedPtr pLocToGloMap)
{
    int i, j, n, cnt, gid1, gid2, loc_lda;
    NekDouble sign1, sign2, value;

    const int nDirDofs = pLocToGloMap->GetNumGlobalDirBndCoeffs();

    DNekScalBlkMatSharedPtr SchurCompl = m_schurCompl;
    DNekScalBlkMatSharedPtr BinvD = m_BinvD;
    DNekScalBlkMatSharedPtr C = m_C;
    DNekScalBlkMatSharedPtr invD = m_invD;
    DNekScalMatSharedPtr loc_mat;

    // Build precon again if we are in multi-level static condensation (a
    // bit of a hack)
    if (m_linSysKey.GetGlobalSysSolnType() == ePETScMultiLevelStaticCond)
    {
        m_precon = CreatePrecon(pLocToGloMap);
        m_precon->BuildPreconditioner();
    }

    // CALCULATE REORDERING MAPPING
    CalculateReordering(pLocToGloMap->GetGlobalToUniversalBndMap(),
                        pLocToGloMap->GetGlobalToUniversalBndMapUnique(),
                        pLocToGloMap);

    // SET UP VECTORS AND MATRIX
    SetUpMatVec(pLocToGloMap->GetNumGlobalBndCoeffs(), nDirDofs);

    // SET UP SCATTER OBJECTS
    SetUpScatter();

    // CONSTRUCT KSP OBJECT
    SetUpSolver(pLocToGloMap->GetIterativeTolerance());

    // If we are using the matrix multiplication shell don't try to
    // populate the matrix.
    if (m_matMult == ePETScMatMultShell)
    {
        return;
    }

    // POPULATE MATRIX
    for (n = cnt = 0; n < m_schurCompl->GetNumberOfBlockRows(); ++n)
    {
        loc_mat = m_schurCompl->GetBlock(n, n);
        loc_lda = loc_mat->GetRows();

        for (i = 0; i < loc_lda; ++i)
        {
            gid1 = pLocToGloMap->GetLocalToGlobalBndMap(cnt + i) - nDirDofs;
            sign1 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + i);
            if (gid1 >= 0)
            {
                int gid1ro = m_reorderedMap[gid1];
                for (j = 0; j < loc_lda; ++j)
                {
                    gid2 = pLocToGloMap->GetLocalToGlobalBndMap(cnt + j) -
                           nDirDofs;
                    sign2 = pLocToGloMap->GetLocalToGlobalBndSign(cnt + j);
                    if (gid2 >= 0)
                    {
                        int gid2ro = m_reorderedMap[gid2];
                        value = sign1 * sign2 * (*loc_mat)(i, j);
                        MatSetValue(m_matrix, gid1ro, gid2ro, value,
                                    ADD_VALUES);
                    }
                }
            }
        }
        cnt += loc_lda;
    }

    // ASSEMBLE MATRIX
    MatAssemblyBegin(m_matrix, MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(m_matrix, MAT_FINAL_ASSEMBLY);
}
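
/*
 * The population loop above follows the standard PETSc assembly idiom:
 * entries are accumulated with ADD_VALUES so that contributions from
 * elements sharing a global degree of freedom are summed, and the matrix
 * must be assembled before it can be used. A minimal standalone sketch of
 * the same idiom, where n, nnz, rows, cols and vals are hypothetical
 * placeholders:
 *
 *     Mat A;
 *     MatCreate(PETSC_COMM_WORLD, &A);
 *     MatSetSizes(A, PETSC_DECIDE, PETSC_DECIDE, n, n);
 *     MatSetFromOptions(A);
 *     MatSetUp(A);
 *     for (PetscInt k = 0; k < nnz; ++k)
 *     {
 *         MatSetValue(A, rows[k], cols[k], vals[k], ADD_VALUES);
 *     }
 *     MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
 *     MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
 */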

DNekScalBlkMatSharedPtr GlobalLinSysPETScStaticCond::v_GetStaticCondBlock(
    unsigned int n)
{
    DNekScalBlkMatSharedPtr schurComplBlock;
    DNekScalMatSharedPtr localMat = m_schurCompl->GetBlock(n, n);
    unsigned int nbdry = localMat->GetRows();
    unsigned int nblks = 1;
    unsigned int esize[1] = {nbdry};

    schurComplBlock = MemoryManager<DNekScalBlkMat>::AllocateSharedPtr(
        nblks, nblks, esize, esize);
    schurComplBlock->SetBlock(0, 0, localMat);

    return schurComplBlock;
}

void GlobalLinSysPETScStaticCond::v_PreSolve(int scLevel,
                                             Array<OneD, NekDouble> &F_bnd)
{
    boost::ignore_unused(F_bnd);

    if (scLevel == 0)
    {
        // When matrices are supplied to the constructor at the top
        // level, the preconditioner is never set up.
        if (!m_precon)
        {
            m_precon = CreatePrecon(m_locToGloMap.lock());
            m_precon->BuildPreconditioner();
        }
    }
}

/**
 * @brief Solve linear system using PETSc.
 *
 * The general strategy behind a PETSc solve is to:
 *
 * - Copy values into the PETSc vector #m_b
 * - Solve the system #m_ksp and place result into #m_x.
 * - Scatter results back into #m_locVec using #m_ctx scatter object.
 * - Copy from #m_locVec to output array #pOutput.
 */
void GlobalLinSysPETScStaticCond::v_SolveLinearSystem(
    const int pNumRows, const Array<OneD, const NekDouble> &pInput,
    Array<OneD, NekDouble> &pOutput, const AssemblyMapSharedPtr &locToGloMap,
    const int pNumDir)
{
    const int nHomDofs = pNumRows - pNumDir;

    if (!m_precon && m_matMult == ePETScMatMultShell)
    {
        m_precon = CreatePrecon(locToGloMap);
        m_precon->BuildPreconditioner();
    }

    Array<OneD, NekDouble> Glo(pNumRows);
    locToGloMap->AssembleBnd(pInput, Glo);

    // Populate RHS vector from input
    VecSetValues(m_b, nHomDofs, &m_reorderedMap[0], &Glo[pNumDir],
                 INSERT_VALUES);

    // Assemble RHS vector
    VecAssemblyBegin(m_b);
    VecAssemblyEnd(m_b);

    // Do system solve
    KSPSolve(m_ksp, m_b, m_x);

    KSPConvergedReason reason;
    KSPGetConvergedReason(m_ksp, &reason);
    ASSERTL0(reason > 0, "PETSc solver diverged, reason is: " +
                             std::string(KSPConvergedReasons[reason]));

    // Scatter results to local vector
    VecScatterBegin(m_ctx, m_x, m_locVec, INSERT_VALUES, SCATTER_FORWARD);
    VecScatterEnd(m_ctx, m_x, m_locVec, INSERT_VALUES, SCATTER_FORWARD);

    // Copy results into output vector
    PetscScalar *tmp;
    VecGetArray(m_locVec, &tmp);
    Vmath::Vcopy(nHomDofs, tmp, 1, &Glo[pNumDir], 1);
    Vmath::Zero(pNumDir, Glo, 1);
    locToGloMap->GlobalToLocalBnd(Glo, pOutput);
    VecRestoreArray(m_locVec, &tmp);
}
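
/*
 * The solve above is the usual PETSc KSP workflow: assemble the right-hand
 * side, solve, check the convergence reason, then scatter the distributed
 * solution back to the local ordering. In isolation the KSP part looks like
 * the following rough sketch (here A, b, x and ksp stand for the handles
 * that this class sets up in SetUpMatVec() and SetUpSolver()):
 *
 *     KSPSetOperators(ksp, A, A);
 *     KSPSetFromOptions(ksp); // honour -ksp_type / -pc_type overrides
 *     KSPSolve(ksp, b, x);
 *     KSPConvergedReason reason;
 *     KSPGetConvergedReason(ksp, &reason);
 *     // reason > 0 signals convergence; negative values signal failure
 */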

void GlobalLinSysPETScStaticCond::v_BasisFwdTransform(
    Array<OneD, NekDouble> &pInOut)
{
    m_precon->DoTransformBasisToLowEnergy(pInOut);
}

void GlobalLinSysPETScStaticCond::v_CoeffsBwdTransform(
    Array<OneD, NekDouble> &pInOut)
{
    m_precon->DoTransformCoeffsFromLowEnergy(pInOut);
}

void GlobalLinSysPETScStaticCond::v_CoeffsFwdTransform(
    const Array<OneD, NekDouble> &pInput, Array<OneD, NekDouble> &pOutput)
{
    m_precon->DoTransformCoeffsToLowEnergy(pInput, pOutput);
}

/**
 * @brief Apply matrix-vector multiplication using local approach and
 * the assembly map.
 *
 * @param input Vector input.
 * @param output Result of multiplication.
 *
 * @todo This can possibly be made faster by using the sparse
 * block-matrix multiplication code from the iterative elastic
 * systems.
 */
void GlobalLinSysPETScStaticCond::v_DoMatrixMultiply(
    const Array<OneD, const NekDouble> &input, Array<OneD, NekDouble> &output)
{
    auto asmMap = m_locToGloMap.lock();

    int nLocBndDofs = asmMap->GetNumLocalBndCoeffs();
    int nDirDofs = asmMap->GetNumGlobalDirBndCoeffs();

    NekVector<NekDouble> in(nLocBndDofs), out(nLocBndDofs);
    asmMap->GlobalToLocalBnd(input, in.GetPtr(), nDirDofs);
    out = (*m_schurCompl) * in;
    asmMap->AssembleBnd(out.GetPtr(), output, nDirDofs);
}
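
/*
 * In effect v_DoMatrixMultiply() applies the globally assembled Schur
 * operator without ever forming it explicitly: with @f$\mathcal{A}@f$ the
 * boundary gather/scatter (assembly) matrix and @f$S@f$ the block-diagonal
 * matrix of local Schur complements stored in #m_schurCompl,
 * @f[
 * \boldsymbol{y} = \mathcal{A}^{T} S \mathcal{A} \boldsymbol{x},
 * @f]
 * restricted to the non-Dirichlet boundary degrees of freedom.
 */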

GlobalLinSysStaticCondSharedPtr GlobalLinSysPETScStaticCond::v_Recurse(
    const GlobalLinSysKey &mkey, const std::weak_ptr<ExpList> &pExpList,
    const DNekScalBlkMatSharedPtr pSchurCompl,
    const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC,
    const DNekScalBlkMatSharedPtr pInvD,
    const std::shared_ptr<AssemblyMap> &l2gMap)
{
    GlobalLinSysPETScStaticCondSharedPtr sys =
        MemoryManager<GlobalLinSysPETScStaticCond>::AllocateSharedPtr(
            mkey, pExpList, pSchurCompl, pBinvD, pC, pInvD, l2gMap, m_precon);
    sys->Initialise(l2gMap);
    return sys;
}
} // namespace MultiRegions
} // namespace Nektar
#define ASSERTL0(condition, msg)
Definition: ErrorUtil.hpp:215
#define ASSERTL1(condition, msg)
Assert Level 1 – Debugging assertion used in both FULLDEBUG and DEBUG compilation modes.
Definition: ErrorUtil.hpp:249
tKey RegisterCreatorFunction(tKey idKey, CreatorFunction classCreator, std::string pDesc="")
Register a class with the factory.
Definition: NekFactory.hpp:198
static std::shared_ptr< DataType > AllocateSharedPtr(const Args &...args)
Allocate a shared pointer from the memory pool.
A global linear system.
Definition: GlobalLinSys.h:72
const std::weak_ptr< ExpList > m_expList
Local Matrix System.
Definition: GlobalLinSys.h:124
const GlobalLinSysKey m_linSysKey
Key associated with this linear system.
Definition: GlobalLinSys.h:122
PreconditionerSharedPtr CreatePrecon(AssemblyMapSharedPtr asmMap)
Create a preconditioner object from the parameters defined in the supplied assembly map.
void Initialise(const std::shared_ptr< AssemblyMap > &pLocToGloMap)
Definition: GlobalLinSys.h:205
GlobalSysSolnType GetGlobalSysSolnType() const
Return the associated solution type.
A PETSc global linear system.
Vec m_x
PETSc vector objects used for local storage.
PETScMatMult m_matMult
Enumerator to select matrix multiplication type.
std::vector< int > m_reorderedMap
Reordering that takes universal IDs to a unique row in the PETSc matrix.
void SetUpScatter()
Set up PETSc local (equivalent to Nektar++ global) and global (equivalent to universal) scatter maps.
void SetUpSolver(NekDouble tolerance)
Set up KSP solver object.
void SetUpMatVec(int nGlobal, int nDir)
Construct PETSc matrix and vector handles.
VecScatter m_ctx
PETSc scatter context that takes us between Nektar++ global ordering and PETSc vector ordering.
void CalculateReordering(const Array< OneD, const int > &glo2uniMap, const Array< OneD, const int > &glo2unique, const AssemblyMapSharedPtr &pLocToGloMap)
Calculate a reordering of universal IDs for PETSc.
KSP m_ksp
KSP object that represents solver system.
virtual void v_CoeffsFwdTransform(const Array< OneD, NekDouble > &pInput, Array< OneD, NekDouble > &pOutput) override
virtual void v_AssembleSchurComplement(std::shared_ptr< AssemblyMap > locToGloMap) override
Assemble the Schur complement matrix.
static GlobalLinSysSharedPtr create(const GlobalLinSysKey &pLinSysKey, const std::weak_ptr< ExpList > &pExpList, const std::shared_ptr< AssemblyMap > &pLocToGloMap)
Creates an instance of this class.
virtual void v_SolveLinearSystem(const int pNumRows, const Array< OneD, const NekDouble > &pInput, Array< OneD, NekDouble > &pOutput, const AssemblyMapSharedPtr &locToGloMap, const int pNumDir) override
Solve linear system using PETSc.
virtual GlobalLinSysStaticCondSharedPtr v_Recurse(const GlobalLinSysKey &mkey, const std::weak_ptr< ExpList > &pExpList, const DNekScalBlkMatSharedPtr pSchurCompl, const DNekScalBlkMatSharedPtr pBinvD, const DNekScalBlkMatSharedPtr pC, const DNekScalBlkMatSharedPtr pInvD, const std::shared_ptr< AssemblyMap > &locToGloMap) override
virtual void v_CoeffsBwdTransform(Array< OneD, NekDouble > &pInOut) override
virtual void v_DoMatrixMultiply(const Array< OneD, const NekDouble > &input, Array< OneD, NekDouble > &output) override
Apply matrix-vector multiplication using local approach and the assembly map.
virtual DNekScalBlkMatSharedPtr v_GetStaticCondBlock(unsigned int n) override
Retrieves the static condensation block matrices from the n-th expansion using the matrix key provided ...
virtual void v_PreSolve(int scLevel, Array< OneD, NekDouble > &F_bBnd) override
GlobalLinSysPETScStaticCond(const GlobalLinSysKey &mkey, const std::weak_ptr< ExpList > &pExpList, const std::shared_ptr< AssemblyMap > &locToGloMap)
Constructor for full direct matrix solve.
virtual void v_BasisFwdTransform(Array< OneD, NekDouble > &pInOut) override
DNekScalBlkMatSharedPtr m_schurCompl
Block Schur complement matrix.
std::weak_ptr< AssemblyMap > m_locToGloMap
Local to global map.
void SetupTopLevel(const std::shared_ptr< AssemblyMap > &locToGloMap)
Set up the storage for the Schur complement or the top level of the multi-level Schur complement.
DNekScalBlkMatSharedPtr m_BinvD
Block matrix.
DNekScalBlkMatSharedPtr m_C
Block matrix.
DNekScalBlkMatSharedPtr m_invD
Block matrix.
StdRegions::MatrixType GetMatrixType() const
Return the matrix type.
Array< OneD, DataType > & GetPtr()
Definition: NekVector.cpp:217
std::shared_ptr< GlobalLinSysStaticCond > GlobalLinSysStaticCondSharedPtr
GlobalLinSysFactory & GetGlobalLinSysFactory()
std::shared_ptr< Preconditioner > PreconditionerSharedPtr
Definition: GlobalLinSys.h:60
std::shared_ptr< AssemblyMap > AssemblyMapSharedPtr
Definition: AssemblyMap.h:52
std::shared_ptr< GlobalLinSysPETScStaticCond > GlobalLinSysPETScStaticCondSharedPtr
std::shared_ptr< DNekScalMat > DNekScalMatSharedPtr
std::shared_ptr< DNekScalBlkMat > DNekScalBlkMatSharedPtr
Definition: NekTypeDefs.hpp:79
double NekDouble
void Zero(int n, T *x, const int incx)
Zero vector.
Definition: Vmath.cpp:487
void Vcopy(int n, const T *x, const int incx, T *y, const int incy)
Definition: Vmath.cpp:1191