Nektar++
GlobalLinSysPETSc.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File GlobalLinSysPETSc.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// License for the specific language governing rights and limitations under
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: GlobalLinSysPETSc definition
//
///////////////////////////////////////////////////////////////////////////////

#include <MultiRegions/GlobalLinSysPETSc.h>

#include "petscis.h"
#include "petscversion.h"

using namespace std;

namespace Nektar
{
    namespace MultiRegions
    {
        std::string GlobalLinSysPETSc::matMult =
            LibUtilities::SessionReader::RegisterDefaultSolverInfo(
                "PETScMatMult", "Sparse");
        std::string GlobalLinSysPETSc::matMultIds[] = {
            LibUtilities::SessionReader::RegisterEnumValue(
                "PETScMatMult", "Sparse",
                MultiRegions::ePETScMatMultSparse),
            LibUtilities::SessionReader::RegisterEnumValue(
                "PETScMatMult", "Shell",
                MultiRegions::ePETScMatMultShell)
        };
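
        // The choice between the assembled sparse matrix and the matrix-free
        // shell registered above is made through the PETScMatMult solver-info
        // property. As an illustrative (not verbatim) session-file snippet,
        // assuming the usual Nektar++ SOLVERINFO syntax:
        //
        //   <SOLVERINFO>
        //     <I PROPERTY="GlobalSysSoln" VALUE="PETScFull" />
        //     <I PROPERTY="PETScMatMult"  VALUE="Shell"     />
        //   </SOLVERINFO>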

        /**
         * @class GlobalLinSysPETSc
         *
         * Solves a linear system using PETSc.
         */
        GlobalLinSysPETSc::GlobalLinSysPETSc(
            const GlobalLinSysKey                &pKey,
            const boost::weak_ptr<ExpList>       &pExp,
            const boost::shared_ptr<AssemblyMap> &pLocToGloMap)
            : GlobalLinSys(pKey, pExp, pLocToGloMap)
        {
            // Determine whether to use standard sparse matrix approach or
            // shell.
            m_matMult = pExp.lock()->GetSession()
                ->GetSolverInfoAsEnum<PETScMatMult>(
                    "PETScMatMult");

            // Check PETSc is initialized. For some reason, this is needed on
            // OS X as logging is not initialized properly in the call within
            // CommMpi.
            PetscBool isInitialized;
            PetscInitialized(&isInitialized);
            if (!isInitialized)
            {
                PetscInitializeNoArguments();
            }

            // Create matrix
            MatCreate(PETSC_COMM_WORLD, &m_matrix);
        }

        /**
         * @brief Clean up PETSc objects.
         *
         * Note that if SessionReader::Finalize is called before the end of the
         * program, PETSc may have been finalized already, at which point we
         * cannot deallocate our objects. If that's the case we do nothing and
         * let the kernel clear up after us.
         */
        GlobalLinSysPETSc::~GlobalLinSysPETSc()
        {
            PetscBool isFinalized;
            PetscFinalized(&isFinalized);

            // Sometimes PetscFinalized returns false when (in fact) CommMpi's
            // Finalise routine has been called. We therefore also need to
            // check whether MPI has been finalised. This might arise from the
            // additional call to PetscInitializeNoArguments in the constructor
            // above.
#ifdef NEKTAR_USE_MPI
            int mpiFinal = 0;
            MPI_Finalized(&mpiFinal);
            isFinalized = isFinalized || mpiFinal ? PETSC_TRUE : PETSC_FALSE;
#endif

            if (!isFinalized)
            {
                KSPDestroy(&m_ksp);
                PCDestroy (&m_pc);
                MatDestroy(&m_matrix);
                VecDestroy(&m_x);
                VecDestroy(&m_b);
                VecDestroy(&m_locVec);
            }
        }

        /**
         * @brief Solve the linear system using PETSc.
         *
         * The general strategy behind a PETSc solve is to:
         *
         * - Copy values into the PETSc vector #m_b
         * - Solve the system #m_ksp and place the result into #m_x.
         * - Scatter the results back into #m_locVec using the #m_ctx scatter
         *   object.
         * - Copy from #m_locVec to the output array #pOutput.
         */
        void GlobalLinSysPETSc::v_SolveLinearSystem(
            const int                          pNumRows,
            const Array<OneD,const NekDouble> &pInput,
                  Array<OneD,      NekDouble> &pOutput,
            const AssemblyMapSharedPtr        &locToGloMap,
            const int                          pNumDir)
        {
            const int nHomDofs = pNumRows - pNumDir;

            if (!m_precon && m_matMult == ePETScMatMultShell)
            {
                m_precon = CreatePrecon(locToGloMap);
                m_precon->BuildPreconditioner();
            }

            // Populate RHS vector from input
            VecSetValues(m_b, nHomDofs, &m_reorderedMap[0],
                         &pInput[pNumDir], INSERT_VALUES);

            // Assemble RHS vector
            VecAssemblyBegin(m_b);
            VecAssemblyEnd  (m_b);

            // Do system solve
            KSPSolve(m_ksp, m_b, m_x);

            // Scatter results to local vector
            VecScatterBegin(m_ctx, m_x, m_locVec,
                            INSERT_VALUES, SCATTER_FORWARD);
            VecScatterEnd  (m_ctx, m_x, m_locVec,
                            INSERT_VALUES, SCATTER_FORWARD);

            // Copy results into output vector
            PetscScalar *tmp;
            VecGetArray    (m_locVec, &tmp);
            Vmath::Vcopy   (nHomDofs, tmp, 1, &pOutput[pNumDir], 1);
            VecRestoreArray(m_locVec, &tmp);
        }

        /**
         * @brief Set up the PETSc local (equivalent to Nektar++ global) and
         * global (equivalent to universal) scatter maps.
         *
         * These maps are used in GlobalLinSysPETSc::v_SolveLinearSystem to
         * scatter the solution vector back to each process.
         */
        void GlobalLinSysPETSc::SetUpScatter()
        {
            const int nHomDofs = m_reorderedMap.size();

            // Create local and global numbering systems for vector
            IS isGlobal, isLocal;
            ISCreateGeneral(PETSC_COMM_SELF, nHomDofs, &m_reorderedMap[0],
                            PETSC_COPY_VALUES, &isGlobal);
            ISCreateStride (PETSC_COMM_SELF, nHomDofs, 0, 1, &isLocal);

            // Create local vector for output
            VecCreate        (PETSC_COMM_SELF, &m_locVec);
            VecSetSizes      (m_locVec, nHomDofs, PETSC_DECIDE);
            VecSetFromOptions(m_locVec);

            // Create scatter context
            VecScatterCreate(m_x, isGlobal, m_locVec, isLocal, &m_ctx);

            // Clean up
            ISDestroy(&isGlobal);
            ISDestroy(&isLocal);
        }

        /**
         * @brief Calculate a reordering of universal IDs for PETSc.
         *
         * PETSc requires a unique, contiguous index for each global and
         * universal degree of freedom, which represents its position inside
         * the matrix. Presently Gs does not guarantee this, so this routine
         * constructs a new universal mapping.
         *
         * @param glo2uniMap    Global to universal map
         * @param glo2unique    Global to unique map
         * @param pLocToGloMap  Assembly map for this system
         */
        void GlobalLinSysPETSc::CalculateReordering(
            const Array<OneD, const int> &glo2uniMap,
            const Array<OneD, const int> &glo2unique,
            const AssemblyMapSharedPtr   &pLocToGloMap)
        {
            LibUtilities::CommSharedPtr vComm
                = m_expList.lock()->GetSession()->GetComm();

            const int nDirDofs = pLocToGloMap->GetNumGlobalDirBndCoeffs();
            const int nHomDofs = glo2uniMap.num_elements() - nDirDofs;
            const int nProc    = vComm->GetSize();
            const int rank     = vComm->GetRank();

            int n, cnt;

            // Count the number of unique degrees of freedom on each process.
            m_nLocal = Vmath::Vsum(nHomDofs, glo2unique + nDirDofs, 1);
            m_reorderedMap.resize(nHomDofs);

            // Reduce coefficient counts across all processors.
            Array<OneD, int> localCounts(nProc, 0), localOffset(nProc, 0);
            localCounts[rank] = nHomDofs;
            vComm->AllReduce(localCounts, LibUtilities::ReduceSum);

            for (n = 1; n < nProc; ++n)
            {
                localOffset[n] = localOffset[n-1] + localCounts[n-1];
            }

            int totHomDofs = Vmath::Vsum(nProc, localCounts, 1);
            vector<unsigned int> allUniIds(totHomDofs, 0);

            // Assemble list of universal IDs
            for (n = 0; n < nHomDofs; ++n)
            {
                int gid = n + nDirDofs;
                allUniIds[n + localOffset[rank]] = glo2uniMap[gid];
            }

            // Reduce this across processors so that each process has a list
            // of all universal IDs.
            vComm->AllReduce(allUniIds, LibUtilities::ReduceSum);
            std::sort(allUniIds.begin(), allUniIds.end());
            map<int,int> uniIdReorder;

            // Renumber starting from 0.
            for (cnt = n = 0; n < allUniIds.size(); ++n)
            {
                if (uniIdReorder.count(allUniIds[n]) > 0)
                {
                    continue;
                }

                uniIdReorder[allUniIds[n]] = cnt++;
            }

            // Populate reordering map.
            for (n = 0; n < nHomDofs; ++n)
            {
                int gid   = n + nDirDofs;
                int uniId = glo2uniMap[gid];
                ASSERTL0(uniIdReorder.count(uniId) > 0, "Error in ordering");
                m_reorderedMap[n] = uniIdReorder[uniId];
            }
        }
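
        // A worked illustration of the reordering above, using hypothetical
        // values: suppose two processes hold homogeneous DOFs whose universal
        // IDs are {12, 7} on rank 0 and {3, 7} on rank 1. The gathered,
        // sorted list is {3, 7, 7, 12}; skipping duplicates gives the
        // contiguous renumbering 3 -> 0, 7 -> 1, 12 -> 2, so m_reorderedMap
        // becomes {2, 1} on rank 0 and {0, 1} on rank 1.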

        /**
         * @brief Construct PETSc matrix and vector handles.
         *
         * @todo Preallocation should be done at this point, since presently
         * matrix allocation takes a significant amount of time.
         *
         * @param nGlobal  Number of global degrees of freedom in the system
         *                 (on this processor).
         * @param nDir     Number of Dirichlet degrees of freedom (on this
         *                 processor).
         */
        void GlobalLinSysPETSc::SetUpMatVec(int nGlobal, int nDir)
        {
            // CREATE VECTORS
            VecCreate        (PETSC_COMM_WORLD, &m_x);
            VecSetSizes      (m_x, m_nLocal, PETSC_DECIDE);
            VecSetFromOptions(m_x);
            VecDuplicate     (m_x, &m_b);

            // CREATE MATRICES
            if (m_matMult == ePETScMatMultShell)
            {
                // Create ShellCtx context objects which will store the matrix
                // size and a pointer to the linear system. We do this so that
                // we can call a member function for the matrix-vector and
                // preconditioning multiplication in a subclass.
                ShellCtx *ctx1 = new ShellCtx(), *ctx2 = new ShellCtx();
                ctx1->nGlobal = ctx2->nGlobal = nGlobal;
                ctx1->nDir    = ctx2->nDir    = nDir;
                ctx1->linSys  = ctx2->linSys  = this;

                // Set up MatShell object.
                MatCreateShell      (PETSC_COMM_WORLD, m_nLocal, m_nLocal,
                                     PETSC_DETERMINE, PETSC_DETERMINE,
                                     (void *)ctx1, &m_matrix);
                MatShellSetOperation(m_matrix, MATOP_MULT,
                                     (void(*)(void))DoMatrixMultiply);
                MatShellSetOperation(m_matrix, MATOP_DESTROY,
                                     (void(*)(void))DoDestroyMatCtx);

                // Create a PCShell to go alongside the MatShell.
                PCCreate(PETSC_COMM_WORLD, &m_pc);
#if PETSC_VERSION_GE(3,5,0)
                PCSetOperators(m_pc, m_matrix, m_matrix);
#else
                PCSetOperators(m_pc, m_matrix, m_matrix, SAME_NONZERO_PATTERN);
#endif
                PCSetType        (m_pc, PCSHELL);
                PCShellSetApply  (m_pc, DoPreconditioner);
                PCShellSetDestroy(m_pc, DoDestroyPCCtx);
                PCShellSetContext(m_pc, ctx2);
            }
            else
            {
                // Otherwise we create a PETSc matrix and use MatSetFromOptions
                // so that we can set various options on the command line.
                MatCreate        (PETSC_COMM_WORLD, &m_matrix);
                MatSetType       (m_matrix, MATAIJ);
                MatSetSizes      (m_matrix, m_nLocal, m_nLocal,
                                  PETSC_DETERMINE, PETSC_DETERMINE);
                MatSetFromOptions(m_matrix);
                MatSetUp         (m_matrix);
            }
        }
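
        // In the non-shell branch above, MatSetFromOptions means the assembled
        // matrix can be tuned at run time with standard PETSc options; for
        // example (illustrative only) "-mat_view" prints the matrix, while
        // "-mat_type aij" selects the default sparse format explicitly.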

        /**
         * @brief Perform either matrix multiplication or preconditioning using
         * Nektar++ routines.
         *
         * This static function uses Nektar++ routines to apply either the
         * system matrix or the preconditioner to @p in, storing the output in
         * @p out.
         *
         * @todo There are a lot of scatters and copies that might possibly be
         * eliminated to make this more efficient.
         *
         * @param in      Input vector.
         * @param out     Output vector.
         * @param ctx     ShellCtx object that points to our instance of
         *                GlobalLinSysPETSc.
         * @param precon  If true, apply the preconditioner; if false, perform
         *                a matrix multiplication.
         */
        void GlobalLinSysPETSc::DoNekppOperation(
            Vec &in, Vec &out, ShellCtx *ctx, bool precon)
        {
            const int          nGlobal  = ctx->nGlobal;
            const int          nDir     = ctx->nDir;
            const int          nHomDofs = nGlobal - nDir;
            GlobalLinSysPETSc *linSys   = ctx->linSys;

            // Scatter from PETSc ordering to our local ordering. It's actually
            // unclear whether this step might also do some communication in
            // parallel, which is probably not ideal.
            VecScatterBegin(linSys->m_ctx, in, linSys->m_locVec,
                            INSERT_VALUES, SCATTER_FORWARD);
            VecScatterEnd  (linSys->m_ctx, in, linSys->m_locVec,
                            INSERT_VALUES, SCATTER_FORWARD);

            // Temporary storage to pass to Nektar++
            Array<OneD, NekDouble> tmpIn(nHomDofs), tmpOut(nHomDofs);

            // Get values from the input vector and copy to tmpIn.
            PetscScalar *tmpLocIn;
            VecGetArray    (linSys->m_locVec, &tmpLocIn);
            Vmath::Vcopy   (nHomDofs, tmpLocIn, 1, &tmpIn[0], 1);
            VecRestoreArray(linSys->m_locVec, &tmpLocIn);

            // Do the matrix multiply in Nektar++, store in tmpOut.
            if (precon)
            {
                linSys->m_precon->DoPreconditioner(tmpIn, tmpOut);
            }
            else
            {
                linSys->v_DoMatrixMultiply(tmpIn, tmpOut);
            }

            // Scatter back to PETSc ordering and put in out.
            VecSetValues(out, nHomDofs, &linSys->m_reorderedMap[0],
                         &tmpOut[0], INSERT_VALUES);
            VecAssemblyBegin(out);
            VecAssemblyEnd  (out);
        }

        /**
         * @brief Perform matrix multiplication using Nektar++ routines.
         *
         * This static function uses Nektar++ routines to calculate the
         * matrix-vector product of @p M with @p in, storing the output in
         * @p out.
         *
         * @param M    Original MatShell matrix, which stores the ShellCtx
         *             object.
         * @param in   Input vector.
         * @param out  Output vector.
         */
        PetscErrorCode GlobalLinSysPETSc::DoMatrixMultiply(
            Mat M, Vec in, Vec out)
        {
            // Grab our shell context from M.
            void *ptr;
            MatShellGetContext(M, &ptr);
            ShellCtx *ctx = (ShellCtx *)ptr;

            DoNekppOperation(in, out, ctx, false);

            // Must return 0, otherwise PETSc complains.
            return 0;
        }
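
        // This callback is what PETSc ends up calling whenever it needs the
        // action of the shell matrix: a MatMult(m_matrix, x, y) issued inside
        // KSPSolve is dispatched here through the MATOP_MULT operation
        // registered in SetUpMatVec above.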

        /**
         * @brief Apply preconditioning using Nektar++ routines.
         *
         * This static function uses Nektar++ routines to apply the
         * preconditioner stored in GlobalLinSysPETSc::m_precon from the
         * context of @p pc to the vector @p in, storing the output in @p out.
         *
         * @param pc   Preconditioner object that stores the ShellCtx.
         * @param in   Input vector.
         * @param out  Output vector.
         */
        PetscErrorCode GlobalLinSysPETSc::DoPreconditioner(
            PC pc, Vec in, Vec out)
        {
            // Grab our PCShell context from pc.
            void *ptr;
            PCShellGetContext(pc, &ptr);
            ShellCtx *ctx = (ShellCtx *)ptr;

            DoNekppOperation(in, out, ctx, true);

            // Must return 0, otherwise PETSc complains.
            return 0;
        }

        /**
         * @brief Destroy matrix shell context object.
         *
         * Note the matrix shell and preconditioner share a common context so
         * this might have already been deallocated below, in which case we do
         * nothing.
         *
         * @param M  Matrix shell object
         */
        PetscErrorCode GlobalLinSysPETSc::DoDestroyMatCtx(Mat M)
        {
            void *ptr;
            MatShellGetContext(M, &ptr);
            ShellCtx *ctx = (ShellCtx *)ptr;
            delete ctx;
            return 0;
        }

        /**
         * @brief Destroy preconditioner context object.
         *
         * Note the matrix shell and preconditioner share a common context so
         * this might have already been deallocated above, in which case we do
         * nothing.
         *
         * @param pc  Preconditioner object
         */
        PetscErrorCode GlobalLinSysPETSc::DoDestroyPCCtx(PC pc)
        {
            void *ptr;
            PCShellGetContext(pc, &ptr);
            ShellCtx *ctx = (ShellCtx *)ptr;
            delete ctx;
            return 0;
        }

        /**
         * @brief Set up the KSP solver object.
         *
         * This is a reasonably generic setup -- most solver types can be
         * changed using the .petscrc file.
         *
         * @param tolerance  Residual tolerance to converge to.
         */
        void GlobalLinSysPETSc::SetUpSolver(NekDouble tolerance)
        {
            KSPCreate(PETSC_COMM_WORLD, &m_ksp);
            KSPSetTolerances(
                m_ksp, tolerance, PETSC_DEFAULT, PETSC_DEFAULT, PETSC_DEFAULT);
            KSPSetFromOptions(m_ksp);
#if PETSC_VERSION_GE(3,5,0)
            KSPSetOperators(m_ksp, m_matrix, m_matrix);
#else
            KSPSetOperators(m_ksp, m_matrix, m_matrix, SAME_NONZERO_PATTERN);
#endif

            if (m_matMult == ePETScMatMultShell)
            {
                KSPSetPC(m_ksp, m_pc);
            }
        }
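
        // As the comment above notes, the solver can be customised through a
        // .petscrc file or the equivalent command-line options; a minimal,
        // purely illustrative example assuming a standard PETSc installation:
        //
        //   -ksp_type gmres
        //   -ksp_monitor
        //   -pc_type jacobi
        //
        // KSPSetFromOptions picks these options up when the solver is set up.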
    }
}