///////////////////////////////////////////////////////////////////////////////
//
// File: GlobalLinSysIterative.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: GlobalLinSysIterative definition
//
///////////////////////////////////////////////////////////////////////////////

#include <MultiRegions/GlobalLinSysIterative.h>

using namespace std;

namespace Nektar
{
    namespace MultiRegions
    {
        /**
         * @class GlobalLinSysIterative
         *
         * Solves a linear system iteratively using the preconditioned
         * conjugate gradient method, optionally accelerated by A-conjugate
         * projection onto previously computed solutions.
         */

        /// Constructor for an iterative linear solve.
        GlobalLinSysIterative::GlobalLinSysIterative(
                const GlobalLinSysKey                &pKey,
                const boost::weak_ptr<ExpList>       &pExpList,
                const boost::shared_ptr<AssemblyMap> &pLocToGloMap)
            : GlobalLinSys(pKey, pExpList, pLocToGloMap),
              m_rhs_magnitude(NekConstants::kNekUnsetDouble),
              m_rhs_mag_sm(0.9),
              m_precon(NullPreconditionerSharedPtr),
              m_totalIterations(0),
              m_useProjection(false),
              m_numPrevSols(0)
        {
            LibUtilities::SessionReaderSharedPtr vSession
                    = pExpList.lock()->GetSession();
            m_tolerance = pLocToGloMap->GetIterativeTolerance();
            m_maxiter   = pLocToGloMap->GetMaxIterations();

            LibUtilities::CommSharedPtr vComm
                    = m_expList.lock()->GetComm()->GetRowComm();
            m_root    = (vComm->GetRank() == 0);
            m_verbose = vSession->DefinesCmdLineArgument("verbose");

            int successiveRHS;

            if ((successiveRHS = pLocToGloMap->GetSuccessiveRHS()))
            {
                m_prevLinSol.set_capacity(successiveRHS);
                m_useProjection = true;
            }
            else
            {
                m_useProjection = false;
            }
        }

        GlobalLinSysIterative::~GlobalLinSysIterative()
        {
        }

        /**
         * Solves the linear system: applies the A-conjugate projection
         * technique if successive-RHS projection is enabled, otherwise
         * plain preconditioned conjugate gradient.
         */
        void GlobalLinSysIterative::v_SolveLinearSystem(
                const int                          nGlobal,
                const Array<OneD,const NekDouble> &pInput,
                      Array<OneD,      NekDouble> &pOutput,
                const AssemblyMapSharedPtr        &plocToGloMap,
                const int                          nDir)
        {
            if (m_useProjection)
            {
                DoAconjugateProjection(nGlobal, pInput, pOutput,
                                       plocToGloMap, nDir);
            }
            else
            {
                // applying plain Conjugate Gradient
                DoConjugateGradient(nGlobal, pInput, pOutput,
                                    plocToGloMap, nDir);
            }
        }


        /**
         * This method implements the A-conjugate projection technique in
         * order to speed up successive linear solves with right-hand sides
         * arising from time-dependent discretisations.
         * (P. F. Fischer, Comput. Methods Appl. Mech. Engrg. 163, 1998)
         */
        void GlobalLinSysIterative::DoAconjugateProjection(
                const int                          nGlobal,
                const Array<OneD,const NekDouble> &pInput,
                      Array<OneD,      NekDouble> &pOutput,
                const AssemblyMapSharedPtr        &plocToGloMap,
                const int                          nDir)
        {
            // Get the communicator for performing data exchanges
            LibUtilities::CommSharedPtr vComm
                    = m_expList.lock()->GetComm()->GetRowComm();

            // Get vector sizes
            int nNonDir = nGlobal - nDir;
            Array<OneD, NekDouble> tmp;

            if (0 == m_numPrevSols)
            {
                // no previous solutions found, call CG
                DoConjugateGradient(nGlobal, pInput, pOutput,
                                    plocToGloMap, nDir);

                UpdateKnownSolutions(nGlobal, pOutput, nDir);
            }
            else
            {
                // Create NekVector wrappers for linear algebra operations
                NekVector<NekDouble> b (nNonDir, pInput + nDir, eWrapper);
                NekVector<NekDouble> x (nNonDir, tmp = pOutput + nDir,
                                        eWrapper);

                // check the input vector (rhs) is not zero
                NekDouble rhsNorm = Vmath::Dot2(nNonDir,
                                                pInput + nDir,
                                                pInput + nDir,
                                                m_map  + nDir);

                vComm->AllReduce(rhsNorm, Nektar::LibUtilities::ReduceSum);

                if (rhsNorm < NekConstants::kNekZeroTol)
                {
                    Array<OneD, NekDouble> tmp = pOutput + nDir;
                    Vmath::Zero(nNonDir, tmp, 1);
                    return;
                }

                // Allocate array storage
                Array<OneD, NekDouble> px_s    (nGlobal, 0.0);
                Array<OneD, NekDouble> pb_s    (nGlobal, 0.0);
                Array<OneD, NekDouble> tmpAx_s (nGlobal, 0.0);
                Array<OneD, NekDouble> tmpx_s  (nGlobal, 0.0);

                NekVector<NekDouble> pb    (nNonDir, tmp = pb_s    + nDir,
                                            eWrapper);
                NekVector<NekDouble> px    (nNonDir, tmp = px_s    + nDir,
                                            eWrapper);
                NekVector<NekDouble> tmpAx (nNonDir, tmp = tmpAx_s + nDir,
                                            eWrapper);
                NekVector<NekDouble> tmpx  (nNonDir, tmp = tmpx_s  + nDir,
                                            eWrapper);

                // notation follows the paper cited:
                //   \alpha_i = \tilde{x}_i^T b^n
                //   projected x:  px = \sum_i \alpha_i \tilde{x}_i
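                //
                // Because the stored solutions \tilde{x}_i are kept
                // A-orthonormal (see UpdateKnownSolutions), the coefficients
                // \alpha_i = \tilde{x}_i^T b give the A-orthogonal projection
                // of the unknown solution onto span{\tilde{x}_i}; CG then
                // only has to resolve the remainder x - px, which typically
                // converges in far fewer iterations.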

                Array<OneD, NekDouble> alpha (m_prevLinSol.size(), 0.0);
                for (int i = 0; i < m_prevLinSol.size(); i++)
                {
                    alpha[i] = Vmath::Dot2(nNonDir,
                                           m_prevLinSol[i],
                                           pInput + nDir,
                                           m_map  + nDir);
                }
                vComm->AllReduce(alpha, Nektar::LibUtilities::ReduceSum);

                for (int i = 0; i < m_prevLinSol.size(); i++)
                {
                    if (alpha[i] < NekConstants::kNekZeroTol)
                    {
                        continue;
                    }

                    NekVector<NekDouble> xi (nNonDir, m_prevLinSol[i],
                                             eWrapper);
                    px += alpha[i] * xi;
                }

                // pb = b^n - A px
                Vmath::Vcopy(nNonDir,
                             pInput.get() + nDir, 1,
                             pb_s.get()   + nDir, 1);

                v_DoMatrixMultiply(px_s, tmpAx_s);

                pb -= tmpAx;

                // solve the system with projected rhs
                DoConjugateGradient(nGlobal, pb_s, tmpx_s, plocToGloMap,
                                    nDir);

                // remainder solution + projection of previous solutions
                x = tmpx + px;

                // save the auxiliary solution to prev. known solutions
                UpdateKnownSolutions(nGlobal, tmpx_s, nDir);
            }
        }


        /**
         * Calculates the A-norm of an input vector:
         *     A-norm(x) := sqrt( <x, Ax> )
         */
        NekDouble GlobalLinSysIterative::CalculateAnorm(
                const int                          nGlobal,
                const Array<OneD,const NekDouble> &in,
                const int                          nDir)
        {
            // Get the communicator for performing data exchanges
            LibUtilities::CommSharedPtr vComm
                    = m_expList.lock()->GetComm()->GetRowComm();

            // Get vector sizes
            int nNonDir = nGlobal - nDir;

            // Allocate array storage
            Array<OneD, NekDouble> tmpAx_s (nGlobal, 0.0);

            v_DoMatrixMultiply(in, tmpAx_s);

            NekDouble anorm_sq = Vmath::Dot2(nNonDir,
                                             in      + nDir,
                                             tmpAx_s + nDir,
                                             m_map   + nDir);
            vComm->AllReduce(anorm_sq, Nektar::LibUtilities::ReduceSum);
            return std::sqrt(anorm_sq);
        }

        /**
         * Updates the storage of previously known solutions.
         * Performs normalisation of the input vector with respect to the
         * A-norm.
         */
        void GlobalLinSysIterative::UpdateKnownSolutions(
                const int                          nGlobal,
                const Array<OneD,const NekDouble> &newX,
                const int                          nDir)
        {
            // Get vector sizes
            int nNonDir = nGlobal - nDir;

            // Get the communicator for performing data exchanges
            LibUtilities::CommSharedPtr vComm
                    = m_expList.lock()->GetComm()->GetRowComm();

            // Check the solution is non-zero
            NekDouble solNorm = Vmath::Dot2(nNonDir,
                                            newX  + nDir,
                                            newX  + nDir,
                                            m_map + nDir);
            vComm->AllReduce(solNorm, Nektar::LibUtilities::ReduceSum);

            if (solNorm < NekConstants::kNekZeroTol)
            {
                return;
            }

            // Allocate array storage
            Array<OneD, NekDouble> tmpAx_s (nGlobal, 0.0);
            Array<OneD, NekDouble> px_s    (nGlobal, 0.0);
            Array<OneD, NekDouble> tmp1, tmp2;

            // Create NekVector wrappers for linear algebra operations
            NekVector<NekDouble> px    (nNonDir, tmp1 = px_s    + nDir,
                                        eWrapper);
            NekVector<NekDouble> tmpAx (nNonDir, tmp2 = tmpAx_s + nDir,
                                        eWrapper);

            // calculating \tilde{x} - \sum_i \alpha_i \tilde{x}_i
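            // This is a Gram-Schmidt step: the new solution is copied into
            // px and A-orthogonalised against the stored basis using the
            // coefficients \alpha_i = \tilde{x}_i^T A x computed below.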
            Vmath::Vcopy(nNonDir,
                         tmp1 = newX + nDir, 1,
                         tmp2 = px_s + nDir, 1);

            if (m_prevLinSol.size() > 0)
            {
                v_DoMatrixMultiply(newX, tmpAx_s);
            }

            Array<OneD, NekDouble> alpha (m_prevLinSol.size(), 0.0);
            for (int i = 0; i < m_prevLinSol.size(); i++)
            {
                alpha[i] = Vmath::Dot2(nNonDir,
                                       m_prevLinSol[i],
                                       tmpAx_s + nDir,
                                       m_map   + nDir);
            }
            vComm->AllReduce(alpha, Nektar::LibUtilities::ReduceSum);

            for (int i = 0; i < m_prevLinSol.size(); i++)
            {
                if (alpha[i] < NekConstants::kNekZeroTol)
                {
                    continue;
                }

                NekVector<NekDouble> xi (nNonDir, m_prevLinSol[i], eWrapper);
                px -= alpha[i] * xi;
            }

            // Some solutions generated by CG are identically zero: see the
            // solutions generated for Test_Tet_equitri.xml
            // (IncNavierStokesSolver). Identically zero solutions are not
            // stored.
            NekDouble anorm = CalculateAnorm(nGlobal, px_s, nDir);
            if (anorm < NekConstants::kNekZeroTol)
            {
                return;
            }

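            // Normalising by the A-norm keeps the stored basis
            // A-orthonormal, so the projection coefficients in
            // DoAconjugateProjection reduce to plain dot products with the
            // right-hand side.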
            // normalisation of new solution
            Vmath::Smul(nNonDir, 1.0/anorm,
                        px_s.get() + nDir, 1,
                        px_s.get() + nDir, 1);

            // updating storage with non-Dirichlet-dof part of new solution
            // vector
            m_prevLinSol.push_back(px_s + nDir);
            m_numPrevSols++;
        }


        /**
         * Solve a global linear system using the conjugate gradient method.
         * We solve only for the non-Dirichlet modes. The operator is
         * evaluated using an auxiliary function v_DoMatrixMultiply defined
         * by the specific solver. Distributed math routines are used to
         * support parallel execution of the solver.
         *
         * The implemented algorithm uses a reduced-communication reordering
         * of the standard PCG method (Demmel, Heath and van der Vorst,
         * 1993).
         *
         * @param  pInput   Input residual of all DOFs.
         * @param  pOutput  Solution vector of all DOFs.
         */
        void GlobalLinSysIterative::DoConjugateGradient(
                const int                          nGlobal,
                const Array<OneD,const NekDouble> &pInput,
                      Array<OneD,      NekDouble> &pOutput,
                const AssemblyMapSharedPtr        &plocToGloMap,
                const int                          nDir)
        {
            if (!m_precon)
            {
                v_UniqueMap();
                m_precon = CreatePrecon(plocToGloMap);
                m_precon->BuildPreconditioner();
            }

            // Get the communicator for performing data exchanges
            LibUtilities::CommSharedPtr vComm
                    = m_expList.lock()->GetComm()->GetRowComm();

            // Get vector sizes
            int nNonDir = nGlobal - nDir;

            // Allocate array storage
            Array<OneD, NekDouble> w_A (nGlobal, 0.0);
            Array<OneD, NekDouble> s_A (nGlobal, 0.0);
            Array<OneD, NekDouble> p_A (nNonDir, 0.0);
            Array<OneD, NekDouble> r_A (nNonDir, 0.0);
            Array<OneD, NekDouble> q_A (nNonDir, 0.0);
            Array<OneD, NekDouble> tmp;

            // Create NekVector wrappers for linear algebra operations
            NekVector<NekDouble> in (nNonDir, pInput + nDir,        eWrapper);
            NekVector<NekDouble> out(nNonDir, tmp = pOutput + nDir, eWrapper);
            NekVector<NekDouble> w  (nNonDir, tmp = w_A + nDir,     eWrapper);
            NekVector<NekDouble> s  (nNonDir, tmp = s_A + nDir,     eWrapper);
            NekVector<NekDouble> p  (nNonDir, p_A,                  eWrapper);
            NekVector<NekDouble> r  (nNonDir, r_A,                  eWrapper);
            NekVector<NekDouble> q  (nNonDir, q_A,                  eWrapper);

            int k;
            NekDouble alpha, beta, rho, rho_new, mu, eps, min_resid;
            Array<OneD, NekDouble> vExchange(3, 0.0);

            // Copy initial residual from input
            r = in;
            // zero homogeneous out array ready for solution updates
            // Should not be earlier in case input vector is same as
            // output and above copy has been performed
            Vmath::Zero(nNonDir, tmp = pOutput + nDir, 1);

            // evaluate initial residual error for exit check
            vExchange[2] = Vmath::Dot2(nNonDir,
                                       r_A,
                                       r_A,
                                       m_map + nDir);

            vComm->AllReduce(vExchange, Nektar::LibUtilities::ReduceSum);

            eps = vExchange[2];
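
            // eps holds the global squared 2-norm of the initial residual.
            // Convergence is tested in the relative sense,
            // ||r||^2 < tolerance^2 * ||b||^2, with ||b||^2 provided by
            // m_rhs_magnitude.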

            if (m_rhs_magnitude == NekConstants::kNekUnsetDouble)
            {
                NekVector<NekDouble> inGlob (nGlobal, pInput, eWrapper);
                Set_Rhs_Magnitude(inGlob);
            }

            // If input residual is less than tolerance skip solve.
            if (eps < m_tolerance * m_tolerance * m_rhs_magnitude)
            {
                if (m_verbose && m_root)
                {
                    cout << "CG iterations made = " << m_totalIterations
                         << " using tolerance of " << m_tolerance
                         << " (error = " << sqrt(eps/m_rhs_magnitude)
                         << ", rhs_mag = " << sqrt(m_rhs_magnitude) << ")"
                         << endl;
                }
                return;
            }

            m_totalIterations = 1;
            m_precon->DoPreconditioner(r_A, tmp = w_A + nDir);

            v_DoMatrixMultiply(w_A, s_A);

            k = 0;

            vExchange[0] = Vmath::Dot2(nNonDir,
                                       r_A,
                                       w_A + nDir,
                                       m_map + nDir);

            vExchange[1] = Vmath::Dot2(nNonDir,
                                       s_A + nDir,
                                       w_A + nDir,
                                       m_map + nDir);

            vComm->AllReduce(vExchange, Nektar::LibUtilities::ReduceSum);

            rho       = vExchange[0];
            mu        = vExchange[1];
            min_resid = m_rhs_magnitude;
            beta      = 0.0;
            alpha     = rho/mu;

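            // Iterate using the reordered PCG recurrences: each pass needs
            // one matrix-vector product and one preconditioner application,
            // and the three inner products <r,w>, <s,w> and <r,r> are fused
            // into a single AllReduce, reducing communication relative to
            // the textbook PCG loop.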
            // Continue until convergence
            while (true)
            {
                if (k >= m_maxiter)
                {
                    if (m_root)
                    {
                        cout << "CG iterations made = " << m_totalIterations
                             << " using tolerance of " << m_tolerance
                             << " (error = " << sqrt(eps/m_rhs_magnitude)
                             << ", rhs_mag = " << sqrt(m_rhs_magnitude) << ")"
                             << endl;
                    }
                    // Abort on every rank, not only the root, so that
                    // parallel runs do not hang in a later AllReduce.
                    ASSERTL0(false,
                             "Exceeded maximum number of iterations");
                }

                // Compute new search direction p_k, q_k
                Vmath::Svtvp(nNonDir, beta, &p_A[0], 1, &w_A[nDir], 1,
                             &p_A[0], 1);
                Vmath::Svtvp(nNonDir, beta, &q_A[0], 1, &s_A[nDir], 1,
                             &q_A[0], 1);

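                // q = A p is maintained by the same recurrence as p
                // (q_{k+1} = s_{k+1} + beta * q_k), so the residual update
                // below requires no additional matrix-vector product.
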
                // Update solution x_{k+1}
                Vmath::Svtvp(nNonDir, alpha, &p_A[0], 1, &pOutput[nDir], 1,
                             &pOutput[nDir], 1);

                // Update residual vector r_{k+1}
                Vmath::Svtvp(nNonDir, -alpha, &q_A[0], 1, &r_A[0], 1,
                             &r_A[0], 1);

                // Apply preconditioner
                m_precon->DoPreconditioner(r_A, tmp = w_A + nDir);

                // Perform the method-specific matrix-vector multiply
                // operation.
                v_DoMatrixMultiply(w_A, s_A);

                // <r_{k+1}, w_{k+1}>
                vExchange[0] = Vmath::Dot2(nNonDir,
                                           r_A,
                                           w_A + nDir,
                                           m_map + nDir);
                // <s_{k+1}, w_{k+1}>
                vExchange[1] = Vmath::Dot2(nNonDir,
                                           s_A + nDir,
                                           w_A + nDir,
                                           m_map + nDir);

                // <r_{k+1}, r_{k+1}>
                vExchange[2] = Vmath::Dot2(nNonDir,
                                           r_A,
                                           r_A,
                                           m_map + nDir);

                // Perform inner-product exchanges
                vComm->AllReduce(vExchange, Nektar::LibUtilities::ReduceSum);

                rho_new = vExchange[0];
                mu      = vExchange[1];
                eps     = vExchange[2];

                m_totalIterations++;
                // test if norm is within tolerance
                if (eps < m_tolerance * m_tolerance * m_rhs_magnitude)
                {
                    if (m_verbose && m_root)
                    {
                        cout << "CG iterations made = " << m_totalIterations
                             << " using tolerance of " << m_tolerance
                             << " (error = " << sqrt(eps/m_rhs_magnitude)
                             << ", rhs_mag = " << sqrt(m_rhs_magnitude) << ")"
                             << endl;
                    }
                    break;
                }
                min_resid = min(min_resid, eps);

                // Compute search direction and solution coefficients
                beta  = rho_new/rho;
                alpha = rho_new/(mu - rho_new*beta/alpha);
                rho   = rho_new;
                k++;
            }
        }

        void GlobalLinSysIterative::Set_Rhs_Magnitude(
            const NekVector<NekDouble> &pIn)
        {
            Array<OneD, NekDouble> vExchange(1);
            vExchange[0] = Vmath::Dot(pIn.GetDimension(), &pIn[0], &pIn[0]);

            m_expList.lock()->GetComm()->GetRowComm()->AllReduce(
                vExchange, Nektar::LibUtilities::ReduceSum);

            // To ensure that very different rhs values are not being
            // used in subsequent solvers, such as the velocity solve in
            // incompressible Navier-Stokes. If this works we then need to
            // work out a better way to control this.
            NekDouble new_rhs_mag = (vExchange[0] > 1e-6) ? vExchange[0] : 1.0;
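
            // The stored magnitude is smoothed with an exponential moving
            // average (factor m_rhs_mag_sm = 0.9) so that an abrupt change
            // in the right-hand side between successive solves does not
            // distort the relative stopping criterion.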

            if (m_rhs_magnitude == NekConstants::kNekUnsetDouble)
            {
                m_rhs_magnitude = new_rhs_mag;
            }
            else
            {
                m_rhs_magnitude = (m_rhs_mag_sm * (m_rhs_magnitude) +
                                   (1.0 - m_rhs_mag_sm) * new_rhs_mag);
            }
        }

    }
}