GlobalLinSysStaticCond.cpp
///////////////////////////////////////////////////////////////////////////////
//
// File: GlobalLinSysStaticCond.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: Implementation of a linear solver using single-
// or multi-level static condensation
//
///////////////////////////////////////////////////////////////////////////////

#include <MultiRegions/GlobalLinSysStaticCond.h>

using namespace std;

namespace Nektar
{
    namespace MultiRegions
    {
        /**
         * @class GlobalLinSysStaticCond
         *
         * Solves a linear system using single- or multi-level static
         * condensation.
         */

        /**
         * For a matrix system of the form @f[
         * \left[ \begin{array}{cc}
         * \boldsymbol{A} & \boldsymbol{B}\\
         * \boldsymbol{C} & \boldsymbol{D}
         * \end{array} \right]
         * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
         * \end{array}\right]
         * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
         * \end{array}\right],
         * @f]
         * where @f$\boldsymbol{D}@f$ and
         * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
         * a static condensation system, according to a given local to global
         * mapping. #m_linSys is constructed by AssembleSchurComplement().
         * @param pKey          Associated matrix key.
         * @param pExpList      Expansion list.
         * @param pLocToGloMap  Local to global mapping.
         */
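
        /*
         * A sketch of the reduction implied by the block system above (the
         * block-to-member correspondence is inferred from the names used in
         * v_Solve() and SetupTopLevel(), not stated explicitly elsewhere):
         * eliminating the interior unknowns @f$\boldsymbol{x_2}@f$ gives the
         * condensed boundary problem
         * @f[
         * \left(\boldsymbol{A} -
         *       \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{C}\right)
         * \boldsymbol{x_1} = \boldsymbol{y_1}
         *       - \boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{y_2},
         * @f]
         * after which the interior unknowns follow element-locally from
         * @f[
         * \boldsymbol{x_2} = \boldsymbol{D}^{-1}
         *       \left(\boldsymbol{y_2} - \boldsymbol{C}\boldsymbol{x_1}\right).
         * @f]
         * The members #m_schurCompl, #m_BinvD, #m_C and #m_invD appear to hold
         * @f$\boldsymbol{A}-\boldsymbol{B}\boldsymbol{D}^{-1}\boldsymbol{C}@f$,
         * @f$\boldsymbol{B}\boldsymbol{D}^{-1}@f$, @f$\boldsymbol{C}@f$ and
         * @f$\boldsymbol{D}^{-1}@f$ respectively.
         */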
        GlobalLinSysStaticCond::GlobalLinSysStaticCond(
            const GlobalLinSysKey                &pKey,
            const boost::weak_ptr<ExpList>       &pExpList,
            const boost::shared_ptr<AssemblyMap> &pLocToGloMap)
            : GlobalLinSys(pKey, pExpList, pLocToGloMap),
              m_locToGloMap (pLocToGloMap)
        {
        }

        void GlobalLinSysStaticCond::v_InitObject()
        {
            // Allocate memory for top-level structure
            SetupTopLevel(m_locToGloMap);

            // Construct this level
            Initialise(m_locToGloMap);
        }

        /**
         *
         */
        GlobalLinSysStaticCond::~GlobalLinSysStaticCond()
        {
        }


        /**
         * Solve the linear system for the given input vector @p in and output
         * vector @p out, using the supplied local to global mapping.
         */
        void GlobalLinSysStaticCond::v_Solve(
            const Array<OneD, const NekDouble> &in,
                  Array<OneD,       NekDouble> &out,
            const AssemblyMapSharedPtr         &pLocToGloMap,
            const Array<OneD, const NekDouble> &dirForcing)
        {
            bool dirForcCalculated = (bool) dirForcing.num_elements();
            bool atLastLevel       = pLocToGloMap->AtLastLevel();
            int  scLevel           = pLocToGloMap->GetStaticCondLevel();

            int nGlobDofs          = pLocToGloMap->GetNumGlobalCoeffs();
            int nGlobBndDofs       = pLocToGloMap->GetNumGlobalBndCoeffs();
            int nDirBndDofs        = pLocToGloMap->GetNumGlobalDirBndCoeffs();
            int nGlobHomBndDofs    = nGlobBndDofs - nDirBndDofs;
            int nLocBndDofs        = pLocToGloMap->GetNumLocalBndCoeffs();
            int nIntDofs           = pLocToGloMap->GetNumGlobalCoeffs()
                                         - nGlobBndDofs;

            Array<OneD, NekDouble> F = m_wsp + 2*nLocBndDofs + nGlobHomBndDofs;
            Array<OneD, NekDouble> tmp;

            if(nDirBndDofs && dirForcCalculated)
            {
                Vmath::Vsub(nGlobDofs,in.get(),1,dirForcing.get(),1,F.get(),1);
            }
            else
            {
                Vmath::Vcopy(nGlobDofs,in.get(),1,F.get(),1);
            }

            NekVector<NekDouble> F_HomBnd(nGlobHomBndDofs,tmp=F+nDirBndDofs,
                                          eWrapper);
            NekVector<NekDouble> F_GlobBnd(nGlobBndDofs,F,eWrapper);
            NekVector<NekDouble> F_Int(nIntDofs,tmp=F+nGlobBndDofs,eWrapper);

            NekVector<NekDouble> V_GlobBnd(nGlobBndDofs,out,eWrapper);
            NekVector<NekDouble> V_GlobHomBnd(nGlobHomBndDofs,
                                              tmp=out+nDirBndDofs,
                                              eWrapper);
            NekVector<NekDouble> V_Int(nIntDofs,tmp=out+nGlobBndDofs,eWrapper);
            NekVector<NekDouble> V_LocBnd(nLocBndDofs,m_wsp,eWrapper);

            NekVector<NekDouble> V_GlobHomBndTmp(
                nGlobHomBndDofs,tmp = m_wsp + 2*nLocBndDofs,eWrapper);
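
            // Workspace layout (inferred from the offsets above and the
            // allocation in v_Initialise): m_wsp packs, back to back, the
            // local boundary vector V_LocBnd, a second local-boundary-sized
            // block, the assembled temporary V_GlobHomBndTmp and the forcing
            // F, so the solve performs no per-call allocation for these.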

            // set up normalisation factor for right hand side on first SC level
            DNekScalBlkMatSharedPtr sc = v_PreSolve(scLevel, F_GlobBnd);

            if(nGlobHomBndDofs)
            {
                // construct boundary forcing
                if( nIntDofs  && ((!dirForcCalculated) && (atLastLevel)) )
                {
                    DNekScalBlkMat &BinvD      = *m_BinvD;
                    DNekScalBlkMat &SchurCompl = *sc;

                    // include dirichlet boundary forcing
                    pLocToGloMap->GlobalToLocalBnd(V_GlobBnd,V_LocBnd);
                    V_LocBnd = BinvD*F_Int + SchurCompl*V_LocBnd;
                }
                else if((!dirForcCalculated) && (atLastLevel))
                {
                    // include dirichlet boundary forcing
                    DNekScalBlkMat &SchurCompl = *sc;
                    pLocToGloMap->GlobalToLocalBnd(V_GlobBnd,V_LocBnd);
                    V_LocBnd = SchurCompl*V_LocBnd;
                }
                else
                {
                    DNekScalBlkMat &BinvD = *m_BinvD;
                    DiagonalBlockFullScalMatrixMultiply( V_LocBnd, BinvD, F_Int);
                }

                pLocToGloMap->AssembleBnd(V_LocBnd,V_GlobHomBndTmp,
                                          nDirBndDofs);
                Subtract(F_HomBnd, F_HomBnd, V_GlobHomBndTmp);
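
                // At this point F_HomBnd holds the condensed homogeneous
                // boundary forcing: the interior coupling B D^{-1} y_2 (and,
                // at the last level when dirForcing was not supplied, the
                // contribution of the boundary values already stored in out)
                // has been assembled and subtracted above.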
181 
182  // Transform from original basis to low energy
183  v_BasisTransform(F, nDirBndDofs);
184 
185  // For parallel multi-level static condensation some
186  // processors may have different levels to others. This
187  // routine receives contributions to partition vertices from
188  // those lower levels, whilst not sending anything to the
189  // other partitions, and includes them in the modified right
190  // hand side vector.
191  int lcLevel = pLocToGloMap->GetLowestStaticCondLevel();
192  if(atLastLevel && scLevel < lcLevel)
193  {
194  // If this level is not the lowest level across all
195  // processes, we must do dummy communication for the
196  // remaining levels
197  Array<OneD, NekDouble> tmp(nGlobBndDofs);
198  for (int i = scLevel; i < lcLevel; ++i)
199  {
200  Vmath::Fill(nGlobBndDofs, 0.0, tmp, 1);
201  pLocToGloMap->UniversalAssembleBnd(tmp);
202  Vmath::Vcopy(nGlobHomBndDofs,
203  tmp.get()+nDirBndDofs, 1,
204  V_GlobHomBndTmp.GetPtr().get(), 1);
205  Subtract( F_HomBnd, F_HomBnd, V_GlobHomBndTmp);
206  }
207  }

                // solve boundary system
                if(atLastLevel)
                {
                    Array<OneD, NekDouble> pert(nGlobBndDofs,0.0);

                    // Solve for the difference from the initial solution
                    // already stored in out
                    SolveLinearSystem(
                        nGlobBndDofs, F, pert, pLocToGloMap, nDirBndDofs);

                    // Transform back to original basis
                    v_BasisInvTransform(pert);

                    // Add back initial conditions onto difference
                    Vmath::Vadd(nGlobHomBndDofs,&out[nDirBndDofs],1,
                                &pert[nDirBndDofs],1,&out[nDirBndDofs],1);
                }
                else
                {
                    m_recursiveSchurCompl->Solve(F,
                            V_GlobBnd.GetPtr(),
                            pLocToGloMap->GetNextLevelLocalToGlobalMap());
                }
            }
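
            // With the boundary unknowns x_1 determined above, the block
            // below recovers the interior unknowns patch by patch as
            // x_2 = D^{-1} (y_2 - C x_1) (cf. the sketch near the top of
            // the file).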

            // solve interior system
            if(nIntDofs)
            {
                DNekScalBlkMat &invD = *m_invD;

                if(nGlobHomBndDofs || nDirBndDofs)
                {
                    DNekScalBlkMat &C = *m_C;

                    if(dirForcCalculated && nDirBndDofs)
                    {
                        pLocToGloMap->GlobalToLocalBnd(V_GlobHomBnd,V_LocBnd,
                                                       nDirBndDofs);
                    }
                    else
                    {
                        pLocToGloMap->GlobalToLocalBnd(V_GlobBnd,V_LocBnd);
                    }
                    F_Int = F_Int - C*V_LocBnd;
                }
                Multiply( V_Int, invD, F_Int);
            }
        }


        /**
         * If at the last level of recursion (or the only level in the case of
         * single-level static condensation), assemble the Schur complement.
         * For other levels, in the case of multi-level static condensation,
         * the next level of the condensed system is computed.
         * @param pLocToGloMap Local to global mapping.
         */
        void GlobalLinSysStaticCond::v_Initialise(
            const boost::shared_ptr<AssemblyMap>& pLocToGloMap)
        {
            int nLocalBnd       = m_locToGloMap->GetNumLocalBndCoeffs();
            int nGlobal         = m_locToGloMap->GetNumGlobalCoeffs();
            int nGlobBndDofs    = pLocToGloMap->GetNumGlobalBndCoeffs();
            int nDirBndDofs     = pLocToGloMap->GetNumGlobalDirBndCoeffs();
            int nGlobHomBndDofs = nGlobBndDofs - nDirBndDofs;
            m_wsp = Array<OneD, NekDouble>
                        (2*nLocalBnd + nGlobal + nGlobHomBndDofs, 0.0);

            if (pLocToGloMap->AtLastLevel())
            {
                v_AssembleSchurComplement(pLocToGloMap);
            }
            else
            {
                ConstructNextLevelCondensedSystem(
                    pLocToGloMap->GetNextLevelLocalToGlobalMap());
            }
        }

        int GlobalLinSysStaticCond::v_GetNumBlocks()
        {
            return m_schurCompl->GetNumberOfBlockRows();
        }

        /**
         * For the first level in multi-level static condensation, or the only
         * level in the case of single-level static condensation, allocate the
         * condensed matrices and populate them with the local matrices
         * retrieved from the expansion list.
         * @param pLocToGloMap Local to global mapping.
         */
        void GlobalLinSysStaticCond::SetupTopLevel(
            const boost::shared_ptr<AssemblyMap>& pLocToGloMap)
        {
            int n;
            int n_exp = m_expList.lock()->GetNumElmts();

            const Array<OneD,const unsigned int>& nbdry_size
                    = pLocToGloMap->GetNumLocalBndCoeffsPerPatch();
            const Array<OneD,const unsigned int>& nint_size
                    = pLocToGloMap->GetNumLocalIntCoeffsPerPatch();

            // Setup Block Matrix systems
            MatrixStorage blkmatStorage = eDIAGONAL;
            m_schurCompl = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nbdry_size, nbdry_size, blkmatStorage);
            m_BinvD      = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nbdry_size, nint_size , blkmatStorage);
            m_C          = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nint_size , nbdry_size, blkmatStorage);
            m_invD       = MemoryManager<DNekScalBlkMat>
                ::AllocateSharedPtr(nint_size , nint_size , blkmatStorage);
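
            // Each element's statically-condensed block matrix (returned by
            // GetStaticCondBlock below) is assumed to be ordered as
            //     [ A - B D^{-1} C    B D^{-1} ]
            //     [       C            D^{-1}  ]
            // so the four SetBlock calls below distribute its sub-blocks into
            // m_schurCompl, m_BinvD, m_C and m_invD respectively.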
            for(n = 0; n < n_exp; ++n)
            {
                if (m_linSysKey.GetMatrixType() ==
                        StdRegions::eHybridDGHelmBndLam)
                {
                    DNekScalMatSharedPtr loc_mat
                        = GetBlock(m_expList.lock()->GetOffset_Elmt_Id(n));
                    m_schurCompl->SetBlock(n,n,loc_mat);
                }
                else
                {
                    DNekScalBlkMatSharedPtr loc_schur
                        = GetStaticCondBlock(
                              m_expList.lock()->GetOffset_Elmt_Id(n));
                    DNekScalMatSharedPtr t;
                    m_schurCompl->SetBlock(n, n, t = loc_schur->GetBlock(0,0));
                    m_BinvD     ->SetBlock(n, n, t = loc_schur->GetBlock(0,1));
                    m_C         ->SetBlock(n, n, t = loc_schur->GetBlock(1,0));
                    m_invD      ->SetBlock(n, n, t = loc_schur->GetBlock(1,1));
                }
            }
        }

        /**
         * Assemble the next level of the multi-level static condensation
         * system from the Schur complement blocks of the current level.
         */
        void GlobalLinSysStaticCond::ConstructNextLevelCondensedSystem(
            const AssemblyMapSharedPtr& pLocToGloMap)
        {
            int i,j,n,cnt;
            DNekScalBlkMatSharedPtr blkMatrices[4];

            // Create temporary matrices within an inner-local scope to ensure
            // any references to the intermediate storage are lost before
            // the recursive step, rather than at the end of the routine.
            // This allows the schur complement matrix from this level to be
            // disposed of in the next level after use without being retained
            // due to lingering shared pointers.
            {

                const Array<OneD,const unsigned int>& nBndDofsPerPatch
                    = pLocToGloMap->GetNumLocalBndCoeffsPerPatch();
                const Array<OneD,const unsigned int>& nIntDofsPerPatch
                    = pLocToGloMap->GetNumLocalIntCoeffsPerPatch();

                // STEP 1:
                // Based upon the schur complement of the current level we
                // will substructure this matrix in the form
                //      --     --
                //      | A   B |
                //      | C   D |
                //      --     --
                // All matrices A,B,C and D are (diagonal) blockmatrices.
                // However, as a start we will use an array of DNekMatrices as
                // it is too hard to change the individual entries of a
                // DNekScalBlkMatSharedPtr.
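                // (Here "boundary" and "interior" refer to the next level's
                // patch decomposition provided by GetPatchMapFromPrevLevel():
                // the dofs of the current level's Schur complement are
                // grouped into patches and, within each patch, split into
                // patch-boundary and patch-interior dofs, giving the
                // per-patch blocks A, B, C and D used below.)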

                // In addition, we will also try to ensure that the memory of
                // the blockmatrices will be contiguous. This will probably
                // enhance the efficiency
                // - Calculate the total number of entries in the blockmatrices
                int nPatches  = pLocToGloMap->GetNumPatches();
                int nEntriesA = 0; int nEntriesB = 0;
                int nEntriesC = 0; int nEntriesD = 0;

                for(i = 0; i < nPatches; i++)
                {
                    nEntriesA += nBndDofsPerPatch[i]*nBndDofsPerPatch[i];
                    nEntriesB += nBndDofsPerPatch[i]*nIntDofsPerPatch[i];
                    nEntriesC += nIntDofsPerPatch[i]*nBndDofsPerPatch[i];
                    nEntriesD += nIntDofsPerPatch[i]*nIntDofsPerPatch[i];
                }

                // Now create the DNekMatrices and link them to the memory
                // allocated above
                Array<OneD, DNekMatSharedPtr> substructuredMat[4]
                    = {Array<OneD, DNekMatSharedPtr>(nPatches),  //Matrix A
                       Array<OneD, DNekMatSharedPtr>(nPatches),  //Matrix B
                       Array<OneD, DNekMatSharedPtr>(nPatches),  //Matrix C
                       Array<OneD, DNekMatSharedPtr>(nPatches)}; //Matrix D

                // Initialise storage for the matrices. We do this separately
                // for each matrix so the matrices may be independently
                // deallocated when no longer required.
                Array<OneD, NekDouble> storageA(nEntriesA,0.0);
                Array<OneD, NekDouble> storageB(nEntriesB,0.0);
                Array<OneD, NekDouble> storageC(nEntriesC,0.0);
                Array<OneD, NekDouble> storageD(nEntriesD,0.0);

                Array<OneD, NekDouble> tmparray;
                PointerWrapper wType = eWrapper;
                int cntA = 0;
                int cntB = 0;
                int cntC = 0;
                int cntD = 0;

                // Use symmetric storage for invD if possible
                MatrixStorage storageTypeD = eFULL;
                if ( (m_linSysKey.GetMatrixType() == StdRegions::eMass)      ||
                     (m_linSysKey.GetMatrixType() == StdRegions::eLaplacian) ||
                     (m_linSysKey.GetMatrixType() == StdRegions::eHelmholtz))
                {
                    storageTypeD = eSYMMETRIC;
                }

                for(i = 0; i < nPatches; i++)
                {
                    // Matrix A
                    tmparray = storageA+cntA;
                    substructuredMat[0][i] = MemoryManager<DNekMat>
                        ::AllocateSharedPtr(nBndDofsPerPatch[i],
                                            nBndDofsPerPatch[i],
                                            tmparray, wType);
                    // Matrix B
                    tmparray = storageB+cntB;
                    substructuredMat[1][i] = MemoryManager<DNekMat>
                        ::AllocateSharedPtr(nBndDofsPerPatch[i],
                                            nIntDofsPerPatch[i],
                                            tmparray, wType);
                    // Matrix C
                    tmparray = storageC+cntC;
                    substructuredMat[2][i] = MemoryManager<DNekMat>
                        ::AllocateSharedPtr(nIntDofsPerPatch[i],
                                            nBndDofsPerPatch[i],
                                            tmparray, wType);
                    // Matrix D
                    tmparray = storageD+cntD;
                    substructuredMat[3][i] = MemoryManager<DNekMat>
                        ::AllocateSharedPtr(nIntDofsPerPatch[i],
                                            nIntDofsPerPatch[i],
                                            tmparray, wType,
                                            storageTypeD);

                    cntA += nBndDofsPerPatch[i] * nBndDofsPerPatch[i];
                    cntB += nBndDofsPerPatch[i] * nIntDofsPerPatch[i];
                    cntC += nIntDofsPerPatch[i] * nBndDofsPerPatch[i];
                    cntD += nIntDofsPerPatch[i] * nIntDofsPerPatch[i];
                }

                // Then, project SchurComplement onto
                // the substructured matrices of the next level
                DNekScalBlkMatSharedPtr SchurCompl = m_schurCompl;
                DNekScalMatSharedPtr    schurComplSubMat;
                int                     schurComplSubMatnRows;
                Array<OneD, const int>  patchId, dofId;
                Array<OneD, const unsigned int> isBndDof;
                Array<OneD, const NekDouble>    sign;

                for(n = cnt = 0; n < SchurCompl->GetNumberOfBlockRows(); ++n)
                {
                    schurComplSubMat      = SchurCompl->GetBlock(n,n);
                    schurComplSubMatnRows = schurComplSubMat->GetRows();

                    patchId  = pLocToGloMap->GetPatchMapFromPrevLevel()
                                                        ->GetPatchId() + cnt;
                    dofId    = pLocToGloMap->GetPatchMapFromPrevLevel()
                                                        ->GetDofId() + cnt;
                    isBndDof = pLocToGloMap->GetPatchMapFromPrevLevel()
                                                        ->IsBndDof() + cnt;
                    sign     = pLocToGloMap->GetPatchMapFromPrevLevel()
                                                        ->GetSign() + cnt;

                    // Set up Matrix;
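                    // The substructured matrices are addressed through their
                    // raw storage, so entry (dofId[i], dofId[j]) of patch
                    // matrix X lives at X[dofId[i] + dofId[j]*Xrows]
                    // (column-major indexing); each contribution is weighted
                    // by sign[i]*sign[j] from the assembly map.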
                    for(i = 0; i < schurComplSubMatnRows; ++i)
                    {
                        int pId = patchId[i];
                        Array<OneD, NekDouble> subMat0
                            = substructuredMat[0][pId]->GetPtr();
                        Array<OneD, NekDouble> subMat1
                            = substructuredMat[1][patchId[i]]->GetPtr();
                        Array<OneD, NekDouble> subMat2
                            = substructuredMat[2][patchId[i]]->GetPtr();
                        DNekMatSharedPtr subMat3
                            = substructuredMat[3][patchId[i]];
                        int subMat0rows = substructuredMat[0][pId]->GetRows();
                        int subMat1rows = substructuredMat[1][pId]->GetRows();
                        int subMat2rows = substructuredMat[2][pId]->GetRows();

                        if(isBndDof[i])
                        {
                            for(j = 0; j < schurComplSubMatnRows; ++j)
                            {
                                ASSERTL0(patchId[i]==patchId[j],
                                         "These values should be equal");

                                if(isBndDof[j])
                                {
                                    subMat0[dofId[i]+dofId[j]*subMat0rows] +=
                                        sign[i]*sign[j]*
                                        (*schurComplSubMat)(i,j);
                                }
                                else
                                {
                                    subMat1[dofId[i]+dofId[j]*subMat1rows] +=
                                        sign[i]*sign[j]*
                                        (*schurComplSubMat)(i,j);
                                }
                            }
                        }
                        else
                        {
                            for(j = 0; j < schurComplSubMatnRows; ++j)
                            {
                                ASSERTL0(patchId[i]==patchId[j],
                                         "These values should be equal");

                                if(isBndDof[j])
                                {
                                    subMat2[dofId[i]+dofId[j]*subMat2rows] +=
                                        sign[i]*sign[j]*
                                        (*schurComplSubMat)(i,j);
                                }
                                else
                                {
                                    if (storageTypeD == eSYMMETRIC)
                                    {
                                        if (dofId[i] <= dofId[j])
                                        {
                                            (*subMat3)(dofId[i],dofId[j]) +=
                                                sign[i]*sign[j]*
                                                (*schurComplSubMat)(i,j);
                                        }
                                    }
                                    else
                                    {
                                        (*subMat3)(dofId[i],dofId[j]) +=
                                            sign[i]*sign[j]*
                                            (*schurComplSubMat)(i,j);
                                    }
                                }
                            }
                        }
                    }
                    cnt += schurComplSubMatnRows;
                }
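
                // The current level's assembled Schur complement has now been
                // distributed into the per-patch blocks A, B, C and D of the
                // next level.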

                // STEP 2: condense the system
                // This can be done elementally (i.e. patch per patch)
                for(i = 0; i < nPatches; i++)
                {
                    if(nIntDofsPerPatch[i])
                    {
                        Array<OneD, NekDouble> subMat0
                            = substructuredMat[0][i]->GetPtr();
                        Array<OneD, NekDouble> subMat1
                            = substructuredMat[1][i]->GetPtr();
                        Array<OneD, NekDouble> subMat2
                            = substructuredMat[2][i]->GetPtr();
                        int subMat0rows = substructuredMat[0][i]->GetRows();
                        int subMat1rows = substructuredMat[1][i]->GetRows();
                        int subMat2rows = substructuredMat[2][i]->GetRows();
                        int subMat2cols = substructuredMat[2][i]->GetColumns();

                        // 1. D -> InvD
                        substructuredMat[3][i]->Invert();
                        // 2. B -> BInvD
                        (*substructuredMat[1][i]) = (*substructuredMat[1][i])*
                            (*substructuredMat[3][i]);
                        // 3. A -> A - BInvD*C (= schurcomplement)
                        // (*substructuredMat[0][i]) = (*substructuredMat[0][i])-
                        //     (*substructuredMat[1][i])*(*substructuredMat[2][i]);
                        // Note: faster to use blas directly
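                        // The Dgemm call computes, in place,
                        //   subMat0 := subMat0 - subMat1 * subMat2
                        // i.e. A - (B D^{-1}) C, with leading dimensions
                        // subMat1rows, subMat2rows and subMat0rows.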
                        Blas::Dgemm('N','N', subMat1rows, subMat2cols,
                                    subMat2rows, -1.0, &subMat1[0], subMat1rows,
                                    &subMat2[0], subMat2rows, 1.0, &subMat0[0],
                                    subMat0rows);
                    }
                }
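
                // After this loop substructuredMat[0][i] holds the patch
                // Schur complement, substructuredMat[1][i] holds B D^{-1},
                // substructuredMat[2][i] still holds C and
                // substructuredMat[3][i] holds D^{-1}.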

                // STEP 3: fill the block matrices. However, do note that we
                // first have to convert them to a DNekScalMat in order to be
                // compatible with the first level of static condensation
                const Array<OneD,const unsigned int>& nbdry_size
                    = pLocToGloMap->GetNumLocalBndCoeffsPerPatch();
                const Array<OneD,const unsigned int>& nint_size
                    = pLocToGloMap->GetNumLocalIntCoeffsPerPatch();
                MatrixStorage blkmatStorage = eDIAGONAL;

                blkMatrices[0] = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nbdry_size, nbdry_size, blkmatStorage);
                blkMatrices[1] = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nbdry_size, nint_size , blkmatStorage);
                blkMatrices[2] = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nint_size , nbdry_size, blkmatStorage);
                blkMatrices[3] = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nint_size , nint_size , blkmatStorage);

                DNekScalMatSharedPtr tmpscalmat;
                for(i = 0; i < nPatches; i++)
                {
                    for(j = 0; j < 4; j++)
                    {
                        tmpscalmat = MemoryManager<DNekScalMat>
                            ::AllocateSharedPtr(1.0,substructuredMat[j][i]);
                        blkMatrices[j]->SetBlock(i,i,tmpscalmat);
                    }
                }
            }

            // We've finished with the Schur complement matrix passed to this
            // level, so return the memory to the system. The Schur complement
            // matrix need only be retained at the last level. Save the other
            // matrices at this level though.
            m_schurCompl.reset();

            m_recursiveSchurCompl = v_Recurse(
                m_linSysKey, m_expList, blkMatrices[0], blkMatrices[1],
                blkMatrices[2], blkMatrices[3], pLocToGloMap);
        }
    }
}