///////////////////////////////////////////////////////////////////////////////
//
// File: GlobalLinSysStaticCond.cpp
//
// For more information, please see: http://www.nektar.info
//
// The MIT License
//
// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
// Department of Aeronautics, Imperial College London (UK), and Scientific
// Computing and Imaging Institute, University of Utah (USA).
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
// Description: Implementation of a linear solver using single-
// or multi-level static condensation
//
///////////////////////////////////////////////////////////////////////////////

#include <MultiRegions/GlobalLinSysStaticCond.h>
#include <LibUtilities/BasicUtils/Vmath.hpp>
#include <LibUtilities/LinearAlgebra/Blas.hpp>

namespace Nektar
{
    namespace MultiRegions
    {
        /**
         * @class GlobalLinSysStaticCond
         *
         * Solves a linear system using single- or multi-level static
         * condensation.
         */

        /**
         * For a matrix system of the form @f[
         * \left[ \begin{array}{cc}
         * \boldsymbol{A} & \boldsymbol{B}\\
         * \boldsymbol{C} & \boldsymbol{D}
         * \end{array} \right]
         * \left[ \begin{array}{c} \boldsymbol{x_1}\\ \boldsymbol{x_2}
         * \end{array}\right]
         * = \left[ \begin{array}{c} \boldsymbol{y_1}\\ \boldsymbol{y_2}
         * \end{array}\right],
         * @f]
         * where @f$\boldsymbol{D}@f$ and
         * @f$(\boldsymbol{A-BD^{-1}C})@f$ are invertible, store and assemble
         * a static condensation system, according to a given local to global
         * mapping. #m_linSys is constructed by AssembleSchurComplement().
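         *
         * The condensed problem actually solved consists of the boundary
         * (Schur complement) system
         * @f[
         * \left(\boldsymbol{A}-\boldsymbol{BD^{-1}C}\right)\boldsymbol{x_1}
         * = \boldsymbol{y_1}-\boldsymbol{BD^{-1}y_2},
         * @f]
         * followed by back-substitution for the interior unknowns,
         * @f[
         * \boldsymbol{x_2} = \boldsymbol{D}^{-1}\left(\boldsymbol{y_2}
         * - \boldsymbol{C}\boldsymbol{x_1}\right).
         * @f]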
         * @param pKey          Associated matrix key.
         * @param pExpList      Expansion list.
         * @param pLocToGloMap  Local to global mapping.
         */
        GlobalLinSysStaticCond::GlobalLinSysStaticCond(
            const GlobalLinSysKey                &pKey,
            const boost::weak_ptr<ExpList>       &pExpList,
            const boost::shared_ptr<AssemblyMap> &pLocToGloMap)
            : GlobalLinSys(pKey, pExpList, pLocToGloMap),
              m_locToGloMap(pLocToGloMap)
        {
        }

        void GlobalLinSysStaticCond::v_InitObject()
        {
            // Allocate memory for top-level structure
            SetupTopLevel(m_locToGloMap);

            // Construct this level
            Initialise(m_locToGloMap);
        }

        /**
         *
         */
        GlobalLinSysStaticCond::~GlobalLinSysStaticCond()
        {
        }

        /**
         *
         */
        void GlobalLinSysStaticCond::v_Solve(
            const Array<OneD, const NekDouble> &in,
                  Array<OneD,       NekDouble> &out,
            const AssemblyMapSharedPtr         &pLocToGloMap,
            const Array<OneD, const NekDouble> &dirForcing)
        {
            bool dirForcCalculated = (bool) dirForcing.num_elements();
            bool atLastLevel       = pLocToGloMap->AtLastLevel();
            int  scLevel           = pLocToGloMap->GetStaticCondLevel();

            int nGlobDofs       = pLocToGloMap->GetNumGlobalCoeffs();
            int nGlobBndDofs    = pLocToGloMap->GetNumGlobalBndCoeffs();
            int nDirBndDofs     = pLocToGloMap->GetNumGlobalDirBndCoeffs();
            int nGlobHomBndDofs = nGlobBndDofs - nDirBndDofs;
            int nLocBndDofs     = pLocToGloMap->GetNumLocalBndCoeffs();
            int nIntDofs        = nGlobDofs - nGlobBndDofs;

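            // Workspace layout (m_wsp is allocated in v_Initialise with
            // 2*nLocBndDofs + nGlobDofs entries): V_LocBnd wraps the first
            // nLocBndDofs entries, F aliases the trailing nGlobDofs entries,
            // and the middle block is presumably scratch for subclasses.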
            Array<OneD, NekDouble> F = m_wsp + 2*nLocBndDofs;
            Array<OneD, NekDouble> tmp;
            if(nDirBndDofs && dirForcCalculated)
            {
                Vmath::Vsub(nGlobDofs,in.get(),1,dirForcing.get(),1,F.get(),1);
            }
            else
            {
                Vmath::Vcopy(nGlobDofs,in.get(),1,F.get(),1);
            }

            NekVector<NekDouble> F_HomBnd(nGlobHomBndDofs,tmp=F+nDirBndDofs,
                                          eWrapper);
            NekVector<NekDouble> F_GlobBnd(nGlobBndDofs,F,eWrapper);
            NekVector<NekDouble> F_LocBnd(nLocBndDofs,0.0);
            NekVector<NekDouble> F_Int(nIntDofs,tmp=F+nGlobBndDofs,eWrapper);

            NekVector<NekDouble> V_GlobBnd(nGlobBndDofs,out,eWrapper);
            NekVector<NekDouble> V_GlobHomBnd(nGlobHomBndDofs,
                                              tmp=out+nDirBndDofs,
                                              eWrapper);
            NekVector<NekDouble> V_Int(nIntDofs,tmp=out+nGlobBndDofs,eWrapper);
            NekVector<NekDouble> V_LocBnd(nLocBndDofs,m_wsp,eWrapper);

            NekVector<NekDouble> V_GlobHomBndTmp(nGlobHomBndDofs,0.0);

            // Set up normalisation factor for right hand side on first SC
            // level
            DNekScalBlkMatSharedPtr sc = v_PreSolve(scLevel, F_GlobBnd);

            if(nGlobHomBndDofs)
            {
                // construct boundary forcing
                if( nIntDofs  && ((!dirForcCalculated) && (atLastLevel)) )
                {
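                    // Both the interior forcing and the current boundary
                    // state contribute to the boundary forcing: form
                    // B*D^{-1}*y_2 + S*x_0 locally, where x_0 is the boundary
                    // state held in 'out' (Dirichlet values plus initial
                    // guess); this is assembled and subtracted from F_HomBnd
                    // below.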
                    DNekScalBlkMat &BinvD      = *m_BinvD;
                    DNekScalBlkMat &SchurCompl = *sc;

                    // include Dirichlet boundary forcing
                    pLocToGloMap->GlobalToLocalBnd(V_GlobBnd,V_LocBnd);
                    V_LocBnd = BinvD*F_Int + SchurCompl*V_LocBnd;
                }
                else if((!dirForcCalculated) && (atLastLevel))
                {
                    // include Dirichlet boundary forcing
                    DNekScalBlkMat &SchurCompl = *sc;
                    pLocToGloMap->GlobalToLocalBnd(V_GlobBnd,V_LocBnd);
                    V_LocBnd = SchurCompl*V_LocBnd;
                }
                else
                {
                    DNekScalBlkMat &BinvD = *m_BinvD;
                    V_LocBnd = BinvD*F_Int;
                }

                pLocToGloMap->AssembleBnd(V_LocBnd,V_GlobHomBndTmp,
                                          nDirBndDofs);
                F_HomBnd = F_HomBnd - V_GlobHomBndTmp;

                // Transform from original basis to low energy
                v_BasisTransform(F, nDirBndDofs);

                // For parallel multi-level static condensation some
                // processors may have different levels to others. This
                // routine receives contributions to partition vertices from
                // those lower levels, whilst not sending anything to the
                // other partitions, and includes them in the modified right
                // hand side vector.
                int lcLevel = pLocToGloMap->GetLowestStaticCondLevel();
                if(atLastLevel && scLevel < lcLevel)
                {
                    // If this level is not the lowest level across all
                    // processes, we must do dummy communication for the
                    // remaining levels
                    Array<OneD, NekDouble> tmp(nGlobBndDofs);
                    for (int i = scLevel; i < lcLevel; ++i)
                    {
                        Vmath::Fill(nGlobBndDofs, 0.0, tmp, 1);
                        pLocToGloMap->UniversalAssembleBnd(tmp);
                        Vmath::Vcopy(nGlobHomBndDofs,
                                     tmp.get()+nDirBndDofs,          1,
                                     V_GlobHomBndTmp.GetPtr().get(), 1);
                        F_HomBnd = F_HomBnd - V_GlobHomBndTmp;
                    }
                }

                // solve boundary system
                if(atLastLevel)
                {
                    Array<OneD, NekDouble> pert(nGlobBndDofs,0.0);
                    NekVector<NekDouble>   Pert(nGlobBndDofs,pert,eWrapper);

                    // Solve for the difference from the initial solution
                    // given in 'out'
                    SolveLinearSystem(
                        nGlobBndDofs, F, pert, pLocToGloMap, nDirBndDofs);

                    // Transform back to original basis
                    v_BasisInvTransform(pert);

                    // Add back initial conditions onto difference
                    Vmath::Vadd(nGlobHomBndDofs,&out[nDirBndDofs],1,
                                &pert[nDirBndDofs],1,&out[nDirBndDofs],1);
                }
                else
                {
                    m_recursiveSchurCompl->Solve(F,
                                V_GlobBnd.GetPtr(),
                                pLocToGloMap->GetNextLevelLocalToGlobalMap());
                }
            }

            // solve interior system
            if(nIntDofs)
            {
                DNekScalBlkMat &invD = *m_invD;

                if(nGlobHomBndDofs || nDirBndDofs)
                {
                    DNekScalBlkMat &C = *m_C;

                    if(dirForcCalculated && nDirBndDofs)
                    {
                        pLocToGloMap->GlobalToLocalBnd(V_GlobHomBnd,V_LocBnd,
                                                       nDirBndDofs);
                    }
                    else
                    {
                        pLocToGloMap->GlobalToLocalBnd(V_GlobBnd,V_LocBnd);
                    }
                    F_Int = F_Int - C*V_LocBnd;
                }

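                // Back-substitute for the interior unknowns:
                // x_2 = D^{-1} (y_2 - C x_1)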
                V_Int = invD*F_Int;
            }
        }


        /**
         * If at the last level of recursion (or the only level in the case
         * of single-level static condensation), assemble the Schur
         * complement. For other levels of multi-level static condensation,
         * construct the next level of the condensed system.
         * @param pLocToGloMap Local to global mapping.
         */
        void GlobalLinSysStaticCond::v_Initialise(
            const boost::shared_ptr<AssemblyMap>& pLocToGloMap)
        {
            int nLocalBnd = m_locToGloMap->GetNumLocalBndCoeffs();
            int nGlobal   = m_locToGloMap->GetNumGlobalCoeffs();
            m_wsp = Array<OneD, NekDouble>(2*nLocalBnd + nGlobal);

            if (pLocToGloMap->AtLastLevel())
            {
                v_AssembleSchurComplement(pLocToGloMap);
            }
            else
            {
                ConstructNextLevelCondensedSystem(
                    pLocToGloMap->GetNextLevelLocalToGlobalMap());
            }
        }

        int GlobalLinSysStaticCond::v_GetNumBlocks()
        {
            return m_schurCompl->GetNumberOfBlockRows();
        }

        /**
         * For the first level in multi-level static condensation, or the
         * only level in the case of single-level static condensation,
         * allocate the condensed matrices and populate them with the local
         * matrices retrieved from the expansion list.
         * @param pLocToGloMap Local to global mapping.
         */
        void GlobalLinSysStaticCond::SetupTopLevel(
            const boost::shared_ptr<AssemblyMap>& pLocToGloMap)
        {
            int n;
            int n_exp = m_expList.lock()->GetNumElmts();

            const Array<OneD,const unsigned int>& nbdry_size
                    = pLocToGloMap->GetNumLocalBndCoeffsPerPatch();
            const Array<OneD,const unsigned int>& nint_size
                    = pLocToGloMap->GetNumLocalIntCoeffsPerPatch();

            // Setup Block Matrix systems
            MatrixStorage blkmatStorage = eDIAGONAL;
            m_schurCompl = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nbdry_size, nbdry_size, blkmatStorage);
            m_BinvD      = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nbdry_size, nint_size , blkmatStorage);
            m_C          = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nint_size , nbdry_size, blkmatStorage);
            m_invD       = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nint_size , nint_size , blkmatStorage);
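
            // Each diagonal block stores the statically condensed operators
            // of one element: the elemental Schur complement A - B D^{-1} C
            // in m_schurCompl, together with B D^{-1} (m_BinvD), C (m_C) and
            // D^{-1} (m_invD).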

            for(n = 0; n < n_exp; ++n)
            {
                if (m_linSysKey.GetMatrixType() ==
                        StdRegions::eHybridDGHelmBndLam)
                {
                    DNekScalMatSharedPtr loc_mat
                        = GetBlock(m_expList.lock()->GetOffset_Elmt_Id(n));
                    m_schurCompl->SetBlock(n,n,loc_mat);
                }
                else
                {
                    DNekScalBlkMatSharedPtr loc_schur
                        = GetStaticCondBlock(
                                m_expList.lock()->GetOffset_Elmt_Id(n));
                    DNekScalMatSharedPtr t;
                    m_schurCompl->SetBlock(n, n, t = loc_schur->GetBlock(0,0));
                    m_BinvD     ->SetBlock(n, n, t = loc_schur->GetBlock(0,1));
                    m_C         ->SetBlock(n, n, t = loc_schur->GetBlock(1,0));
                    m_invD      ->SetBlock(n, n, t = loc_schur->GetBlock(1,1));
                }
            }
        }

        /**
         *
         */
        void GlobalLinSysStaticCond::ConstructNextLevelCondensedSystem(
            const AssemblyMapSharedPtr& pLocToGloMap)
        {
            int i,j,n,cnt;
            DNekScalBlkMatSharedPtr blkMatrices[4];

            // Create temporary matrices within an inner-local scope to
            // ensure any references to the intermediate storage are lost
            // before the recursive step, rather than at the end of the
            // routine. This allows the Schur complement matrix from this
            // level to be disposed of in the next level after use, without
            // being retained due to lingering shared pointers.
            {

                const Array<OneD,const unsigned int>& nBndDofsPerPatch
                    = pLocToGloMap->GetNumLocalBndCoeffsPerPatch();
                const Array<OneD,const unsigned int>& nIntDofsPerPatch
                    = pLocToGloMap->GetNumLocalIntCoeffsPerPatch();

                // STEP 1:
                // Based upon the Schur complement of the current level, we
                // will substructure this matrix in the form
                //      --      --
                //      |  A  B  |
                //      |  C  D  |
                //      --      --
                // All matrices A, B, C and D are (diagonal) block matrices.
                // However, as a start we will use an array of DNekMatrices,
                // as it is too hard to change the individual entries of a
                // DNekScalBlkMatSharedPtr.

                // In addition, we will try to ensure that the memory of the
                // block matrices is contiguous, which should enhance
                // efficiency.
                // - Calculate the total number of entries in the block
                //   matrices
                int nPatches  = pLocToGloMap->GetNumPatches();
                int nEntriesA = 0; int nEntriesB = 0;
                int nEntriesC = 0; int nEntriesD = 0;

                for(i = 0; i < nPatches; i++)
                {
                    nEntriesA += nBndDofsPerPatch[i]*nBndDofsPerPatch[i];
                    nEntriesB += nBndDofsPerPatch[i]*nIntDofsPerPatch[i];
                    nEntriesC += nIntDofsPerPatch[i]*nBndDofsPerPatch[i];
                    nEntriesD += nIntDofsPerPatch[i]*nIntDofsPerPatch[i];
                }

                // Now create the DNekMatrices and link them to the memory
                // allocated above
                Array<OneD, DNekMatSharedPtr> substructuredMat[4]
                    = {Array<OneD, DNekMatSharedPtr>(nPatches),  // Matrix A
                       Array<OneD, DNekMatSharedPtr>(nPatches),  // Matrix B
                       Array<OneD, DNekMatSharedPtr>(nPatches),  // Matrix C
                       Array<OneD, DNekMatSharedPtr>(nPatches)}; // Matrix D

                // Initialise storage for the matrices. We do this separately
                // for each matrix so the matrices may be independently
                // deallocated when no longer required.
                Array<OneD, NekDouble> storageA(nEntriesA,0.0);
                Array<OneD, NekDouble> storageB(nEntriesB,0.0);
                Array<OneD, NekDouble> storageC(nEntriesC,0.0);
                Array<OneD, NekDouble> storageD(nEntriesD,0.0);

                Array<OneD, NekDouble> tmparray;
                PointerWrapper wType = eWrapper;
                int cntA = 0;
                int cntB = 0;
                int cntC = 0;
                int cntD = 0;

                for(i = 0; i < nPatches; i++)
                {
                    // Matrix A
                    tmparray = storageA+cntA;
                    substructuredMat[0][i] = MemoryManager<DNekMat>
                        ::AllocateSharedPtr(nBndDofsPerPatch[i],
                                            nBndDofsPerPatch[i],
                                            tmparray, wType);
                    // Matrix B
                    tmparray = storageB+cntB;
                    substructuredMat[1][i] = MemoryManager<DNekMat>
                        ::AllocateSharedPtr(nBndDofsPerPatch[i],
                                            nIntDofsPerPatch[i],
                                            tmparray, wType);
                    // Matrix C
                    tmparray = storageC+cntC;
                    substructuredMat[2][i] = MemoryManager<DNekMat>
                        ::AllocateSharedPtr(nIntDofsPerPatch[i],
                                            nBndDofsPerPatch[i],
                                            tmparray, wType);
                    // Matrix D
                    tmparray = storageD+cntD;
                    substructuredMat[3][i] = MemoryManager<DNekMat>
                        ::AllocateSharedPtr(nIntDofsPerPatch[i],
                                            nIntDofsPerPatch[i],
                                            tmparray, wType);

                    cntA += nBndDofsPerPatch[i] * nBndDofsPerPatch[i];
                    cntB += nBndDofsPerPatch[i] * nIntDofsPerPatch[i];
                    cntC += nIntDofsPerPatch[i] * nBndDofsPerPatch[i];
                    cntD += nIntDofsPerPatch[i] * nIntDofsPerPatch[i];
                }

                // Then, project the Schur complement onto the substructured
                // matrices of the next level
                DNekScalBlkMatSharedPtr SchurCompl = m_schurCompl;
                DNekScalMatSharedPtr    schurComplSubMat;
                int                     schurComplSubMatnRows;
                Array<OneD, const int>          patchId, dofId;
                Array<OneD, const unsigned int> isBndDof;
                Array<OneD, const NekDouble>    sign;
                NekDouble               scale;

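                // Scatter each entry of the previous level's Schur complement
                // into A, B, C or D of the next level, according to whether
                // the entry's row and column correspond to boundary or
                // interior degrees of freedom of the enclosing patch.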
                for(n = cnt = 0; n < SchurCompl->GetNumberOfBlockRows(); ++n)
                {
                    schurComplSubMat      = SchurCompl->GetBlock(n,n);
                    schurComplSubMatnRows = schurComplSubMat->GetRows();

                    scale = SchurCompl->GetBlock(n,n)->Scale();
                    Array<OneD, NekDouble> schurSubMat
                        = SchurCompl->GetBlock(n,n)->GetOwnedMatrix()->GetPtr();

                    patchId  = pLocToGloMap->GetPatchMapFromPrevLevel()
                                                        ->GetPatchId() + cnt;
                    dofId    = pLocToGloMap->GetPatchMapFromPrevLevel()
                                                        ->GetDofId()   + cnt;
                    isBndDof = pLocToGloMap->GetPatchMapFromPrevLevel()
                                                        ->IsBndDof()   + cnt;
                    sign     = pLocToGloMap->GetPatchMapFromPrevLevel()
                                                        ->GetSign()    + cnt;

                    // Set up matrix
                    for(i = 0; i < schurComplSubMatnRows; ++i)
                    {
                        int pId = patchId[i];
                        Array<OneD, NekDouble> subMat0
                            = substructuredMat[0][pId]->GetPtr();
                        Array<OneD, NekDouble> subMat1
                            = substructuredMat[1][pId]->GetPtr();
                        Array<OneD, NekDouble> subMat2
                            = substructuredMat[2][pId]->GetPtr();
                        Array<OneD, NekDouble> subMat3
                            = substructuredMat[3][pId]->GetPtr();
                        int subMat0rows = substructuredMat[0][pId]->GetRows();
                        int subMat1rows = substructuredMat[1][pId]->GetRows();
                        int subMat2rows = substructuredMat[2][pId]->GetRows();
                        int subMat3rows = substructuredMat[3][pId]->GetRows();

                        if(isBndDof[i])
                        {
                            for(j = 0; j < schurComplSubMatnRows; ++j)
                            {
                                ASSERTL0(patchId[i]==patchId[j],
                                         "These values should be equal");

                                if(isBndDof[j])
                                {
                                    subMat0[dofId[i]+dofId[j]*subMat0rows] +=
                                        sign[i]*sign[j]*(
                                            scale*schurSubMat[
                                                i+j*schurComplSubMatnRows]);
                                }
                                else
                                {
                                    subMat1[dofId[i]+dofId[j]*subMat1rows] +=
                                        sign[i]*sign[j]*(
                                            scale*schurSubMat[
                                                i+j*schurComplSubMatnRows]);
                                }
                            }
                        }
                        else
                        {
                            for(j = 0; j < schurComplSubMatnRows; ++j)
                            {
                                ASSERTL0(patchId[i]==patchId[j],
                                         "These values should be equal");

                                if(isBndDof[j])
                                {
                                    subMat2[dofId[i]+dofId[j]*subMat2rows] +=
                                        sign[i]*sign[j]*(
                                            scale*schurSubMat[
                                                i+j*schurComplSubMatnRows]);
                                }
                                else
                                {
                                    subMat3[dofId[i]+dofId[j]*subMat3rows] +=
                                        sign[i]*sign[j]*(
                                            scale*schurSubMat[
                                                i+j*schurComplSubMatnRows]);
                                }
                            }
                        }
                    }
                    cnt += schurComplSubMatnRows;
                }

                // STEP 2: condense the system
                // This can be done elementally (i.e. patch per patch)
                for(i = 0; i < nPatches; i++)
                {
                    if(nIntDofsPerPatch[i])
                    {
                        Array<OneD, NekDouble> subMat0
                            = substructuredMat[0][i]->GetPtr();
                        Array<OneD, NekDouble> subMat1
                            = substructuredMat[1][i]->GetPtr();
                        Array<OneD, NekDouble> subMat2
                            = substructuredMat[2][i]->GetPtr();
                        int subMat0rows = substructuredMat[0][i]->GetRows();
                        int subMat1rows = substructuredMat[1][i]->GetRows();
                        int subMat2rows = substructuredMat[2][i]->GetRows();
                        int subMat2cols = substructuredMat[2][i]->GetColumns();

                        // 1. D -> InvD
                        substructuredMat[3][i]->Invert();
                        // 2. B -> BInvD
                        (*substructuredMat[1][i]) = (*substructuredMat[1][i])*
                            (*substructuredMat[3][i]);
                        // 3. A -> A - BInvD*C (= Schur complement)
                        // (*substructuredMat[0][i]) =(*substructuredMat[0][i])-
                        //     (*substructuredMat[1][i])*(*substructuredMat[2][i]);
                        // Note: faster to use BLAS directly
                        Blas::Dgemm('N','N', subMat1rows, subMat2cols,
                                    subMat2rows, -1.0, &subMat1[0], subMat1rows,
                                    &subMat2[0], subMat2rows, 1.0, &subMat0[0],
                                    subMat0rows);
                    }
                }

                // STEP 3: fill the block matrices. Note that we first have
                // to convert them to DNekScalMat in order to be compatible
                // with the first level of static condensation
                const Array<OneD,const unsigned int>& nbdry_size
                    = pLocToGloMap->GetNumLocalBndCoeffsPerPatch();
                const Array<OneD,const unsigned int>& nint_size
                    = pLocToGloMap->GetNumLocalIntCoeffsPerPatch();
                MatrixStorage blkmatStorage = eDIAGONAL;

                blkMatrices[0] = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nbdry_size, nbdry_size, blkmatStorage);
                blkMatrices[1] = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nbdry_size, nint_size , blkmatStorage);
                blkMatrices[2] = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nint_size , nbdry_size, blkmatStorage);
                blkMatrices[3] = MemoryManager<DNekScalBlkMat>
                    ::AllocateSharedPtr(nint_size , nint_size , blkmatStorage);

                DNekScalMatSharedPtr tmpscalmat;
                for(i = 0; i < nPatches; i++)
                {
                    for(j = 0; j < 4; j++)
                    {
                        tmpscalmat = MemoryManager<DNekScalMat>
                            ::AllocateSharedPtr(1.0,substructuredMat[j][i]);
                        blkMatrices[j]->SetBlock(i,i,tmpscalmat);
                    }
                }
            }

            // We've finished with the Schur complement matrix passed to this
            // level, so return the memory to the system. The Schur complement
            // matrix need only be retained at the last level. Save the other
            // matrices at this level though.
            m_schurCompl.reset();

            m_recursiveSchurCompl = v_Recurse(
                m_linSysKey, m_expList, blkMatrices[0], blkMatrices[1],
                blkMatrices[2], blkMatrices[3], pLocToGloMap);
        }
    }
}