Nektar++
DriverParareal.cpp
Go to the documentation of this file.
1///////////////////////////////////////////////////////////////////////////////
2//
3// File DriverParareal.cpp
4//
5// For more information, please see: http://www.nektar.info
6//
7// The MIT License
8//
9// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10// Department of Aeronautics, Imperial College London (UK), and Scientific
11// Computing and Imaging Institute, University of Utah (USA).
12//
13// Permission is hereby granted, free of charge, to any person obtaining a
14// copy of this software and associated documentation files (the "Software"),
15// to deal in the Software without restriction, including without limitation
16// the rights to use, copy, modify, merge, publish, distribute, sublicense,
17// and/or sell copies of the Software, and to permit persons to whom the
18// Software is furnished to do so, subject to the following conditions:
19//
20// The above copyright notice and this permission notice shall be included
21// in all copies or substantial portions of the Software.
22//
23// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
24// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29// DEALINGS IN THE SOFTWARE.
30//
31// Description: Driver class for the parareal solver
32//
33///////////////////////////////////////////////////////////////////////////////
34
35#include <iomanip>
36
39#include <boost/format.hpp>
40
41namespace Nektar::SolverUtils
42{
// Name of the class as registered with the driver factory.
// NOTE(review): listing recovered from doxygen HTML — the initializer
// (doxygen lines 44-47, presumably the GetDriverFactory().
// RegisterCreatorFunction(...) call) was lost in extraction; restore it
// from the upstream source before compiling.
43std::string DriverParareal::className =
48
/**
 * Constructor: delegates all initialisation to the DriverParallelInTime
 * base class; no additional state is set up here.
 *
 * NOTE(review): the declaration line of this definition (doxygen lines
 * 52-54, i.e. "DriverParareal::DriverParareal(...)") was lost when the
 * listing was extracted from doxygen HTML.
 */
55 : DriverParallelInTime(pSession, pGraph)
56{
57}
58
/**
 * Virtual initialisation hook for the Parareal driver.
 *
 * @param out  Output stream for initialisation messages.
 *
 * NOTE(review): the body (doxygen lines 64-68, presumably a call to the
 * base-class DriverParallelInTime::v_InitObject plus Parareal-specific
 * setup) was lost in the doxygen HTML extraction.
 */
62void DriverParareal::v_InitObject(std::ostream &out)
63{
69}
70
/**
 * @brief Main Parareal solve loop.
 *
 * For each time window: runs the serial coarse predictor (pipelined via
 * RecvFromPreviousProc/SendToNextProc), then iterates the Parareal
 * correction — fine solve in parallel across time chunks, coarse solve
 * serial-in-time, correction F -> F - Gold + Gnew — until the L2 error
 * drops below m_tolerPIT or m_iterMaxPIT is reached. Converged
 * checkpoints are copied forward and the last chunk's solution seeds the
 * next window. Timing of each phase is accumulated and reported.
 *
 * @param out  Unused output stream (messages go to std::cout).
 *
 * NOTE(review): listing recovered from doxygen HTML; gaps in the embedded
 * line numbers (77, 82-83, 85-88, 92, 97, 103, 112-114, 121, 126, ...)
 * mark lost source lines — among them the LibUtilities::Timer
 * declaration and the actual solver/helper calls framed by the
 * timer.Start()/timer.Stop() pairs. Restore from upstream before use.
 */
74void DriverParareal::v_Execute([[maybe_unused]] std::ostream &out)
75{
76 // Timing.
78 NekDouble totalTime = 0.0, predictorTime = 0.0, coarseSolveTime = 0.0,
79 fineSolveTime = 0.0, correctionTime = 0.0;
80
81 // Get and assert parameters from session file.
83
84 // Initialize time step parameters.
89
90 // Start iteration windows.
91 m_comm->GetTimeComm()->Block();
93 for (size_t w = 0; w < m_numWindowsPIT; w++)
94 {
95 timer.Start();
96 // Initialize time for the current window.
98
99 // Print window number.
100 PrintHeader((boost::format("WINDOWS #%1%") % (w + 1)).str(), '*');
101
102 // Update coarse initial condition.
104
105 // Run predictor.
106 for (size_t i = 0; i < m_nVar; ++i)
107 {
108 RecvFromPreviousProc(m_EqSys[m_coarseLevel]->UpdatePhysField(i));
109 }
110 if (m_chunkRank > 0)
111 {
113 }
115 for (size_t i = 0; i < m_nVar; ++i)
116 {
117 SendToNextProc(m_EqSys[m_coarseLevel]->UpdatePhysField(i));
118 }
119
120 // Interpolate coarse solution.
122
123 // Compute exact solution, if necessary.
124 if (m_exactSolution)
125 {
127 }
128 timer.Stop();
129 predictorTime += timer.Elapsed().count();
130 totalTime += timer.Elapsed().count();
131
132 // Solution convergence monitoring.
133 timer.Start();
136 timer.Stop();
137 totalTime += timer.Elapsed().count();
138 if (m_chunkRank == m_numChunks - 1 &&
139 m_comm->GetSpaceComm()->GetRank() == 0)
140 {
141 std::cout << "Total Computation Time : " << totalTime << "s"
142 << std::endl
143 << std::flush;
144 }
145
146 // Start Parareal iteration.
147 size_t iter = 1;
// int rather than bool: the convergence flags are exchanged by
// reference through SendToNextProc/RecvFromPreviousProc, which take
// an int& convergence argument.
148 int convergenceCurr = false;
149 int convergencePrev = (m_chunkRank == 0);
150 while (iter <= m_iterMaxPIT && !convergenceCurr)
151 {
152 // Use previous parareal solution as "exact solution", if necessary.
153 timer.Start();
154 if (!m_exactSolution)
155 {
157 }
158 timer.Stop();
159 totalTime += timer.Elapsed().count();
160
161 // Calculate fine solution (parallel-in-time).
162 timer.Start();
165 timer.Stop();
166 fineSolveTime += timer.Elapsed().count();
167 totalTime += timer.Elapsed().count();
168
169 // Compute F -> F - Gold
170 timer.Start();
172 timer.Stop();
173 correctionTime += timer.Elapsed().count();
174 totalTime += timer.Elapsed().count();
175
176 // Receive coarse solution from previous processor.
177 timer.Start();
179 timer.Stop();
180 totalTime += timer.Elapsed().count();
181
182 // Calculate coarse solution (serial-in-time).
183 timer.Start();
186 iter);
187 timer.Stop();
188 coarseSolveTime += timer.Elapsed().count();
189 totalTime += timer.Elapsed().count();
190
191 // Compute F -> F + Gnew
192 timer.Start();
194 timer.Stop();
195 correctionTime += timer.Elapsed().count();
196 totalTime += timer.Elapsed().count();
197
198 // Solution convergence monitoring.
200 if (m_chunkRank == m_numChunks - 1 &&
201 m_comm->GetSpaceComm()->GetRank() == 0)
202 {
203 std::cout << "Total Computation Time : " << totalTime << "s"
204 << std::endl
205 << std::flush;
206 std::cout << " - Predictor Time : " << predictorTime << "s"
207 << std::endl
208 << std::flush;
209 std::cout << " - Coarse Solve Time : " << coarseSolveTime << "s"
210 << std::endl
211 << std::flush;
212 std::cout << " - Fine Solve Time : " << fineSolveTime << "s"
213 << std::endl
214 << std::flush;
215 std::cout << " - Correction Time : " << correctionTime << "s"
216 << std::endl
217 << std::flush;
218 }
219
220 // Check convergence of L2 error for each time chunk.
// Chunk k can converge at the earliest after k+1 iterations (the
// "m_chunkRank + 1 == iter" term guarantees termination, since the
// first iter chunks are then exact by construction).
221 convergenceCurr = (vL2ErrorMax() < m_tolerPIT && convergencePrev) ||
222 (m_chunkRank + 1 == iter);
223
224 // Send solution to next processor.
225 timer.Start();
226 SendToNextProc(m_fineSolution, convergenceCurr);
227 timer.Stop();
228 totalTime += timer.Elapsed().count();
229
230 // Increment iteration index.
231 iter++;
232 }
233
234 // Copy converged check points.
236
237 // Write time chunk solution to files.
239
240 // Apply windowing.
241 timer.Start();
243 timer.Stop();
244 totalTime += timer.Elapsed().count();
245 }
246
247 m_comm->GetTimeComm()->Block();
248 PrintHeader("SUMMARY", '*');
251 if (m_chunkRank == m_numChunks - 1 &&
252 m_comm->GetSpaceComm()->GetRank() == 0)
253 {
254 std::cout << "Total Computation Time : " << totalTime << "s"
255 << std::endl
256 << std::flush;
257 std::cout << " - Predictor Time : " << predictorTime << "s" << std::endl
258 << std::flush;
259 std::cout << " - Coarse Solve Time : " << coarseSolveTime << "s"
260 << std::endl
261 << std::flush;
262 std::cout << " - Fine Solve Time : " << fineSolveTime << "s"
263 << std::endl
264 << std::flush;
265 std::cout << " - Correction Time : " << correctionTime << "s"
266 << std::endl
267 << std::flush;
268 }
269}
270
/**
 * Allocates working storage for the Parareal solver: m_fineSolution is
 * aliased to the fine equation system's physical fields, and the coarse
 * solution either aliases the coarse system's fields or (presumably, when
 * fine and coarse discretisations match) shares fine storage.
 *
 * NOTE(review): recovered from doxygen HTML — the method's declaration
 * line (doxygen line 274) and several body lines (277-279, 282-283,
 * 285-286, 288) were lost, including the array allocations and the
 * condition of the surviving ?: expression. Restore from upstream.
 */
275{
276 // Allocate storage for Parareal solver.
280 for (size_t i = 0; i < m_nVar; ++i)
281 {
284 m_fineSolution[i] = m_EqSys[m_fineLevel]->UpdatePhysField(i);
287 ? m_EqSys[m_coarseLevel]->UpdatePhysField(i)
289 }
290}
291
/**
 * Validates the session's time-stepping and I/O parameters for Parareal:
 * fine/coarse step counts must divide evenly over chunks and windows,
 * fine and coarse total integration times must match, only single-phase
 * time-integration schemes are accepted, and IO_InfoSteps/IO_CheckSteps
 * must divide the per-chunk step counts.
 *
 * NOTE(review): recovered from doxygen HTML — the declaration line and the
 * condition expressions of most ASSERTL0 calls (doxygen lines 299, 303,
 * 306, 310, 314-315, 318, 324, 334, 342-344, 352-354, 362-364) were lost;
 * only the message strings survive. Restore from upstream.
 */
296{
297 // Assert time-stepping parameters
298 ASSERTL0(
300 "Total number of fine step should be divisible by number of chunks.");
301
302 ASSERTL0(
304 "Total number of coarse step should be divisible by number of chunks.");
305
307 "Total number of fine step should be divisible by number of "
308 "windows times number of chunks.");
309
311 "Total number of coarse step should be divisible by number of "
312 "windows times number of chunks.");
313
316 "Fine and coarse total computational times do not match");
317
319 ->GetTimeIntegrationScheme()
320 ->GetNumIntegrationPhases() == 1,
321 "Only single step time-integration schemes currently supported "
322 "for Parareal");
323
325 ->GetTimeIntegrationScheme()
326 ->GetNumIntegrationPhases() == 1,
327 "Only single step time-integration schemes currently supported "
328 "for Parareal");
329
330 // Assert I/O parameters
331 if (m_EqSys[m_fineLevel]->GetInfoSteps())
332 {
333 ASSERTL0(m_nsteps[m_fineLevel] % (m_EqSys[m_fineLevel]->GetInfoSteps() *
335 0,
336 "number of IO_InfoSteps should divide number of fine steps "
337 "per time chunk");
338 }
339
340 if (m_EqSys[m_coarseLevel]->GetInfoSteps())
341 {
343 (m_EqSys[m_coarseLevel]->GetInfoSteps() * m_numChunks *
345 0,
346 "number of IO_InfoSteps should divide number of coarse steps "
347 "per time chunk");
348 }
349
350 if (m_EqSys[m_fineLevel]->GetCheckpointSteps())
351 {
353 (m_EqSys[m_fineLevel]->GetCheckpointSteps() *
355 0,
356 "number of IO_CheckSteps should divide number of fine steps "
357 "per time chunk");
358 }
359
360 if (m_EqSys[m_coarseLevel]->GetCheckpointSteps())
361 {
363 (m_EqSys[m_coarseLevel]->GetCheckpointSteps() *
365 0,
366 "number of IO_CheckSteps should divide number of coarse steps "
367 "per time chunk");
368 }
369}
370
/**
 * Interpolates the solution held by the equation system at @c timeLevel
 * onto the fine level — presumably
 * UpdateInitialConditionFromSolver(const size_t timeLevel), writing the
 * result into m_initialCondition; confirm against upstream.
 *
 * NOTE(review): recovered from doxygen HTML — the declaration line
 * (doxygen line 374) and the remaining Interpolate() arguments (lines
 * 378-379) were lost in extraction.
 */
375{
376 // Interpolate solution to fine field.
377 Interpolate(m_EqSys[timeLevel]->UpdateFields(),
380}
381
/**
 * Restricts the stored fine-level initial condition onto the equation
 * system at @c timeLevel — presumably
 * UpdateSolverInitialCondition(const size_t timeLevel), pushing
 * m_initialCondition into the target solver's fields; confirm upstream.
 *
 * NOTE(review): recovered from doxygen HTML — the declaration line
 * (doxygen line 385) and the final Interpolate() argument (line 390,
 * likely the output array) were lost in extraction.
 */
386{
387 // Restrict fine field to coarse solution.
388 Interpolate(m_EqSys[m_fineLevel]->UpdateFields(),
389 m_EqSys[timeLevel]->UpdateFields(), m_initialCondition,
391}
392
393/**
394 *
395 */
396void DriverParareal::UpdateSolution(const size_t timeLevel,
397 const NekDouble time, const size_t nstep,
398 const size_t wd, const size_t iter)
399{
400 // Number of checkpoint by chunk.
401 size_t nChkPts =
402 m_EqSys[timeLevel]->GetCheckpointSteps()
403 ? m_nsteps[timeLevel] / m_EqSys[timeLevel]->GetCheckpointSteps()
404 : 1;
405
406 // Checkpoint index.
407 size_t iChkPts = (m_chunkRank + wd * m_numChunks) * nChkPts + 1;
408
409 // Reinitialize check point number for each parallel-in-time
410 // iteration.
411 m_EqSys[timeLevel]->SetCheckpointNumber(iChkPts);
412
413 // Update parallel-in-time iteration number.
414 m_EqSys[timeLevel]->SetIterationNumberPIT(iter);
415
416 // Update parallel-in-time window number.
417 m_EqSys[timeLevel]->SetWindowNumberPIT(wd);
418
419 m_EqSys[timeLevel]->SetTime(time);
420 m_EqSys[timeLevel]->SetSteps(nstep);
421 m_EqSys[timeLevel]->DoSolve();
422}
423
/**
 * Parareal correction step F -> F - Gold: subtracts the old coarse
 * solution from the fine solution, variable by variable — presumably via
 * Vmath::Vsub; confirm against upstream.
 *
 * NOTE(review): recovered from doxygen HTML — the declaration line
 * (doxygen line 427) and the first line of the vector call (line 432)
 * were lost in extraction; only its trailing arguments survive.
 */
428{
429 // Correct solution F -> F - Gold.
430 for (size_t i = 0; i < m_nVar; ++i)
431 {
433 m_coarseSolution[i], 1, m_fineSolution[i], 1);
434 }
435}
436
/**
 * Parareal correction step F -> F + Gnew: interpolates the new coarse
 * solution, then adds it to the fine solution, variable by variable —
 * presumably via Vmath::Vadd; confirm against upstream.
 *
 * NOTE(review): recovered from doxygen HTML — the declaration line
 * (doxygen line 440), the interpolation call (line 443) and the first
 * line of the vector call (line 448) were lost in extraction.
 */
441{
442 // Interpolate coarse solution.
444
445 // Correct solution F -> F + Gnew.
446 for (size_t i = 0; i < m_nVar; ++i)
447 {
449 m_coarseSolution[i], 1, m_fineSolution[i], 1);
450 }
451}
452
/**
 * Interpolates the coarse equation system's fields onto the fine level,
 * presumably storing the result in m_coarseSolution; executed only under
 * a condition lost in extraction (likely "fine and coarse levels differ"
 * — confirm against upstream).
 *
 * NOTE(review): recovered from doxygen HTML — the declaration line
 * (doxygen line 456), the if-condition (line 458) and the final
 * Interpolate() argument (line 463) were lost in extraction.
 */
457{
459 {
460 // Interpolate coarse solution to fine field.
461 Interpolate(m_EqSys[m_coarseLevel]->UpdateFields(),
462 m_EqSys[m_fineLevel]->UpdateFields(),
464 }
465}
466
/**
 * Applies windowing: the last chunk's fine solution becomes the initial
 * condition of the next time window and is broadcast from the last chunk
 * rank to all ranks on the time communicator. A no-op for the final
 * window.
 *
 * NOTE(review): recovered from doxygen HTML — the declaration line
 * (doxygen line 470, presumably "void DriverParareal::ApplyWindowing(
 * const size_t w)") and the first line of the copy call (line 484,
 * presumably Vmath::Vcopy) were lost in extraction.
 */
471{
472 if (w == m_numWindowsPIT - 1)
473 {
474 // No windowing required for the last window.
475 return;
476 }
477
478 // Use last chunk solution as initial condition for the next
479 // window.
480 if (m_chunkRank == m_numChunks - 1)
481 {
482 for (size_t i = 0; i < m_nVar; ++i)
483 {
485 m_EqSys[m_fineLevel]->UpdatePhysField(i), 1,
486 m_initialCondition[i], 1);
487 }
488 }
489
490 // Broadcast I.C. for windowing.
491 for (size_t i = 0; i < m_nVar; ++i)
492 {
493 m_comm->GetTimeComm()->Bcast(m_initialCondition[i], m_numChunks - 1);
494 }
495}
496
497/**
498 *
499 */
500void DriverParareal::CopyConvergedCheckPoints(const size_t w, const size_t k)
501{
502 // Determine max number of iteration.
503 size_t kmax = k;
504 m_comm->GetTimeComm()->AllReduce(kmax, Nektar::LibUtilities::ReduceMax);
505
506 if (m_comm->GetSpaceComm()->GetRank() == 0)
507 {
508 for (size_t j = k; j < kmax; j++)
509 {
510 // Copy converged solution files from directory corresponding to
511 // iteration j - 1 to the directory corresponding to iteration j.
512
513 auto sessionName = m_EqSys[m_fineLevel]->GetSessionName();
514
515 // Input directory name.
516 std::string indir =
517 sessionName + "_" + std::to_string(j - 1) + ".pit";
518
519 /// Output directory name.
520 std::string outdir = sessionName + "_" + std::to_string(j) + ".pit";
521
522 for (size_t timeLevel = 0; timeLevel < m_nTimeLevel; timeLevel++)
523 {
524 // Number of checkpoint by chunk.
525 size_t nChkPts =
526 m_EqSys[timeLevel]->GetCheckpointSteps()
527 ? m_nsteps[timeLevel] /
528 m_EqSys[timeLevel]->GetCheckpointSteps()
529 : 0;
530
531 // Checkpoint index.
532 size_t iChkPts = (m_chunkRank + w * m_numChunks) * nChkPts;
533
534 for (size_t i = 1; i <= nChkPts; i++)
535 {
536 // Filename corresponding to checkpoint iChkPts.
537 std::string filename = sessionName + "_timeLevel" +
538 std::to_string(timeLevel) + "_" +
539 std::to_string(iChkPts + i) + ".chk";
540
541 // Intput full file name.
542 std::string infullname = indir + "/" + filename;
543
544 // Output full file name.
545 std::string outfullname = outdir + "/" + filename;
546
547 // Remove output file if already existing.
548 fs::remove_all(outfullname);
549
550 // Copy converged solution files.
551 fs::copy(infullname, outfullname);
552 }
553 }
554 }
555 }
556}
557
/**
 * Writes the current time chunk's solution to output files: refreshes the
 * field coefficients, then delegates to the fine equation system's
 * Output() routine.
 *
 * NOTE(review): recovered from doxygen HTML — the declaration line
 * (doxygen line 561) and the coefficient-update call (line 566,
 * presumably UpdateFieldCoeffs(m_fineLevel, ...)) were lost in
 * extraction.
 */
562{
563 PrintHeader("PRINT SOLUTION FILES", '-');
564
565 // Update field coefficients.
567
568 // Output solution files.
569 m_EqSys[m_fineLevel]->Output();
570}
571
572} // namespace Nektar::SolverUtils
#define ASSERTL0(condition, msg)
Definition: ErrorUtil.hpp:208
tKey RegisterCreatorFunction(tKey idKey, CreatorFunction classCreator, std::string pDesc="")
Register a class with the factory.
Definition: NekFactory.hpp:197
static std::string RegisterEnumValue(std::string pEnum, std::string pString, int pEnumValue)
Registers an enumeration value.
LibUtilities::CommSharedPtr m_comm
Communication object.
Definition: Driver.h:80
Base class for the development of parallel-in-time solvers.
NekDouble m_totalTime
Total time integration interval.
void UpdateFieldCoeffs(const size_t timeLevel, const Array< OneD, const Array< OneD, NekDouble > > &in=NullNekDoubleArrayOfArray)
NekDouble m_chunkTime
Time integration interval per chunk.
void SendToNextProc(Array< OneD, Array< OneD, NekDouble > > &array, int &convergence)
size_t m_iterMaxPIT
Maximum number of parallel-in-time iteration.
Array< OneD, size_t > m_nsteps
Number of time steps for each time level.
SOLVER_UTILS_EXPORT void v_InitObject(std::ostream &out=std::cout) override
Virtual function for initialisation implementation.
void EvaluateExactSolution(const size_t timeLevel, const NekDouble &time)
void RecvFromPreviousProc(Array< OneD, Array< OneD, NekDouble > > &array, int &convergence)
Array< OneD, std::shared_ptr< UnsteadySystem > > m_EqSys
Equation system to solve.
void PrintHeader(const std::string &title, const char c)
NekDouble m_tolerPIT
ParallelInTime tolerance.
void PrintSolverInfo(std::ostream &out=std::cout)
bool m_exactSolution
Using exact solution to compute error norms.
void CopyFromPhysField(const size_t timeLevel, Array< OneD, Array< OneD, NekDouble > > &out)
void CopyToPhysField(const size_t timeLevel, const Array< OneD, const Array< OneD, NekDouble > > &in)
Array< OneD, size_t > m_npts
Number of dof for each time level.
Array< OneD, Array< OneD, NekDouble > > m_exactsoln
void SolutionConvergenceSummary(const size_t timeLevel)
Array< OneD, NekDouble > m_timestep
Time step for each time level.
void SolutionConvergenceMonitoring(const size_t timeLevel, const size_t iter)
void Interpolate(const Array< OneD, MultiRegions::ExpListSharedPtr > &infield, const Array< OneD, MultiRegions::ExpListSharedPtr > &outfield, const Array< OneD, Array< OneD, NekDouble > > &inarray, Array< OneD, Array< OneD, NekDouble > > &outarray)
SOLVER_UTILS_EXPORT void v_Execute(std::ostream &out=std::cout) override
Virtual function for solve implementation.
static constexpr size_t m_fineLevel
void UpdateInitialConditionFromSolver(const size_t timeLevel)
static constexpr size_t m_coarseLevel
static std::string className
Name of the class.
void CopyConvergedCheckPoints(const size_t w, const size_t k)
SOLVER_UTILS_EXPORT void v_InitObject(std::ostream &out=std::cout) override
Virtual function for initialisation implementation.
void UpdateSolution(const size_t timeLevel, const NekDouble time, const size_t nstep, const size_t wd, const size_t iter)
Array< OneD, Array< OneD, NekDouble > > m_coarseSolution
static DriverSharedPtr create(const LibUtilities::SessionReaderSharedPtr &pSession, const SpatialDomains::MeshGraphSharedPtr &pGraph)
Creates an instance of this class.
SOLVER_UTILS_EXPORT DriverParareal(const LibUtilities::SessionReaderSharedPtr pSession, const SpatialDomains::MeshGraphSharedPtr pGraph)
Constructor.
void UpdateSolverInitialCondition(const size_t timeLevel)
Array< OneD, Array< OneD, NekDouble > > m_fineSolution
Array< OneD, Array< OneD, NekDouble > > m_initialCondition
def copy(self)
Definition: pycml.py:2663
std::shared_ptr< SessionReader > SessionReaderSharedPtr
DriverFactory & GetDriverFactory()
Definition: Driver.cpp:65
std::shared_ptr< MeshGraph > MeshGraphSharedPtr
Definition: MeshGraph.h:174
std::vector< double > w(NPUPPER)
static Array< OneD, Array< OneD, NekDouble > > NullNekDoubleArrayOfArray
double NekDouble
void Vadd(int n, const T *x, const int incx, const T *y, const int incy, T *z, const int incz)
Add vector z = x+y.
Definition: Vmath.hpp:180
void Vcopy(int n, const T *x, const int incx, T *y, const int incy)
Definition: Vmath.hpp:825
void Vsub(int n, const T *x, const int incx, const T *y, const int incy, T *z, const int incz)
Subtract vector z = x-y.
Definition: Vmath.hpp:220