Nektar++
PulseWaveSystem.cpp
Go to the documentation of this file.
1///////////////////////////////////////////////////////////////////////////////
2//
3// File: PulseWaveSystem.cpp
4//
5// For more information, please see: http://www.nektar.info
6//
7// The MIT License
8//
9// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10// Department of Aeronautics, Imperial College London (UK), and Scientific
11// Computing and Imaging Institute, University of Utah (USA).
12//
13// License for the specific language governing rights and limitations under
14// Permission is hereby granted, free of charge, to any person obtaining a
15// copy of this software and associated documentation files (the "Software"),
16// to deal in the Software without restriction, including without limitation
17// the rights to use, copy, modify, merge, publish, distribute, sublicense,
18// and/or sell copies of the Software, and to permit persons to whom the
19// Software is furnished to do so, subject to the following conditions:
20//
21// The above copyright notice and this permission notice shall be included
22// in all copies or substantial portions of the Software.
23//
24// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
25// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
27// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
30// DEALINGS IN THE SOFTWARE.
31//
32// Description: Generic timestepping for Pulse Wave Solver
33//
34///////////////////////////////////////////////////////////////////////////////
35
36#include <iostream>
37
43using namespace std;
44
45namespace Nektar
46{
47
48/**
49 * @class PulseWaveSystem
50 *
51 * Initialises the arterial subdomains in m_vessels and sets up
 52 * all domain-linking conditions (bifurcations, interfaces/junctions,
 53 * merging flows). Detects the network structure and assigns
 54 * boundary conditions. Also provides the underlying timestepping
55 * framework for pulse wave solvers including the general
56 * timestepping routines.
57 */
58
59/**
60 * Processes SolverInfo parameters from the session file and sets
61 * up timestepping-specific code.
62 *
63 * @param m_Session Session object to read parameters from.
64 */
68 : UnsteadySystem(pSession, pGraph)
69{
70}
71
72/**
73 * Destructor
74 */
76{
77}
78
79/**
80 * Initialisation routine for multidomain solver. Sets up the
81 * expansions for every arterial segment (m_vessels) and for one
82 * complete field m_outfield which is needed to write the
83 * postprocessing output. Also determines which upwind strategy
84 * is used (currently only upwinding scheme available) and reads
85 * blodd flow specific parameters from the inputfile
86 *
87 */
88void PulseWaveSystem::v_InitObject(bool DeclareField)
89{
90 // Initialise base class
91 UnsteadySystem::v_InitObject(DeclareField);
92
93 // Read the geometry and the expansion information
94 m_domain = m_graph->GetDomain();
95 m_nDomains = m_domain.size();
96
97 // Determine projectiontype
98 ASSERTL0(m_session->MatchSolverInfo("Projection", "DisContinuous"),
99 "Pulse solver only set up for Discontinuous projections");
101 ASSERTL0(
102 m_graph->GetMeshDimension() == 1,
103 "Pulse wave solver only set up for expansion dimension equal to 1");
104
105 size_t i;
106 m_nVariables = m_session->GetVariables().size();
107
109 m_vessels =
111
112 /* If the PressureArea property is not specified, default to the Beta
113 * law; it's the most well known and this way previous examples that did
114 * not specify the tube law will still work.
115 */
116 if (m_session->DefinesSolverInfo("PressureArea"))
117 {
119 m_session->GetSolverInfo("PressureArea"), m_vessels, m_session);
120 }
121 else
122 {
124 "Beta", m_vessels, m_session);
125 }
126
128
132
134
135 // Set up domains and put geometry to be only one space dimension.
136 size_t cnt = 0;
137 bool SetToOneSpaceDimension = true;
138
139 if (m_session->DefinesCmdLineArgument("SetToOneSpaceDimension"))
140 {
141 std::string cmdline = m_session->GetCmdLineArgument<std::string>(
142 "SetToOneSpaceDimension");
143 if (boost::to_upper_copy(cmdline) == "FALSE")
144 {
145 SetToOneSpaceDimension = false;
146 }
147 }
148
149 // get parallel communicators as required
150 map<int, LibUtilities::CommSharedPtr> domComm;
151 GetCommArray(domComm);
152
153 SetUpDomainInterfaceBCs(Allbcs, domComm);
154
155 for (auto &d : m_domOrder)
156 {
157 for (size_t j = 0; j < m_nVariables; ++j)
158 {
159 m_vessels[cnt++] =
161 m_session, m_graph, m_domain[d], Allbcs,
162 m_session->GetVariable(j), domComm[d],
163 SetToOneSpaceDimension);
164 }
165 }
166
167 // Reset coeff and phys space to be continuous over all domains
168 int totcoeffs = 0;
169 int totphys = 0;
171 for (i = 0; i < m_nDomains; ++i)
172 {
173 totcoeffs += m_vessels[i * m_nVariables]->GetNcoeffs();
174
175 m_fieldPhysOffset[i] = totphys;
176 totphys += m_vessels[i * m_nVariables]->GetTotPoints();
177 }
178 m_fieldPhysOffset[m_nDomains] = totphys;
179
180 for (size_t n = 0; n < m_nVariables; ++n)
181 {
182 Array<OneD, NekDouble> coeffs(totcoeffs, 0.0);
183 Array<OneD, NekDouble> phys(totphys, 0.0);
184 Array<OneD, NekDouble> tmpcoeffs, tmpphys;
185
186 m_vessels[n]->SetCoeffsArray(coeffs);
187 m_vessels[n]->SetPhysArray(phys);
188
189 int cnt = m_vessels[n]->GetNcoeffs();
190 int cnt1 = m_vessels[n]->GetTotPoints();
191
192 for (i = 1; i < m_nDomains; ++i)
193 {
194 m_vessels[i * m_nVariables + n]->SetCoeffsArray(tmpcoeffs =
195 coeffs + cnt);
196 m_vessels[i * m_nVariables + n]->SetPhysArray(tmpphys =
197 phys + cnt1);
198 cnt += m_vessels[i * m_nVariables + n]->GetNcoeffs();
199 cnt1 += m_vessels[i * m_nVariables + n]->GetTotPoints();
200 }
201 }
202
203 // If Discontinuous Galerkin determine upwinding method to use
204 for (size_t i = 0; i < SIZE_UpwindTypePulse; ++i)
205 {
206 bool match;
207 m_session->MatchSolverInfo("UPWINDTYPEPULSE", UpwindTypeMapPulse[i],
208 match, false);
209 if (match)
210 {
212 break;
213 }
214 }
215
216 // Load blood density and external pressure
217 m_session->LoadParameter("rho", m_rho, 0.5);
218 m_session->LoadParameter("pext", m_pext, 0.0);
219
220 int nq = 0;
221 /*
222 * Gets the Material Properties of each arterial segment
223 * specified in the inputfile from section MaterialProperties
224 * Also gets the Area at static equilibrium A_0 specified in the
225 * inputfile.
226 *
227 * Having found these points also extract the values at the
228 * trace points and the normal direction consistent with the
229 * left adjacent definition of Fwd and Bwd
230 */
239
240 int omega = 0;
241 for (auto &d : m_domOrder)
242 {
243 nq = m_vessels[2 * omega]->GetNpoints();
244 m_fields[0] = m_vessels[2 * omega];
245
246 m_beta[omega] = Array<OneD, NekDouble>(nq);
247 GetFunction("MaterialProperties")
248 ->Evaluate("beta", m_beta[omega], m_time, d);
249
250 // If the input file doesn't specify viscoelasticity, set it to zero.
251 m_gamma[omega] = Array<OneD, NekDouble>(nq);
252
253 if (m_session->DefinesFunction("Viscoelasticity"))
254 {
255 GetFunction("Viscoelasticity")
256 ->Evaluate("gamma", m_gamma[omega], m_time, d);
257 }
258 else
259 {
260 for (int j = 0; j < nq; ++j)
261 {
262 m_gamma[omega][j] = 0;
263 }
264 }
265
266 /* If the input file doesn't specify strain-stiffening, set it to
267 * 0.5 for elastic behaviour.
268 */
269 m_alpha[omega] = Array<OneD, NekDouble>(nq);
270
271 if (m_session->DefinesFunction("StrainStiffening"))
272 {
273 GetFunction("StrainStiffening")
274 ->Evaluate("alpha", m_alpha[omega], m_time, d);
275 }
276 else
277 {
278 for (int j = 0; j < nq; ++j)
279 {
280 m_alpha[omega][j] = 0.5;
281 }
282 }
283
284 m_A_0[omega] = Array<OneD, NekDouble>(nq);
285 GetFunction("A_0")->Evaluate("A_0", m_A_0[omega], m_time, d);
286
287 int nqTrace = GetTraceTotPoints();
288
289 m_beta_trace[omega] = Array<OneD, NekDouble>(nqTrace);
290 m_fields[0]->ExtractTracePhys(m_beta[omega], m_beta_trace[omega]);
291
292 m_A_0_trace[omega] = Array<OneD, NekDouble>(nqTrace);
293 m_fields[0]->ExtractTracePhys(m_A_0[omega], m_A_0_trace[omega]);
294
295 m_alpha_trace[omega] = Array<OneD, NekDouble>(nqTrace);
296 m_fields[0]->ExtractTracePhys(m_alpha[omega], m_alpha_trace[omega]);
297
298 if (SetToOneSpaceDimension)
299 {
300 m_trace_fwd_normal[omega] = Array<OneD, NekDouble>(nqTrace, 0.0);
301
302 MultiRegions::ExpListSharedPtr trace = m_fields[0]->GetTrace();
303 int nelmt_trace = trace->GetExpSize();
304
305 Array<OneD, Array<OneD, NekDouble>> normals(nelmt_trace);
306
307 for (int i = 0; i < nelmt_trace; ++i)
308 {
309 normals[i] = m_trace_fwd_normal[omega] + i;
310 }
311
312 // need to set to 1 for consistency since boundary
313 // conditions may not have coordim = 1
314 trace->GetExp(0)->GetGeom()->SetCoordim(1);
315
316 trace->GetNormals(normals);
317 }
318 omega++;
319 }
320
322}
323
325 map<int, LibUtilities::CommSharedPtr> &retval)
326{
327
328 // set up defualt serial communicator
329 std::string def("default");
330 char *argv = new char[def.length() + 1];
331 std::strcpy(argv, def.c_str());
332 LibUtilities::CommSharedPtr serialComm =
334
335 size_t nprocs = m_comm->GetSize();
336
337 if (nprocs == 1) // serial case
338 {
339 for (auto &d : m_domain)
340 {
341 // serial communicator
342 retval[d.first] = serialComm;
343 // fill out m_domOrder set for serial case
344 m_domOrder.push_back(d.first);
345 }
346 }
347 else // parallel case
348 {
349 size_t rank = m_comm->GetRank();
350
351 // Fill array with domain details
352 size_t dmax = 0;
353 for (auto &d : m_domain)
354 {
355 dmax = ((size_t)d.first > dmax) ? d.first : dmax;
356 }
357 dmax += 1;
358
359 // get domain maximum id + 1;
360 m_comm->AllReduce(dmax, LibUtilities::ReduceMax);
361
362 Array<OneD, int> commtmp(dmax, 0);
363 for (auto &d : m_domain)
364 {
365 commtmp[d.first] = 1;
366 }
367
368 // identfy which domains are split between two procs
369 m_comm->AllReduce(commtmp, LibUtilities::ReduceSum);
370
371 Array<OneD, bool> DoneCreate(nprocs, false);
372
373 // setup communicators for domain partitions greater than 2
374 for (size_t d = 0; d < dmax; ++d)
375 {
376 // set up communicator for cases where more than 2 procs
377 // share domain
378 if (commtmp[d] > 2)
379 {
381 int flag = 0;
382 // set flag to 10 if d is on this processors domain
383 for (auto &d1 : m_domain)
384 {
385 if ((size_t)d1.first == d)
386 {
387 flag = 10;
388 }
389 }
390
391 newcomm = m_comm->CommCreateIf(flag);
392
393 // set up communicator if domain on this processor
394 for (auto &d1 : m_domain)
395 {
396 if ((size_t)d1.first == d)
397 {
398 retval[d] = newcomm;
399 }
400 }
401
402 // turn off commtmp flag since should now be setup
403 commtmp[d] = -1;
404 }
405 }
406
407 // we now should have a list of the number of each
408 // domains/vessel that is split over two processors
409
410 // set up serial communicators for domains on only one
411 // processor and set up map of domains that require parallel
412 // communicator and number of processors in parallel
413
414 // communicator
415 map<int, int> SharedProc;
416 Array<OneD, int> commtmp1(dmax, 0);
417
418 for (auto &d : m_domain)
419 {
420 if (commtmp[d.first] == 1) // set to serial communicator
421 {
422 retval[d.first] = serialComm;
423 }
424 else if (commtmp[d.first] == 2) // shared by 2 procs
425 {
426 SharedProc[d.first] = 1;
427 commtmp1[d.first] = rank;
428 }
429 else // do nothing
430 {
431 }
432 }
433
434 // find the maximum number of communicator on all processors
435 size_t nShrProc = SharedProc.size();
436 int commMax = nShrProc;
437 m_comm->AllReduce(commMax, LibUtilities::ReduceMax);
438
439 // find out processor id of each communicator by adding rank
440 // and then subtracting local rank
441
442 m_comm->AllReduce(commtmp1, LibUtilities::ReduceSum);
443
444 // reset SharedProc to hold shared proc id.
445 for (auto &d : SharedProc)
446 {
447 SharedProc[d.first] = commtmp1[d.first] - rank;
448 }
449
450 // now work out a comm ordering;
451 Array<OneD, int> procs(2);
452 int commcall = 0;
453 map<int, pair<int, int>> CreateComm;
454
455 bool search = true;
456 Array<OneD, int> setdone(3);
457
458 while (search)
459 {
460 size_t dorank = 0; // search index over processors
461 set<int>
462 proclist; // has proc been identified for a comm at this level
463 int flag = 100; // colour for communicators in this search level
464
465 while (dorank < nprocs)
466 {
467 int sharedproc = -1;
468 int ncomms = 0;
469
470 if (DoneCreate[dorank] == false)
471 {
472 // check to see if proc even has a shared domain
473 ncomms = (rank == dorank) ? nShrProc : 0;
474 m_comm->AllReduce(ncomms, LibUtilities::ReduceMax);
475
476 // set DoneCreate to true if n comms required on
477 // proc
478 DoneCreate[dorank] = (ncomms == 0) ? true : false;
479 }
480
481 // requires communicator and needs to create one.
482 if (ncomms)
483 {
484 bool doneCreateOnDoRank = false;
485 bool doneCreateOnSharedProc = false;
486 bool createdComm = false;
487
488 // check not already callng communicator on
489 // this processor for this search sweep
490 if ((proclist.count(dorank) == 0))
491 {
492
493 // give all processors rank id of shared proc
494 if (rank == dorank)
495 {
496 for (auto &d : SharedProc)
497 {
498 if (CreateComm.count(d.first) == 0)
499 {
500 sharedproc = SharedProc[d.first];
501 break;
502 }
503 }
504
505 ASSERTL1(sharedproc != -1,
506 "Failed to fine a new "
507 "processor to set up another "
508 "communicator");
509
510 if (proclist.count(sharedproc) == 0)
511 {
512 // save all communicators on this for
513 // split comm setup
514 for (auto &d : SharedProc)
515 {
516 if (d.second == sharedproc)
517 {
518 CreateComm[d.first] = pair<int, int>(
519 commcall, flag + d.first);
520 createdComm = true;
521 }
522 }
523 }
524
525 // set bool to mark as done on dorank
526 if (CreateComm.size() == nShrProc)
527 {
528 doneCreateOnDoRank = true;
529 }
530 }
531 else
532 {
533 sharedproc = -1;
534 }
535
536 m_comm->AllReduce(sharedproc, LibUtilities::ReduceMax);
537 ASSERTL1(
538 sharedproc < nprocs,
539 "Error in obtaining rank "
540 "shared by domain. Sharedproc value is greater "
541 "than number of procs");
542
543 if (proclist.count(sharedproc) == 0)
544 {
545 if ((int)rank == sharedproc)
546 {
547 // save all communicators on this for
548 // split comm setup
549 for (auto &d : SharedProc)
550 {
551 if (d.second == (int)dorank)
552 {
553 CreateComm[d.first] = pair<int, int>(
554 commcall, flag + d.first);
555 createdComm = true;
556 }
557 }
558
559 if (CreateComm.size() == nShrProc)
560 {
561 doneCreateOnSharedProc = true;
562 }
563 }
564 }
565 }
566
567 // mark DoneCreate as if complete
568 setdone[0] = (doneCreateOnDoRank) ? 1 : 0;
569 setdone[1] = (doneCreateOnSharedProc) ? 1 : 0;
570 setdone[2] = (createdComm) ? 1 : 0;
571 m_comm->AllReduce(setdone, LibUtilities::ReduceMax);
572 DoneCreate[dorank] = (bool)setdone[0];
573 if (sharedproc != -1)
574 {
575 DoneCreate[sharedproc] = (bool)setdone[1];
576 }
577 if (setdone[2]) // created new communicator
578 {
579 proclist.insert(dorank);
580 proclist.insert(sharedproc);
581 }
582 }
583
584 dorank++;
585 }
586
587 // have we found all comms on all processors.
588 size_t i;
589 for (i = 0; i < nprocs; ++i)
590 {
591 if (DoneCreate[i] == false)
592 {
593 break;
594 }
595 }
596
597 if (i == nprocs)
598 {
599 search = false;
600 }
601 else
602 {
603 commcall++;
604 ASSERTL0(commcall < 4 * commMax,
605 "Failed to find sub communicators "
606 "pattern in 4*commMax searches");
607 }
608 }
609
610 ASSERTL1(CreateComm.size() == nShrProc,
611 "Have not created communicators for all shared procs");
612
613 // determine maxmimum number of CreateComm size
614 size_t maxCreateComm = CreateComm.size();
615 m_comm->AllReduce(maxCreateComm, LibUtilities::ReduceMax);
616
617 // loop over CreateComm list
618 for (size_t i = 0; i < maxCreateComm; ++i)
619 {
621 int flag = 0;
622
623 for (auto &d : CreateComm)
624 {
625 if (d.second.first == (int)i)
626 {
627 flag = d.second.second;
628 }
629 }
630
631 newcomm = m_comm->CommCreateIf(flag);
632
633 for (auto &d : CreateComm)
634 {
635 if (d.second.first == (int)i)
636 {
637 retval[d.first] = newcomm;
638 }
639 }
640 }
641
642 // Finally need to re-order domain list so that we call the
643 // communicators in a non-locking manner. This is done by
644 // uniquely numbering each shared proc/comm interface and
645 // ensuring the domains are ordered so that we call this
646 // numbering in an increasing order
647
648 Array<OneD, NekDouble> numShared(nprocs, 0.0);
649 // determine the number of communicators on this rank where
650 // the share proc has a higher rank id
651 numShared[rank] = nShrProc;
652 m_comm->AllReduce(numShared, LibUtilities::ReduceSum);
653 int totShared = (int)Vmath::Vsum(nprocs, numShared, 1);
654
655 if (totShared)
656 {
657 size_t cnt = 0;
658 Array<OneD, NekDouble> numOffset(nprocs, 0.0);
659 for (auto &s : SharedProc)
660 {
661 if (s.second > (int)rank)
662 {
663 cnt++;
664 }
665 }
666
667 numOffset[rank] = cnt;
668 m_comm->AllReduce(numOffset, LibUtilities::ReduceMax);
669
670 // make numShared into a cumulative list
671 for (size_t i = 1; i < nprocs; ++i)
672 {
673 numShared[i] += numShared[i - 1];
674 numOffset[i] += numOffset[i - 1];
675 }
676 for (size_t i = nprocs - 1; i > 0; --i)
677 {
678 numShared[i] = numShared[i - 1];
679 numOffset[i] = numOffset[i - 1];
680 }
681 numShared[0] = 0;
682 numOffset[0] = 0;
683
684 Array<OneD, NekDouble> shareddom(totShared, -1.0);
685 cnt = 0; // collect a list of domains that each shared commm is
686 // attached to
687 for (auto &s : SharedProc)
688 {
689 shareddom[numShared[rank] + cnt] = s.first;
690 ++cnt;
691 }
692 m_comm->AllReduce(shareddom, LibUtilities::ReduceMax);
693
694 // define a numbering scheme
695 Array<OneD, NekDouble> sharedid(totShared, -1.0);
696 cnt = 0;
697 size_t cnt1 = 0;
698 for (auto &s : SharedProc)
699 {
700 if (s.second > (int)rank)
701 {
702 sharedid[numShared[rank] + cnt] = numOffset[rank] + cnt1;
703
704 // find shared proc offset by matching the domain ids.
705 size_t j;
706 for (j = 0; j < maxCreateComm; ++j)
707 {
708 if ((numShared[s.second] + j < totShared) &&
709 (shareddom[numShared[s.second] + j] == s.first))
710 {
711 break;
712 }
713 }
714
715 sharedid[numShared[s.second] + j] = numOffset[rank] + cnt1;
716 cnt1++;
717 }
718 cnt++;
719 }
720
721 // now communicate ids to all procesors.
722 m_comm->AllReduce(sharedid, LibUtilities::ReduceMax);
723
724 if (rank == 0)
725 {
726 for (size_t i = 0; i < (size_t)totShared; ++i)
727 {
728 ASSERTL1(sharedid[i] != -1.0,
729 "Failed to number shared proc uniquely");
730 }
731 }
732
733 cnt = 0;
734 int maxoffset =
735 (int)Vmath::Vmax(nShrProc, &sharedid[numShared[rank]], 1);
736 m_comm->AllReduce(maxoffset, LibUtilities::ReduceMax);
737 maxoffset++;
738
739 // make a map relating the order of the SharedProc to the domain id
740 map<int, int> ShrToDom;
741 cnt = 0;
742 for (auto &s : SharedProc)
743 {
744 ShrToDom[cnt] = s.first;
745 ++cnt;
746 }
747
748 // Set up a set the domain ids listed in the ordering of
749 // the SharedProc unique numbering
750 set<int> doneDom;
751 for (size_t i = 0; i < nShrProc; ++i)
752 {
753 int minId =
754 Vmath::Imin(nShrProc, &sharedid[numShared[rank]], 1);
755 sharedid[numShared[rank] + minId] += maxoffset;
756
757 m_domOrder.push_back(ShrToDom[minId]);
758 doneDom.insert(ShrToDom[minId]);
759 }
760
761 // add all the other
762 for (auto &d : m_domain)
763 {
764 if (doneDom.count(d.first) == 0)
765 {
766 m_domOrder.push_back(d.first);
767 }
768 }
769 }
770 }
771}
772
774{
775 map<int, std::vector<InterfacePointShPtr>> VidToDomain;
776
777 // First set up domain to determine how many vertices meet at a
778 // vertex. This allows us to
779
780 /* Loop over domain and find out if we have any undefined
781 * boundary conditions representing interfaces. If so make a
782 * map based around vid and storing the domains that are
783 * part of interfaces.
784 */
785 for (size_t omega = 0; omega < m_nDomains; ++omega)
786 {
787 size_t vesselID = omega * m_nVariables;
788
789 for (size_t i = 0; i < (m_vessels[vesselID]->GetBndConditions()).size();
790 ++i)
791 {
792 if (m_vessels[vesselID]->GetBndConditions()[i]->GetUserDefined() ==
793 "Interface")
794 {
795 // Get Vid of interface
796 int vid = m_vessels[vesselID]
797 ->UpdateBndCondExpansion(i)
798 ->GetExp(0)
799 ->GetGeom()
800 ->GetVid(0);
801
803 m_vessels[vesselID]->GetTrace();
805
806 bool finish = false;
807 // find which elmt, the local vertex and the data
808 // offset of point
809 size_t nExp = m_vessels[vesselID]->GetExpSize();
810 for (size_t n = 0; n < nExp; ++n)
811 {
812 for (size_t p = 0; p < 2; ++p)
813 {
814
815 if (m_vessels[vesselID]
816 ->GetTraceMap()
817 ->GetElmtToTrace()[n][p]
819 ->GetGeom()
820 ->GetVid(0) == vid)
821 {
822 int eid = m_vessels[vesselID]
823 ->GetTraceMap()
824 ->GetElmtToTrace()[n][p]
825 ->GetElmtId();
826
827 int tid = m_vessels[vesselID]
828 ->GetTrace()
829 ->GetCoeff_Offset(eid);
830
831 Ipt = MemoryManager<
832 InterfacePoint>::AllocateSharedPtr(vid, omega,
833 n, p, tid,
834 i);
835
836 finish = true;
837 break;
838 }
839 }
840 if (finish == true)
841 {
842 break;
843 }
844 }
845
846 VidToDomain[vid].push_back(Ipt);
847 }
848 }
849 }
850
851 // Set up comms for parallel cases
852 map<int, int> domId;
853
854 size_t cnt = 0;
855 for (auto &d : m_domain)
856 {
857 domId[cnt] = d.first;
858 ++cnt;
859 }
860 ASSERTL1(domId.size() == m_nDomains, "Number of domains do not match");
861
862 Gs::gs_data *gscomm;
863 int nvid2dom = VidToDomain.size();
864 Array<OneD, long> tmp(nvid2dom);
865 Array<OneD, NekDouble> nvid(nvid2dom, 0.0);
866
867 cnt = 0;
868 for (auto &v : VidToDomain)
869 {
870 tmp[cnt] = v.first + 1;
871 for (size_t i = 0; i < v.second.size(); ++i)
872 {
873 nvid[cnt] += 1.0;
874 }
875 cnt++;
876 }
877
878 const bool verbose = m_session->DefinesCmdLineArgument("verbose");
879
880 gscomm = Gs::Init(tmp, m_comm->GetRowComm(), verbose);
881 Gs::Gather(nvid, Gs::gs_add, gscomm);
882
883 // Loop over domains and identify how many vessels at a
884 // bifurcation use the beginning of the element at the
885 // junction. This distinguishes a bifurcation and merging junction
886 // since we implicitly assume elements are ordered left ot right
887 // and so at a bifurcation we have two elements meeting this point
888 // at the first vertex
889 Array<OneD, NekDouble> nbeg(nvid2dom, 0.0);
890 cnt = 0;
891 for (auto &v : VidToDomain)
892 {
893 if (nvid[cnt] == 3.0)
894 {
895 for (size_t i = 0; i < v.second.size(); ++i)
896 {
897 // for bifurcations and merging junctions store how
898 // many points at interface are at the beginning or
899 // end
900 if (v.second[i]->m_elmtVert == 0)
901 {
902 nbeg[cnt] += 1.0;
903 }
904 }
905 }
906 ++cnt;
907 }
908 Gs::Gather(nbeg, Gs::gs_add, gscomm);
909
910 // Finally determine the maximum domain id between the two vessels
911 // at an interfaces or the maximum domain id of the daughter
912 // vessels for a bifurcation or the two parent id's for a merging
913 // junction
914 Array<OneD, NekDouble> dom(VidToDomain.size(), 0.0);
915 cnt = 0;
916 for (auto &v : VidToDomain)
917 {
918 if (nvid[cnt] == 2.0)
919 {
920 // store the domain id of the two domains
921 for (size_t i = 0; i < v.second.size(); ++i)
922 {
923 dom[cnt] =
924 max(dom[cnt], (NekDouble)domId[v.second[i]->m_domain]);
925 }
926 }
927 else if (nvid[cnt] == 3.0)
928 {
929 // store the domain id of the two daughter vessels if a
930 // bifurcation or the two parent vessels if a merging
931 // junction
932
933 int val = (nbeg[cnt] == 2.0) ? 0 : 1;
934
935 for (size_t i = 0; i < v.second.size(); ++i)
936 {
937 if (v.second[i]->m_elmtVert == val)
938 {
939 dom[cnt] =
940 max(dom[cnt], (NekDouble)domId[v.second[i]->m_domain]);
941 }
942 }
943 }
944 ++cnt;
945 }
946 Gs::Gather(dom, Gs::gs_max, gscomm);
947
948 // loop over map and set up Interface information;
949 int v = 0;
950 for (auto &iter : VidToDomain)
951 {
952 ASSERTL1(nvid[v] != 1.0, "Found an interface wth only "
953 "one domain which should not happen");
954
955 if (nvid[v] == 2.0) // Vessel jump interface
956 {
957 for (size_t i = 0; i < iter.second.size(); ++i)
958 {
959 if (domId[iter.second[i]->m_domain] == dom[v])
960 {
961 iter.second[i]->m_riemannOrd = 1;
962 }
963 else
964 {
965 iter.second[i]->m_riemannOrd = 0;
966 }
967 }
968 m_vesselIntfcs.push_back(iter.second);
969 }
970 else if (nvid[v] == 3.0) // Bifurcation or Merging junction.
971 {
972 // Set up Bifurcation information
973 int val = (nbeg[v] == 2.0) ? 1 : 0;
974
975 for (size_t i = 0; i < iter.second.size(); ++i)
976 {
977 if (iter.second[i]->m_elmtVert == val)
978 {
979 iter.second[i]->m_riemannOrd = 0;
980 }
981 else
982 {
983 if (domId[iter.second[i]->m_domain] == dom[v])
984 {
985 iter.second[i]->m_riemannOrd = 2;
986 }
987 else
988 {
989 iter.second[i]->m_riemannOrd = 1;
990 }
991 }
992 }
993
994 if (nbeg[v] == 2.0)
995 {
996 m_bifurcations.push_back(iter.second);
997 }
998 else
999 {
1000 m_mergingJcts.push_back(iter.second);
1001 }
1002 }
1003 else
1004 {
1005 ASSERTL0(false, "Unknown junction type");
1006 }
1007 ++v; // incrementing loop over vertices
1008 }
1009
1010 // finally set up a communicator for interface data collection
1011 Array<OneD, long> tmp1(3 * nvid2dom);
1012 if (nvid2dom)
1013 {
1014 Vmath::Zero(3 * nvid2dom, tmp1, 1);
1015 }
1016
1017 cnt = 0;
1018 for (size_t n = 0; n < m_bifurcations.size(); ++n, ++cnt)
1019 {
1020 size_t vid = m_bifurcations[n][0]->m_vid;
1021 for (size_t i = 0; i < 3; ++i)
1022 {
1023 tmp1[3 * cnt + i] = (NekDouble)(3 * vid + i + 1);
1024 }
1025 }
1026
1027 for (size_t n = 0; n < m_mergingJcts.size(); ++n, ++cnt)
1028 {
1029 size_t vid = m_mergingJcts[n][0]->m_vid;
1030 for (size_t i = 0; i < 3; ++i)
1031 {
1032 tmp1[3 * cnt + i] = (NekDouble)(3 * vid + i + 1);
1033 }
1034 }
1035
1036 for (size_t n = 0; n < m_vesselIntfcs.size(); ++n, ++cnt)
1037 {
1038 size_t vid = m_vesselIntfcs[n][0]->m_vid;
1039 for (size_t i = 0; i < 3; ++i)
1040 {
1041 tmp1[3 * cnt + i] = (NekDouble)(3 * vid + i + 1);
1042 }
1043 }
1044
1045 m_intComm = Gs::Init(tmp1, m_comm->GetRowComm(), verbose);
1046}
1047
1050 map<int, LibUtilities::CommSharedPtr> &domComm)
1051{
1052
1054 Allbcs.GetBoundaryRegions();
1055
1056 /* Loop over domain and find out if we have any undefined
1057 * boundary conditions representing interfaces. If so make a
1058 * map based around vid and storing the domains that are
1059 * part of interfaces.
1060 */
1061 int numNewBc = 1;
1062 for (auto &d : m_domOrder)
1063 {
1064
1065 map<int, SpatialDomains::GeometrySharedPtr> domvids;
1066
1067 // Loop over each domain and find which vids are only touched
1068 // by one element
1069 for (auto &compIt : m_domain[d])
1070 {
1071 for (size_t j = 0; j < compIt.second->m_geomVec.size(); ++j)
1072 {
1073
1074 // get hold of vids of each segment
1075 for (size_t p = 0; p < 2; ++p)
1076 {
1078 compIt.second->m_geomVec[j]->GetVertex(p);
1079 int vid = vert->GetGlobalID();
1080
1081 // if vid has already been added remove it otherwise add
1082 // into set
1083 if (domvids.count(vid))
1084 {
1085 domvids.erase(vid);
1086 }
1087 else
1088 {
1089 domvids[vid] = vert;
1090 }
1091 }
1092 }
1093 }
1094
1095 ASSERTL1(domvids.size() == 2, "Failed to find two end points "
1096 "of a domain (domvids = " +
1097 std::to_string(domvids.size()) + ")");
1098
1099 size_t nprocs = domComm[d]->GetSize();
1100
1101 if (nprocs > 1) // Remove parallel interfaces
1102 {
1103 size_t rank = domComm[d]->GetRank();
1104 Array<OneD, int> nvids(nprocs, 0);
1105 nvids[rank] = domvids.size();
1106 domComm[d]->AllReduce(nvids, LibUtilities::ReduceSum);
1107
1108 size_t totvids = Vmath::Vsum(nprocs, nvids, 1);
1109
1110 Array<OneD, int> locids(totvids, -1);
1111
1112 size_t cnt = 0;
1113 for (size_t i = 0; i < rank; ++i)
1114 {
1115 cnt += nvids[i];
1116 }
1117
1118 for (auto &i : domvids)
1119 {
1120 locids[cnt++] = i.first;
1121 }
1122
1123 // collect lcoal vids on this domain on all processor
1124 domComm[d]->AllReduce(locids, LibUtilities::ReduceMax);
1125
1126 set<int> chkvids;
1127 for (size_t i = 0; i < totvids; ++i)
1128 {
1129 if (chkvids.count(locids[i]))
1130 {
1131 // if this id is on local domain then remove
1132 if (domvids.count(locids[i]))
1133 {
1134 domvids.erase(locids[i]);
1135 }
1136 }
1137 else
1138 {
1139 chkvids.insert(locids[i]);
1140 }
1141 }
1142 }
1143
1144 // Finally check if remaining domvids are already declared, if
1145 // not add them
1146
1147 // Erase from domvids any existing BCs
1148 for (auto &it : bregions)
1149 {
1150 for (auto &bregionIt : *it.second)
1151 {
1152 // can assume that all regions only contain one point in 1D
1153 // Really do not need loop above
1154 int id = bregionIt.second->m_geomVec[0]->GetGlobalID();
1155
1156 if (domvids.count(id))
1157 {
1158 domvids.erase(id);
1159 }
1160 }
1161 }
1162
1163 // Finally add interface conditions to Allbcs
1164 std::vector<std::string> variables = m_session->GetVariables();
1165
1166 // determine the maxmimum bregion index
1167 int maxbregind = 0;
1168 for (auto &b : bregions)
1169 {
1170 maxbregind = (maxbregind > b.first) ? maxbregind : b.first;
1171 }
1172
1173 for (auto &b : domvids)
1174 {
1177 SpatialDomains::BoundaryRegion>::AllocateSharedPtr());
1178
1179 // Set up Composite (GemetryVector) to contain vertex and put into
1180 // bRegion
1183 gvec->m_geomVec.push_back(b.second);
1184 (*breg)[b.first] = gvec;
1185
1186 Allbcs.AddBoundaryRegions(maxbregind + numNewBc, breg);
1187
1190 SpatialDomains::BoundaryConditionMap>::AllocateSharedPtr();
1191
1192 std::string userDefined = "Interface";
1193 // Set up just boundary condition for this variable.
1194 SpatialDomains::BoundaryConditionShPtr DirichletInterface(
1196 AllocateSharedPtr(m_session, "0", userDefined));
1197
1198 for (size_t i = 0; i < variables.size(); ++i)
1199 {
1200 (*bCondition)[variables[i]] = DirichletInterface;
1201 }
1202
1203 Allbcs.AddBoundaryConditions(maxbregind + numNewBc, bCondition);
1204 ++numNewBc;
1205 }
1206 }
1207}
1208
1209/**
1210 * Initialisation routine for multiple subdomain case. Sets the
1211 * initial conditions for all arterial subdomains read from the
1212 * inputfile. Sets the material properties and the A_0 area for
1213 * all subdomains and fills the domain-linking boundary
1214 * conditions with the initial values of their domain.
1215 */
1217 [[maybe_unused]] bool dumpInitialConditions)
1218{
1219 if (m_session->GetComm()->GetRank() == 0)
1220 {
1221 cout << "Initial Conditions: " << endl;
1222 }
1223
1224 /* Loop over all subdomains to initialize all with the Initial
1225 * Conditions read from the inputfile*/
1226 int omega = 0;
1227 for (auto &d : m_domOrder)
1228 {
1229 for (size_t i = 0; i < 2; ++i)
1230 {
1231 m_fields[i] = m_vessels[m_nVariables * omega + i];
1232 }
1233
1234 if (m_session->GetComm()->GetRank() == 0)
1235 {
1236 cout << "Subdomain: " << omega << endl;
1237 }
1238
1239 SetInitialConditions(0.0, false, d);
1240 omega++;
1241 }
1242
1243 // Reset 2 variables to first vessels
1244 for (size_t i = 0; i < 2; ++i)
1245 {
1246 m_fields[i] = m_vessels[i];
1247 }
1248}
1249
1250/**
1251 * NEEDS Updating:
1252 *
1253 * DoSolve routine for PulseWavePropagation with multiple
1254 * subdomains taken from UnsteadySystem and modified for
1255 * multidomain case. Initialises the time integration scheme (as
1256 * specified in the session file), and perform the time
1257 * integration. Within the timestepping loop the following is
1258 * done: 1. Link all arterial segments according to the network
1259 * structure, solve the Riemann problem between different
1260 * arterial segments and assign the values to the boundary
1261 * conditions (LinkSubdomains) 2. Every arterial segment is
1262 * solved independently for this timestep. This is done by handing
1263 * the solution vector \f$ \mathbf{u} \f$ and the right hand side
1264 * m_ode, which is the PulseWavePropagation class in this example
1265 * over to the time integration scheme
1266 */
1268{
1269 // NekDouble IntegrationTime = 0.0;
1270 size_t i;
1271 int n;
1272 int nchk = 1;
1273
1275
1276 for (i = 0; i < m_nVariables; ++i)
1277 {
1278 fields[i] = m_vessels[i]->UpdatePhys();
1279 m_fields[i]->SetPhysState(false);
1280 }
1281
1282 m_intScheme->InitializeScheme(m_timestep, fields, m_time, m_ode);
1283
1284 // Time loop
1285 for (n = 0; n < m_steps; ++n)
1286 {
1287 LibUtilities::Timer time_v_IntStep;
1288 time_v_IntStep.Start();
1289 fields = m_intScheme->TimeIntegrate(n, m_timestep);
1290 m_time += m_timestep;
1291 time_v_IntStep.Stop();
1292 time_v_IntStep.AccumulateRegion("PulseWaveSystem::TimeIntegrationStep",
1293 0);
1294 // IntegrationTime += timer.TimePerTest(1);
1295
1296 // Write out status information.
1297 if (m_infosteps && !((n + 1) % m_infosteps) &&
1298 m_session->GetComm()->GetRank() == 0)
1299 {
1300 cout << "Steps: " << n + 1 << "\t Time: " << m_time
1301 << "\t Time-step: " << m_timestep << "\t" << endl;
1302 }
1303
1304 // Transform data if needed
1305 if (m_checksteps && !((n + 1) % m_checksteps))
1306 {
1307 for (i = 0; i < m_nVariables; ++i)
1308 {
1309 size_t cnt = 0;
1310 for (size_t omega = 0; omega < m_nDomains; omega++)
1311 {
1312 m_vessels[omega * m_nVariables + i]->FwdTrans(
1313 fields[i] + cnt,
1314 m_vessels[omega * m_nVariables + i]->UpdateCoeffs());
1315 cnt += m_vessels[omega * m_nVariables + i]->GetTotPoints();
1316 }
1317 }
1318 CheckPoint_Output(nchk++);
1319 }
1320
1321 } // end of timeintegration
1322
1323 // Copy Array To Vessel Phys Fields
1324 for (size_t i = 0; i < m_nVariables; ++i)
1325 {
1326 Vmath::Vcopy(fields[i].size(), fields[i], 1, m_vessels[i]->UpdatePhys(),
1327 1);
1328 }
1329
1330 /** if(m_session->GetComm()->GetRank() == 0)
1331 {
1332 cout << "Time-integration timing: " << IntegrationTime << " s" <<
1333 endl
1334 << endl;
1335 } **/
1336}
1337
1340     const Array<OneD, const Array<OneD, NekDouble>> &fields, NekDouble &A,
1341     NekDouble &u, NekDouble &beta, NekDouble &A_0, NekDouble &alpha)
1342 {
     // Extracts the trace values of area (A) and velocity (u) at interface
     // point I from the concatenated solution arrays, together with the
     // material parameters (beta, A_0, alpha) stored on the trace.
1343     LibUtilities::Timer time_FillDataFromInterfacePoint;
1344     time_FillDataFromInterfacePoint.Start();
1345
     // Unpack the interface point: owning domain, trace id, element id and
     // the local vertex of that element sitting on the interface.
1346     int omega = I->m_domain;
1347     int traceId = I->m_traceId;
1348     int eid = I->m_elmt;
1349     int vert = I->m_elmtVert;
1350     int vesselID = omega * m_nVariables;
1351     int phys_offset = m_vessels[vesselID]->GetPhys_Offset(eid);
1354
     // Evaluate A (fields[0]) and u (fields[1]) at the element vertex;
     // m_fieldPhysOffset[omega] locates this domain inside the global array.
1355     m_vessels[vesselID]->GetExp(eid)->GetTracePhysVals(
1356         vert, dumExp, fields[0] + m_fieldPhysOffset[omega] + phys_offset, Tmp);
1357     A = Tmp[0];
1358     m_vessels[vesselID]->GetExp(eid)->GetTracePhysVals(
1359         vert, dumExp, fields[1] + m_fieldPhysOffset[omega] + phys_offset, Tmp);
1360     u = Tmp[0];
1361
1362     beta = m_beta_trace[omega][traceId];
1363     A_0 = m_A_0_trace[omega][traceId];
1364     alpha = m_alpha_trace[omega][traceId];
1365     time_FillDataFromInterfacePoint.Stop();
1366     time_FillDataFromInterfacePoint.AccumulateRegion(
1367         "PulseWaveSystem::FillDataFromInterfacePoint", 2);
1368 }
1369
1371     const Array<OneD, const Array<OneD, NekDouble>> &fields)
1372 {
     // Gathers (A, u, beta, A_0, alpha) at every bifurcation, merging
     // junction and two-vessel interface, solves the corresponding Riemann
     // problems, and writes the upwinded (Au, uu) values back into each
     // vessel's boundary-condition expansions. Junctions are indexed by a
     // global counter cnt, each owning a stride of 3 slots in the arrays.
1373     LibUtilities::Timer time_EnforceInterfaceConditions;
1374     time_EnforceInterfaceConditions.Start();
1375     int dom, bcpos;
1376
1377     int totif =
1378         m_bifurcations.size() + m_mergingJcts.size() + m_vesselIntfcs.size();
1379
     // The *t arrays are unallocated aliases used below to pass 3-slot
     // windows (offset views) of the packed arrays to the Riemann solvers.
1380     Array<OneD, NekDouble> Aut, Au(3 * totif, 0.0);
1381     Array<OneD, NekDouble> uut, uu(3 * totif, 0.0);
1382     Array<OneD, NekDouble> betat, beta(3 * totif, 0.0);
1383     Array<OneD, NekDouble> A_0t, A_0(3 * totif, 0.0);
1384     Array<OneD, NekDouble> alphat, alpha(3 * totif, 0.0);
1385
1386     // Bifurcations Data:
1387     size_t cnt = 0;
1388     for (size_t n = 0; n < m_bifurcations.size(); ++n, ++cnt)
1389     {
1390         for (size_t i = 0; i < m_bifurcations[n].size(); ++i)
1391         {
             // m_riemannOrd fixes the slot (0..2) of this vessel within
             // the junction's 3-slot window.
1392             size_t l = m_bifurcations[n][i]->m_riemannOrd;
1394                 m_bifurcations[n][i], fields, Au[l + 3 * cnt], uu[l + 3 * cnt],
1395                 beta[l + 3 * cnt], A_0[l + 3 * cnt], alpha[l + 3 * cnt]);
1396         }
1397     }
1398
1399     // Enforce Merging vessles Data:
1400     for (size_t n = 0; n < m_mergingJcts.size(); ++n, ++cnt)
1401     {
1402         // Merged vessel
         // NOTE(review): this inner bound is m_mergingJcts.size(); the
         // analogous loops here use m_mergingJcts[n].size() (per-junction
         // vessel count). This looks like a bug — confirm and fix upstream.
1403         for (size_t i = 0; i < m_mergingJcts.size(); ++i)
1404         {
1405             int l = m_mergingJcts[n][i]->m_riemannOrd;
1407                 m_mergingJcts[n][i], fields, Au[l + 3 * cnt], uu[l + 3 * cnt],
1408                 beta[l + 3 * cnt], A_0[l + 3 * cnt], alpha[l + 3 * cnt]);
1409         }
1410     }
1411
1412     // Enforce interface junction between two vessesls
1413     for (size_t n = 0; n < m_vesselIntfcs.size(); ++n, ++cnt)
1414     {
1415         for (size_t i = 0; i < m_vesselIntfcs[n].size(); ++i)
1416         {
1417             int l = m_vesselIntfcs[n][i]->m_riemannOrd;
1419                 m_vesselIntfcs[n][i], fields, Au[l + 3 * cnt], uu[l + 3 * cnt],
1420                 beta[l + 3 * cnt], A_0[l + 3 * cnt], alpha[l + 3 * cnt]);
1421         }
1422     }
1423
1424     // Gather data if running in parallel
1430
1431     // Enforce Bifurcations:
1432     cnt = 0;
1433     for (size_t n = 0; n < m_bifurcations.size(); ++n, ++cnt)
1434     {
1435         // Solve the Riemann problem for a bifurcation
1436         BifurcationRiemann(Aut = Au + 3 * cnt, uut = uu + 3 * cnt,
1437                            betat = beta + 3 * cnt, A_0t = A_0 + 3 * cnt,
1438                            alphat = alpha + 3 * cnt);
1439
1440         // Store the values into the right positions:
1441         for (size_t i = 0; i < m_bifurcations[n].size(); ++i)
1442         {
1443             dom = m_bifurcations[n][i]->m_domain;
1444             bcpos = m_bifurcations[n][i]->m_bcPosition;
1445             int l = m_bifurcations[n][i]->m_riemannOrd;
             // Write upwinded area into variable 0, velocity into variable 1
             // of the owning vessel's boundary expansion.
1446             m_vessels[dom * m_nVariables]
1447                 ->UpdateBndCondExpansion(bcpos)
1448                 ->UpdatePhys()[0] = Au[l + 3 * cnt];
1449             m_vessels[dom * m_nVariables + 1]
1450                 ->UpdateBndCondExpansion(bcpos)
1451                 ->UpdatePhys()[0] = uu[l + 3 * cnt];
1452         }
1453     }
1454
1455     // Enforce Merging vessles;
1456     for (size_t n = 0; n < m_mergingJcts.size(); ++n, ++cnt)
1457     {
1458         // Solve the Riemann problem for a merging vessel
1459         MergingRiemann(Aut = Au + 3 * cnt, uut = uu + 3 * cnt,
1460                        betat = beta + 3 * cnt, A_0t = A_0 + 3 * cnt,
1461                        alphat = alpha + 3 * cnt);
1462
1463         // Store the values into the right positions:
1464         for (size_t i = 0; i < m_mergingJcts[n].size(); ++i)
1465         {
1466             int dom = m_mergingJcts[n][i]->m_domain;
1467             int bcpos = m_mergingJcts[n][i]->m_bcPosition;
1468             int l = m_mergingJcts[n][i]->m_riemannOrd;
1469             m_vessels[dom * m_nVariables]
1470                 ->UpdateBndCondExpansion(bcpos)
1471                 ->UpdatePhys()[0] = Au[l + 3 * cnt];
1472             m_vessels[dom * m_nVariables + 1]
1473                 ->UpdateBndCondExpansion(bcpos)
1474                 ->UpdatePhys()[0] = uu[l + 3 * cnt];
1475         }
1476     }
1477
1478     // Enforce interface junction between two vessesls
1479     for (size_t n = 0; n < m_vesselIntfcs.size(); ++n, ++cnt)
1480     {
1481         InterfaceRiemann(Aut = Au + 3 * cnt, uut = uu + 3 * cnt,
1482                          betat = beta + 3 * cnt, A_0t = A_0 + 3 * cnt,
1483                          alphat = alpha + 3 * cnt);
1484
1485         // Store the values into the right positions:
1486         for (size_t i = 0; i < m_vesselIntfcs[n].size(); ++i)
1487         {
1488             int dom = m_vesselIntfcs[n][i]->m_domain;
1489             int bcpos = m_vesselIntfcs[n][i]->m_bcPosition;
1490             int l = m_vesselIntfcs[n][i]->m_riemannOrd;
1491             m_vessels[dom * m_nVariables]
1492                 ->UpdateBndCondExpansion(bcpos)
1493                 ->UpdatePhys()[0] = Au[l + 3 * cnt];
1494             m_vessels[dom * m_nVariables + 1]
1495                 ->UpdateBndCondExpansion(bcpos)
1496                 ->UpdatePhys()[0] = uu[l + 3 * cnt];
1497         }
1498     }
1499     time_EnforceInterfaceConditions.Stop();
1500     time_EnforceInterfaceConditions.AccumulateRegion(
1501         "PulseWaveSystem::EnforceInterfaceConditions", 1);
1502 }
1503
1504/**
1505 * Solves the Riemann problem at a bifurcation by assuming
1506 * subsonic flow at both sides of the boundary and by
1507 * applying conservation of mass and continuity of the total
1508 * pressure \f$ \frac{p}{rho} + \frac{u^{2}}{2}. \f$ The
1509 * other 3 missing equations come from the characteristic
1510 * variables. For further information see "Pulse
1511 * WavePropagation in the human vascular system" Section
1512 * 3.4.4
1513 */
1519{
1520
1521 NekDouble rho = m_rho;
1523 Array<OneD, NekDouble> P_Au(3);
1524 Array<OneD, NekDouble> W_Au(3);
1525 NekMatrix<NekDouble> invJ(6, 6);
1528
1529 int proceed = 1;
1530 int iter = 0;
1531 int MAX_ITER = 100;
1532
1533 // Forward characteristic
1534 m_pressureArea->GetW1(W[0], uu[0], beta[0], Au[0], A_0[0], alpha[0]);
1535
1536 // Backward characteristics
1537 for (size_t i = 1; i < 3; ++i)
1538 {
1539 m_pressureArea->GetW2(W[i], uu[i], beta[i], Au[i], A_0[i], alpha[i]);
1540 }
1541
1542 // Tolerances for the algorithm
1543 NekDouble Tol = 1.0E-10;
1544
1545 // Newton Iteration
1546 while ((proceed) && (iter < MAX_ITER))
1547 {
1548 LibUtilities::Timer time_BifurcationRiemann;
1549 time_BifurcationRiemann.Start();
1550 iter += 1;
1551
1552 /*
1553 * We solve the six constraint equations via a multivariate Newton
1554 * iteration. Equations are:
1555 * 1. Forward characteristic: W1(A_L, U_L) = W1(Au_L, Uu_L)
1556 * 2. Backward characteristic 1: W2(A_R1, U_R1) = W2(Au_R1, Uu_R1)
1557 * 3. Backward characteristic 2: W2(A_R2, U_R2) = W2(Au_R2, Uu_R2)
1558 * 4. Conservation of mass: Au_L * Uu_L = Au_R1 * Uu_R1 +
1559 * Au_R2 * Uu_R2
1560 * 5. Continuity of total pressure 1: rho * Uu_L * Uu_L / 2 + p(Au_L)
1561 * = rho * Uu_R1 * Uu_R1 / 2 + p(Au_R1)
1562 * 6. Continuity of total pressure 2: rho * Uu_L * Uu_L / 2 + p(Au_L)
1563 * = rho * Uu_R2 * Uu_R2 / 2 + p(Au_R2)
1564 */
1565 for (size_t i = 0; i < 3; ++i)
1566 {
1567 m_pressureArea->GetPressure(P_Au[i], beta[i], Au[i], A_0[i], 0, 0,
1568 alpha[i]);
1569 }
1570
1571 m_pressureArea->GetW1(W_Au[0], uu[0], beta[0], Au[0], A_0[0], alpha[0]);
1572 m_pressureArea->GetW2(W_Au[1], uu[1], beta[1], Au[1], A_0[1], alpha[1]);
1573 m_pressureArea->GetW2(W_Au[2], uu[2], beta[2], Au[2], A_0[2], alpha[2]);
1574
1575 // Constraint equations set to zero
1576 f[0] = W_Au[0] - W[0];
1577 f[1] = W_Au[1] - W[1];
1578 f[2] = W_Au[2] - W[2];
1579 f[3] = Au[0] * uu[0] - Au[1] * uu[1] - Au[2] * uu[2];
1580 f[4] = uu[0] * uu[0] + 2 * P_Au[0] / rho - uu[1] * uu[1] -
1581 2 * P_Au[1] / rho;
1582 f[5] = uu[0] * uu[0] + 2 * P_Au[0] / rho - uu[2] * uu[2] -
1583 2 * P_Au[2] / rho;
1584
1585 // Inverse Jacobian for x + dx = x - J^(-1) * f(x)
1586 m_pressureArea->GetJacobianInverse(invJ, Au, uu, beta, A_0, alpha,
1587 "Bifurcation");
1588
1589 Multiply(dx, invJ, f);
1590
1591 // Update the solution: x_new = x_old - dx
1592 for (int i = 0; i < 3; ++i)
1593 {
1594 uu[i] -= dx[i];
1595 Au[i] -= dx[i + 3];
1596 }
1597
1598 // Check if the error of the solution is smaller than Tol
1599 if (Dot(dx, dx) < Tol)
1600 {
1601 proceed = 0;
1602 }
1603
1604 // Check if solver converges
1605 if (iter >= MAX_ITER)
1606 {
1607 ASSERTL0(false, "Riemann solver for Bifurcation did not converge.");
1608 }
1609 time_BifurcationRiemann.Stop();
1610 time_BifurcationRiemann.AccumulateRegion(
1611 "PulseWaveSystem::Bifurcation Riemann", 2);
1612 }
1613}
1614
1615/**
1616 * Solves the Riemann problem at an merging flow condition by
1617 * assuming subsonic flow at both sides of the boundary and by
1618 * applying conservation of mass and continuity of the total
1619 * pressure \f$ \frac{p}{rho} + \frac{u^{2}}{2}. \f$ The other 3
1620 * missing equations come from the characteristic variables. For
1621 * further information see "Pulse WavePropagation in the human
1622 * vascular system" Section 3.4.4
1623 */
1629{
1630
1631 NekDouble rho = m_rho;
1633 Array<OneD, NekDouble> W_Au(3);
1634 Array<OneD, NekDouble> P_Au(3);
1635 NekMatrix<NekDouble> invJ(6, 6);
1638
1639 int proceed = 1;
1640 int iter = 0;
1641 int MAX_ITER = 15;
1642
1643 // Backward characteristic
1644 m_pressureArea->GetW2(W[0], uu[0], beta[0], Au[0], A_0[0], alpha[0]);
1645
1646 // Forward characteristics
1647 for (size_t i = 1; i < 3; ++i)
1648 {
1649 m_pressureArea->GetW1(W[i], uu[i], beta[i], Au[i], A_0[i], alpha[i]);
1650 }
1651
1652 // Tolerances for the algorithm
1653 NekDouble Tol = 1.0E-10;
1654
1655 // Newton Iteration
1656 while ((proceed) && (iter < MAX_ITER))
1657 {
1658 LibUtilities::Timer time_MergingRiemann;
1659 time_MergingRiemann.Start();
1660 iter += 1;
1661
1662 /*
1663 * We solve the six constraint equations via a multivariate Newton
1664 * iteration. Equations are:
1665 * 1. Backward characteristic: W2(A_R, U_R) = W1(Au_R, Uu_R)
1666 * 2. Forward characteristic 1: W1(A_L1, U_L1) = W1(Au_L1,
1667 * Uu_R1)
1668 * 3. Forward characteristic 2: W1(A_L2, U_L2) = W1(Au_L2,
1669 * Uu_L2)
1670 * 4. Conservation of mass: Au_R * Uu_R = Au_L1 * Uu_L1 +
1671 * Au_L2 * Uu_L2
1672 * 5. Continuity of total pressure 1: rho * Uu_R * Uu_R / 2 + p(Au_R)
1673 * = rho * Uu_L1 * Uu_L1 / 2 + p(Au_L1)
1674 * 6. Continuity of total pressure 2: rho * Uu_R * Uu_R / 2 + p(Au_R)
1675 * = rho * Uu_L2 * Uu_L2 / 2 + p(Au_L2)
1676 */
1677 for (size_t i = 0; i < 3; ++i)
1678 {
1679 m_pressureArea->GetPressure(P_Au[i], beta[i], Au[i], A_0[i], 0, 0,
1680 alpha[i]);
1681 }
1682
1683 m_pressureArea->GetW2(W_Au[0], uu[0], beta[0], Au[0], A_0[0], alpha[0]);
1684 m_pressureArea->GetW1(W_Au[1], uu[1], beta[1], Au[1], A_0[1], alpha[1]);
1685 m_pressureArea->GetW1(W_Au[2], uu[2], beta[2], Au[2], A_0[2], alpha[2]);
1686
1687 // Constraint equations set to zero
1688 f[0] = W_Au[0] - W[0];
1689 f[1] = W_Au[1] - W[1];
1690 f[2] = W_Au[2] - W[2];
1691 f[3] = Au[0] * uu[0] - Au[1] * uu[1] - Au[2] * uu[2];
1692 f[4] = uu[0] * uu[0] + 2 * P_Au[0] / rho - uu[1] * uu[1] -
1693 2 * P_Au[1] / rho;
1694 f[5] = uu[0] * uu[0] + 2 * P_Au[0] / rho - uu[2] * uu[2] -
1695 2 * P_Au[2] / rho;
1696
1697 // Inverse Jacobian for x + dx = x - J^(-1) * f(x)
1698 m_pressureArea->GetJacobianInverse(invJ, Au, uu, beta, A_0, alpha,
1699 "Merge");
1700
1701 Multiply(dx, invJ, f);
1702
1703 // Update the solution: x_new = x_old - dx
1704 for (size_t i = 0; i < 3; ++i)
1705 {
1706 uu[i] -= dx[i];
1707 Au[i] -= dx[i + 3];
1708 }
1709
1710 // Check if the error of the solution is smaller than Tol
1711 if (Dot(dx, dx) < Tol)
1712 {
1713 proceed = 0;
1714 }
1715
1716 // Check if solver converges
1717 if (iter >= MAX_ITER)
1718 {
1719 ASSERTL0(false, "Riemann solver for Merging Flow did not converge");
1720 }
1721 time_MergingRiemann.Stop();
1722 time_MergingRiemann.AccumulateRegion("PulseWaveSystem::MergingRiemann",
1723 2);
1724 }
1725}
1726
1727/**
1728 * Solves the Riemann problem at an interdomain
1729 * junction/Interface by assuming subsonic flow at both sides of
1730 * the boundary and by applying conservation of mass and
1731 * continuity of the total pressure \f$ \frac{p}{rho} +
1732 * \frac{u^{2}}{2}. \f$ The other 2 missing equations come from
1733 * the characteristic variables. For further information see
1734 * "Pulse WavePropagation in the human vascular system" Section
1735 * 3.4.
1736 */
1742{
1743
1744 NekDouble rho = m_rho;
1746 Array<OneD, NekDouble> W_Au(2);
1747 Array<OneD, NekDouble> P_Au(2);
1748 NekMatrix<NekDouble> invJ(4, 4);
1751
1752 int proceed = 1;
1753 int iter = 0;
1754 int MAX_ITER = 15;
1755 NekDouble Tol = 1.0E-10;
1756
1757 // Forward and backward characteristics
1758 m_pressureArea->GetW1(W[0], uu[0], beta[0], Au[0], A_0[0], alpha[0]);
1759 m_pressureArea->GetW2(W[1], uu[1], beta[1], Au[1], A_0[1], alpha[1]);
1760
1761 while ((proceed) && (iter < MAX_ITER))
1762 {
1763 LibUtilities::Timer time_InterfaceRiemann;
1764 time_InterfaceRiemann.Start();
1765 iter += 1;
1766
1767 /*
1768 * We solve the four constraint equations via a multivariate Newton
1769 * iteration. Equations are:
1770 * 1. Forward characteristic: W1(A_L, U_L) = W1(Au_L, Uu_L)
1771 * 2. Backward characteristic: W2(A_R, U_R) = W2(Au_R, Uu_R)
1772 * 3. Conservation of mass: Au_L * Uu_L = Au_R * Uu_R
1773 * 4. Continuity of total pressure: rho * Uu_L * Uu_L / 2 + p(Au_L) =
1774 * rho * Uu_R * Uu_R / 2 + p(Au_R)
1775 */
1776 for (size_t i = 0; i < 2; ++i)
1777 {
1778 m_pressureArea->GetPressure(P_Au[i], beta[i], Au[i], A_0[i], 0, 0,
1779 alpha[i]);
1780 }
1781
1782 m_pressureArea->GetW1(W_Au[0], uu[0], beta[0], Au[0], A_0[0], alpha[0]);
1783 m_pressureArea->GetW2(W_Au[1], uu[1], beta[1], Au[1], A_0[1], alpha[1]);
1784
1785 // Constraint equations set to zero
1786 f[0] = W_Au[0] - W[0];
1787 f[1] = W_Au[1] - W[1];
1788 f[2] = Au[0] * uu[0] - Au[1] * uu[1];
1789 f[3] = uu[0] * uu[0] + 2 * P_Au[0] / rho - uu[1] * uu[1] -
1790 2 * P_Au[1] / rho;
1791
1792 // Inverse Jacobian for x + dx = x - J^(-1) * f(x)
1793 m_pressureArea->GetJacobianInverse(invJ, Au, uu, beta, A_0, alpha,
1794 "Interface");
1795
1796 Multiply(dx, invJ, f);
1797
1798 // Update solution: x_new = x_old - dx
1799 for (size_t i = 0; i < 2; ++i)
1800 {
1801 uu[i] -= dx[i];
1802 Au[i] -= dx[i + 2];
1803 }
1804
1805 // Check if the error of the solution is smaller than Tol.
1806 if (Dot(dx, dx) < Tol)
1807 {
1808 proceed = 0;
1809 }
1810 time_InterfaceRiemann.Stop();
1811 time_InterfaceRiemann.AccumulateRegion(
1812 "PulseWaveSystem::InterfaceRiemann", 2);
1813 }
1814
1815 if (iter >= MAX_ITER)
1816 {
1817 ASSERTL0(false, "Riemann solver for Interface did not converge");
1818 }
1819}
1820
1821/**
1822 * Writes the .fld file at the end of the simulation. Similar to the normal
1823 * v_Output however the Multidomain output has to be prepared.
1824 */
1826{
1827 /**
1828 * Write the field data to file. The file is named according to the session
1829 * name with the extension .fld appended.
1830 */
1831 std::string outname = m_sessionName + ".fld";
1832
1833 WriteVessels(outname);
1834}
1835
1836/**
1837 * Writes the .fld file at the end of the simulation. Similar to the normal
1838 * v_Output however the Multidomain output has to be prepared.
1839 */
1841{
1842 std::stringstream outname;
1843 outname << m_sessionName << "_" << n << ".chk";
1844
1845 WriteVessels(outname.str());
1846}
1847
1848/**
1849 * Writes the field data to a file with the given filename.
1850 * @param outname Filename to write to.
1851 */
1852void PulseWaveSystem::WriteVessels(const std::string &outname)
1853{
1854 std::vector<LibUtilities::FieldDefinitionsSharedPtr> FieldDef;
1855 std::vector<std::string> variables = m_session->GetVariables();
1856
1857 for (size_t n = 0; n < m_nDomains; ++n)
1858 {
1859 m_vessels[n * m_nVariables]->GetFieldDefinitions(FieldDef);
1860 }
1861
1862 std::vector<std::vector<NekDouble>> FieldData(FieldDef.size());
1863
1864 size_t nFieldDefPerDomain = FieldDef.size() / m_nDomains;
1865 size_t cnt;
1866
1867 // Copy Data into FieldData and set variable
1868 for (size_t n = 0; n < m_nDomains; ++n)
1869 {
1870 // Outputs area and velocity
1871 for (size_t j = 0; j < m_nVariables; ++j)
1872 {
1873 for (size_t i = 0; i < nFieldDefPerDomain; ++i)
1874 {
1875 cnt = n * nFieldDefPerDomain + i;
1876
1877 FieldDef[cnt]->m_fields.push_back(variables[j]);
1878
1879 m_vessels[n * m_nVariables]->AppendFieldData(
1880 FieldDef[cnt], FieldData[cnt],
1881 m_vessels[n * m_nVariables + j]->UpdateCoeffs());
1882 }
1883 }
1884
1885 // Outputs pressure
1887
1888 m_vessels[n * m_nVariables]->FwdTransLocalElmt(m_pressure[n], PFwd);
1889
1890 for (size_t i = 0; i < nFieldDefPerDomain; ++i)
1891 {
1892 cnt = n * nFieldDefPerDomain + i;
1893
1894 FieldDef[cnt]->m_fields.push_back("P");
1895
1896 m_vessels[n * m_nVariables]->AppendFieldData(FieldDef[cnt],
1897 FieldData[cnt], PFwd);
1898 }
1899
1900 if (extraFields)
1901 {
1908
1909 m_vessels[n * m_nVariables]->FwdTransLocalElmt(m_PWV[n], PWVFwd);
1910 m_vessels[n * m_nVariables]->FwdTransLocalElmt(m_W1[n], W1Fwd);
1911 m_vessels[n * m_nVariables]->FwdTransLocalElmt(m_W2[n], W2Fwd);
1912
1913 for (size_t i = 0; i < nFieldDefPerDomain; ++i)
1914 {
1915 cnt = n * nFieldDefPerDomain + i;
1916
1917 FieldDef[cnt]->m_fields.push_back("c");
1918 FieldDef[cnt]->m_fields.push_back("W1");
1919 FieldDef[cnt]->m_fields.push_back("W2");
1920
1921 m_vessels[n * m_nVariables]->AppendFieldData(
1922 FieldDef[cnt], FieldData[cnt], PWVFwd);
1923 m_vessels[n * m_nVariables]->AppendFieldData(
1924 FieldDef[cnt], FieldData[cnt], W1Fwd);
1925 m_vessels[n * m_nVariables]->AppendFieldData(
1926 FieldDef[cnt], FieldData[cnt], W2Fwd);
1927 }
1928 }
1929 }
1930
1931 // Update time in field info if required
1932 if (m_fieldMetaDataMap.find("Time") != m_fieldMetaDataMap.end())
1933 {
1934 m_fieldMetaDataMap["Time"] = boost::lexical_cast<std::string>(m_time);
1935 }
1936
1937 m_fld->Write(outname, FieldDef, FieldData, m_fieldMetaDataMap);
1938}
1939
1940 /* Compute the error in the L2-norm
1941  * @param field The field to compare.
1942  * @param exactsoln The exact solution to compare with.
1943  * @param Normalised Normalise L2-error.
1944  * @returns Error in the L2-norm.
1945  *
1946  * The error is accumulated per domain (squared) and summed across all
1947  * ranks; only the local root rank of each domain contributes so that
1948  * shared domains are not double counted.
1949  */
1947     unsigned int field, [[maybe_unused]] const Array<OneD, NekDouble> &exact,
1948     bool Normalised)
1949 {
1950     NekDouble L2error = 0.0;
1951     NekDouble L2error_dom;
1952     NekDouble Vol = 0.0;
1953
1954     if (m_NumQuadPointsError == 0)
1955     {
1956         for (size_t omega = 0; omega < m_nDomains; omega++)
1957         {
1958             size_t vesselid = field + omega * m_nVariables;
1959
1960             // since exactsoln is passed for just the first field size we need
1961             // to reset it each domain
1962             Array<OneD, NekDouble> exactsoln(m_vessels[vesselid]->GetNpoints());
1963             m_fields[field] = m_vessels[omega * m_nVariables];
1964             EvaluateExactSolution(field, exactsoln, m_time);
1965
             // Ensure the vessel holds up-to-date physical values.
1966             if (m_vessels[vesselid]->GetPhysState() == false)
1967             {
1968                 m_vessels[vesselid]->BwdTrans(
1969                     m_vessels[vesselid]->GetCoeffs(),
1970                     m_vessels[vesselid]->UpdatePhys());
1971             }
1972
1973             if (exactsoln.size())
1974             {
1975                 L2error_dom = m_vessels[vesselid]->L2(
1976                     m_vessels[vesselid]->GetPhys(), exactsoln);
1977             }
1978             else if (m_session->DefinesFunction("ExactSolution"))
1979             {
                 // Re-evaluate the session-defined exact solution on this
                 // domain's quadrature points (shadows the outer exactsoln).
1980                 Array<OneD, NekDouble> exactsoln(
1981                     m_vessels[vesselid]->GetNpoints());
1982
1984                     m_session->GetFunction("ExactSolution", field, omega);
1985                 GetFunction("ExactSolution")
1986                     ->Evaluate(m_session->GetVariable(field), exactsoln,
1987                                m_time);
1988
1989                 L2error_dom = m_vessels[vesselid]->L2(
1990                     m_vessels[vesselid]->GetPhys(), exactsoln);
1991             }
1992             else
1993             {
                 // No exact solution available: report the solution norm.
1994                 L2error_dom =
1995                     m_vessels[vesselid]->L2(m_vessels[vesselid]->GetPhys());
1996             }
1997
1998             if (m_vessels[vesselid]->GetComm()->GetRank())
1999             {
2000                 // ensure domains are only summed on local root of
2001                 // domain
2002                 L2error_dom = 0.0;
2003             }
2004
2005             L2error += L2error_dom * L2error_dom;
2006
             // Accumulate the domain volume (integral of 1) for
             // normalisation.
2007             if (Normalised == true)
2008             {
2010                                            1.0);
2011
2012                 Vol += m_vessels[vesselid]->Integral(one);
2013             }
2014         }
2015     }
2016     else
2017     {
2018         ASSERTL0(false, "Not set up");
2019     }
2020
2021     m_comm->AllReduce(L2error, LibUtilities::ReduceSum);
2022
2023     if (Normalised == true)
2024     {
2025         m_comm->AllReduce(Vol, LibUtilities::ReduceSum);
2026
2027         L2error = sqrt(L2error / Vol);
2028     }
2029     else
2030     {
2031         L2error = sqrt(L2error);
2032     }
2033
2034     return L2error;
2035 }
2036
2037/**
2038 * Compute the error in the L_inf-norm
2039 * @param field The field to compare.
2040 * @param exactsoln The exact solution to compare with.
2041 * @returns Error in the L_inft-norm.
2042 */
2044 unsigned int field, [[maybe_unused]] const Array<OneD, NekDouble> &exact)
2045{
2046 NekDouble LinferrorDom, Linferror = -1.0;
2047
2048 for (size_t omega = 0; omega < m_nDomains; ++omega)
2049 {
2050 size_t vesselid = field + omega * m_nVariables;
2051
2052 // since exactsoln is passed for just the first field size we need
2053 // to reset it each domain
2054 Array<OneD, NekDouble> exactsoln(m_vessels[vesselid]->GetNpoints());
2055 m_fields[field] = m_vessels[omega * m_nVariables];
2056 EvaluateExactSolution(field, exactsoln, m_time);
2057
2058 if (m_NumQuadPointsError == 0)
2059 {
2060 if (m_vessels[vesselid]->GetPhysState() == false)
2061 {
2062 m_vessels[vesselid]->BwdTrans(
2063 m_vessels[vesselid]->GetCoeffs(),
2064 m_vessels[vesselid]->UpdatePhys());
2065 }
2066
2067 if (exactsoln.size())
2068 {
2069 LinferrorDom = m_vessels[vesselid]->Linf(
2070 m_vessels[vesselid]->GetPhys(), exactsoln);
2071 }
2072 else if (m_session->DefinesFunction("ExactSolution"))
2073 {
2074 Array<OneD, NekDouble> exactsoln(
2075 m_vessels[vesselid]->GetNpoints());
2076
2077 GetFunction("ExactSolution")
2078 ->Evaluate(m_session->GetVariable(field), exactsoln,
2079 m_time);
2080
2081 LinferrorDom = m_vessels[vesselid]->Linf(
2082 m_vessels[vesselid]->GetPhys(), exactsoln);
2083 }
2084 else
2085 {
2086 LinferrorDom = 0.0;
2087 }
2088
2089 Linferror = (Linferror > LinferrorDom) ? Linferror : LinferrorDom;
2090 }
2091 else
2092 {
2093 ASSERTL0(false, "ErrorExtraPoints not allowed for this solver");
2094 }
2095 }
2096 m_comm->AllReduce(Linferror, LibUtilities::ReduceMax);
2097 return Linferror;
2098}
2099
2100} // namespace Nektar
#define ASSERTL0(condition, msg)
Definition: ErrorUtil.hpp:208
#define ASSERTL1(condition, msg)
Assert Level 1 – Debugging which is used whether in FULLDEBUG or DEBUG compilation mode....
Definition: ErrorUtil.hpp:242
tBaseSharedPtr CreateInstance(tKey idKey, tParam... args)
Create an instance of the class referred to by idKey.
Definition: NekFactory.hpp:143
void AccumulateRegion(std::string, int iolevel=0)
Accumulate elapsed time for a region.
Definition: Timer.cpp:70
General purpose memory allocation routines with the ability to allocate from thread specific memory p...
static std::shared_ptr< DataType > AllocateSharedPtr(const Args &...args)
Allocate a shared pointer from the memory pool.
Array< OneD, Array< OneD, NekDouble > > m_A_0
Array< OneD, int > m_fieldPhysOffset
void v_DoInitialise(bool dumpInitialConditions=false) override
Sets up initial conditions.
Array< OneD, Array< OneD, NekDouble > > m_beta_trace
Array< OneD, Array< OneD, NekDouble > > m_W2
Array< OneD, Array< OneD, NekDouble > > m_trace_fwd_normal
PulseWavePressureAreaSharedPtr m_pressureArea
void CheckPoint_Output(const int n)
Array< OneD, Array< OneD, NekDouble > > m_W1
void v_InitObject(bool DeclareField=false) override
std::vector< int > m_domOrder
void MergingRiemann(Array< OneD, NekDouble > &Au, Array< OneD, NekDouble > &uu, Array< OneD, NekDouble > &beta, Array< OneD, NekDouble > &A_0, Array< OneD, NekDouble > &alpha)
Riemann Problem for Merging Flow.
void InterfaceRiemann(Array< OneD, NekDouble > &Au, Array< OneD, NekDouble > &uu, Array< OneD, NekDouble > &beta, Array< OneD, NekDouble > &A_0, Array< OneD, NekDouble > &alpha)
Riemann Problem for Interface/Junction.
Array< OneD, Array< OneD, NekDouble > > m_alpha_trace
std::map< int, SpatialDomains::CompositeMap > m_domain
UpwindTypePulse m_upwindTypePulse
Array< OneD, Array< OneD, NekDouble > > m_A_0_trace
Array< OneD, Array< OneD, NekDouble > > m_pressure
NekDouble v_LinfError(unsigned int field, const Array< OneD, NekDouble > &exactsoln=NullNekDouble1DArray) override
Compute the L_inf error between fields and a given exact solution.
Array< OneD, Array< OneD, NekDouble > > m_gamma
void WriteVessels(const std::string &outname)
Write input fields to the given filename.
void GetCommArray(std::map< int, LibUtilities::CommSharedPtr > &retval)
Set and retrn a series of communicators for each partition.
Array< OneD, Array< OneD, NekDouble > > m_alpha
Gs::gs_data * m_intComm
Communicator for interfaces.
std::vector< std::vector< InterfacePointShPtr > > m_vesselIntfcs
Array< OneD, Array< OneD, NekDouble > > m_PWV
void v_Output(void) override
Array< OneD, MultiRegions::ExpListSharedPtr > m_vessels
~PulseWaveSystem() override
Destructor.
void FillDataFromInterfacePoint(InterfacePointShPtr &I, const Array< OneD, const Array< OneD, NekDouble > > &field, NekDouble &A, NekDouble &u, NekDouble &beta, NekDouble &A_0, NekDouble &alpha)
std::vector< std::vector< InterfacePointShPtr > > m_bifurcations
void SetUpDomainInterfaceBCs(SpatialDomains::BoundaryConditions &AllBcs, std::map< int, LibUtilities::CommSharedPtr > &domComm)
std::vector< std::vector< InterfacePointShPtr > > m_mergingJcts
NekDouble v_L2Error(unsigned int field, const Array< OneD, NekDouble > &exactsoln=NullNekDouble1DArray, bool Normalised=false) override
Compute the L2 error between fields and a given exact solution.
void v_DoSolve() override
Solves an unsteady problem.
void EnforceInterfaceConditions(const Array< OneD, const Array< OneD, NekDouble > > &fields)
Array< OneD, Array< OneD, NekDouble > > m_beta
PulseWaveSystem(const LibUtilities::SessionReaderSharedPtr &pSession, const SpatialDomains::MeshGraphSharedPtr &pGraph)
Initialises PulseWaveSystem class members.
void BifurcationRiemann(Array< OneD, NekDouble > &Au, Array< OneD, NekDouble > &uu, Array< OneD, NekDouble > &beta, Array< OneD, NekDouble > &A_0, Array< OneD, NekDouble > &alpha)
Riemann Problem for Bifurcation.
LibUtilities::FieldIOSharedPtr m_fld
Field input/output.
SpatialDomains::MeshGraphSharedPtr m_graph
Pointer to graph defining mesh.
LibUtilities::CommSharedPtr m_comm
Communicator.
NekDouble m_timestep
Time step size.
int m_infosteps
Number of time steps between outputting status information.
NekDouble m_time
Current time of simulation.
SOLVER_UTILS_EXPORT int GetTraceTotPoints()
Array< OneD, MultiRegions::ExpListSharedPtr > m_fields
Array holding all dependent variables.
SOLVER_UTILS_EXPORT void EvaluateExactSolution(int field, Array< OneD, NekDouble > &outfield, const NekDouble time)
Evaluates an exact solution.
std::string m_sessionName
Name of the session.
LibUtilities::SessionReaderSharedPtr m_session
The session reader.
SOLVER_UTILS_EXPORT int GetNpoints()
SOLVER_UTILS_EXPORT int GetNcoeffs()
enum MultiRegions::ProjectionType m_projectionType
Type of projection; e.g continuous or discontinuous.
int m_steps
Number of steps to take.
int m_NumQuadPointsError
Number of Quadrature points used to work out the error.
LibUtilities::FieldMetaDataMap m_fieldMetaDataMap
Map to identify relevant solver info to dump in output fields.
int m_checksteps
Number of steps between checkpoints.
SOLVER_UTILS_EXPORT void SetInitialConditions(NekDouble initialtime=0.0, bool dumpInitialConditions=true, const int domain=0)
Initialise the data in the dependent fields.
SOLVER_UTILS_EXPORT SessionFunctionSharedPtr GetFunction(std::string name, const MultiRegions::ExpListSharedPtr &field=MultiRegions::NullExpListSharedPtr, bool cache=false)
Get a SessionFunction by name.
Base class for unsteady solvers.
LibUtilities::TimeIntegrationSchemeOperators m_ode
The time integration scheme operators to use.
LibUtilities::TimeIntegrationSchemeSharedPtr m_intScheme
Wrapper to the time integration scheme.
SOLVER_UTILS_EXPORT void v_InitObject(bool DeclareField=true) override
Init object for UnsteadySystem class.
void AddBoundaryRegions(const int regionID, BoundaryRegionShPtr &bRegion)
Definition: Conditions.h:239
const BoundaryRegionCollection & GetBoundaryRegions(void) const
Definition: Conditions.h:234
void AddBoundaryConditions(const int regionID, BoundaryConditionMapShPtr &bCond)
Definition: Conditions.h:249
static void Gather(Nektar::Array< OneD, NekDouble > pU, gs_op pOp, gs_data *pGsh, Nektar::Array< OneD, NekDouble > pBuffer=NullNekDouble1DArray)
Performs a gather-scatter operation of the provided values.
Definition: GsLib.hpp:265
static gs_data * Init(const Nektar::Array< OneD, long > pId, const LibUtilities::CommSharedPtr &pComm, bool verbose=true)
Initialise Gather-Scatter map.
Definition: GsLib.hpp:190
@ gs_add
Definition: GsLib.hpp:60
@ gs_max
Definition: GsLib.hpp:63
std::shared_ptr< SessionReader > SessionReaderSharedPtr
std::shared_ptr< Equation > EquationSharedPtr
Definition: Equation.h:125
@ beta
Gauss Radau pinned at x=-1,.
Definition: PointsType.h:59
std::shared_ptr< Comm > CommSharedPtr
Pointer to a Communicator object.
Definition: Comm.h:55
std::shared_ptr< Expansion > ExpansionSharedPtr
Definition: Expansion.h:66
std::shared_ptr< ExpList > ExpListSharedPtr
Shared pointer to an ExpList object.
std::map< int, CompositeSharedPtr > BoundaryRegion
Definition: Conditions.h:207
std::map< int, BoundaryRegionShPtr > BoundaryRegionCollection
Definition: Conditions.h:210
std::shared_ptr< BoundaryRegion > BoundaryRegionShPtr
Definition: Conditions.h:208
std::shared_ptr< BoundaryConditionBase > BoundaryConditionShPtr
Definition: Conditions.h:212
std::map< std::string, BoundaryConditionShPtr > BoundaryConditionMap
Definition: Conditions.h:217
std::shared_ptr< Composite > CompositeSharedPtr
Definition: MeshGraph.h:135
std::shared_ptr< MeshGraph > MeshGraphSharedPtr
Definition: MeshGraph.h:174
std::shared_ptr< BoundaryConditionMap > BoundaryConditionMapShPtr
Definition: Conditions.h:218
std::shared_ptr< Geometry > GeometrySharedPtr
Definition: Geometry.h:51
std::vector< double > d(NPUPPER *NPUPPER)
std::shared_ptr< InterfacePoint > InterfacePointShPtr
PressureAreaFactory & GetPressureAreaFactory()
void Multiply(NekMatrix< ResultDataType, StandardMatrixTag > &result, const NekMatrix< LhsDataType, LhsMatrixType > &lhs, const ResultDataType &rhs)
@ SIZE_UpwindTypePulse
Length of enum list.
const char *const UpwindTypeMapPulse[]
double NekDouble
DataType Dot(const NekVector< DataType > &lhs, const NekVector< DataType > &rhs)
Definition: NekVector.cpp:1193
T Vsum(int n, const T *x, const int incx)
Subtract return sum(x)
Definition: Vmath.hpp:608
int Imin(int n, const T *x, const int incx)
Return the index of the minimum element in x.
Definition: Vmath.hpp:704
void Zero(int n, T *x, const int incx)
Zero vector.
Definition: Vmath.hpp:273
T Vmax(int n, const T *x, const int incx)
Return the maximum element in x – called vmax to avoid conflict with max.
Definition: Vmath.hpp:644
void Vcopy(int n, const T *x, const int incx, T *y, const int incy)
Definition: Vmath.hpp:825
scalarT< T > sqrt(scalarT< T > in)
Definition: scalar.hpp:294