Nektar++
FieldIOHdf5.cpp
Go to the documentation of this file.
1 ////////////////////////////////////////////////////////////////////////////////
2 //
3 // File: FieldIOHdf5.cpp
4 //
5 // For more information, please see: http://www.nektar.info/
6 //
7 // The MIT License
8 //
9 // Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10 // Department of Aeronautics, Imperial College London (UK), and Scientific
11 // Computing and Imaging Institute, University of Utah (USA).
12 //
13 // Permission is hereby granted, free of charge, to any person obtaining a
14 // copy of this software and associated documentation files (the "Software"),
15 // to deal in the Software without restriction, including without limitation
16 // the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 // and/or sell copies of the Software, and to permit persons to whom the
18 // Software is furnished to do so, subject to the following conditions:
19 //
20 // The above copyright notice and this permission notice shall be included
21 // in all copies or substantial portions of the Software.
22 //
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
24 // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 // DEALINGS IN THE SOFTWARE.
30 //
31 // Description: I/O routines relating to Fields into HDF
32 //
33 ////////////////////////////////////////////////////////////////////////////////
34 
37 
38 #include <functional>
39 #include <unordered_set>
40 
41 namespace berrc = boost::system::errc;
42 
43 namespace Nektar
44 {
45 namespace LibUtilities
46 {
47 namespace H5
48 {
49 
51 {
52  return PredefinedDataType::Native<int>();
53 }
54 
55 } // namespace H5
56 
57 std::string FieldIOHdf5::className =
59  "Hdf5", FieldIOHdf5::create, "HDF5-based output of field data.");
60 
61 /// Version of the Nektar++ HDF5 format, which is embedded into the main NEKTAR
62 /// group as an attribute.
63 const unsigned int FieldIOHdf5::FORMAT_VERSION = 1;
64 
65 // The following definitions allow us to consistently refer to indexes pulled
66 // out of the various datasets.
67 
68 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
69 /// position of the number of elements in decomposition (i.e. field definition).
70 const unsigned int FieldIOHdf5::ELEM_DCMP_IDX = 0;
71 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
72 /// position of the number of data points in decomposition (i.e. field
73 /// definition).
74 const unsigned int FieldIOHdf5::VAL_DCMP_IDX = 1;
75 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
76 /// position of the number of elements multiplied by the dimension of the
77 /// element, giving number of modes when variable polynomial order is defined.
78 const unsigned int FieldIOHdf5::ORDER_DCMP_IDX = 2;
79 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
80 /// position of the number of y-planes for homogeneous
81 /// simulations.
82 const unsigned int FieldIOHdf5::HOMY_DCMP_IDX = 3;
83 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
84 /// position of the number of z-planes for homogeneous
85 /// simulations.
86 const unsigned int FieldIOHdf5::HOMZ_DCMP_IDX = 4;
87 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
88 /// position of the number of strips for homogeneous simulations.
89 const unsigned int FieldIOHdf5::HOMS_DCMP_IDX = 5;
90 /// The hash of the field definition information, which defines the name of the
91 /// group containing the field definition itself.
92 const unsigned int FieldIOHdf5::HASH_DCMP_IDX = 6;
93 /// A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in
94 /// the decomposition per field definition.
95 const unsigned int FieldIOHdf5::MAX_DCMPS = FieldIOHdf5::HASH_DCMP_IDX + 1;
96 
97 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
98 /// elements in the cnt array (this rank's totals, summed over all of its
99 /// fields, which are gathered to the root process to size the datasets).
100 const unsigned int FieldIOHdf5::ELEM_CNT_IDX = 0;
101 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
102 /// data points in the cnt array.
103 const unsigned int FieldIOHdf5::VAL_CNT_IDX = 1;
104 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
105 /// order points in the cnt array.
106 const unsigned int FieldIOHdf5::ORDER_CNT_IDX = 2;
107 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
108 /// homogeneous y-planes in the cnt array.
109 const unsigned int FieldIOHdf5::HOMY_CNT_IDX = 3;
110 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
111 /// homogeneous z-planes in the cnt array.
112 const unsigned int FieldIOHdf5::HOMZ_CNT_IDX = 4;
113 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
114 /// homogeneous strips in the cnt array.
115 const unsigned int FieldIOHdf5::HOMS_CNT_IDX = 5;
116 /// A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in
117 /// the cnt array per field definition.
118 const unsigned int FieldIOHdf5::MAX_CNTS = FieldIOHdf5::HOMS_CNT_IDX + 1;
118 
119 /// A helper for FieldIOHdf5::v_Write. Describes the position of the element IDs
120 /// within the indexing set (per-rank starting offsets into the datasets,
121 /// computed on the root process and scattered back to each rank).
122 const unsigned int FieldIOHdf5::IDS_IDX_IDX = 0;
123 /// A helper for FieldIOHdf5::v_Write. Describes the position of the data size
124 /// within the indexing set.
125 const unsigned int FieldIOHdf5::DATA_IDX_IDX = 1;
126 /// A helper for FieldIOHdf5::v_Write. Describes the position of the element
127 /// order within the indexing set.
128 const unsigned int FieldIOHdf5::ORDER_IDX_IDX = 2;
129 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
130 /// y-planes within the indexing set.
131 const unsigned int FieldIOHdf5::HOMY_IDX_IDX = 3;
132 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
133 /// z-planes within the indexing set.
134 const unsigned int FieldIOHdf5::HOMZ_IDX_IDX = 4;
135 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
136 /// homogeneous strips within the indexing set.
137 const unsigned int FieldIOHdf5::HOMS_IDX_IDX = 5;
138 /// A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in
139 /// the indexing set.
140 const unsigned int FieldIOHdf5::MAX_IDXS = FieldIOHdf5::HOMS_IDX_IDX + 1;
140 
141 /**
142  * @brief Construct the FieldIO object for HDF5 output.
143  *
144  * @param pComm Communicator object.
145  * @param sharedFilesystem True if this system has a shared filesystem.
146  */
148  bool sharedFilesystem)
149  : FieldIO(pComm, sharedFilesystem)
150 {
151 }
152 
153 /**
154  * @brief Write a HDF5 file to @p outFile given the field definitions @p
155  * fielddefs, field data @p fielddata and metadata @p fieldmetadatamap.
156  *
157  * The writing strategy for HDF5 output is as follows:
158  *
159  * - Each rank determines the amount of data needed to be written into each
160  * dataset.
161  * - Each rank communicates its decomposition information to the root process.
162  * - The root processor initialises the output structure, writes the
163  * decomposition dataset and all the field definition information.
164  * - Other ranks may have field definitions that do not belong to the root
165  * process, in which case they open the file and append this (since
166  * attributes cannot be written in parallel).
167  * - Each of the other ranks writes their data contributions to the rest of
168  * the set.
169  *
170  * @param outFile Output filename.
171  * @param fielddefs Input field definitions.
172  * @param fielddata Input field data.
173  * @param fieldmetadatamap Field metadata.
174  */
175 void FieldIOHdf5::v_Write(const std::string &outFile,
176  std::vector<FieldDefinitionsSharedPtr> &fielddefs,
177  std::vector<std::vector<NekDouble>> &fielddata,
178  const FieldMetaDataMap &fieldmetadatamap,
179  const bool backup)
180 {
181  std::stringstream prfx;
182  prfx << m_comm->GetRank() << ": FieldIOHdf5::v_Write(): ";
183  double tm0 = 0.0, tm1 = 0.0;
184 
185  if (m_comm->GetRank() == 0)
186  {
187  tm0 = m_comm->Wtime();
188  }
189 
190  SetUpOutput(outFile, false, backup);
191 
192  // We make a number of assumptions in this code:
193  // 1. All element ids have the same type: unsigned int
194  // 2. All elements within a given field have the same number of values
195  // 3. All element values have the same type, NekDouble
196 
197  // Determine the root MPI process, i.e., the lowest ranked process handling
198  // nMaxFields fields, that will be responsible for writing our file.
199  ASSERTL1(fielddefs.size() == fielddata.size(),
200  prfx.str() + "fielddefs and fielddata have incompatible lengths.");
201 
202  size_t nFields = fielddefs.size();
203  size_t nMaxFields = nFields;
204  m_comm->AllReduce(nMaxFields, LibUtilities::ReduceMax);
205 
206  int root_rank = -1;
207  bool amRoot = false;
208  LibUtilities::CommSharedPtr max_fields_comm;
209 
210  if (m_comm->GetSize() > 1)
211  {
212  max_fields_comm = m_comm->CommCreateIf((nFields == nMaxFields) ? 1 : 0);
213  }
214  else
215  {
216  max_fields_comm = m_comm;
217  }
218 
219  if (max_fields_comm)
220  {
221  int rank = m_comm->GetRank();
222  root_rank = rank;
223  max_fields_comm->AllReduce(root_rank, LibUtilities::ReduceMin);
224  amRoot = (rank == root_rank);
225  if (!amRoot)
226  {
227  root_rank = -1;
228  }
229  }
230 
231  m_comm->AllReduce(root_rank, LibUtilities::ReduceMax);
232  ASSERTL1(root_rank >= 0 && root_rank < m_comm->GetSize(),
233  prfx.str() + "invalid root rank.");
234 
235  std::vector<uint64_t> decomps(nMaxFields * MAX_DCMPS, 0);
236  std::vector<uint64_t> all_hashes(nMaxFields * m_comm->GetSize(), 0);
237  std::vector<uint64_t> cnts(MAX_CNTS, 0);
238  std::vector<std::string> fieldNames(nFields);
239  std::vector<std::string> shapeStrings(nFields);
240  std::vector<std::vector<NekDouble>> homoLengths(nFields);
241  std::vector<std::vector<unsigned int>> homoSIDs(nFields), homoYIDs(nFields),
242  homoZIDs(nFields);
243  std::vector<std::vector<unsigned int>> numModesPerDirVar(nFields);
244  std::vector<std::string> numModesPerDirUni(nFields);
245 
246  int homDim = -1;
247  int varOrder = 0;
248 
249  for (int f = 0; f < nFields; ++f)
250  {
251  if (!fielddefs[f]->m_uniOrder)
252  {
253  varOrder = 1;
254  break;
255  }
256  }
257 
258  m_comm->AllReduce(varOrder, LibUtilities::ReduceMax);
259 
260  // Calculate the total number of elements handled by this MPI process and
261  // the total number of bytes required to store the elements. Base the name
262  // of each field on the hash of the field definition.
263  for (int f = 0; f < nFields; ++f)
264  {
265  ASSERTL1(fielddata[f].size() > 0,
266  prfx.str() +
267  "fielddata vector must contain at least one value.");
268  ASSERTL1(fielddata[f].size() == fielddefs[f]->m_fields.size() *
269  CheckFieldDefinition(fielddefs[f]),
270  prfx.str() + "fielddata vector has invalid size.");
271 
272  std::size_t nFieldElems = fielddefs[f]->m_elementIDs.size();
273  std::size_t nElemVals = fielddata[f].size();
274 
275  decomps[f * MAX_DCMPS + ELEM_DCMP_IDX] = nFieldElems;
276  decomps[f * MAX_DCMPS + VAL_DCMP_IDX] = nElemVals;
277 
278  cnts[ELEM_CNT_IDX] += nFieldElems;
279  cnts[VAL_CNT_IDX] += nElemVals;
280 
281  // Hash the field specification
282  std::stringstream hashStream;
283  std::size_t nSubFields = fielddefs[f]->m_fields.size();
284  for (int sf = 0; sf < nSubFields; ++sf)
285  {
286  hashStream << fielddefs[f]->m_fields[sf];
287  }
288 
289  nSubFields = fielddefs[f]->m_basis.size();
290  for (int sf = 0; sf < nSubFields; ++sf)
291  {
292  hashStream << fielddefs[f]->m_basis[sf];
293  }
294 
295  // Determine SHAPE attribute
296  std::stringstream shapeStringStream;
297  shapeStringStream << ShapeTypeMap[fielddefs[f]->m_shapeType];
298 
299  if (fielddefs[f]->m_numHomogeneousDir > 0)
300  {
301  if (homDim == -1)
302  {
303  homDim = fielddefs[f]->m_numHomogeneousDir;
304  }
305 
306  ASSERTL1(homDim == fielddefs[f]->m_numHomogeneousDir,
307  "HDF5 does not support variable homogeneous directions in "
308  "the same file.");
309 
310  shapeStringStream << "-HomogenousExp"
311  << fielddefs[f]->m_numHomogeneousDir << "D";
312  }
313 
314  if (fielddefs[f]->m_homoStrips)
315  {
316  shapeStringStream << "-Strips";
317  }
318 
319  shapeStrings[f] = shapeStringStream.str();
320  hashStream << shapeStringStream.str();
321 
322  // Determine HOMOGENEOUS attributes
323  if (fielddefs[f]->m_numHomogeneousDir)
324  {
325  nSubFields = fielddefs[f]->m_homogeneousLengths.size();
326  homoLengths[f].resize(nSubFields);
327  for (int sf = 0; sf < nSubFields; ++sf)
328  {
329  NekDouble len = fielddefs[f]->m_homogeneousLengths[sf];
330  hashStream << len;
331  homoLengths[f][sf] = len;
332  }
333 
334  nSubFields = fielddefs[f]->m_homogeneousYIDs.size();
335  if (nSubFields > 0)
336  {
337  homoYIDs[f].resize(nSubFields);
338  decomps[f * MAX_DCMPS + HOMY_DCMP_IDX] = nSubFields;
339  cnts[HOMY_CNT_IDX] += nSubFields;
340  for (int sf = 0; sf < nSubFields; ++sf)
341  {
342  homoYIDs[f][sf] = fielddefs[f]->m_homogeneousYIDs[sf];
343  }
344  }
345 
346  nSubFields = fielddefs[f]->m_homogeneousZIDs.size();
347  if (nSubFields > 0)
348  {
349  homoZIDs[f].resize(nSubFields);
350  decomps[f * MAX_DCMPS + HOMZ_DCMP_IDX] = nSubFields;
351  cnts[HOMZ_CNT_IDX] += nSubFields;
352  for (int sf = 0; sf < nSubFields; ++sf)
353  {
354  homoZIDs[f][sf] = fielddefs[f]->m_homogeneousZIDs[sf];
355  }
356  }
357 
358  nSubFields = fielddefs[f]->m_homogeneousSIDs.size();
359  if (nSubFields > 0)
360  {
361  homoSIDs[f].resize(nSubFields);
362  decomps[f * MAX_DCMPS + HOMS_DCMP_IDX] = nSubFields;
363  cnts[HOMS_CNT_IDX] += nSubFields;
364  for (int sf = 0; sf < nSubFields; ++sf)
365  {
366  homoSIDs[f][sf] = fielddefs[f]->m_homogeneousSIDs[sf];
367  }
368  }
369  }
370 
371  if (fielddefs[f]->m_uniOrder)
372  {
373  std::vector<unsigned int> elemModes(fielddefs[f]->m_basis.size());
374 
375  for (std::vector<int>::size_type i = 0;
376  i < fielddefs[f]->m_basis.size(); ++i)
377  {
378  elemModes[i] = fielddefs[f]->m_numModes[i];
379  }
380 
381  if (varOrder)
382  {
383  for (std::vector<int>::size_type i = 0; i < nFieldElems; ++i)
384  {
385  std::copy(elemModes.begin(), elemModes.end(),
386  std::back_inserter(numModesPerDirVar[f]));
387  }
388  decomps[f * MAX_DCMPS + ORDER_DCMP_IDX] =
389  nFieldElems * elemModes.size();
390  cnts[ORDER_CNT_IDX] += nFieldElems * elemModes.size();
391  }
392  else
393  {
394  std::stringstream numModesStringStream;
395  numModesStringStream << "UNIORDER:";
396  for (std::vector<int>::size_type i = 0; i < elemModes.size();
397  i++)
398  {
399  if (i > 0)
400  {
401  numModesStringStream << ",";
402  }
403  numModesStringStream << elemModes[i];
404  }
405 
406  numModesPerDirUni[f] = numModesStringStream.str();
407  hashStream << numModesPerDirUni[f];
408  }
409  }
410  else
411  {
412  numModesPerDirVar[f] = fielddefs[f]->m_numModes;
413  decomps[f * MAX_DCMPS + ORDER_DCMP_IDX] =
414  fielddefs[f]->m_numModes.size();
415  cnts[ORDER_CNT_IDX] += fielddefs[f]->m_numModes.size();
416  }
417 
418  std::hash<std::string> string_hasher;
419  std::stringstream fieldNameStream;
420  uint64_t fieldDefHash = string_hasher(hashStream.str());
421 
422  decomps[f * MAX_DCMPS + HASH_DCMP_IDX] = fieldDefHash;
423  all_hashes[m_comm->GetRank() * nMaxFields + f] = fieldDefHash;
424 
425  fieldNameStream << fieldDefHash;
426  fieldNames[f] = fieldNameStream.str();
427  }
428 
429  // Gather information from all MPI processes
430  std::vector<uint64_t> all_cnts = m_comm->Gather(root_rank, cnts);
431  std::vector<uint64_t> all_idxs(m_comm->GetSize() * MAX_IDXS, 0);
432  std::vector<uint64_t> all_decomps = m_comm->Gather(root_rank, decomps);
433  std::vector<uint64_t> all_dsetsize(MAX_CNTS, 0);
434 
435  // The root rank creates the file layout from scratch
436  if (amRoot)
437  {
438  H5::FileSharedPtr outfile = H5::File::Create(outFile, H5F_ACC_TRUNC);
439  ASSERTL1(outfile, prfx.str() + "cannot create HDF5 file.");
440  H5::GroupSharedPtr root = outfile->CreateGroup("NEKTAR");
441  ASSERTL1(root, prfx.str() + "cannot create root group.");
442  TagWriterSharedPtr info_writer(new H5TagWriter(root));
443  AddInfoTag(info_writer, fieldmetadatamap);
444 
445  // Record file format version as attribute in main group.
446  root->SetAttribute("FORMAT_VERSION", FORMAT_VERSION);
447 
448  // Calculate the indexes to be used by each MPI process when reading the
449  // IDS and DATA datasets
450  std::size_t nTotElems = 0, nTotVals = 0, nTotOrder = 0;
451  std::size_t nTotHomY = 0, nTotHomZ = 0, nTotHomS = 0;
452  int nRanks = m_comm->GetSize();
453  for (int r = 0; r < nRanks; ++r)
454  {
455  all_idxs[r * MAX_IDXS + IDS_IDX_IDX] = nTotElems;
456  all_idxs[r * MAX_IDXS + DATA_IDX_IDX] = nTotVals;
457  all_idxs[r * MAX_IDXS + ORDER_IDX_IDX] = nTotOrder;
458  all_idxs[r * MAX_IDXS + HOMY_IDX_IDX] = nTotHomY;
459  all_idxs[r * MAX_IDXS + HOMZ_IDX_IDX] = nTotHomZ;
460  all_idxs[r * MAX_IDXS + HOMS_IDX_IDX] = nTotHomS;
461 
462  nTotElems += all_cnts[r * MAX_CNTS + ELEM_CNT_IDX];
463  nTotVals += all_cnts[r * MAX_CNTS + VAL_CNT_IDX];
464  nTotOrder += all_cnts[r * MAX_CNTS + ORDER_CNT_IDX];
465  nTotHomY += all_cnts[r * MAX_CNTS + HOMY_CNT_IDX];
466  nTotHomZ += all_cnts[r * MAX_CNTS + HOMZ_CNT_IDX];
467  nTotHomS += all_cnts[r * MAX_CNTS + HOMS_CNT_IDX];
468  }
469 
470  all_dsetsize[ELEM_CNT_IDX] = nTotElems;
471  all_dsetsize[VAL_CNT_IDX] = nTotVals;
472  all_dsetsize[ORDER_CNT_IDX] = nTotOrder;
473  all_dsetsize[HOMY_CNT_IDX] = nTotHomY;
474  all_dsetsize[HOMZ_CNT_IDX] = nTotHomZ;
475  all_dsetsize[HOMS_CNT_IDX] = nTotHomS;
476 
477  // Create DECOMPOSITION dataset: basic field info for each MPI process
478  H5::DataTypeSharedPtr decomps_type =
479  H5::DataType::OfObject(all_decomps[0]);
480  H5::DataSpaceSharedPtr decomps_space =
481  H5::DataSpace::OneD(all_decomps.size());
482  H5::DataSetSharedPtr decomps_dset =
483  root->CreateDataSet("DECOMPOSITION", decomps_type, decomps_space);
484  ASSERTL1(decomps_dset,
485  prfx.str() + "cannot create DECOMPOSITION dataset.");
486 
487  // Create IDS dataset: element ids
488  H5::DataTypeSharedPtr ids_type =
489  H5::DataType::OfObject(fielddefs[0]->m_elementIDs[0]);
490  H5::DataSpaceSharedPtr ids_space = H5::DataSpace::OneD(nTotElems);
491  H5::DataSetSharedPtr ids_dset =
492  root->CreateDataSet("ELEMENTIDS", ids_type, ids_space);
493  ASSERTL1(ids_dset, prfx.str() + "cannot create ELEMENTIDS dataset.");
494 
495  // Create DATA dataset: element data
496  H5::DataTypeSharedPtr data_type =
497  H5::DataType::OfObject(fielddata[0][0]);
498  H5::DataSpaceSharedPtr data_space = H5::DataSpace::OneD(nTotVals);
499  H5::DataSetSharedPtr data_dset =
500  root->CreateDataSet("DATA", data_type, data_space);
501  ASSERTL1(data_dset, prfx.str() + "cannot create DATA dataset.");
502 
503  // Create HOMOGENEOUSYIDS dataset: homogeneous y-plane IDs
504  if (nTotHomY > 0)
505  {
506  H5::DataTypeSharedPtr homy_type =
507  H5::DataType::OfObject(homoYIDs[0][0]);
508  H5::DataSpaceSharedPtr homy_space = H5::DataSpace::OneD(nTotHomY);
509  H5::DataSetSharedPtr homy_dset =
510  root->CreateDataSet("HOMOGENEOUSYIDS", homy_type, homy_space);
511  ASSERTL1(homy_dset,
512  prfx.str() + "cannot create HOMOGENEOUSYIDS dataset.");
513  }
514 
515  // Create HOMOGENEOUSYIDS dataset: homogeneous z-plane IDs
516  if (nTotHomZ > 0)
517  {
518  H5::DataTypeSharedPtr homz_type =
519  H5::DataType::OfObject(homoZIDs[0][0]);
520  H5::DataSpaceSharedPtr homz_space = H5::DataSpace::OneD(nTotHomZ);
521  H5::DataSetSharedPtr homz_dset =
522  root->CreateDataSet("HOMOGENEOUSZIDS", homz_type, homz_space);
523  ASSERTL1(homz_dset,
524  prfx.str() + "cannot create HOMOGENEOUSZIDS dataset.");
525  }
526 
527  // Create HOMOGENEOUSSIDS dataset: homogeneous strip IDs
528  if (nTotHomS > 0)
529  {
530  H5::DataTypeSharedPtr homs_type =
531  H5::DataType::OfObject(homoSIDs[0][0]);
532  H5::DataSpaceSharedPtr homs_space = H5::DataSpace::OneD(nTotHomS);
533  H5::DataSetSharedPtr homs_dset =
534  root->CreateDataSet("HOMOGENEOUSSIDS", homs_type, homs_space);
535  ASSERTL1(homs_dset,
536  prfx.str() + "cannot create HOMOGENEOUSSIDS dataset.");
537  }
538 
539  // Create POLYORDERS dataset: elemental polynomial orders
540  if (varOrder)
541  {
542  H5::DataTypeSharedPtr order_type =
543  H5::DataType::OfObject(numModesPerDirVar[0][0]);
544  H5::DataSpaceSharedPtr order_space = H5::DataSpace::OneD(nTotOrder);
545  H5::DataSetSharedPtr order_dset =
546  root->CreateDataSet("POLYORDERS", order_type, order_space);
547  ASSERTL1(order_dset,
548  prfx.str() + "cannot create POLYORDERS dataset.");
549  }
550  }
551 
552  m_comm->Bcast(all_dsetsize, root_rank);
553 
554  // Datasets, root group and HDF5 file are all closed automatically since
555  // they are now out of scope. Now we need to determine which process will
556  // write the group representing the field description in the HDF5 file. This
557  // next block of code performs this by finding all unique hashes and then
558  // determining one process that will create (possibly more than one) group
559  // for that hash. An alternative would be to communicate the field
560  // information to the root processor, but this is a bit convoluted.
561 
562  // This set stores the unique hashes.
563  std::set<uint64_t> hashToProc;
564  // This map takes ranks to hashes this process will write.
565  std::map<int, std::vector<uint64_t>> writingProcs;
566 
567  // Gather all field hashes to every processor.
568  m_comm->AllReduce(all_hashes, LibUtilities::ReduceMax);
569 
570  for (int n = 0; n < m_comm->GetSize(); ++n)
571  {
572  for (int i = 0; i < nMaxFields; ++i)
573  {
574  uint64_t hash = all_hashes[n * nMaxFields + i];
575 
576  // Note hash can be zero if, on this process, nFields < nMaxFields.
577  if (hashToProc.find(hash) != hashToProc.end() || hash == 0)
578  {
579  continue;
580  }
581  hashToProc.insert(hash);
582  writingProcs[n].push_back(hash);
583  }
584  }
585 
586  // Having constructed the map, go ahead and write the attributes out.
587  for (auto &sIt : writingProcs)
588  {
589  int rank = sIt.first;
590 
591  // Write out this rank's groups.
592  if (m_comm->GetRank() == rank)
593  {
594  H5::PListSharedPtr serialProps = H5::PList::Default();
596 
597  // Reopen the file
598  H5::FileSharedPtr outfile =
599  H5::File::Open(outFile, H5F_ACC_RDWR, serialProps);
600  ASSERTL1(outfile, prfx.str() + "cannot open HDF5 file.");
601  H5::GroupSharedPtr root = outfile->OpenGroup("NEKTAR");
602  ASSERTL1(root, prfx.str() + "cannot open root group.");
603 
604  // Write a HDF5 group for each field
605  hashToProc.clear();
606  for (int i = 0; i < sIt.second.size(); ++i)
607  {
608  for (int f = 0; f < nFields; ++f)
609  {
610  if (sIt.second[i] !=
611  all_hashes[m_comm->GetRank() * nMaxFields + f] ||
612  hashToProc.find(sIt.second[i]) != hashToProc.end())
613  {
614  continue;
615  }
616 
617  hashToProc.insert(sIt.second[i]);
618 
619  // Just in case we've already written this
620  H5::GroupSharedPtr field_group =
621  root->CreateGroup(fieldNames[f]);
622  ASSERTL1(field_group,
623  prfx.str() + "cannot create field group.");
624  field_group->SetAttribute("FIELDS", fielddefs[f]->m_fields);
625  field_group->SetAttribute("BASIS", fielddefs[f]->m_basis);
626  field_group->SetAttribute("SHAPE", shapeStrings[f]);
627 
628  if (homoLengths[f].size() > 0)
629  {
630  field_group->SetAttribute("HOMOGENEOUSLENGTHS",
631  homoLengths[f]);
632  }
633 
634  // If the field has only uniform order, we write the order
635  // into the NUMMODESPERDIR attribute. Otherwise, we'll go
636  // ahead and assume everything is mixed and fix this in the
637  // read later if required.
638  if (!varOrder)
639  {
640  field_group->SetAttribute("NUMMODESPERDIR",
641  numModesPerDirUni[f]);
642  }
643  else
644  {
645  std::string numModesPerDir = "MIXORDER";
646  field_group->SetAttribute("NUMMODESPERDIR",
647  numModesPerDir);
648  }
649  }
650  }
651  }
652 
653  // We block to avoid more than one processor opening the file at a time.
654  m_comm->Block();
655  }
656 
657  // Write the DECOMPOSITION dataset
658  if (amRoot)
659  {
660  H5::PListSharedPtr serialProps = H5::PList::Default();
662 
663  // Reopen the file
664  H5::FileSharedPtr outfile =
665  H5::File::Open(outFile, H5F_ACC_RDWR, serialProps);
666  ASSERTL1(outfile, prfx.str() + "cannot open HDF5 file.");
667  H5::GroupSharedPtr root = outfile->OpenGroup("NEKTAR");
668  ASSERTL1(root, prfx.str() + "cannot open root group.");
669 
670  // Write the DECOMPOSITION dataset
671  H5::DataSetSharedPtr decomps_dset = root->OpenDataSet("DECOMPOSITION");
672  ASSERTL1(decomps_dset,
673  prfx.str() + "cannot open DECOMPOSITION dataset.");
674 
675  H5::DataSpaceSharedPtr decomps_fspace = decomps_dset->GetSpace();
676  ASSERTL1(decomps_fspace,
677  prfx.str() + "cannot open DECOMPOSITION filespace.");
678 
679  decomps_fspace->SelectRange(0, all_decomps.size());
680  decomps_dset->Write(all_decomps, decomps_fspace, writeSR);
681  }
682 
683  // Initialise the dataset indexes for all MPI processes
684  std::vector<uint64_t> idx = m_comm->Scatter(root_rank, all_idxs);
685  uint64_t ids_i = idx[IDS_IDX_IDX];
686  uint64_t data_i = idx[DATA_IDX_IDX];
687  uint64_t order_i = idx[ORDER_IDX_IDX];
688  uint64_t homy_i = idx[HOMY_IDX_IDX];
689  uint64_t homz_i = idx[HOMZ_IDX_IDX];
690  uint64_t homs_i = idx[HOMS_IDX_IDX];
691 
692  // Set properties for parallel file access (if we're in parallel)
693  H5::PListSharedPtr parallelProps = H5::PList::Default();
695  if (m_comm->GetSize() > 1)
696  {
697  // Use MPI/O to access the file
698  parallelProps = H5::PList::FileAccess();
699  parallelProps->SetMpio(m_comm);
700  // Use collective IO
701  writePL = H5::PList::DatasetXfer();
702  writePL->SetDxMpioCollective();
703  }
704 
705  // Reopen the file
706  H5::FileSharedPtr outfile =
707  H5::File::Open(outFile, H5F_ACC_RDWR, parallelProps);
708  ASSERTL1(outfile, prfx.str() + "cannot open HDF5 file.");
709  H5::GroupSharedPtr root = outfile->OpenGroup("NEKTAR");
710  ASSERTL1(root, prfx.str() + "cannot open root group.");
711 
712  m_comm->Block();
713 
714  // all HDF5 groups have now been created. Open the IDS dataset and
715  // associated data space
716  H5::DataSetSharedPtr ids_dset = root->OpenDataSet("ELEMENTIDS");
717  ASSERTL1(ids_dset, prfx.str() + "cannot open ELEMENTIDS dataset.");
718  H5::DataSpaceSharedPtr ids_fspace = ids_dset->GetSpace();
719  ASSERTL1(ids_fspace, prfx.str() + "cannot open ELEMENTIDS filespace.");
720 
721  // Open the DATA dataset and associated data space
722  H5::DataSetSharedPtr data_dset = root->OpenDataSet("DATA");
723  ASSERTL1(data_dset, prfx.str() + "cannot open DATA dataset.");
724  H5::DataSpaceSharedPtr data_fspace = data_dset->GetSpace();
725  ASSERTL1(data_fspace, prfx.str() + "cannot open DATA filespace.");
726 
727  // Open the optional datasets and data spaces.
728  H5::DataSetSharedPtr order_dset, homy_dset, homz_dset, homs_dset;
729  H5::DataSpaceSharedPtr order_fspace, homy_fspace, homz_fspace, homs_fspace;
730 
731  if (all_dsetsize[ORDER_CNT_IDX])
732  {
733  order_dset = root->OpenDataSet("POLYORDERS");
734  ASSERTL1(order_dset, prfx.str() + "cannot open POLYORDERS dataset.");
735  order_fspace = order_dset->GetSpace();
736  ASSERTL1(order_fspace,
737  prfx.str() + "cannot open POLYORDERS filespace.");
738  }
739 
740  if (all_dsetsize[HOMY_CNT_IDX])
741  {
742  homy_dset = root->OpenDataSet("HOMOGENEOUSYIDS");
743  ASSERTL1(homy_dset,
744  prfx.str() + "cannot open HOMOGENEOUSYIDS dataset.");
745  homy_fspace = homy_dset->GetSpace();
746  ASSERTL1(homy_fspace,
747  prfx.str() + "cannot open HOMOGENEOUSYIDS filespace.");
748  }
749 
750  if (all_dsetsize[HOMZ_CNT_IDX])
751  {
752  homz_dset = root->OpenDataSet("HOMOGENEOUSZIDS");
753  ASSERTL1(homz_dset,
754  prfx.str() + "cannot open HOMOGENEOUSZIDS dataset.");
755  homz_fspace = homz_dset->GetSpace();
756  ASSERTL1(homz_fspace,
757  prfx.str() + "cannot open HOMOGENEOUSZIDS filespace.");
758  }
759 
760  if (all_dsetsize[HOMS_CNT_IDX])
761  {
762  homs_dset = root->OpenDataSet("HOMOGENEOUSSIDS");
763  ASSERTL1(homs_dset,
764  prfx.str() + "cannot open HOMOGENEOUSSIDS dataset.");
765  homs_fspace = homs_dset->GetSpace();
766  ASSERTL1(homs_fspace,
767  prfx.str() + "cannot open HOMOGENEOUSSIDS filespace.");
768  }
769 
770  // Write the data
771  for (int f = 0; f < nFields; ++f)
772  {
773  // write the element ids
774  std::size_t nFieldElems = fielddefs[f]->m_elementIDs.size();
775  ids_fspace->SelectRange(ids_i, nFieldElems);
776  ids_dset->Write(fielddefs[f]->m_elementIDs, ids_fspace, writePL);
777  ids_i += nFieldElems;
778 
779  // write the element values
780  std::size_t nFieldVals = fielddata[f].size();
781  data_fspace->SelectRange(data_i, nFieldVals);
782  data_dset->Write(fielddata[f], data_fspace, writePL);
783  data_i += nFieldVals;
784  }
785 
786  if (order_dset)
787  {
788  for (int f = 0; f < nFields; ++f)
789  {
790  std::size_t nOrders = numModesPerDirVar[f].size();
791  order_fspace->SelectRange(order_i, nOrders);
792  order_dset->Write(numModesPerDirVar[f], order_fspace, writePL);
793  order_i += nOrders;
794  }
795  }
796 
797  if (homy_dset)
798  {
799  for (int f = 0; f < nFields; ++f)
800  {
801  std::size_t nYIDs = homoYIDs[f].size();
802  homy_fspace->SelectRange(homy_i, nYIDs);
803  homy_dset->Write(homoYIDs[f], homy_fspace, writePL);
804  homy_i += nYIDs;
805  }
806  }
807 
808  if (homz_dset)
809  {
810  for (int f = 0; f < nFields; ++f)
811  {
812  std::size_t nZIDs = homoZIDs[f].size();
813  homz_fspace->SelectRange(homz_i, nZIDs);
814  homz_dset->Write(homoZIDs[f], homz_fspace, writePL);
815  homz_i += nZIDs;
816  }
817  }
818 
819  if (homs_dset)
820  {
821  for (int f = 0; f < nFields; ++f)
822  {
823  std::size_t nSIDs = homoSIDs[f].size();
824  homs_fspace->SelectRange(homs_i, nSIDs);
825  homs_dset->Write(homoSIDs[f], homs_fspace, writePL);
826  homs_i += nSIDs;
827  }
828  }
829 
830  for (int f = nFields; f < nMaxFields; ++f)
831  {
832  // this MPI process is handling fewer than nMaxFields fields
833  // so, since this is a collective operation
834  // just rewrite the element ids and values of the last field
835  ids_dset->Write(fielddefs[nFields - 1]->m_elementIDs, ids_fspace,
836  writePL);
837  data_dset->Write(fielddata[nFields - 1], data_fspace, writePL);
838 
839  if (order_dset)
840  {
841  order_dset->Write(numModesPerDirVar[nFields - 1], order_fspace,
842  writePL);
843  }
844 
845  if (homy_dset)
846  {
847  homy_dset->Write(homoYIDs[nFields - 1], homy_fspace, writePL);
848  }
849 
850  if (homz_dset)
851  {
852  homz_dset->Write(homoZIDs[nFields - 1], homz_fspace, writePL);
853  }
854 
855  if (homs_dset)
856  {
857  homs_dset->Write(homoSIDs[nFields - 1], homs_fspace, writePL);
858  }
859  }
860 
861  m_comm->Block();
862 
863  // all data has been written
864  if (m_comm->GetRank() == 0)
865  {
866  tm1 = m_comm->Wtime();
867  std::cout << " (" << tm1 - tm0 << "s, HDF5)" << std::endl;
868  }
869 }
870 
871 /**
872  * @brief Import a HDF5 format file.
873  *
874  * @param finfilename Input filename
875  * @param fielddefs Field definitions of resulting field
876  * @param fielddata Field data of resulting field
877  * @param fieldinfomap Field metadata of resulting field
878  * @param ElementIDs If specified, contains the list of element IDs on
879  * this rank. The resulting field definitions will only
880  * contain data for the element IDs specified in this
881  * array.
882  */
883 void FieldIOHdf5::v_Import(const std::string &infilename,
884  std::vector<FieldDefinitionsSharedPtr> &fielddefs,
885  std::vector<std::vector<NekDouble>> &fielddata,
886  FieldMetaDataMap &fieldinfomap,
887  const Array<OneD, int> &ElementIDs)
888 {
889  std::stringstream prfx;
890  int nRanks = m_comm->GetSize();
891 
892  // Set properties for parallel file access (if we're in parallel)
893  H5::PListSharedPtr parallelProps = H5::PList::Default();
896 
897  if (nRanks > 1)
898  {
899  // Use MPI/O to access the file
900  parallelProps = H5::PList::FileAccess();
901  parallelProps->SetMpio(m_comm);
902  // Use collective IO
903  readPL = H5::PList::DatasetXfer();
904  readPL->SetDxMpioCollective();
905  readPLInd = H5::PList::DatasetXfer();
906  readPLInd->SetDxMpioIndependent();
907  }
908 
909  DataSourceSharedPtr dataSource =
910  H5DataSource::create(infilename, parallelProps);
911 
912  // Open the root group of the hdf5 file
914  std::static_pointer_cast<H5DataSource>(dataSource);
915  ASSERTL1(h5, prfx.str() + "cannot open HDF5 file.");
916  H5::GroupSharedPtr root = h5->Get()->OpenGroup("NEKTAR");
917  ASSERTL1(root, prfx.str() + "cannot open root group.");
918 
919  // Check format version
920  unsigned int formatVersion;
921  H5::Group::AttrIterator attrIt = root->attr_begin();
922  H5::Group::AttrIterator attrEnd = root->attr_end();
923  for (; attrIt != attrEnd; ++attrIt)
924  {
925  if (*attrIt == "FORMAT_VERSION")
926  {
927  break;
928  }
929  }
930 
931  ASSERTL0(attrIt != attrEnd,
932  "Unable to determine Nektar++ HDF5 file version");
933  root->GetAttribute("FORMAT_VERSION", formatVersion);
934 
935  ASSERTL0(formatVersion <= FORMAT_VERSION,
936  "File format if " + infilename +
937  " is higher than supported in "
938  "this version of Nektar++");
939 
940  // Open the datasets
941  H5::DataSetSharedPtr decomps_dset = root->OpenDataSet("DECOMPOSITION");
942  ASSERTL1(decomps_dset, prfx.str() + "cannot open DECOMPOSITION dataset.");
943 
944  H5::DataSetSharedPtr ids_dset = root->OpenDataSet("ELEMENTIDS");
945  ASSERTL1(ids_dset, prfx.str() + "cannot open ELEMENTIDS dataset.");
946 
947  H5::DataSetSharedPtr data_dset = root->OpenDataSet("DATA");
948  ASSERTL1(data_dset, prfx.str() + "cannot open DATA dataset.");
949 
950  // Open the dataset file spaces
951  H5::DataSpaceSharedPtr decomps_fspace = decomps_dset->GetSpace();
952  ASSERTL1(decomps_fspace,
953  prfx.str() + "cannot open DECOMPOSITION filespace.");
954 
955  H5::DataSpaceSharedPtr ids_fspace = ids_dset->GetSpace();
956  ASSERTL1(ids_fspace, prfx.str() + "cannot open ELEMENTIDS filespace.");
957 
958  H5::DataSpaceSharedPtr data_fspace = data_dset->GetSpace();
959  ASSERTL1(data_fspace, prfx.str() + "cannot open DATA filespace.");
960 
961  // Read entire IDS data set to get list of global IDs.
962  std::vector<uint64_t> ids;
963 
964  ids_dset->Read(ids, ids_fspace, readPL);
965 
966  std::unordered_set<uint64_t> toread;
967  if (ElementIDs != NullInt1DArray)
968  {
969  for (uint64_t i = 0; i < ElementIDs.size(); ++i)
970  {
971  toread.insert(ElementIDs[i]);
972  }
973  }
974 
975  std::vector<uint64_t> decomps;
976  decomps_dset->Read(decomps, decomps_fspace, readPL);
977 
978  size_t nDecomps = decomps.size() / MAX_DCMPS;
979  size_t cnt = 0, cnt2 = 0;
980 
981  // Mapping from each decomposition to offsets in the data array.
982  std::vector<OffsetHelper> decompsToOffsets(nDecomps);
983 
984  // Mapping from each group's hash to a vector of element IDs. Note this has
985  // to be unsigned int, since that's what we use in FieldDefinitions.
986  std::map<uint64_t, std::vector<unsigned int>> groupsToElmts;
987 
988  // Mapping from each group's hash to each of the decompositions.
989  std::map<uint64_t, std::set<uint64_t>> groupsToDecomps;
990 
991  // True if we are pulling element IDs from ElementIDs.
992  bool selective = toread.size() > 0;
993 
994  // Counters for data offsets
995  OffsetHelper running;
996 
997  for (size_t i = 0; i < nDecomps; ++i, cnt += MAX_DCMPS)
998  {
999  uint64_t nElmt = decomps[cnt + ELEM_DCMP_IDX];
1000  uint64_t groupHash = decomps[cnt + HASH_DCMP_IDX];
1001 
1002  std::vector<uint64_t> tmp;
1003 
1004  if (selective)
1005  {
1006  for (size_t j = 0; j < nElmt; ++j)
1007  {
1008  uint64_t elmtId = ids[cnt2 + j];
1009  if (toread.find(elmtId) != toread.end())
1010  {
1011  tmp.push_back(elmtId);
1012  }
1013  }
1014  }
1015  else
1016  {
1017  tmp.insert(tmp.begin(), ids.begin() + cnt2,
1018  ids.begin() + cnt2 + nElmt);
1019  }
1020 
1021  std::vector<unsigned int> tmp2(nElmt);
1022  for (size_t j = 0; j < nElmt; ++j)
1023  {
1024  tmp2[j] = ids[cnt2 + j];
1025  }
1026 
1027  cnt2 += nElmt;
1028 
1029  if (tmp.size() > 0)
1030  {
1031  groupsToDecomps[groupHash].insert(i);
1032  }
1033 
1034  groupsToElmts[i] = tmp2;
1035  decompsToOffsets[i] = running;
1036 
1037  running.data += decomps[cnt + VAL_DCMP_IDX];
1038  running.order += decomps[cnt + ORDER_DCMP_IDX];
1039  running.homy += decomps[cnt + HOMY_DCMP_IDX];
1040  running.homz += decomps[cnt + HOMZ_DCMP_IDX];
1041  running.homs += decomps[cnt + HOMS_DCMP_IDX];
1042  }
1043 
1044  for (auto &gIt : groupsToDecomps)
1045  {
1046  // Select region from dataset for this decomposition.
1047  for (auto &sIt : gIt.second)
1048  {
1049  std::stringstream fieldNameStream;
1050  fieldNameStream << gIt.first;
1051 
1052  FieldDefinitionsSharedPtr fielddef =
1054  ImportFieldDef(readPLInd, root, decomps, sIt, decompsToOffsets[sIt],
1055  fieldNameStream.str(), fielddef);
1056 
1057  fielddef->m_elementIDs = groupsToElmts[sIt];
1058  fielddefs.push_back(fielddef);
1059 
1060  if (fielddata != NullVectorNekDoubleVector)
1061  {
1062  std::vector<NekDouble> decompFieldData;
1063  ImportFieldData(readPLInd, data_dset, data_fspace,
1064  decompsToOffsets[sIt].data, decomps, sIt,
1065  fielddef, decompFieldData);
1066  fielddata.push_back(decompFieldData);
1067  }
1068  }
1069  }
1070 
1071  ImportHDF5FieldMetaData(dataSource, fieldinfomap);
1072  m_comm->Block();
1073 }
1074 
1075 /**
1076  * @brief Import field definitions from a HDF5 document.
1077  *
1078  * @param readPL Reading parameter list.
1079  * @param root Root group containing field definitions.
1080  * @param group Group name to process.
1081  * @param def On output contains field definitions.
1082  */
1084  H5::GroupSharedPtr root,
1085  std::vector<uint64_t> &decomps,
1086  uint64_t decomp, OffsetHelper offset,
1087  std::string group,
1089 {
1090  std::stringstream prfx;
1091  prfx << m_comm->GetRank() << ": FieldIOHdf5::ImportFieldDefsHdf5(): ";
1092 
1093  H5::GroupSharedPtr field = root->OpenGroup(group);
1094  ASSERTL1(field, prfx.str() + "cannot open field group, " + group + '.');
1095 
1096  def->m_uniOrder = false;
1097 
1098  H5::Group::AttrIterator attrIt = field->attr_begin();
1099  H5::Group::AttrIterator attrEnd = field->attr_end();
1100  for (; attrIt != attrEnd; ++attrIt)
1101  {
1102  const std::string &attrName = *attrIt;
1103  if (attrName == "FIELDS")
1104  {
1105  field->GetAttribute(attrName, def->m_fields);
1106  }
1107  else if (attrName == "SHAPE")
1108  {
1109  std::string shapeString;
1110  field->GetAttribute(attrName, shapeString);
1111 
1112  // check to see if homogeneous expansion and if so
1113  // strip down the shapeString definition
1114  size_t loc;
1115  //---> this finds the first location of 'n'!
1116  if (shapeString.find("Strips") != std::string::npos)
1117  {
1118  def->m_homoStrips = true;
1119  }
1120 
1121  if ((loc = shapeString.find_first_of("-")) != std::string::npos)
1122  {
1123  if (shapeString.find("Exp1D") != std::string::npos)
1124  {
1125  def->m_numHomogeneousDir = 1;
1126  }
1127  else // HomogeneousExp1D
1128  {
1129  def->m_numHomogeneousDir = 2;
1130  }
1131 
1132  shapeString.erase(loc, shapeString.length());
1133  }
1134 
1135  // get the geometrical shape
1136  bool valid = false;
1137  for (unsigned int j = 0; j < SIZE_ShapeType; j++)
1138  {
1139  if (ShapeTypeMap[j] == shapeString)
1140  {
1141  def->m_shapeType = (ShapeType)j;
1142  valid = true;
1143  break;
1144  }
1145  }
1146 
1147  ASSERTL0(
1148  valid,
1149  prfx.str() +
1150  std::string("unable to correctly parse the shape type: ")
1151  .append(shapeString)
1152  .c_str());
1153  }
1154  else if (attrName == "BASIS")
1155  {
1156  field->GetAttribute(attrName, def->m_basis);
1157  // check the basis is in range
1158  for (auto &bIt : def->m_basis)
1159  {
1160  BasisType bt = bIt;
1161  ASSERTL0(bt >= 0 && bt < SIZE_BasisType,
1162  prfx.str() +
1163  "unable to correctly parse the basis types.");
1164  }
1165  }
1166  else if (attrName == "HOMOGENEOUSLENGTHS")
1167  {
1168  field->GetAttribute(attrName, def->m_homogeneousLengths);
1169  }
1170  else if (attrName == "NUMMODESPERDIR")
1171  {
1172  std::string numModesPerDir;
1173  field->GetAttribute(attrName, numModesPerDir);
1174 
1175  if (strstr(numModesPerDir.c_str(), "UNIORDER:"))
1176  {
1177  def->m_uniOrder = true;
1178  bool valid = ParseUtils::GenerateVector(
1179  numModesPerDir.substr(9), def->m_numModes);
1180  ASSERTL0(valid,
1181  prfx.str() +
1182  "unable to correctly parse the number of modes.");
1183  }
1184  }
1185  else if (attrName == "POINTSTYPE")
1186  {
1187  std::string pointsString;
1188  field->GetAttribute(attrName, pointsString);
1189  def->m_pointsDef = true;
1190 
1191  std::vector<std::string> pointsStrings;
1192  bool valid =
1193  ParseUtils::GenerateVector(pointsString, pointsStrings);
1194  ASSERTL0(valid, prfx.str() +
1195  "unable to correctly parse the points types.");
1196  for (std::vector<std::string>::size_type i = 0;
1197  i < pointsStrings.size(); i++)
1198  {
1199  valid = false;
1200  for (unsigned int j = 0; j < SIZE_PointsType; j++)
1201  {
1202  if (kPointsTypeStr[j] == pointsStrings[i])
1203  {
1204  def->m_points.push_back((PointsType)j);
1205  valid = true;
1206  break;
1207  }
1208  }
1209 
1210  ASSERTL0(valid,
1211  prfx.str() +
1212  std::string(
1213  "unable to correctly parse the points type: ")
1214  .append(pointsStrings[i])
1215  .c_str());
1216  }
1217  }
1218  else if (attrName == "NUMPOINTSPERDIR")
1219  {
1220  std::string numPointsPerDir;
1221  field->GetAttribute(attrName, numPointsPerDir);
1222  def->m_numPointsDef = true;
1223 
1224  bool valid =
1225  ParseUtils::GenerateVector(numPointsPerDir, def->m_numPoints);
1226  ASSERTL0(valid,
1227  prfx.str() +
1228  "unable to correctly parse the number of points.");
1229  }
1230  else
1231  {
1232  std::string errstr("unknown attribute: ");
1233  errstr += attrName;
1234  ASSERTL1(false, prfx.str() + errstr.c_str());
1235  }
1236  }
1237 
1238  if (def->m_numHomogeneousDir >= 1)
1239  {
1240  H5::DataSetSharedPtr homz_dset = root->OpenDataSet("HOMOGENEOUSZIDS");
1241  H5::DataSpaceSharedPtr homz_fspace = homz_dset->GetSpace();
1242  uint64_t dsize = decomps[decomp * MAX_DCMPS + HOMZ_DCMP_IDX];
1243  homz_fspace->SelectRange(offset.homz, dsize);
1244  homz_dset->Read(def->m_homogeneousZIDs, homz_fspace, readPL);
1245  }
1246 
1247  if (def->m_numHomogeneousDir >= 2)
1248  {
1249  H5::DataSetSharedPtr homy_dset = root->OpenDataSet("HOMOGENEOUSYIDS");
1250  H5::DataSpaceSharedPtr homy_fspace = homy_dset->GetSpace();
1251  uint64_t dsize = decomps[decomp * MAX_DCMPS + HOMY_DCMP_IDX];
1252  homy_fspace->SelectRange(offset.homy, dsize);
1253  homy_dset->Read(def->m_homogeneousYIDs, homy_fspace, readPL);
1254  }
1255 
1256  if (def->m_homoStrips)
1257  {
1258  H5::DataSetSharedPtr homs_dset = root->OpenDataSet("HOMOGENEOUSSIDS");
1259  H5::DataSpaceSharedPtr homs_fspace = homs_dset->GetSpace();
1260  uint64_t dsize = decomps[decomp * MAX_DCMPS + HOMS_DCMP_IDX];
1261  homs_fspace->SelectRange(offset.homs, dsize);
1262  homs_dset->Read(def->m_homogeneousSIDs, homs_fspace, readPL);
1263  }
1264 
1265  if (!def->m_uniOrder)
1266  {
1267  H5::DataSetSharedPtr order_dset = root->OpenDataSet("POLYORDERS");
1268  H5::DataSpaceSharedPtr order_fspace = order_dset->GetSpace();
1269  uint64_t dsize = decomps[decomp * MAX_DCMPS + ORDER_DCMP_IDX];
1270  order_fspace->SelectRange(offset.order, dsize);
1271  order_dset->Read(def->m_numModes, order_fspace, readPL);
1272  }
1273 }
1274 
1275 /**
1276  * @brief Import the field data from the HDF5 document.
1277  *
1278  * @param readPL Reading parameter list.
1279  * @param data_dset Pointer to the `DATA` dataset.
1280  * @param data_fspace Pointer to the `DATA` data space.
1281  * @param data_i Index in the `DATA` dataset to start reading from.
1282  * @param decomps Information from the `DECOMPOSITION` dataset.
1283  * @param decomp Index of the decomposition.
1284  * @param fielddef Field definitions for this file
1285  * @param fielddata On return contains resulting field data.
1286  */
1288  H5::PListSharedPtr readPL, H5::DataSetSharedPtr data_dset,
1289  H5::DataSpaceSharedPtr data_fspace, uint64_t data_i,
1290  std::vector<uint64_t> &decomps, uint64_t decomp,
1291  const FieldDefinitionsSharedPtr fielddef, std::vector<NekDouble> &fielddata)
1292 {
1293  std::stringstream prfx;
1294  prfx << m_comm->GetRank() << ": FieldIOHdf5::ImportFieldData(): ";
1295 
1296  uint64_t nElemVals = decomps[decomp * MAX_DCMPS + VAL_DCMP_IDX];
1297  uint64_t nFieldVals = nElemVals;
1298 
1299  data_fspace->SelectRange(data_i, nFieldVals);
1300  data_dset->Read(fielddata, data_fspace, readPL);
1301  int datasize = CheckFieldDefinition(fielddef);
1302  ASSERTL0(fielddata.size() == datasize * fielddef->m_fields.size(),
1303  prfx.str() +
1304  "input data is not the same length as header information.");
1305 }
1306 
1307 /**
1308  * @brief Import field metadata from @p filename and return the data source
1309  * which wraps @p filename.
1310  *
1311  * @param filename Input filename.
1312  * @param fieldmetadatamap Resulting field metadata from @p dataSource.
1313  */
1315  const std::string &filename, FieldMetaDataMap &fieldmetadatamap)
1316 {
1317  H5::PListSharedPtr parallelProps = H5::PList::Default();
1318  DataSourceSharedPtr ans = H5DataSource::create(filename, parallelProps);
1319  ImportHDF5FieldMetaData(ans, fieldmetadatamap);
1320  return ans;
1321 }
1322 
1323 /**
1324  * @brief Import field metadata from @p dataSource.
1325  *
1326  * @param dataSource Input datasource, which should be a H5DataSource.
1327  * @param fieldmetadatamap Resulting field metadata from @p dataSource.
1328  */
1330  FieldMetaDataMap &fieldmetadatamap)
1331 {
1332  H5DataSourceSharedPtr hdf =
1333  std::static_pointer_cast<H5DataSource>(dataSource);
1334 
1335  H5::GroupSharedPtr master = hdf->Get()->OpenGroup("NEKTAR");
1336  // New metadata format only in HDF
1337  H5::GroupSharedPtr metadata = master->OpenGroup("Metadata");
1338 
1339  if (metadata)
1340  {
1341  H5::Group::AttrIterator param = metadata->attr_begin(),
1342  pEnd = metadata->attr_end();
1343  for (; param != pEnd; ++param)
1344  {
1345  std::string paramString = *param;
1346  if (paramString != "Provenance")
1347  {
1348  std::string paramBodyStr;
1349  metadata->GetAttribute(paramString, paramBodyStr);
1350  fieldmetadatamap[paramString] = paramBodyStr;
1351  }
1352  }
1353  }
1354 }
1355 
1356 } // namespace LibUtilities
1357 } // namespace Nektar
#define ASSERTL0(condition, msg)
Definition: ErrorUtil.hpp:215
#define ASSERTL1(condition, msg)
Assert Level 1 – Debugging assertion, active in both FULLDEBUG and DEBUG compilation modes....
Definition: ErrorUtil.hpp:249
void ImportFieldData(H5::PListSharedPtr readPL, H5::DataSetSharedPtr data_dset, H5::DataSpaceSharedPtr data_fspace, uint64_t data_i, std::vector< uint64_t > &decomps, uint64_t decomp, const FieldDefinitionsSharedPtr fielddef, std::vector< NekDouble > &fielddata)
Import the field data from the HDF5 document.
static const unsigned int ELEM_CNT_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the number of elements in the cnt array.
Definition: FieldIOHdf5.h:188
static FieldIOSharedPtr create(LibUtilities::CommSharedPtr pComm, bool sharedFilesystem)
Creates an instance of this class.
Definition: FieldIOHdf5.h:205
static const unsigned int MAX_CNTS
A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in the cnt array per field d...
Definition: FieldIOHdf5.h:194
virtual void v_Import(const std::string &infilename, std::vector< FieldDefinitionsSharedPtr > &fielddefs, std::vector< std::vector< NekDouble >> &fielddata=NullVectorNekDoubleVector, FieldMetaDataMap &fieldinfomap=NullFieldMetaDataMap, const Array< OneD, int > &ElementIDs=NullInt1DArray)
Import a HDF5 format file.
static const unsigned int HOMY_CNT_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the number of homogeneous y-planes in th...
Definition: FieldIOHdf5.h:191
static std::string className
Name of class.
Definition: FieldIOHdf5.h:213
static const unsigned int ORDER_DCMP_IDX
A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the position of the number of ...
Definition: FieldIOHdf5.h:181
static const unsigned int HOMZ_CNT_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the number of homogeneous z-planes in th...
Definition: FieldIOHdf5.h:192
static const unsigned int FORMAT_VERSION
Version of the Nektar++ HDF5 format, which is embedded into the main NEKTAR group as an attribute.
Definition: FieldIOHdf5.h:177
static const unsigned int HASH_DCMP_IDX
The hash of the field definition information, which defines the name of the attribute containing the ...
Definition: FieldIOHdf5.h:185
static const unsigned int HOMS_CNT_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the number of homogeneous strips in the ...
Definition: FieldIOHdf5.h:193
static const unsigned int VAL_CNT_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the number of data points in the cnt arr...
Definition: FieldIOHdf5.h:189
virtual DataSourceSharedPtr v_ImportFieldMetaData(const std::string &filename, FieldMetaDataMap &fieldmetadatamap)
Import field metadata from filename and return the data source which wraps filename.
virtual void v_Write(const std::string &outFile, std::vector< FieldDefinitionsSharedPtr > &fielddefs, std::vector< std::vector< NekDouble >> &fielddata, const FieldMetaDataMap &fieldinfomap=NullFieldMetaDataMap, const bool backup=false)
Write a HDF5 file to outFile given the field definitions fielddefs, field data fielddata and metadata...
static const unsigned int IDS_IDX_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the element IDs within the indexing set.
Definition: FieldIOHdf5.h:196
static const unsigned int HOMS_DCMP_IDX
A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the position of the number of ...
Definition: FieldIOHdf5.h:184
static const unsigned int ELEM_DCMP_IDX
A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the position of the number of ...
Definition: FieldIOHdf5.h:179
static const unsigned int ORDER_CNT_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the number of order points in the cnt ar...
Definition: FieldIOHdf5.h:190
void ImportFieldDef(H5::PListSharedPtr readPL, H5::GroupSharedPtr root, std::vector< uint64_t > &decomps, uint64_t decomp, OffsetHelper offset, std::string group, FieldDefinitionsSharedPtr def)
Import field definitions from a HDF5 document.
static const unsigned int ORDER_IDX_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the element order within the indexing se...
Definition: FieldIOHdf5.h:198
static const unsigned int HOMZ_DCMP_IDX
A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the position of the number of ...
Definition: FieldIOHdf5.h:183
static const unsigned int MAX_DCMPS
A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in the decomposition per fie...
Definition: FieldIOHdf5.h:186
FieldIOHdf5(LibUtilities::CommSharedPtr pComm, bool sharedFilesystem)
Construct the FieldIO object for HDF5 output.
static const unsigned int MAX_IDXS
A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in the indexing set.
Definition: FieldIOHdf5.h:202
static const unsigned int VAL_DCMP_IDX
A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the position of the number of ...
Definition: FieldIOHdf5.h:180
static const unsigned int DATA_IDX_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the data size within the indexing set.
Definition: FieldIOHdf5.h:197
void ImportHDF5FieldMetaData(DataSourceSharedPtr dataSource, FieldMetaDataMap &fieldmetadatamap)
Import field metadata from dataSource.
static const unsigned int HOMS_IDX_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the number of homogeneous strips within ...
Definition: FieldIOHdf5.h:201
static const unsigned int HOMY_DCMP_IDX
A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the position of the number of ...
Definition: FieldIOHdf5.h:182
static const unsigned int HOMZ_IDX_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the number of z-planes within the indexi...
Definition: FieldIOHdf5.h:200
static const unsigned int HOMY_IDX_IDX
A helper for FieldIOHdf5::v_Write. Describes the position of the number of y-planes within the indexi...
Definition: FieldIOHdf5.h:199
Class for operating on Nektar++ input/output files.
Definition: FieldIO.h:220
int CheckFieldDefinition(const FieldDefinitionsSharedPtr &fielddefs)
Check field definitions for correctness and return storage size.
Definition: FieldIO.cpp:585
std::string SetUpOutput(const std::string outname, bool perRank, bool backup=false)
Set up the filesystem ready for output.
Definition: FieldIO.cpp:407
LibUtilities::CommSharedPtr m_comm
Communicator to use when writing parallel format.
Definition: FieldIO.h:261
static void AddInfoTag(TagWriterSharedPtr root, const FieldMetaDataMap &fieldmetadatamap)
Add provenance information to the field metadata map.
Definition: FieldIO.cpp:345
static DataSpaceSharedPtr OneD(hsize_t size)
Definition: H5.cpp:419
static DataTypeSharedPtr OfObject(const T &obj)
Definition: H5.h:389
static FileSharedPtr Open(const std::string &filename, unsigned mode, PListSharedPtr accessPL=PList::Default())
Definition: H5.cpp:642
static FileSharedPtr Create(const std::string &filename, unsigned mode, PListSharedPtr createPL=PList::Default(), PListSharedPtr accessPL=PList::Default())
Definition: H5.cpp:633
static PListSharedPtr DatasetXfer()
Properties for raw data transfer.
Definition: H5.cpp:115
static PListSharedPtr FileAccess()
Properties for file access.
Definition: H5.cpp:97
static PListSharedPtr Default()
Default options.
Definition: H5.cpp:79
static DataSourceSharedPtr create(const std::string &fn, H5::PListSharedPtr parallelProps)
Static constructor for this data source.
Definition: FieldIOHdf5.h:78
tKey RegisterCreatorFunction(tKey idKey, CreatorFunction classCreator, std::string pDesc="")
Register a class with the factory.
Definition: NekFactory.hpp:198
static std::shared_ptr< DataType > AllocateSharedPtr(const Args &...args)
Allocate a shared pointer from the memory pool.
static bool GenerateVector(const std::string &str, std::vector< T > &out)
Takes a comma-separated string and converts it to entries in a vector.
Definition: ParseUtils.cpp:129
def copy(self)
Definition: pycml.py:2663
std::shared_ptr< DataSpace > DataSpaceSharedPtr
Definition: H5.h:83
std::shared_ptr< PList > PListSharedPtr
Definition: H5.h:97
std::shared_ptr< File > FileSharedPtr
Definition: H5.h:93
std::shared_ptr< DataType > DataTypeSharedPtr
Definition: H5.h:79
std::shared_ptr< Group > GroupSharedPtr
Definition: FieldIOHdf5.h:49
std::shared_ptr< DataSet > DataSetSharedPtr
Definition: H5.h:95
const char *const ShapeTypeMap[SIZE_ShapeType]
Definition: ShapeType.hpp:77
std::shared_ptr< TagWriter > TagWriterSharedPtr
Definition: FieldIO.h:71
std::shared_ptr< H5DataSource > H5DataSourceSharedPtr
Definition: FieldIOHdf5.h:88
std::shared_ptr< DataSource > DataSourceSharedPtr
Definition: FieldIO.h:81
std::map< std::string, std::string > FieldMetaDataMap
Definition: FieldIO.h:52
const std::string kPointsTypeStr[]
Definition: Foundations.hpp:54
static std::vector< std::vector< NekDouble > > NullVectorNekDoubleVector
std::shared_ptr< FieldDefinitions > FieldDefinitionsSharedPtr
Definition: FieldIO.h:177
FieldIOFactory & GetFieldIOFactory()
Returns the FieldIO factory.
Definition: FieldIO.cpp:72
@ SIZE_PointsType
Length of enum list.
Definition: PointsType.h:97
std::shared_ptr< Comm > CommSharedPtr
Pointer to a Communicator object.
Definition: Comm.h:54
@ SIZE_BasisType
Length of enum list.
Definition: BasisType.h:72
The above copyright notice and this permission notice shall be included.
Definition: CoupledSolver.h:1
static Array< OneD, int > NullInt1DArray
double NekDouble
static DataTypeSharedPtr GetType()
Definition: H5.h:642