Nektar++
FieldIOHdf5.cpp
1 ////////////////////////////////////////////////////////////////////////////////
2 //
3 // File: FieldIOHdf5.cpp
4 //
5 // For more information, please see: http://www.nektar.info/
6 //
7 // The MIT License
8 //
9 // Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10 // Department of Aeronautics, Imperial College London (UK), and Scientific
11 // Computing and Imaging Institute, University of Utah (USA).
12 //
13 // Permission is hereby granted, free of charge, to any person obtaining a
14 // copy of this software and associated documentation files (the "Software"),
15 // to deal in the Software without restriction, including without limitation
16 // the rights to use, copy, modify, merge, publish, distribute, sublicense,
17 // and/or sell copies of the Software, and to permit persons to whom the
18 // Software is furnished to do so, subject to the following conditions:
19 //
20 // The above copyright notice and this permission notice shall be included
21 // in all copies or substantial portions of the Software.
22 //
23 // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
24 // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25 // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26 // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27 // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28 // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29 // DEALINGS IN THE SOFTWARE.
30 //
31 // Description: I/O routines relating to Fields into HDF5 format
32 //
33 ////////////////////////////////////////////////////////////////////////////////
34 
35 #include <LibUtilities/BasicUtils/FieldIOHdf5.h>
36 #include <LibUtilities/BasicUtils/ParseUtils.h>
37 
38 #include <unordered_set>
39 #include <functional>
40 
41 namespace berrc = boost::system::errc;
42 
43 namespace Nektar
44 {
45 namespace LibUtilities
46 {
47 namespace H5
48 {
49 
50 template <> inline DataTypeSharedPtr DataTypeTraits<BasisType>::GetType()
51 {
52  return PredefinedDataType::Native<int>();
53 }
54 
55 }
56 
57 std::string FieldIOHdf5::className =
58  GetFieldIOFactory().RegisterCreatorFunction(
59  "Hdf5", FieldIOHdf5::create, "HDF5-based output of field data.");
60 
61 /// Version of the Nektar++ HDF5 format, which is embedded into the main NEKTAR
62 /// group as an attribute.
63 const unsigned int FieldIOHdf5::FORMAT_VERSION = 1;
64 
65 // The following definitions allow us to consistently refer to indexes pulled
66 // out of the various datasets.
67 
68 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
69 /// position of the number of elements in decomposition (i.e. field definition).
70 const unsigned int FieldIOHdf5::ELEM_DCMP_IDX = 0;
71 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
72 /// position of the number of data points in decomposition (i.e. field
73 /// definition).
74 const unsigned int FieldIOHdf5::VAL_DCMP_IDX = 1;
75 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
76 /// position of the number of elements multiplied by the dimension of the
77 /// element, giving number of modes when variable polynomial order is defined.
78 const unsigned int FieldIOHdf5::ORDER_DCMP_IDX = 2;
79 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
80 /// position of the number of y-planes for homogeneous
81 /// simulations.
82 const unsigned int FieldIOHdf5::HOMY_DCMP_IDX = 3;
83 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
84 /// position of the number of z-planes for homogeneous
85 /// simulations.
86 const unsigned int FieldIOHdf5::HOMZ_DCMP_IDX = 4;
87 /// A helper for FieldIOHdf5::v_Write and FieldIOHdf5::v_Import. Describes the
88 /// position of the number of strips for homogeneous simulations.
89 const unsigned int FieldIOHdf5::HOMS_DCMP_IDX = 5;
90 /// The hash of the field definition information, which defines the name of the
91 /// attribute containing the field definition itself.
92 const unsigned int FieldIOHdf5::HASH_DCMP_IDX = 6;
93 /// A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in
94 /// the decomposition per field definition.
95 const unsigned int FieldIOHdf5::MAX_DCMPS = FieldIOHdf5::HASH_DCMP_IDX + 1;
96 
97 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
98 /// elements in the cnt array.
99 const unsigned int FieldIOHdf5::ELEM_CNT_IDX = 0;
100 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
101 /// data points in the cnt array.
102 const unsigned int FieldIOHdf5::VAL_CNT_IDX = 1;
103 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
104 /// order points in the cnt array.
105 const unsigned int FieldIOHdf5::ORDER_CNT_IDX = 2;
106 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
107 /// homogeneous y-planes in the cnt array.
108 const unsigned int FieldIOHdf5::HOMY_CNT_IDX = 3;
109 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
110 /// homogeneous z-planes in the cnt array.
111 const unsigned int FieldIOHdf5::HOMZ_CNT_IDX = 4;
112 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
113 /// homogeneous strips in the cnt array.
114 const unsigned int FieldIOHdf5::HOMS_CNT_IDX = 5;
115 /// A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in
116 /// the cnt array per field definition.
117 const unsigned int FieldIOHdf5::MAX_CNTS = FieldIOHdf5::HOMS_CNT_IDX + 1;
118 
119 /// A helper for FieldIOHdf5::v_Write. Describes the position of the element IDs
120 /// within the indexing set.
121 const unsigned int FieldIOHdf5::IDS_IDX_IDX = 0;
122 /// A helper for FieldIOHdf5::v_Write. Describes the position of the data size
123 /// within the indexing set.
124 const unsigned int FieldIOHdf5::DATA_IDX_IDX = 1;
125 /// A helper for FieldIOHdf5::v_Write. Describes the position of the element
126 /// order within the indexing set.
127 const unsigned int FieldIOHdf5::ORDER_IDX_IDX = 2;
128 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
129 /// y-planes within the indexing set.
130 const unsigned int FieldIOHdf5::HOMY_IDX_IDX = 3;
131 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
132 /// z-planes within the indexing set.
133 const unsigned int FieldIOHdf5::HOMZ_IDX_IDX = 4;
134 /// A helper for FieldIOHdf5::v_Write. Describes the position of the number of
135 /// homogeneous strips within the indexing set.
136 const unsigned int FieldIOHdf5::HOMS_IDX_IDX = 5;
137 /// A helper for FieldIOHdf5::v_Write. Describes the maximum number of items in
138 /// the indexing set.
139 const unsigned int FieldIOHdf5::MAX_IDXS = FieldIOHdf5::HOMS_IDX_IDX + 1;
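// As an illustrative sketch (not part of the API): in FieldIOHdf5::v_Write
// below, each rank contributes nMaxFields * MAX_DCMPS consecutive uint64_t
// entries to the gathered all_decomps vector (and hence to the DECOMPOSITION
// dataset), so the entry describing field f of rank r can be located as
//
//     uint64_t base    = (r * nMaxFields + f) * FieldIOHdf5::MAX_DCMPS;
//     uint64_t nElems  = all_decomps[base + FieldIOHdf5::ELEM_DCMP_IDX];
//     uint64_t nVals   = all_decomps[base + FieldIOHdf5::VAL_DCMP_IDX];
//     uint64_t defHash = all_decomps[base + FieldIOHdf5::HASH_DCMP_IDX];
//
// where all_decomps, nMaxFields, r and f are the names used in v_Write. Slots
// whose hash is zero are padding from ranks that own fewer than nMaxFields
// fields.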
140 
141 /**
142  * @brief Construct the FieldIO object for HDF5 output.
143  *
144  * @param pComm Communicator object.
145  * @param sharedFilesystem True if this system has a shared filesystem.
146  */
147 FieldIOHdf5::FieldIOHdf5(LibUtilities::CommSharedPtr pComm,
148  bool sharedFilesystem)
149  : FieldIO(pComm, sharedFilesystem)
150 {
151 }
152 
153 /**
154  * @brief Write an HDF5 file to @p outFile given the field definitions @p
155  * fielddefs, field data @p fielddata and metadata @p fieldmetadatamap.
156  *
157  * The writing strategy for HDF5 output is as follows:
158  *
159  * - Each rank determines the amount of data needed to be written into each
160  * dataset.
161  * - Each rank communicates its decomposition information to the root process.
162  * - The root processor initialises the output structure, writes the
163  * decomposition dataset and all the field definition information.
164  * - Other ranks may have field definitions that do not belong to the root
165  * process, in which case they open the file and append this (since
166  * attributes cannot be written in parallel).
167  * - Each of the other ranks writes their data contributions to the rest of
168  * the set.
169  *
170  * @param outFile Output filename.
171  * @param fielddefs Input field definitions.
172  * @param fielddata Input field data.
173  * @param fieldmetadatamap Field metadata.
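 *
 * A minimal usage sketch (illustrative only): one way to obtain and use this
 * writer is through the FieldIO factory, assuming the "Hdf5" backend has been
 * compiled in and @c comm, @c fielddefs and @c fielddata already hold a valid
 * communicator and previously constructed field definitions/data; the final
 * @c true is the shared-filesystem flag.
 *
 * @code
 * FieldIOSharedPtr fld =
 *     GetFieldIOFactory().CreateInstance("Hdf5", comm, true);
 * fld->Write("session.fld", fielddefs, fielddata);
 * @endcode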
174  */
175 void FieldIOHdf5::v_Write(const std::string &outFile,
176  std::vector<FieldDefinitionsSharedPtr> &fielddefs,
177  std::vector<std::vector<NekDouble> > &fielddata,
178  const FieldMetaDataMap &fieldmetadatamap,
179  const bool backup)
180 {
181  std::stringstream prfx;
182  prfx << m_comm->GetRank() << ": FieldIOHdf5::v_Write(): ";
183  double tm0 = 0.0, tm1 = 0.0;
184 
185  if (m_comm->GetRank() == 0)
186  {
187  tm0 = m_comm->Wtime();
188  }
189 
190  SetUpOutput(outFile, false, backup);
191 
192  // We make a number of assumptions in this code:
193  // 1. All element ids have the same type: unsigned int
194  // 2. All elements within a given field have the same number of values
195  // 3. All element values have the same type, NekDouble
196 
197  // Determine the root MPI process, i.e., the lowest ranked process handling
198  // nMaxFields fields, that will be responsible for writing our file.
199  ASSERTL1(fielddefs.size() == fielddata.size(),
200  prfx.str() + "fielddefs and fielddata have incompatible lengths.");
201 
202  size_t nFields = fielddefs.size();
203  size_t nMaxFields = nFields;
204  m_comm->AllReduce(nMaxFields, LibUtilities::ReduceMax);
205 
206  int root_rank = -1;
207  bool amRoot = false;
208  LibUtilities::CommSharedPtr max_fields_comm;
209 
210  if (m_comm->GetSize() > 1)
211  {
212  max_fields_comm = m_comm->CommCreateIf((nFields == nMaxFields) ? 1 : 0);
213  }
214  else
215  {
216  max_fields_comm = m_comm;
217  }
218 
219  if (max_fields_comm)
220  {
221  int rank = m_comm->GetRank();
222  root_rank = rank;
223  max_fields_comm->AllReduce(root_rank, LibUtilities::ReduceMin);
224  amRoot = (rank == root_rank);
225  if (!amRoot)
226  {
227  root_rank = -1;
228  }
229  }
230 
231  m_comm->AllReduce(root_rank, LibUtilities::ReduceMax);
232  ASSERTL1(root_rank >= 0 && root_rank < m_comm->GetSize(),
233  prfx.str() + "invalid root rank.");
234 
235  std::vector<uint64_t> decomps(nMaxFields * MAX_DCMPS, 0);
236  std::vector<uint64_t> all_hashes(nMaxFields * m_comm->GetSize(), 0);
237  std::vector<uint64_t> cnts(MAX_CNTS, 0);
238  std::vector<std::string> fieldNames(nFields);
239  std::vector<std::string> shapeStrings(nFields);
240  std::vector<std::vector<NekDouble> > homoLengths(nFields);
241  std::vector<std::vector<unsigned int> > homoSIDs(nFields),
242  homoYIDs(nFields), homoZIDs(nFields);
243  std::vector<std::vector<unsigned int> > numModesPerDirVar(nFields);
244  std::vector<std::string> numModesPerDirUni(nFields);
245 
246  int homDim = -1;
247  int varOrder = 0;
248 
249  for (int f = 0; f < nFields; ++f)
250  {
251  if (!fielddefs[f]->m_uniOrder)
252  {
253  varOrder = 1;
254  break;
255  }
256  }
257 
258  m_comm->AllReduce(varOrder, LibUtilities::ReduceMax);
259 
260  // Calculate the total number of elements handled by this MPI process and
261  // the total number of bytes required to store the elements. Base the name
262  // of each field on the hash of the field definition.
263  for (int f = 0; f < nFields; ++f)
264  {
265  ASSERTL1(fielddata[f].size() > 0,
266  prfx.str() +
267  "fielddata vector must contain at least one value.");
268  ASSERTL1(fielddata[f].size() ==
269  fielddefs[f]->m_fields.size() *
270  CheckFieldDefinition(fielddefs[f]),
271  prfx.str() + "fielddata vector has invalid size.");
272 
273  std::size_t nFieldElems = fielddefs[f]->m_elementIDs.size();
274  std::size_t nElemVals = fielddata[f].size();
275 
276  decomps[f * MAX_DCMPS + ELEM_DCMP_IDX] = nFieldElems;
277  decomps[f * MAX_DCMPS + VAL_DCMP_IDX] = nElemVals;
278 
279  cnts[ELEM_CNT_IDX] += nFieldElems;
280  cnts[VAL_CNT_IDX] += nElemVals;
281 
282  // Hash the field specification
283  std::stringstream hashStream;
284  std::size_t nSubFields = fielddefs[f]->m_fields.size();
285  for (int sf = 0; sf < nSubFields; ++sf)
286  {
287  hashStream << fielddefs[f]->m_fields[sf];
288  }
289 
290  nSubFields = fielddefs[f]->m_basis.size();
291  for (int sf = 0; sf < nSubFields; ++sf)
292  {
293  hashStream << fielddefs[f]->m_basis[sf];
294  }
295 
296  // Determine SHAPE attribute
297  std::stringstream shapeStringStream;
298  shapeStringStream << ShapeTypeMap[fielddefs[f]->m_shapeType];
299 
300  if (fielddefs[f]->m_numHomogeneousDir > 0)
301  {
302  if (homDim == -1)
303  {
304  homDim = fielddefs[f]->m_numHomogeneousDir;
305  }
306 
307  ASSERTL1(homDim == fielddefs[f]->m_numHomogeneousDir,
308  "HDF5 does not support variable homogeneous directions in "
309  "the same file.");
310 
311  shapeStringStream << "-HomogenousExp"
312  << fielddefs[f]->m_numHomogeneousDir << "D";
313  }
314 
315  if (fielddefs[f]->m_homoStrips)
316  {
317  shapeStringStream << "-Strips";
318  }
319 
320  shapeStrings[f] = shapeStringStream.str();
321  hashStream << shapeStringStream.str();
322 
323  // Determine HOMOGENEOUS attributes
324  if (fielddefs[f]->m_numHomogeneousDir)
325  {
326  nSubFields = fielddefs[f]->m_homogeneousLengths.size();
327  homoLengths[f].resize(nSubFields);
328  for (int sf = 0; sf < nSubFields; ++sf)
329  {
330  NekDouble len = fielddefs[f]->m_homogeneousLengths[sf];
331  hashStream << len;
332  homoLengths[f][sf] = len;
333  }
334 
335  nSubFields = fielddefs[f]->m_homogeneousYIDs.size();
336  if (nSubFields > 0)
337  {
338  homoYIDs[f].resize(nSubFields);
339  decomps[f * MAX_DCMPS + HOMY_DCMP_IDX] = nSubFields;
340  cnts[HOMY_CNT_IDX] += nSubFields;
341  for (int sf = 0; sf < nSubFields; ++sf)
342  {
343  homoYIDs[f][sf] = fielddefs[f]->m_homogeneousYIDs[sf];
344  }
345  }
346 
347  nSubFields = fielddefs[f]->m_homogeneousZIDs.size();
348  if (nSubFields > 0)
349  {
350  homoZIDs[f].resize(nSubFields);
351  decomps[f * MAX_DCMPS + HOMZ_DCMP_IDX] = nSubFields;
352  cnts[HOMZ_CNT_IDX] += nSubFields;
353  for (int sf = 0; sf < nSubFields; ++sf)
354  {
355  homoZIDs[f][sf] = fielddefs[f]->m_homogeneousZIDs[sf];
356  }
357  }
358 
359  nSubFields = fielddefs[f]->m_homogeneousSIDs.size();
360  if (nSubFields > 0)
361  {
362  homoSIDs[f].resize(nSubFields);
363  decomps[f * MAX_DCMPS + HOMS_DCMP_IDX] = nSubFields;
364  cnts[HOMS_CNT_IDX] += nSubFields;
365  for (int sf = 0; sf < nSubFields; ++sf)
366  {
367  homoSIDs[f][sf] = fielddefs[f]->m_homogeneousSIDs[sf];
368  }
369  }
370  }
371 
372  if (fielddefs[f]->m_uniOrder)
373  {
374  std::vector<unsigned int> elemModes(fielddefs[f]->m_basis.size());
375 
376  for (std::vector<int>::size_type i = 0;
377  i < fielddefs[f]->m_basis.size(); ++i)
378  {
379  elemModes[i] = fielddefs[f]->m_numModes[i];
380  }
381 
382  if (varOrder)
383  {
384  for (std::vector<int>::size_type i = 0; i < nFieldElems; ++i)
385  {
386  std::copy(elemModes.begin(), elemModes.end(),
387  std::back_inserter(numModesPerDirVar[f]));
388  }
389  decomps[f * MAX_DCMPS + ORDER_DCMP_IDX] =
390  nFieldElems * elemModes.size();
391  cnts[ORDER_CNT_IDX] += nFieldElems * elemModes.size();
392  }
393  else
394  {
395  std::stringstream numModesStringStream;
396  numModesStringStream << "UNIORDER:";
397  for (std::vector<int>::size_type i = 0;
398  i < elemModes.size(); i++)
399  {
400  if (i > 0)
401  {
402  numModesStringStream << ",";
403  }
404  numModesStringStream << elemModes[i];
405  }
406 
407  numModesPerDirUni[f] = numModesStringStream.str();
408  hashStream << numModesPerDirUni[f];
409  }
410  }
411  else
412  {
413  numModesPerDirVar[f] = fielddefs[f]->m_numModes;
414  decomps[f * MAX_DCMPS + ORDER_DCMP_IDX] =
415  fielddefs[f]->m_numModes.size();
416  cnts[ORDER_CNT_IDX] += fielddefs[f]->m_numModes.size();
417  }
418 
419  std::hash<std::string> string_hasher;
420  std::stringstream fieldNameStream;
421  uint64_t fieldDefHash = string_hasher(hashStream.str());
422 
423  decomps[f * MAX_DCMPS + HASH_DCMP_IDX] = fieldDefHash;
424  all_hashes[m_comm->GetRank() * nMaxFields + f] = fieldDefHash;
425 
426  fieldNameStream << fieldDefHash;
427  fieldNames[f] = fieldNameStream.str();
428  }
429 
430  // Gather information from all MPI processes
431  std::vector<uint64_t> all_cnts = m_comm->Gather(root_rank, cnts);
432  std::vector<uint64_t> all_idxs(m_comm->GetSize() * MAX_IDXS, 0);
433  std::vector<uint64_t> all_decomps = m_comm->Gather(root_rank, decomps);
434  std::vector<uint64_t> all_dsetsize(MAX_CNTS, 0);
435 
436  // The root rank creates the file layout from scratch
437  if (amRoot)
438  {
439  H5::FileSharedPtr outfile = H5::File::Create(outFile, H5F_ACC_TRUNC);
440  ASSERTL1(outfile, prfx.str() + "cannot create HDF5 file.");
441  H5::GroupSharedPtr root = outfile->CreateGroup("NEKTAR");
442  ASSERTL1(root, prfx.str() + "cannot create root group.");
443  TagWriterSharedPtr info_writer(new H5TagWriter(root));
444  AddInfoTag(info_writer, fieldmetadatamap);
445 
446  // Record file format version as attribute in main group.
447  root->SetAttribute("FORMAT_VERSION", FORMAT_VERSION);
448 
449  // Calculate the indexes to be used by each MPI process when writing the
450  // ELEMENTIDS and DATA datasets
451  std::size_t nTotElems = 0, nTotVals = 0, nTotOrder = 0;
452  std::size_t nTotHomY = 0, nTotHomZ = 0, nTotHomS = 0;
453  int nRanks = m_comm->GetSize();
454  for (int r = 0; r < nRanks; ++r)
455  {
456  all_idxs[r * MAX_IDXS + IDS_IDX_IDX] = nTotElems;
457  all_idxs[r * MAX_IDXS + DATA_IDX_IDX] = nTotVals;
458  all_idxs[r * MAX_IDXS + ORDER_IDX_IDX] = nTotOrder;
459  all_idxs[r * MAX_IDXS + HOMY_IDX_IDX] = nTotHomY;
460  all_idxs[r * MAX_IDXS + HOMZ_IDX_IDX] = nTotHomZ;
461  all_idxs[r * MAX_IDXS + HOMS_IDX_IDX] = nTotHomS;
462 
463  nTotElems += all_cnts[r * MAX_CNTS + ELEM_CNT_IDX];
464  nTotVals += all_cnts[r * MAX_CNTS + VAL_CNT_IDX];
465  nTotOrder += all_cnts[r * MAX_CNTS + ORDER_CNT_IDX];
466  nTotHomY += all_cnts[r * MAX_CNTS + HOMY_CNT_IDX];
467  nTotHomZ += all_cnts[r * MAX_CNTS + HOMZ_CNT_IDX];
468  nTotHomS += all_cnts[r * MAX_CNTS + HOMS_CNT_IDX];
469  }
470 
471  all_dsetsize[ELEM_CNT_IDX ] = nTotElems;
472  all_dsetsize[VAL_CNT_IDX ] = nTotVals;
473  all_dsetsize[ORDER_CNT_IDX] = nTotOrder;
474  all_dsetsize[HOMY_CNT_IDX ] = nTotHomY;
475  all_dsetsize[HOMZ_CNT_IDX ] = nTotHomZ;
476  all_dsetsize[HOMS_CNT_IDX ] = nTotHomS;
477 
478  // Create DECOMPOSITION dataset: basic field info for each MPI process
479  H5::DataTypeSharedPtr decomps_type =
480  H5::DataType::OfObject(all_decomps[0]);
481  H5::DataSpaceSharedPtr decomps_space =
482  H5::DataSpace::OneD(all_decomps.size());
483  H5::DataSetSharedPtr decomps_dset =
484  root->CreateDataSet("DECOMPOSITION", decomps_type, decomps_space);
485  ASSERTL1(decomps_dset,
486  prfx.str() + "cannot create DECOMPOSITION dataset.");
487 
488  // Create IDS dataset: element ids
489  H5::DataTypeSharedPtr ids_type =
490  H5::DataType::OfObject(fielddefs[0]->m_elementIDs[0]);
491  H5::DataSpaceSharedPtr ids_space = H5::DataSpace::OneD(nTotElems);
492  H5::DataSetSharedPtr ids_dset =
493  root->CreateDataSet("ELEMENTIDS", ids_type, ids_space);
494  ASSERTL1(ids_dset, prfx.str() + "cannot create ELEMENTIDS dataset.");
495 
496  // Create DATA dataset: element data
497  H5::DataTypeSharedPtr data_type =
498  H5::DataType::OfObject(fielddata[0][0]);
499  H5::DataSpaceSharedPtr data_space = H5::DataSpace::OneD(nTotVals);
500  H5::DataSetSharedPtr data_dset =
501  root->CreateDataSet("DATA", data_type, data_space);
502  ASSERTL1(data_dset, prfx.str() + "cannot create DATA dataset.");
503 
504  // Create HOMOGENEOUSYIDS dataset: homogeneous y-plane IDs
505  if (nTotHomY > 0)
506  {
507  H5::DataTypeSharedPtr homy_type =
508  H5::DataType::OfObject(homoYIDs[0][0]);
509  H5::DataSpaceSharedPtr homy_space = H5::DataSpace::OneD(nTotHomY);
510  H5::DataSetSharedPtr homy_dset =
511  root->CreateDataSet("HOMOGENEOUSYIDS", homy_type, homy_space);
512  ASSERTL1(homy_dset,
513  prfx.str() + "cannot create HOMOGENEOUSYIDS dataset.");
514  }
515 
516  // Create HOMOGENEOUSZIDS dataset: homogeneous z-plane IDs
517  if (nTotHomZ > 0)
518  {
519  H5::DataTypeSharedPtr homz_type =
520  H5::DataType::OfObject(homoZIDs[0][0]);
521  H5::DataSpaceSharedPtr homz_space = H5::DataSpace::OneD(nTotHomZ);
522  H5::DataSetSharedPtr homz_dset =
523  root->CreateDataSet("HOMOGENEOUSZIDS", homz_type, homz_space);
524  ASSERTL1(homz_dset,
525  prfx.str() + "cannot create HOMOGENEOUSZIDS dataset.");
526  }
527 
528  // Create HOMOGENEOUSSIDS dataset: homogeneous strip IDs
529  if (nTotHomS > 0)
530  {
531  H5::DataTypeSharedPtr homs_type =
532  H5::DataType::OfObject(homoSIDs[0][0]);
533  H5::DataSpaceSharedPtr homs_space = H5::DataSpace::OneD(nTotHomS);
534  H5::DataSetSharedPtr homs_dset =
535  root->CreateDataSet("HOMOGENEOUSSIDS", homs_type, homs_space);
536  ASSERTL1(homs_dset,
537  prfx.str() + "cannot create HOMOGENEOUSSIDS dataset.");
538  }
539 
540  // Create POLYORDERS dataset: elemental polynomial orders
541  if (varOrder)
542  {
543  H5::DataTypeSharedPtr order_type =
544  H5::DataType::OfObject(numModesPerDirVar[0][0]);
545  H5::DataSpaceSharedPtr order_space = H5::DataSpace::OneD(nTotOrder);
546  H5::DataSetSharedPtr order_dset =
547  root->CreateDataSet("POLYORDERS", order_type, order_space);
548  ASSERTL1(order_dset,
549  prfx.str() + "cannot create POLYORDERS dataset.");
550  }
551  }
552 
553  m_comm->Bcast(all_dsetsize, root_rank);
554 
555  // Datasets, root group and HDF5 file are all closed automatically since
556  // they are now out of scope. Now we need to determine which process will
557  // write the group representing the field description in the HDF5 file. This
558  // next block of code performs this by finding all unique hashes and then
559  // determining one process that will create (possibly more than one) group
560  // for that hash. An alternative would be to communicate the field
561  // information to the root processor, but this is a bit convoluted.
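 // For example, if ranks 0 and 2 both hold a field whose definition hashes to
 // some value H, the scan below visits ranks in ascending order, so H is
 // assigned to rank 0; rank 0 then reopens the file serially and creates the
 // group (and attributes) for H.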
562 
563  // This set stores the unique hashes.
564  std::set<uint64_t> hashToProc;
565  // This map takes each rank to the hashes whose groups that rank will write.
566  std::map<int, std::vector<uint64_t> > writingProcs;
567 
568  // Gather all field hashes to every processor.
569  m_comm->AllReduce(all_hashes, LibUtilities::ReduceMax);
570 
571  for (int n = 0; n < m_comm->GetSize(); ++n)
572  {
573  for (int i = 0; i < nMaxFields; ++i)
574  {
575  uint64_t hash = all_hashes[n*nMaxFields + i];
576 
577  // Note hash can be zero if, on this process, nFields < nMaxFields.
578  if (hashToProc.find(hash) != hashToProc.end() || hash == 0)
579  {
580  continue;
581  }
582  hashToProc.insert(hash);
583  writingProcs[n].push_back(hash);
584  }
585  }
586 
587  // Having constructed the map, go ahead and write the attributes out.
588  for (auto &sIt : writingProcs)
589  {
590  int rank = sIt.first;
591 
592  // Write out this rank's groups.
593  if (m_comm->GetRank() == rank)
594  {
595  H5::PListSharedPtr serialProps = H5::PList::Default();
596  H5::PListSharedPtr writeSR = H5::PList::Default();
597 
598  // Reopen the file
599  H5::FileSharedPtr outfile =
600  H5::File::Open(outFile, H5F_ACC_RDWR, serialProps);
601  ASSERTL1(outfile, prfx.str() + "cannot open HDF5 file.");
602  H5::GroupSharedPtr root = outfile->OpenGroup("NEKTAR");
603  ASSERTL1(root, prfx.str() + "cannot open root group.");
604 
605  // Write a HDF5 group for each field
606  hashToProc.clear();
607  for (int i = 0; i < sIt.second.size(); ++i)
608  {
609  for (int f = 0; f < nFields; ++f)
610  {
611  if (sIt.second[i] !=
612  all_hashes[m_comm->GetRank() * nMaxFields + f] ||
613  hashToProc.find(sIt.second[i]) != hashToProc.end())
614  {
615  continue;
616  }
617 
618  hashToProc.insert(sIt.second[i]);
619 
620  // Just in case we've already written this
621  H5::GroupSharedPtr field_group =
622  root->CreateGroup(fieldNames[f]);
623  ASSERTL1(field_group,
624  prfx.str() + "cannot create field group.");
625  field_group->SetAttribute("FIELDS", fielddefs[f]->m_fields);
626  field_group->SetAttribute("BASIS", fielddefs[f]->m_basis);
627  field_group->SetAttribute("SHAPE", shapeStrings[f]);
628 
629  if (homoLengths[f].size() > 0)
630  {
631  field_group->SetAttribute("HOMOGENEOUSLENGTHS",
632  homoLengths[f]);
633  }
634 
635  // If the field has only uniform order, we write the order
636  // into the NUMMODESPERDIR attribute. Otherwise, we'll go
637  // ahead and assume everything is mixed and fix this in the
638  // read later if required.
639  if (!varOrder)
640  {
641  field_group->SetAttribute("NUMMODESPERDIR",
642  numModesPerDirUni[f]);
643  }
644  else
645  {
646  std::string numModesPerDir = "MIXORDER";
647  field_group->SetAttribute("NUMMODESPERDIR",
648  numModesPerDir);
649  }
650  }
651  }
652  }
653 
654  // We block to avoid more than one processor opening the file at a time.
655  m_comm->Block();
656  }
657 
658  // Write the DECOMPOSITION dataset
659  if (amRoot)
660  {
661  H5::PListSharedPtr serialProps = H5::PList::Default();
662  H5::PListSharedPtr writeSR = H5::PList::Default();
663 
664  // Reopen the file
665  H5::FileSharedPtr outfile =
666  H5::File::Open(outFile, H5F_ACC_RDWR, serialProps);
667  ASSERTL1(outfile, prfx.str() + "cannot open HDF5 file.");
668  H5::GroupSharedPtr root = outfile->OpenGroup("NEKTAR");
669  ASSERTL1(root, prfx.str() + "cannot open root group.");
670 
671  // Write the DECOMPOSITION dataset
672  H5::DataSetSharedPtr decomps_dset = root->OpenDataSet("DECOMPOSITION");
673  ASSERTL1(decomps_dset,
674  prfx.str() + "cannot open DECOMPOSITION dataset.");
675 
676  H5::DataSpaceSharedPtr decomps_fspace = decomps_dset->GetSpace();
677  ASSERTL1(decomps_fspace,
678  prfx.str() + "cannot open DECOMPOSITION filespace.");
679 
680  decomps_fspace->SelectRange(0, all_decomps.size());
681  decomps_dset->Write(all_decomps, decomps_fspace, writeSR);
682  }
683 
684  // Initialise the dataset indexes for all MPI processes
685  std::vector<uint64_t> idx = m_comm->Scatter(root_rank, all_idxs);
686  uint64_t ids_i = idx[IDS_IDX_IDX];
687  uint64_t data_i = idx[DATA_IDX_IDX];
688  uint64_t order_i = idx[ORDER_IDX_IDX];
689  uint64_t homy_i = idx[HOMY_IDX_IDX];
690  uint64_t homz_i = idx[HOMZ_IDX_IDX];
691  uint64_t homs_i = idx[HOMS_IDX_IDX];
692 
693  // Set properties for parallel file access (if we're in parallel)
694  H5::PListSharedPtr parallelProps = H5::PList::Default();
695  H5::PListSharedPtr writePL = H5::PList::Default();
696  if (m_comm->GetSize() > 1)
697  {
698  // Use MPI/O to access the file
699  parallelProps = H5::PList::FileAccess();
700  parallelProps->SetMpio(m_comm);
701  // Use collective IO
702  writePL = H5::PList::DatasetXfer();
703  writePL->SetDxMpioCollective();
704  }
705 
706  // Reopen the file
707  H5::FileSharedPtr outfile =
708  H5::File::Open(outFile, H5F_ACC_RDWR, parallelProps);
709  ASSERTL1(outfile, prfx.str() + "cannot open HDF5 file.");
710  H5::GroupSharedPtr root = outfile->OpenGroup("NEKTAR");
711  ASSERTL1(root, prfx.str() + "cannot open root group.");
712 
713  m_comm->Block();
714 
715  // All HDF5 groups have now been created. Open the ELEMENTIDS dataset and
716  // associated data space
717  H5::DataSetSharedPtr ids_dset = root->OpenDataSet("ELEMENTIDS");
718  ASSERTL1(ids_dset, prfx.str() + "cannot open ELEMENTIDS dataset.");
719  H5::DataSpaceSharedPtr ids_fspace = ids_dset->GetSpace();
720  ASSERTL1(ids_fspace, prfx.str() + "cannot open ELEMENTIDS filespace.");
721 
722  // Open the DATA dataset and associated data space
723  H5::DataSetSharedPtr data_dset = root->OpenDataSet("DATA");
724  ASSERTL1(data_dset, prfx.str() + "cannot open DATA dataset.");
725  H5::DataSpaceSharedPtr data_fspace = data_dset->GetSpace();
726  ASSERTL1(data_fspace, prfx.str() + "cannot open DATA filespace.");
727 
728  // Open the optional datasets and data spaces.
729  H5::DataSetSharedPtr order_dset, homy_dset, homz_dset, homs_dset;
730  H5::DataSpaceSharedPtr order_fspace , homy_fspace, homz_fspace, homs_fspace;
731 
732  if (all_dsetsize[ORDER_CNT_IDX])
733  {
734  order_dset = root->OpenDataSet("POLYORDERS");
735  ASSERTL1(order_dset, prfx.str() + "cannot open POLYORDERS dataset.");
736  order_fspace = order_dset->GetSpace();
737  ASSERTL1(order_fspace, prfx.str() + "cannot open POLYORDERS filespace.");
738  }
739 
740  if (all_dsetsize[HOMY_CNT_IDX])
741  {
742  homy_dset = root->OpenDataSet("HOMOGENEOUSYIDS");
743  ASSERTL1(homy_dset, prfx.str() + "cannot open HOMOGENEOUSYIDS dataset.");
744  homy_fspace = homy_dset->GetSpace();
745  ASSERTL1(homy_fspace, prfx.str() + "cannot open HOMOGENEOUSYIDS filespace.");
746  }
747 
748  if (all_dsetsize[HOMZ_CNT_IDX])
749  {
750  homz_dset = root->OpenDataSet("HOMOGENEOUSZIDS");
751  ASSERTL1(homz_dset, prfx.str() + "cannot open HOMOGENEOUSZIDS dataset.");
752  homz_fspace = homz_dset->GetSpace();
753  ASSERTL1(homz_fspace, prfx.str() + "cannot open HOMOGENEOUSZIDS filespace.");
754  }
755 
756  if (all_dsetsize[HOMS_CNT_IDX])
757  {
758  homs_dset = root->OpenDataSet("HOMOGENEOUSSIDS");
759  ASSERTL1(homs_dset, prfx.str() + "cannot open HOMOGENEOUSSIDS dataset.");
760  homs_fspace = homs_dset->GetSpace();
761  ASSERTL1(homs_fspace, prfx.str() + "cannot open HOMOGENEOUSSIDS filespace.");
762  }
763 
764  // Write the data
765  for (int f = 0; f < nFields; ++f)
766  {
767  // write the element ids
768  std::size_t nFieldElems = fielddefs[f]->m_elementIDs.size();
769  ids_fspace->SelectRange(ids_i, nFieldElems);
770  ids_dset->Write(fielddefs[f]->m_elementIDs, ids_fspace, writePL);
771  ids_i += nFieldElems;
772 
773  // write the element values
774  std::size_t nFieldVals = fielddata[f].size();
775  data_fspace->SelectRange(data_i, nFieldVals);
776  data_dset->Write(fielddata[f], data_fspace, writePL);
777  data_i += nFieldVals;
778  }
779 
780  if (order_dset)
781  {
782  for (int f = 0; f < nFields; ++f)
783  {
784  std::size_t nOrders = numModesPerDirVar[f].size();
785  order_fspace->SelectRange(order_i, nOrders);
786  order_dset->Write(numModesPerDirVar[f], order_fspace, writePL);
787  order_i += nOrders;
788  }
789  }
790 
791  if (homy_dset)
792  {
793  for (int f = 0; f < nFields; ++f)
794  {
795  std::size_t nYIDs = homoYIDs[f].size();
796  homy_fspace->SelectRange(homy_i, nYIDs);
797  homy_dset->Write(homoYIDs[f], homy_fspace, writePL);
798  homy_i += nYIDs;
799  }
800  }
801 
802  if (homz_dset)
803  {
804  for (int f = 0; f < nFields; ++f)
805  {
806  std::size_t nZIDs = homoZIDs[f].size();
807  homz_fspace->SelectRange(homz_i, nZIDs);
808  homz_dset->Write(homoZIDs[f], homz_fspace, writePL);
809  homz_i += nZIDs;
810  }
811  }
812 
813  if (homs_dset)
814  {
815  for (int f = 0; f < nFields; ++f)
816  {
817  std::size_t nSIDs = homoSIDs[f].size();
818  homs_fspace->SelectRange(homs_i, nSIDs);
819  homs_dset->Write(homoSIDs[f], homs_fspace, writePL);
820  homs_i += nSIDs;
821  }
822  }
823 
824  for (int f = nFields; f < nMaxFields; ++f)
825  {
826  // This MPI process is handling fewer than nMaxFields fields. Since
827  // dataset writes are collective operations, all ranks must take part,
828  // so simply rewrite the element IDs and values of the last field.
829  ids_dset->Write(
830  fielddefs[nFields - 1]->m_elementIDs, ids_fspace, writePL);
831  data_dset->Write(fielddata[nFields - 1], data_fspace, writePL);
832 
833  if (order_dset)
834  {
835  order_dset->Write(numModesPerDirVar[nFields - 1],
836  order_fspace, writePL);
837  }
838 
839  if (homy_dset)
840  {
841  homy_dset->Write(homoYIDs[nFields - 1], homy_fspace, writePL);
842  }
843 
844  if (homz_dset)
845  {
846  homz_dset->Write(homoZIDs[nFields - 1], homz_fspace, writePL);
847  }
848 
849  if (homs_dset)
850  {
851  homs_dset->Write(homoSIDs[nFields - 1], homs_fspace, writePL);
852  }
853  }
854 
855  m_comm->Block();
856 
857  // all data has been written
858  if (m_comm->GetRank() == 0)
859  {
860  tm1 = m_comm->Wtime();
861  std::cout << " (" << tm1 - tm0 << "s, HDF5)" << std::endl;
862  }
863 }
864 
865 /**
867  * @brief Import an HDF5 format file.
867  *
868  * @param infilename Input filename.
869  * @param fielddefs Field definitions of resulting field
870  * @param fielddata Field data of resulting field
871  * @param fieldinfomap Field metadata of resulting field
872  * @param ElementIDs If specified, contains the list of element IDs on
873  * this rank. The resulting field definitions will only
874  * contain data for the element IDs specified in this
875  * array.
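 *
 * A minimal usage sketch (illustrative only; @c fld is a FieldIOSharedPtr
 * obtained as in the example for v_Write above):
 *
 * @code
 * std::vector<FieldDefinitionsSharedPtr> defs;
 * std::vector<std::vector<NekDouble> >   data;
 * FieldMetaDataMap                       meta;
 * fld->Import("session.fld", defs, data, meta);
 * @endcode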
876  */
877 void FieldIOHdf5::v_Import(const std::string &infilename,
878  std::vector<FieldDefinitionsSharedPtr> &fielddefs,
879  std::vector<std::vector<NekDouble> > &fielddata,
880  FieldMetaDataMap &fieldinfomap,
881  const Array<OneD, int> &ElementIDs)
882 {
883  std::stringstream prfx;
884  int nRanks = m_comm->GetSize();
885 
886  // Set properties for parallel file access (if we're in parallel)
887  H5::PListSharedPtr parallelProps = H5::PList::Default();
888  H5::PListSharedPtr readPL = H5::PList::Default();
889  H5::PListSharedPtr readPLInd = H5::PList::Default();
890 
891  if (nRanks > 1)
892  {
893  // Use MPI/O to access the file
894  parallelProps = H5::PList::FileAccess();
895  parallelProps->SetMpio(m_comm);
896  // Use collective IO
897  readPL = H5::PList::DatasetXfer();
898  readPL->SetDxMpioCollective();
899  readPLInd = H5::PList::DatasetXfer();
900  readPLInd->SetDxMpioIndependent();
901  }
902 
903  DataSourceSharedPtr dataSource = H5DataSource::create(
904  infilename, parallelProps);
905 
906  // Open the root group of the hdf5 file
907  H5DataSourceSharedPtr h5 =
908  std::static_pointer_cast<H5DataSource>(dataSource);
909  ASSERTL1(h5, prfx.str() + "cannot open HDF5 file.");
910  H5::GroupSharedPtr root = h5->Get()->OpenGroup("NEKTAR");
911  ASSERTL1(root, prfx.str() + "cannot open root group.");
912 
913  // Check format version
914  unsigned int formatVersion;
915  H5::Group::AttrIterator attrIt = root->attr_begin();
916  H5::Group::AttrIterator attrEnd = root->attr_end();
917  for (; attrIt != attrEnd; ++attrIt)
918  {
919  if (*attrIt == "FORMAT_VERSION")
920  {
921  break;
922  }
923  }
924 
925  ASSERTL0(attrIt != attrEnd,
926  "Unable to determine Nektar++ HDF5 file version");
927  root->GetAttribute("FORMAT_VERSION", formatVersion);
928 
929  ASSERTL0(formatVersion <= FORMAT_VERSION,
930  "File format if " + infilename + " is higher than supported in "
931  "this version of Nektar++");
932 
933  // Open the datasets
934  H5::DataSetSharedPtr decomps_dset = root->OpenDataSet("DECOMPOSITION");
935  ASSERTL1(decomps_dset, prfx.str() + "cannot open DECOMPOSITION dataset.");
936 
937  H5::DataSetSharedPtr ids_dset = root->OpenDataSet("ELEMENTIDS");
938  ASSERTL1(ids_dset, prfx.str() + "cannot open ELEMENTIDS dataset.");
939 
940  H5::DataSetSharedPtr data_dset = root->OpenDataSet("DATA");
941  ASSERTL1(data_dset, prfx.str() + "cannot open DATA dataset.");
942 
943  // Open the dataset file spaces
944  H5::DataSpaceSharedPtr decomps_fspace = decomps_dset->GetSpace();
945  ASSERTL1(decomps_fspace,
946  prfx.str() + "cannot open DECOMPOSITION filespace.");
947 
948  H5::DataSpaceSharedPtr ids_fspace = ids_dset->GetSpace();
949  ASSERTL1(ids_fspace, prfx.str() + "cannot open ELEMENTIDS filespace.");
950 
951  H5::DataSpaceSharedPtr data_fspace = data_dset->GetSpace();
952  ASSERTL1(data_fspace, prfx.str() + "cannot open DATA filespace.");
953 
954  // Read entire IDS data set to get list of global IDs.
955  std::vector<uint64_t> ids;
956 
957  ids_dset->Read(ids, ids_fspace, readPL);
958 
959  std::unordered_set<uint64_t> toread;
960  if (ElementIDs != NullInt1DArray)
961  {
962  for (uint64_t i = 0; i < ElementIDs.num_elements(); ++i)
963  {
964  toread.insert(ElementIDs[i]);
965  }
966  }
967 
968  std::vector<uint64_t> decomps;
969  decomps_dset->Read(decomps, decomps_fspace, readPL);
970 
971  size_t nDecomps = decomps.size() / MAX_DCMPS;
972  size_t cnt = 0, cnt2 = 0;
973 
974  // Mapping from each decomposition to offsets in the data array.
975  std::vector<OffsetHelper> decompsToOffsets (nDecomps);
976 
977  // Mapping from each decomposition to its vector of element IDs. Note this
978  // has to be unsigned int, since that's what we use in FieldDefinitions.
979  std::map<uint64_t, std::vector<unsigned int> > groupsToElmts;
980 
981  // Mapping from each group's hash to each of the decompositions.
982  std::map<uint64_t, std::set<uint64_t> > groupsToDecomps;
983 
984  // True if we are pulling element IDs from ElementIDs.
985  bool selective = toread.size() > 0;
986 
987  // Counters for data offsets
988  OffsetHelper running;
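 // Illustrative note: decompsToOffsets[i] ends up holding the number of data
 // values, polynomial orders and homogeneous IDs that precede decomposition i
 // in the corresponding datasets, which is what SelectRange() needs when the
 // field is actually read further down.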
989 
990  for (size_t i = 0; i < nDecomps; ++i, cnt += MAX_DCMPS)
991  {
992  uint64_t nElmt = decomps[cnt + ELEM_DCMP_IDX];
993  uint64_t groupHash = decomps[cnt + HASH_DCMP_IDX];
994 
995  std::vector<uint64_t> tmp;
996 
997  if (selective)
998  {
999  for (size_t j = 0; j < nElmt; ++j)
1000  {
1001  uint64_t elmtId = ids[cnt2 + j];
1002  if (toread.find(elmtId) != toread.end())
1003  {
1004  tmp.push_back(elmtId);
1005  }
1006  }
1007  }
1008  else
1009  {
1010  tmp.insert(
1011  tmp.begin(), ids.begin() + cnt2, ids.begin() + cnt2 + nElmt);
1012  }
1013 
1014  std::vector<unsigned int> tmp2(nElmt);
1015  for (size_t j = 0; j < nElmt; ++j)
1016  {
1017  tmp2[j] = ids[cnt2+j];
1018  }
1019 
1020  cnt2 += nElmt;
1021 
1022  if (tmp.size() > 0)
1023  {
1024  groupsToDecomps[groupHash].insert(i);
1025  }
1026 
1027  groupsToElmts[i] = tmp2;
1028  decompsToOffsets[i] = running;
1029 
1030  running.data += decomps[cnt + VAL_DCMP_IDX];
1031  running.order += decomps[cnt + ORDER_DCMP_IDX];
1032  running.homy += decomps[cnt + HOMY_DCMP_IDX];
1033  running.homz += decomps[cnt + HOMZ_DCMP_IDX];
1034  running.homs += decomps[cnt + HOMS_DCMP_IDX];
1035  }
1036 
1037  for (auto &gIt : groupsToDecomps)
1038  {
1039  // Select region from dataset for this decomposition.
1040  for (auto &sIt : gIt.second)
1041  {
1042  std::stringstream fieldNameStream;
1043  fieldNameStream << gIt.first;
1044 
1045  FieldDefinitionsSharedPtr fielddef =
1046  MemoryManager<FieldDefinitions>::AllocateSharedPtr();
1047  ImportFieldDef(readPLInd, root, decomps, sIt, decompsToOffsets[sIt],
1048  fieldNameStream.str(), fielddef);
1049 
1050  fielddef->m_elementIDs = groupsToElmts[sIt];
1051  fielddefs.push_back(fielddef);
1052 
1053  if (fielddata != NullVectorNekDoubleVector)
1054  {
1055  std::vector<NekDouble> decompFieldData;
1056  ImportFieldData(
1057  readPLInd, data_dset, data_fspace,
1058  decompsToOffsets[sIt].data, decomps, sIt, fielddef,
1059  decompFieldData);
1060  fielddata.push_back(decompFieldData);
1061  }
1062  }
1063  }
1064 
1065  ImportHDF5FieldMetaData(dataSource, fieldinfomap);
1066  m_comm->Block();
1067 }
1068 
1069 /**
1070  * @brief Import field definitions from a HDF5 document.
1071  *
1072  * @param readPL Reading parameter list.
1073  * @param root Root group containing field definitions.
1074  * @param group Group name to process.
1075  * @param def On output contains field definitions.
1076  */
1077 void FieldIOHdf5::ImportFieldDef(
1078  H5::PListSharedPtr readPL,
1079  H5::GroupSharedPtr root,
1080  std::vector<uint64_t> &decomps,
1081  uint64_t decomp,
1082  OffsetHelper offset,
1083  std::string group,
1084  FieldDefinitionsSharedPtr def)
1085 {
1086  std::stringstream prfx;
1087  prfx << m_comm->GetRank() << ": FieldIOHdf5::ImportFieldDef(): ";
1088 
1089  H5::GroupSharedPtr field = root->OpenGroup(group);
1090  ASSERTL1(field, prfx.str() + "cannot open field group, " + group + '.');
1091 
1092  def->m_uniOrder = false;
1093 
1094  H5::Group::AttrIterator attrIt = field->attr_begin();
1095  H5::Group::AttrIterator attrEnd = field->attr_end();
1096  for (; attrIt != attrEnd; ++attrIt)
1097  {
1098  const std::string &attrName = *attrIt;
1099  if (attrName == "FIELDS")
1100  {
1101  field->GetAttribute(attrName, def->m_fields);
1102  }
1103  else if (attrName == "SHAPE")
1104  {
1105  std::string shapeString;
1106  field->GetAttribute(attrName, shapeString);
1107 
1108  // Check to see if this is a homogeneous expansion and, if so, strip
1109  // down the shapeString definition.
1110  size_t loc;
1111  // The find_first_of("-") below locates the start of the homogeneous suffix.
1112  if (shapeString.find("Strips") != std::string::npos)
1113  {
1114  def->m_homoStrips = true;
1115  }
1116 
1117  if ((loc = shapeString.find_first_of("-")) != std::string::npos)
1118  {
1119  if (shapeString.find("Exp1D") != std::string::npos)
1120  {
1121  def->m_numHomogeneousDir = 1;
1122  }
1123  else // HomogenousExp2D
1124  {
1125  def->m_numHomogeneousDir = 2;
1126  }
1127 
1128  shapeString.erase(loc, shapeString.length());
1129  }
1130 
1131  // get the geometrical shape
1132  bool valid = false;
1133  for (unsigned int j = 0; j < SIZE_ShapeType; j++)
1134  {
1135  if (ShapeTypeMap[j] == shapeString)
1136  {
1137  def->m_shapeType = (ShapeType)j;
1138  valid = true;
1139  break;
1140  }
1141  }
1142 
1143  ASSERTL0(valid, prfx.str() + std::string(
1144  "unable to correctly parse the shape type: ")
1145  .append(shapeString).c_str());
1146  }
1147  else if (attrName == "BASIS")
1148  {
1149  field->GetAttribute(attrName, def->m_basis);
1150  // check the basis is in range
1151  for (auto &bIt : def->m_basis)
1152  {
1153  BasisType bt = bIt;
1154  ASSERTL0(bt >= 0 && bt < SIZE_BasisType,
1155  prfx.str() +
1156  "unable to correctly parse the basis types.");
1157  }
1158  }
1159  else if (attrName == "HOMOGENEOUSLENGTHS")
1160  {
1161  field->GetAttribute(attrName, def->m_homogeneousLengths);
1162  }
1163  else if (attrName == "NUMMODESPERDIR")
1164  {
1165  std::string numModesPerDir;
1166  field->GetAttribute(attrName, numModesPerDir);
1167 
1168  if (strstr(numModesPerDir.c_str(), "UNIORDER:"))
1169  {
1170  def->m_uniOrder = true;
1171  bool valid = ParseUtils::GenerateVector(
1172  numModesPerDir.substr(9), def->m_numModes);
1173  ASSERTL0(valid,
1174  prfx.str() +
1175  "unable to correctly parse the number of modes.");
1176  }
1177  }
1178  else if (attrName == "POINTSTYPE")
1179  {
1180  std::string pointsString;
1181  field->GetAttribute(attrName, pointsString);
1182  def->m_pointsDef = true;
1183 
1184  std::vector<std::string> pointsStrings;
1185  bool valid = ParseUtils::GenerateVector(
1186  pointsString, pointsStrings);
1187  ASSERTL0(valid,
1188  prfx.str() +
1189  "unable to correctly parse the points types.");
1190  for (std::vector<std::string>::size_type i = 0;
1191  i < pointsStrings.size();
1192  i++)
1193  {
1194  valid = false;
1195  for (unsigned int j = 0; j < SIZE_PointsType; j++)
1196  {
1197  if (kPointsTypeStr[j] == pointsStrings[i])
1198  {
1199  def->m_points.push_back((PointsType)j);
1200  valid = true;
1201  break;
1202  }
1203  }
1204 
1205  ASSERTL0(
1206  valid,
1207  prfx.str() +
1208  std::string(
1209  "unable to correctly parse the points type: ")
1210  .append(pointsStrings[i])
1211  .c_str());
1212  }
1213  }
1214  else if (attrName == "NUMPOINTSPERDIR")
1215  {
1216  std::string numPointsPerDir;
1217  field->GetAttribute(attrName, numPointsPerDir);
1218  def->m_numPointsDef = true;
1219 
1220  bool valid = ParseUtils::GenerateVector(
1221  numPointsPerDir, def->m_numPoints);
1222  ASSERTL0(valid,
1223  prfx.str() +
1224  "unable to correctly parse the number of points.");
1225  }
1226  else
1227  {
1228  std::string errstr("unknown attribute: ");
1229  errstr += attrName;
1230  ASSERTL1(false, prfx.str() + errstr.c_str());
1231  }
1232  }
1233 
1234  if (def->m_numHomogeneousDir >= 1)
1235  {
1236  H5::DataSetSharedPtr homz_dset = root->OpenDataSet("HOMOGENEOUSZIDS");
1237  H5::DataSpaceSharedPtr homz_fspace = homz_dset->GetSpace();
1238  uint64_t dsize = decomps[decomp * MAX_DCMPS + HOMZ_DCMP_IDX];
1239  homz_fspace->SelectRange(offset.homz, dsize);
1240  homz_dset->Read(def->m_homogeneousZIDs, homz_fspace, readPL);
1241  }
1242 
1243  if (def->m_numHomogeneousDir >= 2)
1244  {
1245  H5::DataSetSharedPtr homy_dset = root->OpenDataSet("HOMOGENEOUSYIDS");
1246  H5::DataSpaceSharedPtr homy_fspace = homy_dset->GetSpace();
1247  uint64_t dsize = decomps[decomp * MAX_DCMPS + HOMY_DCMP_IDX];
1248  homy_fspace->SelectRange(offset.homy, dsize);
1249  homy_dset->Read(def->m_homogeneousYIDs, homy_fspace, readPL);
1250  }
1251 
1252  if (def->m_homoStrips)
1253  {
1254  H5::DataSetSharedPtr homs_dset = root->OpenDataSet("HOMOGENEOUSSIDS");
1255  H5::DataSpaceSharedPtr homs_fspace = homs_dset->GetSpace();
1256  uint64_t dsize = decomps[decomp * MAX_DCMPS + HOMS_DCMP_IDX];
1257  homs_fspace->SelectRange(offset.homs, dsize);
1258  homs_dset->Read(def->m_homogeneousSIDs, homs_fspace, readPL);
1259  }
1260 
1261  if (!def->m_uniOrder)
1262  {
1263  H5::DataSetSharedPtr order_dset = root->OpenDataSet("POLYORDERS");
1264  H5::DataSpaceSharedPtr order_fspace = order_dset->GetSpace();
1265  uint64_t dsize = decomps[decomp * MAX_DCMPS + ORDER_DCMP_IDX];
1266  order_fspace->SelectRange(offset.order, dsize);
1267  order_dset->Read(def->m_numModes, order_fspace, readPL);
1268  }
1269 }
1270 
1271 /**
1272  * @brief Import the field data from the HDF5 document.
1273  *
1274  * @param readPL Reading parameter list.
1275  * @param data_dset Pointer to the `DATA` dataset.
1276  * @param data_fspace Pointer to the `DATA` data space.
1277  * @param data_i Index in the `DATA` dataset to start reading from.
1278  * @param decomps Information from the `DECOMPOSITION` dataset.
1279  * @param decomp Index of the decomposition.
1280  * @param fielddef Field definitions for this file
1281  * @param fielddata On return contains resulting field data.
1282  */
1283 void FieldIOHdf5::ImportFieldData(
1284  H5::PListSharedPtr readPL,
1285  H5::DataSetSharedPtr data_dset,
1286  H5::DataSpaceSharedPtr data_fspace,
1287  uint64_t data_i,
1288  std::vector<uint64_t> &decomps,
1289  uint64_t decomp,
1290  const FieldDefinitionsSharedPtr fielddef,
1291  std::vector<NekDouble> &fielddata)
1292 {
1293  std::stringstream prfx;
1294  prfx << m_comm->GetRank() << ": FieldIOHdf5::ImportFieldData(): ";
1295 
1296  uint64_t nElemVals = decomps[decomp * MAX_DCMPS + VAL_DCMP_IDX];
1297  uint64_t nFieldVals = nElemVals;
1298 
1299  data_fspace->SelectRange(data_i, nFieldVals);
1300  data_dset->Read(fielddata, data_fspace, readPL);
1301  int datasize = CheckFieldDefinition(fielddef);
1302  ASSERTL0(
1303  fielddata.size() == datasize * fielddef->m_fields.size(),
1304  prfx.str() +
1305  "input data is not the same length as header information.");
1306 }
1307 
1308 /**
1309  * @brief Import field metadata from @p filename and return the data source
1310  * which wraps @p filename.
1311  *
1312  * @param filename Input filename.
1313  * @param fieldmetadatamap Resulting field metadata from @p dataSource.
1314  */
1315 DataSourceSharedPtr FieldIOHdf5::v_ImportFieldMetaData(
1316  const std::string &filename,
1317  FieldMetaDataMap &fieldmetadatamap)
1318 {
1319  H5::PListSharedPtr parallelProps = H5::PList::Default();
1320  DataSourceSharedPtr ans = H5DataSource::create(filename, parallelProps);
1321  ImportHDF5FieldMetaData(ans, fieldmetadatamap);
1322  return ans;
1323 }
1324 
1325 /**
1326  * @brief Import field metadata from @p dataSource.
1327  *
1328  * @param dataSource Input datasource, which should be a H5DataSource.
1329  * @param fieldmetadatamap Resulting field metadata from @p dataSource.
1330  */
1331 void FieldIOHdf5::ImportHDF5FieldMetaData(DataSourceSharedPtr dataSource,
1332  FieldMetaDataMap &fieldmetadatamap)
1333 {
1334  H5DataSourceSharedPtr hdf =
1335  std::static_pointer_cast<H5DataSource>(dataSource);
1336 
1337  H5::GroupSharedPtr master = hdf->Get()->OpenGroup("NEKTAR");
1338  // New metadata format only in HDF
1339  H5::GroupSharedPtr metadata = master->OpenGroup("Metadata");
1340 
1341  if (metadata)
1342  {
1343  H5::Group::AttrIterator param = metadata->attr_begin(),
1344  pEnd = metadata->attr_end();
1345  for (; param != pEnd; ++param)
1346  {
1347  std::string paramString = *param;
1348  if (paramString != "Provenance")
1349  {
1350  std::string paramBodyStr;
1351  metadata->GetAttribute(paramString, paramBodyStr);
1352  fieldmetadatamap[paramString] = paramBodyStr;
1353  }
1354  }
1355  }
1356 }
1357 
1358 }
1359 }