Nektar++
Tester.cpp.in
1///////////////////////////////////////////////////////////////////////////////
2//
3// File: Tester.cpp
4//
5// For more information, please see: http://www.nektar.info
6//
7// The MIT License
8//
9// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10// Department of Aeronautics, Imperial College London (UK), and Scientific
11// Computing and Imaging Institute, University of Utah (USA).
12//
13// Permission is hereby granted, free of charge, to any person obtaining a
14// copy of this software and associated documentation files (the "Software"),
15// to deal in the Software without restriction, including without limitation
16// the rights to use, copy, modify, merge, publish, distribute, sublicense,
17// and/or sell copies of the Software, and to permit persons to whom the
18// Software is furnished to do so, subject to the following conditions:
19//
20// The above copyright notice and this permission notice shall be included
21// in all copies or substantial portions of the Software.
22//
23// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
24// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29// DEALINGS IN THE SOFTWARE.
30//
31// Description: Tester executable.
32//
33///////////////////////////////////////////////////////////////////////////////
34
35/**
36 * @file Tester.cpp.in
37 * @brief This file contains the main function for the Tester program, which is
38 * a tool for testing Nektar++ executables.
39 *
40 * The main function reads command line options and parses the provided test
41 * (.tst) file. Using information provided in this file, the Tester program
42 * generates test metrics, and creates temporary subdirectories in which to run
43 * the executable. All test outputs are appended to a single @p master.out file,
44 * and errors are appended to @p master.err. These files are sent to all of the
45 * metrics for analysis. If the test fails, the output and error files are
46 * dumped to the terminal for debugging purposes.
47 *
48 * @see Metric
49 * @see Metric#Test
50 */
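//
// A sketch of typical invocations (file names here are hypothetical; the
// options correspond to those registered with boost::program_options below):
//
//   Tester MyTest.tst                          run the test and check metrics
//   Tester --verbose MyTest.tst                as above, reporting progress
//   Tester -g 1 -g 2 MyTest.tst                regenerate metrics 1 and 2 only
//   Tester --generate-all-metrics MyTest.tst   regenerate all reference values
//                                              and save them to the .tst file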
51
52#include <chrono>
53#include <fstream>
54#include <iostream>
55#include <string>
56#include <thread>
57#include <vector>
58
59#include <Metric.h>
60#include <TestData.h>
61
62#include <LibUtilities/BasicUtils/Filesystem.hpp>
63
64#include <boost/program_options.hpp>
65
66#cmakedefine NEKTAR_TEST_FORCEMPIEXEC 1
67
68using namespace std;
69using namespace Nektar;
70
71// Define some namespace aliases
72namespace po = boost::program_options;
73
74int main(int argc, char *argv[])
75{
76 int status = 0;
77 string command;
78
79 // Set up command line options.
80 po::options_description desc("Available options");
81 desc.add_options()
82 ("help,h", "Produce this help message.")
83 ("verbose,v", "Turn on verbosity.")
84 ("generate-metric,g", po::value<vector<int>>(),
85 "Generate a single metric.")
86 ("generate-all-metrics,a", "Generate all metrics.")
87 ("executable,e", po::value<string>(),
88 "Use specified executable.");
89
90 po::options_description hidden("Hidden options");
91 hidden.add_options()("input-file", po::value<string>(), "Input filename");
92
93 po::options_description cmdline_options("Command-line options");
94 cmdline_options.add(hidden).add(desc);
95
96 po::options_description visible("Allowed options");
97 visible.add(desc);
98
99 po::positional_options_description p;
100 p.add("input-file", -1);
101
102 po::variables_map vm;
103
104 try
105 {
106 po::store(po::command_line_parser(argc, argv)
107 .options(cmdline_options)
108 .positional(p)
109 .run(),
110 vm);
111 po::notify(vm);
112 }
113 catch (const exception &e)
114 {
115 cerr << e.what() << endl;
116 cerr << desc;
117 return 1;
118 }
119
120 if (vm.count("help") || vm.count("input-file") != 1)
121 {
122 cerr << "Usage: Tester [options] input-file.tst" << endl;
123 cout << desc;
124 return 1;
125 }
126
127 bool verbose = vm.count("verbose");
128
129 // Set up set containing metrics to be generated.
130 vector<int> metricGenVec;
131 if (vm.count("generate-metric"))
132 {
133 metricGenVec = vm["generate-metric"].as<vector<int>>();
134 }
135 set<int> metricGen(metricGenVec.begin(), metricGenVec.end());
136
137 // Path to test definition file
138 const fs::path specFile(vm["input-file"].as<string>());
139
140 // Parent path of test definition file containing dependent files
141 fs::path specPath = specFile.parent_path();
142
143 if (specPath.empty())
144 {
145 specPath = fs::current_path();
146 }
147
148 string specFileStem = specFile.stem().string();
149
150 // Temporary master directory to create which holds master output and error
151 // files, and the working directories for each run
152 const fs::path masterDir =
153 fs::current_path() / LibUtilities::UniquePath(specFileStem);
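// Illustrative layout only (the actual directory name is randomised by
// LibUtilities::UniquePath from the spec file stem); for a test MyTest.tst
// the structure built below is of the form:
//
//   <start dir>/<unique dir based on "MyTest">/
//       master.out      stdout of every run, appended in order
//       master.err      stderr of every run, appended in order
//       run0/, run1/    per-run working directories with copied input files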
154
155 // The current directory
156 const fs::path startDir = fs::current_path();
157
158 try
159 {
160 if (verbose)
161 {
162 cerr << "Reading test file definition: " << specFile << endl;
163 }
164
165 // Parse the test file
166 TestData file(specFile, vm);
167
168 if (verbose && file.GetNumMetrics() > 0)
169 {
170 cerr << "Creating metrics:" << endl;
171 }
172
173 // Generate the metric objects
174 vector<MetricSharedPtr> metrics;
175 for (unsigned int i = 0; i < file.GetNumMetrics(); ++i)
176 {
177 set<int>::iterator it = metricGen.find(file.GetMetricId(i));
178 bool genMetric =
179 it != metricGen.end() || (vm.count("generate-all-metrics") > 0);
180
181 metrics.push_back(GetMetricFactory().CreateInstance(
182 file.GetMetricType(i), file.GetMetric(i), genMetric));
183
184 if (verbose)
185 {
186 cerr << " - ID " << metrics.back()->GetID() << ": "
187 << metrics.back()->GetType() << endl;
188 }
189
190 if (it != metricGen.end())
191 {
192 metricGen.erase(it);
193 }
194 }
195
196 if (metricGen.size() != 0)
197 {
198 string s = metricGen.size() == 1 ? "" : "s";
199 set<int>::iterator it;
200 cerr << "Unable to find metric" + s + " with ID" + s + " ";
201 for (it = metricGen.begin(); it != metricGen.end(); ++it)
202 {
203 cerr << *it << " ";
204 }
205 cerr << endl;
206 return 1;
207 }
208
209 // Remove the master directory if left from a previous test
210 if (fs::exists(masterDir))
211 {
212 fs::remove_all(masterDir);
213 }
214
215 if (verbose)
216 {
217 cerr << "Creating master directory: " << masterDir << endl;
218 }
219
220 // Create the master directory
221 fs::create_directory(masterDir);
222
223 // Change working directory to the master directory
224 fs::current_path(masterDir);
225
226 // Create a master output and error file. Output and error files from
227 // all runs will be appended to these files.
228 fstream masterOut("master.out", ios::out | ios::in | ios::trunc);
229 fstream masterErr("master.err", ios::out | ios::in | ios::trunc);
230
231 if (masterOut.bad() || masterErr.bad())
232 {
233 cerr << "One or more master output files are unreadable." << endl;
234 throw 1;
235 }
236
237 // Vector of temporary subdirectories to create and conduct tests in
238 vector<fs::path> tmpWorkingDirs;
239 string line;
240
241 for (unsigned int i = 0; i < file.GetNumRuns(); ++i)
242 {
243 command = "";
244
245 if (verbose)
246 {
247 cerr << "Starting run " << i << "." << endl;
248 }
249
250 // Temporary directory to create and in which to hold the run
251 const fs::path tmpDir =
252 masterDir / fs::path("run" + std::to_string(i));
253 tmpWorkingDirs.push_back(tmpDir);
254
255 if (verbose)
256 {
257 cerr << "Creating working directory: " << tmpDir << endl;
258 }
259
260 // Create temporary directory
261 fs::create_directory(tmpDir);
262
263 // Change working directory to the temporary directory
264 fs::current_path(tmpDir);
265
266 if (verbose && file.GetNumDependentFiles())
267 {
268 cerr << "Copying required files: " << endl;
269 }
270
271 // Copy required files for this test from the test definition
272 // directory to the temporary directory.
273 for (unsigned int j = 0; j < file.GetNumDependentFiles(); ++j)
274 {
275 fs::path source_file(file.GetDependentFile(j).m_filename);
276
277 fs::path source = specPath / source_file;
278 fs::path dest = tmpDir / source_file.filename();
279 if (verbose)
280 {
281 cerr << " - " << source << " -> " << dest << endl;
282 }
283
284 if (fs::is_directory(source))
285 {
286 fs::create_directory(dest);
287 // If the source is a directory, only the directory itself is
288 // created above, so copy each contained file explicitly.
289 for (const auto &dirEnt :
290 fs::recursive_directory_iterator{source})
291 {
292 fs::path newdest = dest / dirEnt.path().filename();
293 fs::copy_file(dirEnt.path(), newdest);
294 }
295 }
296 else
297 {
298 fs::copy_file(source, dest);
299 }
300 }
301
302 // Copy the opt file, if it exists, to the temporary directory.
303 fs::path source_file("test.opt");
304 fs::path source = specPath / source_file;
305 bool HaveOptFile = false;
306 if (fs::exists(source))
307 {
308 fs::path dest = tmpDir / source_file.filename();
309 if (verbose)
310 {
311 cerr << " - " << source << " -> " << dest << endl;
312 }
313
314 if (fs::is_directory(source))
315 {
316 fs::create_directory(dest);
317 // If the source is a directory, only the directory itself is
318 // created above, so copy each contained file explicitly.
319 for (const auto &dirEnt :
320 fs::recursive_directory_iterator{source})
321 {
322 fs::path newdest = dest / dirEnt.path().filename();
323 fs::copy_file(dirEnt.path(), newdest);
324 }
325 }
326 else
327 {
328 fs::copy_file(source, dest);
329 }
330
331 HaveOptFile = true;
332 }
333
334 // For Python tests, the script path is resolved when the command is constructed below.
335
336 // Construct test command to run. Output from stdout and stderr are
337 // directed to the files output.out and output.err, respectively.
338
339 bool pythonAdded = false, mpiAdded = false;
340 for (unsigned int j = 0; j < file.GetNumCommands(); ++j)
341 {
342 Command cmd = file.GetCommand(j);
343 if (cmd.m_pythonTest && !pythonAdded)
344 {
345 // Prepend PYTHONPATH to the very start of the command.
346 command = "PYTHONPATH=\"@CMAKE_BINARY_DIR@\" " + command;
347 pythonAdded = true;
348 }
349
350#ifdef NEKTAR_TEST_FORCEMPIEXEC
351#else
352 if (cmd.m_processes > 1 || file.GetNumCommands() > 1)
353#endif
354 {
355 if (mpiAdded)
356 {
357 continue;
358 }
359
360 command += "\"@MPIEXEC@\" ";
361 if (std::string("@NEKTAR_TEST_USE_HOSTFILE@") == "ON")
362 {
363 command += "-hostfile hostfile ";
364 if (system("echo 'localhost slots=12' > hostfile"))
365 {
366 cerr << "Unable to write 'hostfile' in path '"
367 << fs::current_path() << "'" << endl;
368 status = 1;
369 }
370 }
371
372 if (file.GetNumCommands() > 1)
373 {
374 command += "--tag-output ";
375 }
376
377 mpiAdded = true;
378 }
379 }
380
381 // Parse commands.
382 for (unsigned int j = 0; j < file.GetNumCommands(); ++j)
383 {
384 Command cmd = file.GetCommand(j);
385
386 // If running with multiple commands simultaneously, separate
387 // with colon.
388 if (j > 0)
389 {
390 command += " : ";
391 }
392
393 // Add -n where appropriate.
394 if (file.GetNumCommands() > 1 || cmd.m_processes > 1)
395 {
396 command += "@MPIEXEC_NUMPROC_FLAG@ ";
397 command += std::to_string(cmd.m_processes) + " ";
398 }
399
400 // Look for executable or Python script.
401 fs::path execPath = startDir / cmd.m_executable;
402 if (!fs::exists(execPath))
403 {
404 ASSERTL0(!cmd.m_pythonTest, "Python script not found.");
405 execPath = cmd.m_executable;
406 }
407
408 // Prepend script name with Python executable path if this is a
409 // Python test.
410 if (cmd.m_pythonTest)
411 {
412 command += "@PYTHON_EXECUTABLE@ ";
413 }
414
415 command += LibUtilities::PortablePath(execPath);
416 if (HaveOptFile)
417 {
418 command += " --use-opt-file test.opt ";
419 }
420
421 command += " ";
422 command += cmd.m_parameters;
423 command += " 1>output.out 2>output.err";
424 }
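// As an illustration only (solver name, session file and the expanded CMake
// variables are hypothetical), a single command run on two processes
// typically produces a command string of the form:
//
//   "<mpiexec>" -n 2 /path/to/SomeSolver session.xml 1>output.out 2>output.err
//
// Python tests are additionally prefixed with PYTHONPATH and the Python
// interpreter configured at build time.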
425
426 status = 0;
427
428 if (verbose)
429 {
430 cerr << "Running command: " << command << endl;
431 }
432
433 // Run executable to perform test.
434 if (system(command.c_str()))
435 {
436 cerr << "Error occurred running test:" << endl;
437 cerr << "Command: " << command << endl;
438 status = 1;
439 }
440
441 // Check output files exist
442 if (!(fs::exists("output.out") && fs::exists("output.err")))
443 {
444 cerr << "One or more test output files are missing." << endl;
445 throw 1;
446 }
447
448 // Open output files and check they are readable
449 ifstream vStdout("output.out");
450 ifstream vStderr("output.err");
451 if (vStdout.bad() || vStderr.bad())
452 {
453 cerr << "One or more test output files are unreadable." << endl;
454 throw 1;
455 }
456
457 // Append output to the master output and error files.
458 if (verbose)
459 {
460 cerr << "Appending run " << i << " output and error to master."
461 << endl;
462 }
463
464 while (getline(vStdout, line))
465 {
466 masterOut << line << endl;
467 }
468
469 while (getline(vStderr, line))
470 {
471 masterErr << line << endl;
472 }
473
474 vStdout.close();
475 vStderr.close();
476 }
477
478 // Warn user if any metrics don't support multiple runs.
479 for (int i = 0; i < metrics.size(); ++i)
480 {
481 if (!metrics[i]->SupportsAverage() && file.GetNumRuns() > 1)
482 {
483 cerr << "WARNING: Metric " << metrics[i]->GetType()
484 << " does not support multiple runs. Test may yield "
485 "unexpected results."
486 << endl;
487 }
488 }
489
490 // Test against all metrics
491 if (status == 0)
492 {
493 if (verbose && metrics.size())
494 {
495 cerr << "Checking metrics:" << endl;
496 }
497
498 for (int i = 0; i < metrics.size(); ++i)
499 {
500 bool gen =
501 metricGen.find(metrics[i]->GetID()) != metricGen.end() ||
502 (vm.count("generate-all-metrics") > 0);
503
504 masterOut.clear();
505 masterErr.clear();
506 masterOut.seekg(0, ios::beg);
507 masterErr.seekg(0, ios::beg);
508
509 if (verbose)
510 {
511 cerr << " - " << (gen ? "generating" : "checking")
512 << " metric " << metrics[i]->GetID() << " ("
513 << metrics[i]->GetType() << ")... ";
514 }
515
516 if (!metrics[i]->Test(masterOut, masterErr))
517 {
518 status = 1;
519 if (verbose)
520 {
521 cerr << "failed!" << endl;
522 }
523 }
524 else if (verbose)
525 {
526 cerr << "passed" << endl;
527 }
528 }
529 }
530
531 if (verbose)
532 {
533 cerr << endl << endl;
534 }
535
536 // Dump output files to terminal for debugging purposes on fail.
537 if (status == 1 || verbose)
538 {
539 masterOut.clear();
540 masterErr.clear();
541 masterOut.seekg(0, ios::beg);
542 masterErr.seekg(0, ios::beg);
543
544 cout << "=== Output ===" << endl;
545 while (masterOut.good())
546 {
547 getline(masterOut, line);
548 cout << line << endl;
549 }
550 cout << "=== Errors ===" << endl;
551 while (masterErr.good())
552 {
553 getline(masterErr, line);
554 cout << line << endl;
555 }
556 }
557
558 // Close output files.
559 masterOut.close();
560 masterErr.close();
561
562 // Change back to the original path and delete temporary directory.
563 fs::current_path(startDir);
564
565 if (verbose)
566 {
567 cerr << "Removing working directory" << endl;
568 }
569
570 // Repeatedly try deleting directory with sleep for filesystems which
571 // work asynchronously. This allows time for the filesystem to register
572 // the output files are closed so they can be deleted and not cause a
573 // filesystem failure. Attempts made for 1 second.
574 int i = 1000;
575 while (i > 0)
576 {
577 try
578 {
579 // If delete successful, stop trying.
580 fs::remove_all(masterDir);
581 break;
582 }
583 catch (const fs::filesystem_error &e)
584 {
585 using namespace std::chrono_literals;
586 std::this_thread::sleep_for(1ms);
587 i--;
588 if (i > 0)
589 {
590 cout << "Locked files encountered. "
591 << "Retrying after 1ms..." << endl;
592 }
593 else
594 {
595 // If still failing after 1sec, we consider it a permanent
596 // filesystem error and abort.
597 throw e;
598 }
599 }
600 }
601
602 // Save any changes.
603 if (vm.count("generate-metric") > 0 ||
604 vm.count("generate-all-metrics") > 0)
605 {
606 file.SaveFile();
607 }
608
609 // Return status of test. 0 = PASS, 1 = FAIL
610 return status;
611 }
612 catch (const fs::filesystem_error &e)
613 {
614 cerr << "Filesystem operation error occurred:" << endl;
615 cerr << " " << e.what() << endl;
616 cerr << " Files left in " << masterDir.string() << endl;
617 }
618 catch (const TesterException &e)
619 {
620 cerr << "Error occurred during test:" << endl;
621 cerr << " " << e.what() << endl;
622 cerr << " Files left in " << masterDir.string() << endl;
623 }
624 catch (const std::exception &e)
625 {
626 cerr << "Unhandled exception during test:" << endl;
627 cerr << " " << e.what() << endl;
628 cerr << " Files left in " << masterDir.string() << endl;
629 }
630 catch (...)
631 {
632 cerr << "Unknown error during test" << endl;
633 cerr << " Files left in " << masterDir.string() << endl;
634 }
635
636 // If a system error, return 2
637 return 2;
638}
#define ASSERTL0(condition, msg)
Definition: ErrorUtil.hpp:208
int main(int argc, char *argv[])
Definition: Tester.cpp.in:74
MetricSharedPtr CreateInstance(std::string key, TiXmlElement *elmt, bool generate)
Definition: Metric.h:139
The TestData class is responsible for parsing a test XML file and storing the data.
Definition: TestData.h:71
DependentFile GetDependentFile(unsigned int pId) const
Definition: TestData.cpp:131
unsigned int GetMetricId(unsigned int pId)
Returns the ID of the metric at the given index.
Definition: TestData.cpp:123
unsigned int GetNumDependentFiles() const
Returns the number of dependent files required for the test.
Definition: TestData.cpp:138
unsigned int GetNumMetrics() const
Returns the number of metrics to be collected for the test.
Definition: TestData.cpp:109
unsigned int GetNumRuns() const
Returns the number of runs to be performed for the test.
Definition: TestData.cpp:144
TiXmlElement * GetMetric(unsigned int pId)
Returns a pointer to the TiXmlElement object representing the metric for a given metric ID.
Definition: TestData.cpp:116
unsigned int GetNumCommands() const
Definition: TestData.cpp:91
std::string GetMetricType(unsigned int pId) const
Returns the type of metric to be collected for a given metric ID.
Definition: TestData.cpp:97
const Command & GetCommand(unsigned int pId) const
Definition: TestData.cpp:84
static std::string PortablePath(const fs::path &path)
create portable path on different platforms for std::filesystem path.
Definition: Filesystem.hpp:56
static fs::path UniquePath(std::string specFileStem)
Create a unique (random) path, based on an input stem string. The returned string is a filename or di...
Definition: Filesystem.hpp:69
MetricFactory & GetMetricFactory()
Definition: Metric.cpp:42
bool m_pythonTest
Definition: TestData.h:62
fs::path m_executable
Definition: TestData.h:59
std::string m_parameters
Definition: TestData.h:60
unsigned int m_processes
Definition: TestData.h:61
std::string m_filename
Definition: TestData.h:54
Subclass of std::runtime_error to handle exceptions raised by Tester.