Nektar++
Tester.cpp.in
Go to the documentation of this file.
1///////////////////////////////////////////////////////////////////////////////
2//
3// File: Tester.cpp
4//
5// For more information, please see: http://www.nektar.info
6//
7// The MIT License
8//
9// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10// Department of Aeronautics, Imperial College London (UK), and Scientific
11// Computing and Imaging Institute, University of Utah (USA).
12//
13// Permission is hereby granted, free of charge, to any person obtaining a
14// copy of this software and associated documentation files (the "Software"),
15// to deal in the Software without restriction, including without limitation
16// the rights to use, copy, modify, merge, publish, distribute, sublicense,
17// and/or sell copies of the Software, and to permit persons to whom the
18// Software is furnished to do so, subject to the following conditions:
19//
20// The above copyright notice and this permission notice shall be included
21// in all copies or substantial portions of the Software.
22//
23// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
24// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29// DEALINGS IN THE SOFTWARE.
30//
31// Description: Tester executable.
32//
33///////////////////////////////////////////////////////////////////////////////
34
35/**
36 * @file Tester.cpp.in
37 * @brief This file contains the main function for the Tester program, which is
38 * a tool for testing Nektar++ executables.
39 *
40 * The main function reads command line options and parses the provided test
41 * (.tst) file. Using information provided in this file, the Tester program
42 * generates test metrics, and creates temporary subdirectories in which to run
43 * the executable. All test outputs are appended to a single @p master.out file,
44 * and errors are appended to @p master.err. These files are sent to all of the
45 * metrics for analysis. If the test fails, the output and error files are
46 * dumped to the terminal for debugging purposes.
47 *
48 * @see Metric
 * @see Metric#Test
50 */
51
#include <algorithm>
#include <chrono>
#include <fstream>
#include <iostream>
#include <set>
#include <string>
#include <thread>
#include <vector>

#include <Metric.h>
#include <TestData.h>

#include <LibUtilities/BasicUtils/Filesystem.hpp>

#include <boost/program_options.hpp>
67#cmakedefine NEKTAR_TEST_FORCEMPIEXEC 1
68
69using namespace std;
70using namespace Nektar;
71
72// Define some namespace aliases
73namespace po = boost::program_options;
74
#ifdef _WIN32
// Provide a POSIX-style setenv() on Windows, implemented on top of the
// secure CRT routines getenv_s()/_putenv_s().
int setenv(const char *name, const char *value, int overwrite)
{
    if (!overwrite)
    {
        // Respect an existing value: probe the variable and return early
        // if it is already set, or if the probe itself reported an error.
        size_t envsize = 0;
        const int errcode = getenv_s(&envsize, NULL, 0, name);
        if (errcode || envsize)
        {
            return errcode;
        }
    }
    return _putenv_s(name, value);
}
#endif
90
91int main(int argc, char *argv[])
92{
93 int status = 0;
94 string command;
95
96 // Set up command line options.
97 po::options_description desc("Available options");
98 desc.add_options()("help,h", "Produce this help message.")(
99 "verbose,v", "Turn on verbosity.")("generate-metric,g",
100 po::value<vector<int>>(),
101 "Generate a single metric.")(
102 "generate-all-metrics,a", "Generate all metrics.")(
103 "executable,e", po::value<string>(), "Use specified executable.");
104
105 po::options_description hidden("Hidden options");
106 hidden.add_options()("input-file", po::value<string>(), "Input filename");
107
108 po::options_description cmdline_options("Command-line options");
109 cmdline_options.add(hidden).add(desc);
110
111 po::options_description visible("Allowed options");
112 visible.add(desc);
113
114 po::positional_options_description p;
115 p.add("input-file", -1);
116
117 po::variables_map vm;
118
119 try
120 {
121 po::store(po::command_line_parser(argc, argv)
122 .options(cmdline_options)
123 .positional(p)
124 .run(),
125 vm);
126 po::notify(vm);
127 }
128 catch (const exception &e)
129 {
130 cerr << e.what() << endl;
131 cerr << desc;
132 return 1;
133 }
134
135 if (vm.count("help") || vm.count("input-file") != 1)
136 {
137 cerr << "Usage: Tester [options] input-file.tst" << endl;
138 cout << desc;
139 return 1;
140 }
141
142 bool verbose = vm.count("verbose");
143
144 // Set up set containing metrics to be generated.
145 vector<int> metricGenVec;
146 if (vm.count("generate-metric"))
147 {
148 metricGenVec = vm["generate-metric"].as<vector<int>>();
149 }
150 set<int> metricGen(metricGenVec.begin(), metricGenVec.end());
151
152 // Path to test definition file
153 const fs::path specFile(vm["input-file"].as<string>());
154
155 // Parent path of test definition file containing dependent files
156 fs::path specPath = specFile.parent_path();
157
158 if (specPath.empty())
159 {
160 specPath = fs::current_path();
161 }
162
163 string specFileStem = specFile.stem().string();
164
165 // Temporary master directory to create which holds master output and error
166 // files, and the working directories for each run
167 const fs::path masterDir =
168 fs::current_path() / LibUtilities::UniquePath(specFileStem);
169
170 // The current directory
171 const fs::path startDir = fs::current_path();
172
173 try
174 {
175 if (verbose)
176 {
177 cerr << "Reading test file definition: " << specFile << endl;
178 }
179
180 // Parse the test file
181 TestData file(specFile, vm);
182
183 if (verbose && file.GetNumMetrics() > 0)
184 {
185 cerr << "Creating metrics:" << endl;
186 }
187
188 // Generate the metric objects
189 vector<MetricSharedPtr> metrics;
190 for (unsigned int i = 0; i < file.GetNumMetrics(); ++i)
191 {
192 set<int>::iterator it = metricGen.find(file.GetMetricId(i));
193 bool genMetric =
194 it != metricGen.end() || (vm.count("generate-all-metrics") > 0);
195
196 metrics.push_back(GetMetricFactory().CreateInstance(
197 file.GetMetricType(i), file.GetMetric(i), genMetric));
198
199 if (verbose)
200 {
201 cerr << " - ID " << metrics.back()->GetID() << ": "
202 << metrics.back()->GetType() << endl;
203 }
204
205 if (it != metricGen.end())
206 {
207 metricGen.erase(it);
208 }
209 }
210
211 if (metricGen.size() != 0)
212 {
213 string s = metricGen.size() == 1 ? "s" : "";
214 set<int>::iterator it;
215 cerr << "Unable to find metric" + s + " with ID" + s + " ";
216 for (it = metricGen.begin(); it != metricGen.end(); ++it)
217 {
218 cerr << *it << " ";
219 }
220 cerr << endl;
221 return 1;
222 }
223
224 // Remove the master directory if left from a previous test
225 if (fs::exists(masterDir))
226 {
227 fs::remove_all(masterDir);
228 }
229
230 if (verbose)
231 {
232 cerr << "Creating master directory: " << masterDir << endl;
233 }
234
235 // Create the master directory
236 fs::create_directory(masterDir);
237
238 // Change working directory to the master directory
239 fs::current_path(masterDir);
240
241 // Create a master output and error file. Output and error files from
242 // all runs will be appended to these files.
243 fstream masterOut("master.out", ios::out | ios::in | ios::trunc);
244 fstream masterErr("master.err", ios::out | ios::in | ios::trunc);
245
246 if (masterOut.bad() || masterErr.bad())
247 {
248 cerr << "One or more master output files are unreadable." << endl;
249 throw 1;
250 }
251
252 // Vector of temporary subdirectories to create and conduct tests in
253 vector<fs::path> tmpWorkingDirs;
254 string line;
255
256 for (unsigned int i = 0; i < file.GetNumRuns(); ++i)
257 {
258 command = "";
259
260 if (verbose)
261 {
262 cerr << "Starting run " << i << "." << endl;
263 }
264
265 // Temporary directory to create and in which to hold the run
266 const fs::path tmpDir =
267 masterDir / fs::path("run" + std::to_string(i));
268 tmpWorkingDirs.push_back(tmpDir);
269
270 if (verbose)
271 {
272 cerr << "Creating working directory: " << tmpDir << endl;
273 }
274
275 // Create temporary directory
276 fs::create_directory(tmpDir);
277
278 // Change working directory to the temporary directory
279 fs::current_path(tmpDir);
280
281 if (verbose && file.GetNumDependentFiles())
282 {
283 cerr << "Copying required files: " << endl;
284 }
285
286 // Copy required files for this test from the test definition
287 // directory to the temporary directory.
288 for (unsigned int j = 0; j < file.GetNumDependentFiles(); ++j)
289 {
290 fs::path source_file(file.GetDependentFile(j).m_filename);
291
292 fs::path source = specPath / source_file;
293 fs::path dest = tmpDir / source_file.filename();
294 if (verbose)
295 {
296 cerr << " - " << source << " -> " << dest << endl;
297 }
298
299 if (fs::is_directory(source))
300 {
301 fs::create_directory(dest);
302 // If source is a directory, then only directory name is
303 // created, so call copy again to copy files.
304 for (const auto &dirEnt :
305 fs::recursive_directory_iterator{source})
306 {
307 fs::path newdest = dest / dirEnt.path().filename();
308 fs::copy_file(dirEnt.path(), newdest);
309 }
310 }
311 else
312 {
313 fs::copy_file(source, dest);
314 }
315 }
316
317 // Copy opt file if exists to to the temporary directory.
318 fs::path source_file("test.opt");
319 fs::path source = specPath / source_file;
320 bool HaveOptFile = false;
321 if (fs::exists(source))
322 {
323 fs::path dest = tmpDir / source_file.filename();
324 if (verbose)
325 {
326 cerr << " - " << source << " -> " << dest << endl;
327 }
328
329 if (fs::is_directory(source))
330 {
331 fs::create_directory(dest);
332 // If source is a directory, then only directory name is
333 // created, so call copy again to copy files.
334 for (const auto &dirEnt :
335 fs::recursive_directory_iterator{source})
336 {
337 fs::path newdest = dest / dirEnt.path().filename();
338 fs::copy_file(dirEnt.path(), newdest);
339 }
340 }
341 else
342 {
343 fs::copy_file(source, dest);
344 }
345
346 HaveOptFile = true;
347 }
348
349 // If we're Python, copy script too.
350
351 // Set PYTHONPATH environment variable in case Python is run inside
352 // any of our tests. For non-Python tests this will do nothing.
353 setenv("PYTHONPATH", "@NEKPY_BASE_DIR@", true);
354
355 // Construct test command to run. Output from stdout and stderr are
356 // directed to the files output.out and output.err, respectively.
357
358 bool mpiAdded = false;
359 for (unsigned int j = 0; j < file.GetNumCommands(); ++j)
360 {
361 Command cmd = file.GetCommand(j);
362
363#ifdef NEKTAR_TEST_FORCEMPIEXEC
364#else
365 if (cmd.m_processes > 1 || (file.GetNumCommands() > 1 &&
366 cmd.m_commandType == eParallel))
367#endif
368 {
369 if (mpiAdded)
370 {
371 continue;
372 }
373
374 command += "\"@MPIEXEC@\" ";
375 if (std::string("@NEKTAR_TEST_USE_HOSTFILE@") == "ON")
376 {
377 command += "-hostfile hostfile ";
378 if (system("echo 'localhost slots=12' > hostfile"))
379 {
380 cerr << "Unable to write 'hostfile' in path '"
381 << fs::current_path() << endl;
382 status = 1;
383 }
384 }
385
386 if (file.GetNumCommands() > 1)
387 {
388 command += "--tag-output ";
389 }
390
391 mpiAdded = true;
392 }
393 }
394
395 // Parse commands.
396 for (unsigned int j = 0; j < file.GetNumCommands(); ++j)
397 {
398 Command cmd = file.GetCommand(j);
399
400 // If running with multiple commands simultaneously, separate
401 // with colon.
402 if (j > 0 && cmd.m_commandType == eParallel)
403 {
404 command += " : ";
405 }
406 else if (j > 0 && cmd.m_commandType == eSequential)
407 {
408 command += " && ";
409 if (cmd.m_processes > 1)
410 {
411 command += "\"@MPIEXEC@\" ";
412 if (std::string("@NEKTAR_TEST_USE_HOSTFILE@") == "ON")
413 {
414 command += "-hostfile hostfile ";
415 }
416 }
417 }
418
419 // Add -n where appropriate.
420 if (cmd.m_processes > 1 || (file.GetNumCommands() > 1 &&
421 cmd.m_commandType == eParallel))
422 {
423 command += "@MPIEXEC_NUMPROC_FLAG@ ";
424 command += std::to_string(cmd.m_processes) + " ";
425 }
426
427 // Look for executable or Python script.
428 fs::path execPath = startDir / cmd.m_executable;
429 if (!fs::exists(execPath))
430 {
431 ASSERTL0(!cmd.m_pythonTest, "Python script not found.");
432 execPath = cmd.m_executable;
433 }
434
435 // Prepend script name with Python executable path if this is a
436 // Python test.
437 if (cmd.m_pythonTest)
438 {
439 command += "@PYTHON_EXECUTABLE@ ";
440 }
441
442 std::string pathString = LibUtilities::PortablePath(execPath);
443 command += pathString;
444 if (HaveOptFile)
445 {
446 command += " --use-opt-file test.opt ";
447 }
448
449 command += " ";
450 command += cmd.m_parameters;
451 command += " 1>output.out 2>output.err";
452 }
453
454 status = 0;
455
456 if (verbose)
457 {
458 cerr << "Running command: " << command << endl;
459 }
460
461 // Run executable to perform test.
462 if (system(command.c_str()))
463 {
464 cerr << "Error occurred running test:" << endl;
465 cerr << "Command: " << command << endl;
466 status = 1;
467 }
468
469 // Check output files exist
470 if (!(fs::exists("output.out") && fs::exists("output.err")))
471 {
472 cerr << "One or more test output files are missing." << endl;
473 throw 1;
474 }
475
476 // Open output files and check they are readable
477 ifstream vStdout("output.out");
478 ifstream vStderr("output.err");
479 if (vStdout.bad() || vStderr.bad())
480 {
481 cerr << "One or more test output files are unreadable." << endl;
482 throw 1;
483 }
484
485 // Append output to the master output and error files.
486 if (verbose)
487 {
488 cerr << "Appending run " << i << " output and error to master."
489 << endl;
490 }
491
492 while (getline(vStdout, line))
493 {
494 masterOut << line << endl;
495 }
496
497 while (getline(vStderr, line))
498 {
499 masterErr << line << endl;
500 }
501
502 vStdout.close();
503 vStderr.close();
504 }
505
506 // Warn user if any metrics don't support multiple runs.
507 for (int i = 0; i < metrics.size(); ++i)
508 {
509 if (!metrics[i]->SupportsAverage() && file.GetNumRuns() > 1)
510 {
511 cerr << "WARNING: Metric " << metrics[i]->GetType()
512 << " does not support multiple runs. Test may yield "
513 "unexpected results."
514 << endl;
515 }
516 }
517
518 // Test against all metrics
519 if (status == 0)
520 {
521 if (verbose && metrics.size())
522 {
523 cerr << "Checking metrics:" << endl;
524 }
525
526 for (int i = 0; i < metrics.size(); ++i)
527 {
528 bool gen =
529 metricGen.find(metrics[i]->GetID()) != metricGen.end() ||
530 (vm.count("generate-all-metrics") > 0);
531
532 masterOut.clear();
533 masterErr.clear();
534 masterOut.seekg(0, ios::beg);
535 masterErr.seekg(0, ios::beg);
536
537 if (verbose)
538 {
539 cerr << " - " << (gen ? "generating" : "checking")
540 << " metric " << metrics[i]->GetID() << " ("
541 << metrics[i]->GetType() << ")... ";
542 }
543
544 if (!metrics[i]->Test(masterOut, masterErr))
545 {
546 status = 1;
547 if (verbose)
548 {
549 cerr << "failed!" << endl;
550 }
551 }
552 else if (verbose)
553 {
554 cerr << "passed" << endl;
555 }
556 }
557 }
558
559 if (verbose)
560 {
561 cerr << endl << endl;
562 }
563
564 // Dump output files to terminal for debugging purposes on fail.
565 if (status == 1 || verbose)
566 {
567 masterOut.clear();
568 masterErr.clear();
569 masterOut.seekg(0, ios::beg);
570 masterErr.seekg(0, ios::beg);
571
572 cout << "=== Output ===" << endl;
573 while (masterOut.good())
574 {
575 getline(masterOut, line);
576 cout << line << endl;
577 }
578 cout << "=== Errors ===" << endl;
579 while (masterErr.good())
580 {
581 getline(masterErr, line);
582 cout << line << endl;
583 }
584 }
585
586 // Close output files.
587 masterOut.close();
588 masterErr.close();
589
590 // Change back to the original path and delete temporary directory.
591 fs::current_path(startDir);
592
593 if (verbose)
594 {
595 cerr << "Removing working directory" << endl;
596 }
597
598 // Repeatedly try deleting directory with sleep for filesystems which
599 // work asynchronously. This allows time for the filesystem to register
600 // the output files are closed so they can be deleted and not cause a
601 // filesystem failure. Attempts made for 1 second.
602 int i = 1000;
603 while (i > 0)
604 {
605 try
606 {
607 // If delete successful, stop trying.
608 fs::remove_all(masterDir);
609 break;
610 }
611 catch (const fs::filesystem_error &e)
612 {
613 using namespace std::chrono_literals;
614 std::this_thread::sleep_for(1ms);
615 i--;
616 if (i > 0)
617 {
618 cout << "Locked files encountered. "
619 << "Retrying after 1ms..." << endl;
620 }
621 else
622 {
623 // If still failing after 1sec, we consider it a permanent
624 // filesystem error and abort.
625 throw e;
626 }
627 }
628 }
629
630 // Save any changes.
631 if (vm.count("generate-metric") > 0 ||
632 vm.count("generate-all-metrics") > 0)
633 {
634 file.SaveFile();
635 }
636
637 // Return status of test. 0 = PASS, 1 = FAIL
638 return status;
639 }
640 catch (const fs::filesystem_error &e)
641 {
642 cerr << "Filesystem operation error occurred:" << endl;
643 cerr << " " << e.what() << endl;
644 cerr << " Files left in " << masterDir.string() << endl;
645 }
646 catch (const TesterException &e)
647 {
648 cerr << "Error occurred during test:" << endl;
649 cerr << " " << e.what() << endl;
650 cerr << " Files left in " << masterDir.string() << endl;
651 }
652 catch (const std::exception &e)
653 {
654 cerr << "Unhandled exception during test:" << endl;
655 cerr << " " << e.what() << endl;
656 cerr << " Files left in " << masterDir.string() << endl;
657 }
658 catch (...)
659 {
660 cerr << "Unknown error during test" << endl;
661 cerr << " Files left in " << masterDir.string() << endl;
662 }
663
664 // If a system error, return 2
665 return 2;
666}
#define ASSERTL0(condition, msg)
Definition: ErrorUtil.hpp:208
int main(int argc, char *argv[])
Definition: Tester.cpp.in:91
MetricSharedPtr CreateInstance(std::string key, TiXmlElement *elmt, bool generate)
Definition: Metric.h:139
The TestData class is responsible for parsing a test XML file and storing the data.
Definition: TestData.h:79
DependentFile GetDependentFile(unsigned int pId) const
Definition: TestData.cpp:131
unsigned int GetMetricId(unsigned int pId)
Returns the ID of the metric at a given index in the test definition.
Definition: TestData.cpp:123
unsigned int GetNumDependentFiles() const
Returns the number of dependent files required for the test.
Definition: TestData.cpp:138
unsigned int GetNumMetrics() const
Returns the number of metrics to be collected for the test.
Definition: TestData.cpp:109
unsigned int GetNumRuns() const
Returns the number of runs to be performed for the test.
Definition: TestData.cpp:144
TiXmlElement * GetMetric(unsigned int pId)
Returns a pointer to the TiXmlElement object representing the metric for a given metric ID.
Definition: TestData.cpp:116
unsigned int GetNumCommands() const
Definition: TestData.cpp:91
std::string GetMetricType(unsigned int pId) const
Returns the type of metric to be collected for a given metric ID.
Definition: TestData.cpp:97
const Command & GetCommand(unsigned int pId) const
Definition: TestData.cpp:84
static std::string PortablePath(const fs::path &path)
create portable path on different platforms for std::filesystem path.
Definition: Filesystem.hpp:56
static fs::path UniquePath(std::string specFileStem)
Create a unique (random) path, based on an input stem string. The returned string is a filename or di...
Definition: Filesystem.hpp:69
MetricFactory & GetMetricFactory()
Definition: Metric.cpp:42
@ eSequential
Definition: TestData.h:60
@ eParallel
Definition: TestData.h:61
STL namespace.
bool m_pythonTest
Definition: TestData.h:69
fs::path m_executable
Definition: TestData.h:66
std::string m_parameters
Definition: TestData.h:67
CommandType m_commandType
Definition: TestData.h:70
unsigned int m_processes
Definition: TestData.h:68
std::string m_filename
Definition: TestData.h:54
Subclass of std::runtime_error to handle exceptions raised by Tester.