Nektar++
Tester.cpp.in
Go to the documentation of this file.
1///////////////////////////////////////////////////////////////////////////////
2//
3// File: Tester.cpp
4//
5// For more information, please see: http://www.nektar.info
6//
7// The MIT License
8//
9// Copyright (c) 2006 Division of Applied Mathematics, Brown University (USA),
10// Department of Aeronautics, Imperial College London (UK), and Scientific
11// Computing and Imaging Institute, University of Utah (USA).
12//
13// Permission is hereby granted, free of charge, to any person obtaining a
14// copy of this software and associated documentation files (the "Software"),
15// to deal in the Software without restriction, including without limitation
16// the rights to use, copy, modify, merge, publish, distribute, sublicense,
17// and/or sell copies of the Software, and to permit persons to whom the
18// Software is furnished to do so, subject to the following conditions:
19//
20// The above copyright notice and this permission notice shall be included
21// in all copies or substantial portions of the Software.
22//
23// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
24// OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
25// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
26// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
27// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
28// FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
29// DEALINGS IN THE SOFTWARE.
30//
31// Description: Tester executable.
32//
33///////////////////////////////////////////////////////////////////////////////
34
35/**
36 * @file Tester.cpp.in
37 * @brief This file contains the main function for the Tester program, which is
38 * a tool for testing Nektar++ executables.
39 *
40 * The main function reads command line options and parses the provided test
41 * (.tst) file. Using information provided in this file, the Tester program
42 * generates test metrics, and creates temporary subdirectories in which to run
43 * the executable. All test outputs are appended to a single @p master.out file,
44 * and errors are appended to @p master.err. These files are sent to all of the
45 * metrics for analysis. If the test fails, the output and error files are
46 * dumped to the terminal for debugging purposes.
47 *
48 * @see Metric
 49 * @see Metric#Test
50 */
51
52#include <fstream>
53#include <iostream>
54#include <string>
55#include <vector>
56
57#include <Metric.h>
58#include <TestData.h>
59
60#include <boost/program_options.hpp>
61#include <boost/thread.hpp>
62
63#cmakedefine NEKTAR_TEST_FORCEMPIEXEC 1
64
65using namespace std;
66using namespace Nektar;
67
68// Define some namespace aliases
69namespace po = boost::program_options;
70
71std::string PortablePath(const boost::filesystem::path &path)
72{
73 boost::filesystem::path temp = path;
74 temp.make_preferred();
75 return temp.string();
76}
77
78int main(int argc, char *argv[])
79{
80 int status = 0;
81 string command;
82
83 // Set up command line options.
84 po::options_description desc("Available options");
85 desc.add_options()
86 ("help,h", "Produce this help message.")
87 ("verbose,v", "Turn on verbosity.")
88 ("generate-metric,g", po::value<vector<int>>(),
89 "Generate a single metric.")
90 ("generate-all-metrics,a", "Generate all metrics.")
91 ("executable,e", po::value<string>(),
92 "Use specified executable.");
93
94 po::options_description hidden("Hidden options");
95 hidden.add_options()("input-file", po::value<string>(), "Input filename");
96
97 po::options_description cmdline_options("Command-line options");
98 cmdline_options.add(hidden).add(desc);
99
100 po::options_description visible("Allowed options");
101 visible.add(desc);
102
103 po::positional_options_description p;
104 p.add("input-file", -1);
105
106 po::variables_map vm;
107
108 try
109 {
110 po::store(po::command_line_parser(argc, argv)
111 .options(cmdline_options)
112 .positional(p)
113 .run(),
114 vm);
115 po::notify(vm);
116 }
117 catch (const exception &e)
118 {
119 cerr << e.what() << endl;
120 cerr << desc;
121 return 1;
122 }
123
124 if (vm.count("help") || vm.count("input-file") != 1)
125 {
126 cerr << "Usage: Tester [options] input-file.tst" << endl;
127 cout << desc;
128 return 1;
129 }
130
131 bool verbose = vm.count("verbose");
132
133 // Set up set containing metrics to be generated.
134 vector<int> metricGenVec;
135 if (vm.count("generate-metric"))
136 {
137 metricGenVec = vm["generate-metric"].as<vector<int>>();
138 }
139 set<int> metricGen(metricGenVec.begin(), metricGenVec.end());
140
141 // Path to test definition file
142 const fs::path specFile(vm["input-file"].as<string>());
143
144 // Parent path of test definition file containing dependent files
145 fs::path specPath = specFile.parent_path();
146
147 if (specPath.empty())
148 {
149 specPath = fs::current_path();
150 }
151
152 string specFileStem = specFile.stem().string();
153
154 // Temporary master directory to create which holds master output and error
155 // files, and the working directories for each run
156 const fs::path masterDir =
157 fs::current_path() / fs::path("tmp_" + specFileStem + "_" +
158 fs::unique_path("%%%%%%").string());
159
160 // The current directory
161 const fs::path startDir = fs::current_path();
162
163 try
164 {
165 if (verbose)
166 {
167 cerr << "Reading test file definition: " << specFile << endl;
168 }
169
170 // Parse the test file
171 TestData file(specFile, vm);
172
173 if (verbose && file.GetNumMetrics() > 0)
174 {
175 cerr << "Creating metrics:" << endl;
176 }
177
178 // Generate the metric objects
179 vector<MetricSharedPtr> metrics;
180 for (unsigned int i = 0; i < file.GetNumMetrics(); ++i)
181 {
182 set<int>::iterator it = metricGen.find(file.GetMetricId(i));
183 bool genMetric =
184 it != metricGen.end() || (vm.count("generate-all-metrics") > 0);
185
186 metrics.push_back(GetMetricFactory().CreateInstance(
187 file.GetMetricType(i), file.GetMetric(i), genMetric));
188
189 if (verbose)
190 {
191 cerr << " - ID " << metrics.back()->GetID() << ": "
192 << metrics.back()->GetType() << endl;
193 }
194
195 if (it != metricGen.end())
196 {
197 metricGen.erase(it);
198 }
199 }
200
201 if (metricGen.size() != 0)
202 {
203 string s = metricGen.size() == 1 ? "s" : "";
204 set<int>::iterator it;
205 cerr << "Unable to find metric" + s + " with ID" + s + " ";
206 for (it = metricGen.begin(); it != metricGen.end(); ++it)
207 {
208 cerr << *it << " ";
209 }
210 cerr << endl;
211 return 1;
212 }
213
214 // Remove the master directory if left from a previous test
215 if (fs::exists(masterDir))
216 {
217 fs::remove_all(masterDir);
218 }
219
220 if (verbose)
221 {
222 cerr << "Creating master directory: " << masterDir << endl;
223 }
224
225 // Create the master directory
226 fs::create_directory(masterDir);
227
228 // Change working directory to the master directory
229 fs::current_path(masterDir);
230
231 // Create a master output and error file. Output and error files from
232 // all runs will be appended to these files.
233 fstream masterOut("master.out", ios::out | ios::in | ios::trunc);
234 fstream masterErr("master.err", ios::out | ios::in | ios::trunc);
235
236 if (masterOut.bad() || masterErr.bad())
237 {
238 cerr << "One or more master output files are unreadable." << endl;
239 throw 1;
240 }
241
242 // Vector of temporary subdirectories to create and conduct tests in
243 vector<fs::path> tmpWorkingDirs;
244 string line;
245
246 for (unsigned int i = 0; i < file.GetNumRuns(); ++i)
247 {
248 command = "";
249
250 if (verbose)
251 {
252 cerr << "Starting run " << i << "." << endl;
253 }
254
255 // Temporary directory to create and in which to hold the run
256 const fs::path tmpDir =
257 masterDir / fs::path("run" + std::to_string(i));
258 tmpWorkingDirs.push_back(tmpDir);
259
260 if (verbose)
261 {
262 cerr << "Creating working directory: " << tmpDir << endl;
263 }
264
265 // Create temporary directory
266 fs::create_directory(tmpDir);
267
268 // Change working directory to the temporary directory
269 fs::current_path(tmpDir);
270
271 if (verbose && file.GetNumDependentFiles())
272 {
273 cerr << "Copying required files: " << endl;
274 }
275
276 // Copy required files for this test from the test definition
277 // directory to the temporary directory.
278 for (unsigned int j = 0; j < file.GetNumDependentFiles(); ++j)
279 {
280 fs::path source_file(file.GetDependentFile(j).m_filename);
281
282 fs::path source = specPath / source_file;
283 fs::path dest = tmpDir / source_file.filename();
284 if (verbose)
285 {
286 cerr << " - " << source << " -> " << dest << endl;
287 }
288
289 if (fs::is_directory(source))
290 {
291 fs::copy_directory(source, dest);
292 // If source is a directory, then only directory name is
293 // created, so call copy again to copy files.
294 for (const auto &dirEnt :
295 fs::recursive_directory_iterator{source})
296 {
297 fs::path newdest = dest / dirEnt.path().filename();
298 fs::copy_file(dirEnt.path(), newdest);
299 }
300 }
301 else
302 {
303 fs::copy_file(source, dest);
304 }
305 }
306
307 // Copy opt file if exists to to the temporary directory.
308 fs::path source_file("test.opt");
309 fs::path source = specPath / source_file;
310 bool HaveOptFile = false;
311 if (fs::exists(source))
312 {
313 fs::path dest = tmpDir / source_file.filename();
314 if (verbose)
315 {
316 cerr << " - " << source << " -> " << dest << endl;
317 }
318
319 if (fs::is_directory(source))
320 {
321 fs::copy_directory(source, dest);
322 // If source is a directory, then only directory name is
323 // created, so call copy again to copy files.
324 for (const auto &dirEnt :
325 fs::recursive_directory_iterator{source})
326 {
327 fs::path newdest = dest / dirEnt.path().filename();
328 fs::copy_file(dirEnt.path(), newdest);
329 }
330 }
331 else
332 {
333 fs::copy_file(source, dest);
334 }
335
336 HaveOptFile = true;
337 }
338
339 // If we're Python, copy script too.
340
341 // Construct test command to run. Output from stdout and stderr are
342 // directed to the files output.out and output.err, respectively.
343
344 bool pythonAdded = false, mpiAdded = false;
345 for (unsigned int j = 0; j < file.GetNumCommands(); ++j)
346 {
347 Command cmd = file.GetCommand(j);
348 if (cmd.m_pythonTest && !pythonAdded)
349 {
350 // Prepend Python to very start of command.
351 command = "PYTHONPATH=\"@CMAKE_BINARY_DIR@\" " + command;
352 pythonAdded = true;
353 }
354
355#ifdef NEKTAR_TEST_FORCEMPIEXEC
356#else
357 if (cmd.m_processes > 1 || file.GetNumCommands() > 1)
358#endif
359 {
360 if (mpiAdded)
361 {
362 continue;
363 }
364
365 command += "\"@MPIEXEC@\" ";
366 if (std::string("@NEKTAR_TEST_USE_HOSTFILE@") == "ON")
367 {
368 command += "-hostfile hostfile ";
369 if (system("echo 'localhost slots=12' > hostfile"))
370 {
371 cerr << "Unable to write 'hostfile' in path '"
372 << fs::current_path() << endl;
373 status = 1;
374 }
375 }
376
377 if (file.GetNumCommands() > 1)
378 {
379 command += "--tag-output ";
380 }
381
382 mpiAdded = true;
383 }
384 }
385
386 // Parse commands.
387 for (unsigned int j = 0; j < file.GetNumCommands(); ++j)
388 {
389 Command cmd = file.GetCommand(j);
390
391 // If running with multiple commands simultaneously, separate
392 // with colon.
393 if (j > 0)
394 {
395 command += " : ";
396 }
397
398 // Add -n where appropriate.
399 if (file.GetNumCommands() > 1 || cmd.m_processes > 1)
400 {
401 command += "@MPIEXEC_NUMPROC_FLAG@ ";
402 command += std::to_string(cmd.m_processes) + " ";
403 }
404
405 // Look for executable or Python script.
406 fs::path execPath = startDir / cmd.m_executable;
407 if (!fs::exists(execPath))
408 {
409 ASSERTL0(!cmd.m_pythonTest, "Python script not found.");
410 execPath = cmd.m_executable;
411 }
412
413 // Prepend script name with Python executable path if this is a
414 // Python test.
415 if (cmd.m_pythonTest)
416 {
417 command += "@PYTHON_EXECUTABLE@ ";
418 }
419
420 command += PortablePath(execPath);
421 if (HaveOptFile)
422 {
423 command += " --useoptfile test.opt ";
424 }
425
426 command += " ";
427 command += cmd.m_parameters;
428 command += " 1>output.out 2>output.err";
429 }
430
431 status = 0;
432
433 if (verbose)
434 {
435 cerr << "Running command: " << command << endl;
436 }
437
438 // Run executable to perform test.
439 if (system(command.c_str()))
440 {
441 cerr << "Error occurred running test:" << endl;
442 cerr << "Command: " << command << endl;
443 status = 1;
444 }
445
446 // Check output files exist
447 if (!(fs::exists("output.out") && fs::exists("output.err")))
448 {
449 cerr << "One or more test output files are missing." << endl;
450 throw 1;
451 }
452
453 // Open output files and check they are readable
454 ifstream vStdout("output.out");
455 ifstream vStderr("output.err");
456 if (vStdout.bad() || vStderr.bad())
457 {
458 cerr << "One or more test output files are unreadable." << endl;
459 throw 1;
460 }
461
462 // Append output to the master output and error files.
463 if (verbose)
464 {
465 cerr << "Appending run " << i << " output and error to master."
466 << endl;
467 }
468
469 while (getline(vStdout, line))
470 {
471 masterOut << line << endl;
472 }
473
474 while (getline(vStderr, line))
475 {
476 masterErr << line << endl;
477 }
478
479 vStdout.close();
480 vStderr.close();
481 }
482
483 // Warn user if any metrics don't support multiple runs.
484 for (int i = 0; i < metrics.size(); ++i)
485 {
486 if (!metrics[i]->SupportsAverage() && file.GetNumRuns() > 1)
487 {
488 cerr << "WARNING: Metric " << metrics[i]->GetType()
489 << " does not support multiple runs. Test may yield "
490 "unexpected results."
491 << endl;
492 }
493 }
494
495 // Test against all metrics
496 if (status == 0)
497 {
498 if (verbose && metrics.size())
499 {
500 cerr << "Checking metrics:" << endl;
501 }
502
503 for (int i = 0; i < metrics.size(); ++i)
504 {
505 bool gen =
506 metricGen.find(metrics[i]->GetID()) != metricGen.end() ||
507 (vm.count("generate-all-metrics") > 0);
508
509 masterOut.clear();
510 masterErr.clear();
511 masterOut.seekg(0, ios::beg);
512 masterErr.seekg(0, ios::beg);
513
514 if (verbose)
515 {
516 cerr << " - " << (gen ? "generating" : "checking")
517 << " metric " << metrics[i]->GetID() << " ("
518 << metrics[i]->GetType() << ")... ";
519 }
520
521 if (!metrics[i]->Test(masterOut, masterErr))
522 {
523 status = 1;
524 if (verbose)
525 {
526 cerr << "failed!" << endl;
527 }
528 }
529 else if (verbose)
530 {
531 cerr << "passed" << endl;
532 }
533 }
534 }
535
536 if (verbose)
537 {
538 cerr << endl << endl;
539 }
540
541 // Dump output files to terminal for debugging purposes on fail.
542 if (status == 1 || verbose)
543 {
544 masterOut.clear();
545 masterErr.clear();
546 masterOut.seekg(0, ios::beg);
547 masterErr.seekg(0, ios::beg);
548
549 cout << "=== Output ===" << endl;
550 while (masterOut.good())
551 {
552 getline(masterOut, line);
553 cout << line << endl;
554 }
555 cout << "=== Errors ===" << endl;
556 while (masterErr.good())
557 {
558 getline(masterErr, line);
559 cout << line << endl;
560 }
561 }
562
563 // Close output files.
564 masterOut.close();
565 masterErr.close();
566
567 // Change back to the original path and delete temporary directory.
568 fs::current_path(startDir);
569
570 if (verbose)
571 {
572 cerr << "Removing working directory" << endl;
573 }
574
575 // Repeatedly try deleting directory with sleep for filesystems which
576 // work asynchronously. This allows time for the filesystem to register
577 // the output files are closed so they can be deleted and not cause a
578 // filesystem failure. Attempts made for 1 second.
579 int i = 1000;
580 while (i > 0)
581 {
582 try
583 {
584 // If delete successful, stop trying.
585 fs::remove_all(masterDir);
586 break;
587 }
588 catch (const fs::filesystem_error &e)
589 {
590 // usleep(1000);
591 boost::this_thread::sleep(boost::posix_time::milliseconds(1));
592 i--;
593 if (i > 0)
594 {
595 cout << "Locked files encountered. "
596 << "Retrying after 1ms..." << endl;
597 }
598 else
599 {
600 // If still failing after 1sec, we consider it a permanent
601 // filesystem error and abort.
602 throw e;
603 }
604 }
605 }
606
607 // Save any changes.
608 if (vm.count("generate-metric") > 0 ||
609 vm.count("generate-all-metrics") > 0)
610 {
611 file.SaveFile();
612 }
613
614 // Return status of test. 0 = PASS, 1 = FAIL
615 return status;
616 }
617 catch (const fs::filesystem_error &e)
618 {
619 cerr << "Filesystem operation error occurred:" << endl;
620 cerr << " " << e.what() << endl;
621 cerr << " Files left in " << masterDir.string() << endl;
622 }
623 catch (const TesterException &e)
624 {
625 cerr << "Error occurred during test:" << endl;
626 cerr << " " << e.what() << endl;
627 cerr << " Files left in " << masterDir.string() << endl;
628 }
629 catch (const std::exception &e)
630 {
631 cerr << "Unhandled exception during test:" << endl;
632 cerr << " " << e.what() << endl;
633 cerr << " Files left in " << masterDir.string() << endl;
634 }
635 catch (...)
636 {
637 cerr << "Unknown error during test" << endl;
638 cerr << " Files left in " << masterDir.string() << endl;
639 }
640
641 // If a system error, return 2
642 return 2;
643}
#define ASSERTL0(condition, msg)
Definition: ErrorUtil.hpp:215
int main(int argc, char *argv[])
Definition: Tester.cpp.in:78
std::string PortablePath(const boost::filesystem::path &path)
Definition: Tester.cpp.in:71
MetricSharedPtr CreateInstance(std::string key, TiXmlElement *elmt, bool generate)
Definition: Metric.h:144
The TestData class is responsible for parsing a test XML file and storing the data.
Definition: TestData.h:71
DependentFile GetDependentFile(unsigned int pId) const
Definition: TestData.cpp:131
unsigned int GetMetricId(unsigned int pId)
Returns the ID of the metric at the given index.
Definition: TestData.cpp:123
unsigned int GetNumDependentFiles() const
Returns the number of dependent files required for the test.
Definition: TestData.cpp:138
unsigned int GetNumMetrics() const
Returns the number of metrics to be collected for the test.
Definition: TestData.cpp:109
unsigned int GetNumRuns() const
Returns the number of runs to be performed for the test.
Definition: TestData.cpp:144
TiXmlElement * GetMetric(unsigned int pId)
Returns a pointer to the TiXmlElement object representing the metric for a given metric ID.
Definition: TestData.cpp:116
unsigned int GetNumCommands() const
Definition: TestData.cpp:91
std::string GetMetricType(unsigned int pId) const
Returns the type of metric to be collected for a given metric ID.
Definition: TestData.cpp:97
const Command & GetCommand(unsigned int pId) const
Definition: TestData.cpp:84
The above copyright notice and this permission notice shall be included.
Definition: CoupledSolver.h:2
MetricFactory & GetMetricFactory()
Definition: Metric.cpp:42
bool m_pythonTest
Definition: TestData.h:62
fs::path m_executable
Definition: TestData.h:59
std::string m_parameters
Definition: TestData.h:60
unsigned int m_processes
Definition: TestData.h:61
std::string m_filename
Definition: TestData.h:54
Subclass of std::runtime_error to handle exceptions raised by Tester.