Nektar++
functions_main Namespace Reference

Functions

def Filename_Generate
 
def PBS_Job_Parse
 
def Find_Nektar_Elements
 
def Find_Nektar_Files
 
def Parse_Nektar_Output
 
def Parse_Nektar_CG_Benchmark_Output
 
def Find_Hardware
 
def Find_Conditions
 
def PBS_Benchmark_Parse
 
def Parse_Benchmark
 
def Partition
 
def Find_Topologies
 

Function Documentation

def functions_main.Filename_Generate(Mesh, Max_N_Z, Conditions_File)

Definition at line 21 of file functions_main.py.

def Filename_Generate(Mesh, Max_N_Z, Conditions_File):

    # Mesh file location
    Mesh_File = 'Input/Mesh/' + Mesh

    # File containing the output of the maximum value of N_Z, for parsing CG iteration data
    Input_Nektar_Max = 'Input/Serial_Input/' + Max_N_Z

    # File containing the conditions
    Conditions = 'Input/Conditions/' + Conditions_File

    # Serial timing file location
    Loc_Serial_Timing_Files = 'Input/Serial_Input/'

    # Parallel timing file location
    Loc_Parallel_Timing_Files = 'Input/Parallel_Input/'

    # Hardware benchmarking files
    Benchmark_PBS = 'Input/Benchmark/Benchmark.pbs'
    MPI_Benchmark = 'Input/Benchmark/Benchmark.txt'
    Node_Map = 'Input/Benchmark/node.xml'

    return(Mesh_File, Input_Nektar_Max, Conditions, Loc_Serial_Timing_Files, Loc_Parallel_Timing_Files, Benchmark_PBS, MPI_Benchmark, Node_Map)
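A minimal usage sketch (the three file names are illustrative; the function only joins them onto the fixed Input/ directory layout):

    from functions_main import Filename_Generate

    (Mesh_File, Input_Nektar_Max, Conditions,
     Loc_Serial_Timing_Files, Loc_Parallel_Timing_Files,
     Benchmark_PBS, MPI_Benchmark, Node_Map) = Filename_Generate('cylinder.xml', 'max_n_z.txt', 'conditions.xml')

    print(Mesh_File)   # Input/Mesh/cylinder.xml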
#------------------------------------
# New Function
#------------------------------------

# Parse the PBS script provided to find the number of nodes
def functions_main.Find_Conditions(Input_Filename)

Definition at line 303 of file functions_main.py.

def Find_Conditions(Input_Filename):

    # Open the file to be parsed
    f = open(Input_Filename, "r")

    # Error checking, default to True (True means the parse succeeded)
    Error = True
    Message = []

    # Iterate over the file to find P and N_Modes
    for line in f:
        a = line.split()
        for i in range(0, len(a)):

            # Lines of the form 'HomModesZ = value' give N_Modes
            if (a[i] == 'HomModesZ'):
                try:
                    N_Modes = int(a[i + 2]) / 2
                except:
                    Error = False

            # Attributes of the form NUMMODES="value" give P
            b = a[i].split('=', 1)
            for j in range(0, len(b)):
                if (b[j] == 'NUMMODES'):
                    c = b[j + 1].split('"', 2)
                    try:
                        P = int(c[1]) - 1
                    except:
                        Error = False

    # Close the file
    f.close()

    # Return the desired values and error information
    return(N_Modes, P, Error, Message)
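For instance, a session file containing the parameter line <P> HomModesZ = 64 </P> and an expansion attribute NUMMODES="7" would yield N_Modes = 32 and P = 6. A usage sketch (path illustrative; note the inverted convention that Error == True means success):

    from functions_main import Find_Conditions

    (N_Modes, P, Error, Message) = Find_Conditions('Input/Conditions/conditions.xml')
    if Error:
        print(N_Modes, P)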
#------------------------------------
# New Function
#------------------------------------

# Function to find PROC_BENCHMARK, number of processes used in Benchmarking
def functions_main.Find_Hardware(Input_Filename)

Definition at line 254 of file functions_main.py.

def Find_Hardware(Input_Filename):

    # Open the input file containing information generated by lstopo
    f = open(Input_Filename, 'r')

    # Declare initial counters
    count_cores = 0
    count_sockets = 0

    # Error check, default to True
    Error = True
    Message = []

    # Count cores and sockets by parsing the lstopo file
    for line in f:
        a = line.split()
        for i in range(0, len(a)):
            if (a[i] == 'type="Core"'):
                count_cores += 1
            if (a[i] == 'type="Socket"'):
                count_sockets += 1

    # Close the file
    f.close()

    # PROC_TOT is one of the desired outputs
    Num_Core_Per_Node = count_cores

    # Error check the parsing
    if (Num_Core_Per_Node == 0):
        Error = False
        Message.append('Unable to find any cores in ' + Input_Filename)

    if (count_sockets == 0):
        Error = False
        Message.append('Unable to find any sockets in ' + Input_Filename)

    # Find the other desired quantities, guarding against division by zero when the parse failed
    if (count_sockets != 0):
        Num_Core_Per_Socket = Num_Core_Per_Node / count_sockets
    else:
        Num_Core_Per_Socket = 0
    Num_Sock_Per_Node = count_sockets

    # Return the information along with the error check
    return(Num_Core_Per_Node, Num_Core_Per_Socket, Num_Sock_Per_Node, Error, Message)
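The counts come from matching the literal attribute strings type="Core" and type="Socket" in an lstopo XML export (e.g. one produced by running lstopo with an XML output file; the path below is illustrative):

    from functions_main import Find_Hardware

    (Num_Core_Per_Node, Num_Core_Per_Socket, Num_Sock_Per_Node,
     Error, Message) = Find_Hardware('Input/Benchmark/node.xml')
    if not Error:
        print(Message)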
#------------------------------------
# New Function
#------------------------------------

# Parse the conditions input file for P and N_Modes.
def functions_main.Find_Nektar_Elements(Input_Filename)

Definition at line 85 of file functions_main.py.

def Find_Nektar_Elements(Input_Filename):

    # Create a directory to hold files required for the model to run
    # (requires the module-level imports: import os; from subprocess import Popen, PIPE)
    output_path = 'Temporary_Files'
    if os.path.exists(output_path):
        cmd_string_clear = 'rm -r Temporary_Files/ \n'
        process = Popen([cmd_string_clear], shell=True, stdout=PIPE, stdin=PIPE)
        process.wait()
        os.mkdir(output_path)

    if not os.path.exists(output_path):
        os.mkdir(output_path)

    # Uncompress the data if need be using Nekmesh
    cmd_string_uncompress = 'Nekmesh ' + Input_Filename + ' Temporary_Files/Nektar_Serial_Mesh.xml:xml:uncompress' + " \n"

    # Run the uncompress command using Python's subprocess module
    process = Popen([cmd_string_uncompress], shell=True, stdout=PIPE, stdin=PIPE)
    process.wait()

    # Open the uncompressed mesh and count the total number of elements
    f = open('Temporary_Files/Nektar_Serial_Mesh.xml', "r")
    element_count = 0
    for line in f:
        a = line.split()

        # Record an element when the strings match (guard against blank lines)
        if (len(a) > 0 and a[0] == '<Q'):
            element_count += 1
    f.close()

    return(element_count)
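A usage sketch; this assumes Nekmesh is available on the PATH and that the mesh is made of quadrilateral (<Q) elements, since only those are counted:

    from functions_main import Find_Nektar_Elements

    element_count = Find_Nektar_Elements('Input/Mesh/cylinder.xml')
    print(element_count)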
#------------------------------------
# New Function
#------------------------------------

# Find the files that make up the Serial Data being provided to the model
def functions_main.Find_Nektar_Files(Input_Filename)

Definition at line 123 of file functions_main.py.

Referenced by parallel.Run_Parallel_Comparison().

def Find_Nektar_Files(Input_Filename):

    # Now find the directory for the generated partition files
    current_directory = os.getcwd()

    directory = current_directory + '/' + Input_Filename

    # Holding list for file names
    Timing_Files = []

    # Find the file names in the given directory
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".txt"):
                Timing_Files.append(os.path.join(file))

    Nektar_Modes = []

    # Loop over the timing file names to parse the Nektar modes
    for i in range(0, len(Timing_Files)):
        a = Timing_Files[i].split('_', 1)
        b = a[1].split('.txt', 1)
        Nektar_Modes.append(int(b[0]))

    # Sort the output by plane number
    (Nektar_Modes, Timing_Files) = zip(*sorted(zip(Nektar_Modes, Timing_Files)))

    # Return the file names
    return(Nektar_Modes, Timing_Files)
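The mode count is taken from the text between the first underscore and the .txt suffix, so the timing files must be named accordingly. A sketch, assuming Input/Serial_Input/ holds illustrative files output_2.txt, output_8.txt and output_4.txt:

    from functions_main import Find_Nektar_Files

    (Nektar_Modes, Timing_Files) = Find_Nektar_Files('Input/Serial_Input')
    print(Nektar_Modes)    # (2, 4, 8)
    print(Timing_Files)    # ('output_2.txt', 'output_4.txt', 'output_8.txt')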
#------------------------------------
# New Function
#------------------------------------

# Parse the Nektar file to find timing information.
def functions_main.Find_Topologies(PROC_TOT, Num_Modes)

Definition at line 689 of file functions_main.py.

Referenced by parallel.Run_Parallel_Model().

def Find_Topologies(PROC_TOT, Num_Modes):
    PROC_Z = []
    PROC_XY = []

    PROC_TOT_HALF = PROC_TOT / 2

    for i in range(1, PROC_TOT_HALF + 1):
        if (PROC_TOT % i == 0 and Num_Modes % i == 0):
            PROC_Z.append(i)
            PROC_XY.append(PROC_TOT / i)

    if ((Num_Modes % PROC_TOT) == 0):
        PROC_Z.append(PROC_TOT)
        PROC_XY.append(1)

    return (PROC_XY, PROC_Z)
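A worked example of the divisor search (note the module relies on Python 2 integer division): with PROC_TOT = 8 and Num_Modes = 16, every i that divides both PROC_TOT and Num_Modes yields a valid (PROC_XY, PROC_Z) pairing:

    from functions_main import Find_Topologies

    (PROC_XY, PROC_Z) = Find_Topologies(8, 16)
    print(PROC_Z)     # [1, 2, 4, 8]
    print(PROC_XY)    # [8, 4, 2, 1]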
#------------------------------------
# End of Functions
#------------------------------------
def functions_main.Parse_Benchmark(Input_Filename, PROC_BENCHMARK, Num_Core_Per_Socket, Num_Sock_Per_Node)

Definition at line 388 of file functions_main.py.

def Parse_Benchmark(Input_Filename, PROC_BENCHMARK, Num_Core_Per_Socket, Num_Sock_Per_Node):

    # Calculate number of cores per node
    Num_Core_Per_Node = Num_Core_Per_Socket * Num_Sock_Per_Node

    # Usable grouping sizes to be checked
    Check = [4, 8, 16, 32]

    # Checks to be used in loop to decide group size choice
    Check_Node = False
    Check_Socket = False

    # Loop to find size choice, require the group that crosses socket and node divisions
    for i in range(0, len(Check)):
        if (Num_Core_Per_Socket % Check[i] != 0):
            Check_Socket = True

        if (Num_Core_Per_Node % Check[i] != 0):
            Check_Node = True

        # Break when found
        if (Check_Node is True and Check_Socket is True):
            PROC_PER_GROUP = Check[i]
            break

        # Reset the checks
        Check_Node = False
        Check_Socket = False

    # Number of groups
    Num_Groups = PROC_BENCHMARK / PROC_PER_GROUP

    # Lists to hold groupings of groups
    concerned_node_groups = []
    concerned_socket_groups = []
    concerned_core_groups = []

    # List to hold groupings
    Groups = []

    # Counter used in iteration
    count = 0

    # Reset the checks, now used to confirm which group concerns which communication combination
    Check_Node = False
    Check_Socket = False

    # Loop over each group and categorise
    for i in range(0, Num_Groups):
        Groups.append([])

        for j in range(0, PROC_PER_GROUP):
            Groups[i].append(count)
            count += 1

        for j in range(1, PROC_PER_GROUP - 1):
            if (Groups[i][j] % Num_Core_Per_Node == 0):
                concerned_node_groups.append(i)
                Check_Node = True
                continue

            if (Groups[i][j] % Num_Core_Per_Socket == 0 and Check_Node is False):
                concerned_socket_groups.append(i)
                Check_Socket = True
                continue

        if (Check_Node is False and Check_Socket is False):
            concerned_core_groups.append(i)

        Check_Node = False
        Check_Socket = False

    # Open the file to be parsed
    f = open(Input_Filename, "r")

    # List to hold the parsed data
    data = []

    # True/False statements and count set to default requirements
    count = -1
    Finder_0 = False
    Finder_1 = False
    Finder_2 = False
    Finder_3 = False

    # Parse the data as required, the strings checked are those produced by the benchmarking tool.
    # Clunky but effective
    for line in f:
        parsed = line.split()
        if (parsed == ['#', 'Benchmarking', 'Multi-Exchange']):
            Finder_0 = True
            continue

        if (parsed == ['#', '(', str(Num_Groups), 'groups', 'of', str(PROC_PER_GROUP), 'processes', 'each', 'running', 'simultaneous', ')'] and Finder_0 is True):
            Finder_1 = True
            continue

        if (parsed == ['Group', '#bytes', '#repetitions', 't_min[usec]', 't_max[usec]', 't_avg[usec]', 'Mbytes/sec'] and Finder_1 is True):
            Finder_2 = True
            continue

        if (Finder_1 is True and Finder_2 is True):
            if (parsed == []):
                count += 1
                data.append([[], [], [], [], [], [], []])
                continue
            if (parsed == ['#------------------------------------------------------------------------------------------']):
                break

            data[count][5].append(float(parsed[5]))
            data[count][6].append(float(parsed[6]))

    # Close the file
    f.close()

    # Calculate latencies using the groups
    count_lat_node = 0.0
    count_lat_socket = 0.0
    count_lat_core = 0.0

    for i in range(0, len(concerned_node_groups)):
        index = concerned_node_groups[i]
        count_lat_node += data[0][5][index]

    LAT_Node_To_Node = (count_lat_node / len(concerned_node_groups)) * 1e-06

    for i in range(0, len(concerned_socket_groups)):
        index = concerned_socket_groups[i]
        count_lat_socket += data[0][5][index]

    LAT_Socket_To_Socket = (count_lat_socket / len(concerned_socket_groups)) * 1e-06

    for i in range(0, len(concerned_core_groups)):
        index = concerned_core_groups[i]
        count_lat_core += data[0][5][index]

    LAT_Core_To_Core = (count_lat_core / len(concerned_core_groups)) * 1e-06

    # Calculate bandwidth using the groups, memory size chosen by hand, adjust the minus to choose a different size
    count_band_node = 0.0
    count_band_socket = 0.0
    count_band_core = 0.0

    for i in range(0, len(concerned_node_groups)):
        index = concerned_node_groups[i]
        count_band_node += data[count - 3][6][index]

    BW_Node_To_Node = (count_band_node / len(concerned_node_groups)) * 1e06

    for i in range(0, len(concerned_socket_groups)):
        index = concerned_socket_groups[i]
        count_band_socket += data[count - 3][6][index]

    BW_Socket_To_Socket = (count_band_socket / len(concerned_socket_groups)) * 1e06

    for i in range(0, len(concerned_core_groups)):
        index = concerned_core_groups[i]
        count_band_core += data[count - 3][6][index]

    BW_Core_To_Core = (count_band_core / len(concerned_core_groups)) * 1e06

    # Return the desired values
    return(BW_Node_To_Node, LAT_Node_To_Node, BW_Socket_To_Socket, LAT_Socket_To_Socket, BW_Core_To_Core, LAT_Core_To_Core)
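A usage sketch chaining the hardware and PBS parsers to supply the arguments (file locations follow the defaults returned by Filename_Generate):

    from functions_main import Find_Hardware, PBS_Benchmark_Parse, Parse_Benchmark

    (Num_Core_Per_Node, Num_Core_Per_Socket, Num_Sock_Per_Node,
     Error, Message) = Find_Hardware('Input/Benchmark/node.xml')
    (PROC_BENCHMARK, Error, Message) = PBS_Benchmark_Parse('Input/Benchmark/Benchmark.pbs')

    (BW_Node_To_Node, LAT_Node_To_Node,
     BW_Socket_To_Socket, LAT_Socket_To_Socket,
     BW_Core_To_Core, LAT_Core_To_Core) = Parse_Benchmark('Input/Benchmark/Benchmark.txt', PROC_BENCHMARK,
                                                          Num_Core_Per_Socket, Num_Sock_Per_Node)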
#------------------------------------
# New Function
#------------------------------------

# Input the filename and number of processors you wish to partition by
def functions_main.Parse_Nektar_CG_Benchmark_Output(Input_Filename)

Definition at line 186 of file functions_main.py.

def Parse_Nektar_CG_Benchmark_Output(Input_Filename):

    # Open file to be parsed
    f = open(Input_Filename, "r")

    # Parse the desired data
    for line in f:
        a = line.split()
        for i in range(0, len(a)):

            # Reset the data each time as we only want the last entry
            if (a[i] == 'Pressure'):
                Pressure = {}
                Velocity_1 = {}
                Velocity_2 = {}
                Velocity_3 = {}
                var = 1
                continue

            if (a[i] == 'Velocity'):
                var = 2
                continue

            if (a[i] == 'Plane'):
                plane = int(a[i + 1]) + 1
                plane = str(plane)
                continue

            # Append each value of CG to the dictionaries
            if (a[i] == 'CG'):
                if (var == 1):
                    if plane in Pressure.keys():
                        Pressure[plane].append(int(a[i + 4]))
                        continue
                    else:
                        Pressure[plane] = [int(a[i + 4])]

                if (var == 2):
                    if plane in Velocity_1.keys():
                        Velocity_1[plane].append(int(a[i + 4]))
                        var = var + 1
                        continue
                    else:
                        Velocity_1[plane] = [int(a[i + 4])]

                if (var == 3):
                    if plane in Velocity_2.keys():
                        Velocity_2[plane].append(int(a[i + 4]))
                        var = var + 1
                        continue
                    else:
                        Velocity_2[plane] = [int(a[i + 4])]

                if (var == 4):
                    if plane in Velocity_3.keys():
                        Velocity_3[plane].append(int(a[i + 4]))
                        continue
                    else:
                        Velocity_3[plane] = [int(a[i + 4])]

    # Close the file and return the dictionaries of CG iterations
    f.close()
    return(Pressure, Velocity_1, Velocity_2, Velocity_3)
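The returned dictionaries are keyed by plane number (stored as strings, shifted to be 1-based) and map to the list of CG iteration counts recorded for that plane; only the final timestep survives because the dictionaries are reset at each 'Pressure' header. A sketch with an illustrative file name:

    from functions_main import Parse_Nektar_CG_Benchmark_Output

    (Pressure, Velocity_1, Velocity_2, Velocity_3) = Parse_Nektar_CG_Benchmark_Output('Input/Serial_Input/output_8.txt')
    print(Pressure['1'])    # CG iteration counts for pressure on plane 1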
#------------------------------------
# New Function
#------------------------------------

# Parse the lstopo generated file to count socket and core related quantities
def functions_main.Parse_Nektar_Output(Input_Filename)

Definition at line 158 of file functions_main.py.

def Parse_Nektar_Output(Input_Filename):

    # Open file to be parsed
    f = open(Input_Filename, "r")

    # List for outputs
    times = []

    # Parse the desired data
    for line in f:
        a = line.split()
        for i in range(0, len(a)):
            if (a[i] == 'CPU'):
                b = a[i + 2].split('s', 1)
                times.append(float(b[0]))

    # Close the file
    f.close()

    # Remove the first two entries, which are not representative of the timings
    times.pop(0)
    times.pop(0)

    # Return the list of times
    return(times)
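The parser keys on the token 'CPU' and converts the token two places later after stripping its trailing 's', so it expects timer lines of roughly the form 'CPU Time: 1.234s' (format illustrative). A sketch:

    from functions_main import Parse_Nektar_Output

    times = Parse_Nektar_Output('Input/Parallel_Input/output_8.txt')
    mean_time = sum(times) / len(times)   # Mean step time once the first two entries are dropped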
#------------------------------------
# New Function
#------------------------------------

# Parse the Nektar file to find CG information for the final timestep
def functions_main.Partition(Input_Filename, PROC_XY)

Definition at line 556 of file functions_main.py.

Referenced by parallel.Run_Parallel_Model().

def Partition(Input_Filename, PROC_XY):

    # Create a directory to hold files required for the model to run
    output_path = 'Temporary_Files'
    if os.path.exists(output_path):
        cmd_string_clear = 'rm -r Temporary_Files/ \n'
        process = Popen([cmd_string_clear], shell=True, stdout=PIPE, stdin=PIPE)
        process.wait()
        os.mkdir(output_path)

    if not os.path.exists(output_path):
        os.mkdir(output_path)

    # Uncompress the data if need be using Nekmesh
    cmd_string_uncompress = 'Nekmesh ' + Input_Filename + ' Temporary_Files/uncompressed_mesh.xml:xml:uncompress' + " \n"

    # Run the uncompress command using Python's subprocess module
    process = Popen([cmd_string_uncompress], shell=True, stdout=PIPE, stdin=PIPE)
    process.wait()

    if (PROC_XY == 1):
        f = open('Temporary_Files/uncompressed_mesh.xml', "r")
        element_count = 0
        for line in f:
            a = line.split()

            # Record an element when the strings match (guard against blank lines)
            if (len(a) > 0 and a[0] == '<Q'):
                element_count += 1
        f.close()
        return([0], [element_count])

    # Run the partitioning part of IncNavierStokesSolver to find how METIS splits elements across processes
    cmd_string_partition = 'IncNavierStokesSolver Temporary_Files/uncompressed_mesh.xml --part-only ' + str(PROC_XY) + " \n"

    # Run the partition command using Python's subprocess module
    process = Popen([cmd_string_partition], shell=True, stdout=PIPE, stdin=PIPE)
    process.wait()

    # Now find the directory for the generated partition files
    current_directory = os.getcwd()
    mesh_part_folder = '/Temporary_Files/uncompressed_mesh_xml'

    directory = current_directory + mesh_part_folder

    # Holding list for file names
    mesh_file = []

    # Find the file names in the given directory
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".xml"):
                mesh_file.append(os.path.join(file))

    # Initialise list for recording edges present on each core
    edges = []

    # Initialise list for recording elements present on each core
    Num_Elements = []

    # Stores the number of messages process i will send to process j
    dictionary_edges = []

    # Parse all the edge data from the xml files and store them by each process in edges[]
    for i in range(0, len(mesh_file)):

        # Open each file
        f = open('Temporary_Files/uncompressed_mesh_xml/' + mesh_file[i], "r")

        # Append and update the variables used for storage
        edges.append([])
        dictionary_edges.append({})
        element_count = 0

        # Iterate over the file, splitting each line into strings
        for line in f:
            a = line.split()
            if (len(a) == 0):
                continue

            # Record an edge when the strings match
            if (a[0] == '<E'):
                b = a[1].split('"', 2)
                try:
                    edges[i].append(int(b[1]))
                except:
                    # Ignore edge entries whose id cannot be parsed
                    pass

            # Record an element when the strings match
            if (a[0] == '<Q'):
                element_count += 1

        f.close()
        Num_Elements.append(element_count)

    # Initialise dictionary counters for cores
    for i in range(0, len(mesh_file)):
        for j in range(0, len(mesh_file)):
            if (j == i):
                dictionary_edges[i][str(j)] = 'Self'
                continue
            dictionary_edges[i][str(j)] = 0

    # Now compare edge lists between processes to find matches.
    # These correspond to neighbouring elements that must communicate.
    # We have +1 message recorded for a match between process i and k
    for i in range(0, len(mesh_file)):
        for k in range(0, len(mesh_file)):
            if (i == k):
                continue
            for j in range(0, len(edges[i])):
                a = edges[i][j]
                for n in range(0, len(edges[k])):
                    if (a == edges[k][n]):
                        dictionary_edges[i][str(k)] += 1

    # Put the counted edges into lists for general use later
    Num_Element_Msg = []

    for i in range(0, len(mesh_file)):
        Num_Element_Msg.append([])
        for k in range(0, len(mesh_file)):
            Num_Element_Msg[i].append(dictionary_edges[i][str(k)])

    # Return the counts to be used later
    return (Num_Element_Msg, Num_Elements)
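A usage sketch; this assumes Nekmesh and IncNavierStokesSolver are on the PATH. Num_Element_Msg[i][k] counts the edges shared between partitions i and k (one message each), with the string 'Self' on the diagonal:

    from functions_main import Partition

    (Num_Element_Msg, Num_Elements) = Partition('Input/Mesh/cylinder.xml', 4)
    print(Num_Elements)         # Elements owned by each of the 4 partitions
    print(Num_Element_Msg[0])   # e.g. ['Self', 12, 0, 7] (illustrative values)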
#------------------------------------
# New Function
#------------------------------------

# Find the possible combinations of PROC_Z and PROC_XY such that a Cartesian grid is formed.
def functions_main.PBS_Benchmark_Parse(Input_Filename)

Definition at line 343 of file functions_main.py.

def PBS_Benchmark_Parse(Input_Filename):

    # Open the input file in .pbs format
    f = open(Input_Filename, 'r')

    # Error check, default to True
    Error = True
    Message = []

    # Iterate over the file looking for the number of cores chosen by the user
    for line in f:
        a = line.split()
        for i in range(0, len(a)):
            b = a[i].split(':', 2)
            for j in range(0, len(b)):
                c = b[j].split('=', 1)
                if (c[0] == 'select'):
                    try:
                        Num_Node = int(c[1])
                    except:
                        Num_Node = 0
                        Error = False
                        Message.append('Unable to find number of nodes from ' + Input_Filename)
                if (c[0] == 'ncpus'):
                    try:
                        Num_Cores = int(c[1])
                    except:
                        Num_Cores = 0
                        Error = False
                        Message.append('Unable to find number of cores from ' + Input_Filename)

    # Calculate the desired quantity
    PROC_BENCHMARK = Num_Cores * Num_Node

    # Close the file
    f.close()

    # Return the desired values and error information
    return(PROC_BENCHMARK, Error, Message)
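The tokens are split on ':' and then '=' while scanning for select and ncpus, so a directive such as '#PBS -l select=2:ncpus=24:mpiprocs=24' (illustrative) yields PROC_BENCHMARK = 48:

    from functions_main import PBS_Benchmark_Parse

    (PROC_BENCHMARK, Error, Message) = PBS_Benchmark_Parse('Input/Benchmark/Benchmark.pbs')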
#------------------------------------
# New Function
#------------------------------------

# Parse the IBM/Intel MPI benchmarking file to find bandwidths and latencies
def functions_main.PBS_Job_Parse(Input_Filename)

Definition at line 50 of file functions_main.py.

def PBS_Job_Parse(Input_Filename):

    # Open the input file in .pbs format
    f = open('Input/' + Input_Filename, 'r')

    # Error check, default to True
    Error = True
    Message = []

    # Iterate over the file looking for the number of nodes chosen by the user
    for line in f:
        a = line.split()
        for i in range(0, len(a)):
            b = a[i].split(':', 2)
            for j in range(0, len(b)):
                c = b[j].split('=', 1)
                if (c[0] == 'select'):
                    try:
                        Num_Node = int(c[1])
                    except:
                        Num_Node = 0
                        Error = False
                        Message.append('Unable to find number of nodes from ' + Input_Filename)

    # Close the file
    f.close()

    # Return the desired values and error information
    return(Num_Node, Error, Message)
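Note that, unlike PBS_Benchmark_Parse, this function prepends 'Input/' to its argument itself; a sketch assuming a script at Input/job.pbs (name illustrative):

    from functions_main import PBS_Job_Parse

    (Num_Node, Error, Message) = PBS_Job_Parse('job.pbs')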
#------------------------------------
# New Function
#------------------------------------

# Function to find the total number of elements for the serial calibration.