Nektar++
functions_main Namespace Reference

Functions

def Filename_Generate (Mesh, Max_N_Z, Conditions_File)
def PBS_Job_Parse (Input_Filename)
def Find_Nektar_Elements (Input_Filename)
def Find_Nektar_Files (Input_Filename)
def Parse_Nektar_Output (Input_Filename)
def Parse_Nektar_CG_Benchmark_Output (Input_Filename)
def Find_Hardware (Input_Filename)
def Find_Conditions (Input_Filename)
def PBS_Benchmark_Parse (Input_Filename)
def Parse_Benchmark (Input_Filename, PROC_BENCHMARK, Num_Core_Per_Socket, Num_Sock_Per_Node)
def Partition (Input_Filename, PROC_XY)
def Find_Topologies (PROC_TOT, Num_Modes)

Function Documentation

◆ Filename_Generate()

def functions_main.Filename_Generate (Mesh, Max_N_Z, Conditions_File)

Definition at line 21 of file functions_main.py.

def Filename_Generate(Mesh, Max_N_Z, Conditions_File):

    # Mesh file location
    Mesh_File = 'Input/Mesh/' + Mesh

    # File containing the output of the maximum value of N_Z, for parsing CG iteration data.
    Input_Nektar_Max = 'Input/Serial_Input/' + Max_N_Z

    # File containing the conditions.
    Conditions = 'Input/Conditions/' + Conditions_File

    # Serial timing file location
    Loc_Serial_Timing_Files = 'Input/Serial_Input/'

    # Parallel timing file location
    Loc_Parallel_Timing_Files = 'Input/Parallel_Input/'

    # Hardware benchmarking files
    Benchmark_PBS = 'Input/Benchmark/Benchmark.pbs'
    MPI_Benchmark = 'Input/Benchmark/Benchmark.txt'
    Node_Map = 'Input/Benchmark/node.xml'

    return(Mesh_File, Input_Nektar_Max, Conditions, Loc_Serial_Timing_Files, Loc_Parallel_Timing_Files, Benchmark_PBS, MPI_Benchmark, Node_Map)

#------------------------------------
# New Function
#------------------------------------

# Parse the PBS script provided to find the number of nodes
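A minimal usage sketch; the file names below are hypothetical placeholders, since the function simply joins its arguments onto the fixed Input/ directory layout:

(Mesh_File, Input_Nektar_Max, Conditions, Loc_Serial_Timing_Files,
 Loc_Parallel_Timing_Files, Benchmark_PBS, MPI_Benchmark, Node_Map) = \
    functions_main.Filename_Generate('channel.xml', 'Max_N_Z.txt', 'conditions.xml')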

◆ Find_Conditions()

def functions_main.Find_Conditions (Input_Filename)

Definition at line 303 of file functions_main.py.

def Find_Conditions(Input_Filename):

    # Open the file to be parsed
    f = open(Input_Filename, "r")

    # Error checking, defaults to True
    Error = True
    Message = []

    # Iterate over the file to find P and N_Modes
    for line in f:
        a = line.split()
        for i in range(0, len(a)):

            if (a[i] == 'HomModesZ'):
                try:
                    # Integer division keeps N_Modes an int under Python 3
                    N_Modes = int(a[i + 2]) // 2
                except:
                    Error = False

            b = a[i].split('=', 1)
            for j in range(0, len(b)):
                if (b[j] == 'NUMMODES'):
                    c = b[j + 1].split('"', 2)
                    try:
                        P = int(c[1]) - 1
                    except:
                        Error = False

    # Close the file
    f.close()

    # Return the errors and desired values
    return(N_Modes, P, Error, Message)

#------------------------------------
# New Function
#------------------------------------

# Function to find PROC_BENCHMARK, the number of processes used in benchmarking
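A usage sketch on a hypothetical conditions file; Error remains True when both P and N_Modes were parsed successfully:

N_Modes, P, Error, Message = functions_main.Find_Conditions('Input/Conditions/conditions.xml')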

◆ Find_Hardware()

def functions_main.Find_Hardware (Input_Filename)

Definition at line 254 of file functions_main.py.

def Find_Hardware(Input_Filename):

    # Open the input file containing information generated by lstopo
    f = open(Input_Filename, 'r')

    # Declare initial counters
    count_cores = 0
    count_sockets = 0

    # Error check defaults to True
    Error = True
    Message = []

    # Count cores and sockets by parsing the lstopo file
    for line in f:
        a = line.split()
        for i in range(0, len(a)):
            if (a[i] == 'type="Core"'):
                count_cores += 1
            if (a[i] == 'type="Socket"'):
                count_sockets += 1

    # Close the file
    f.close()

    # Num_Core_Per_Node is one of the desired outputs
    Num_Core_Per_Node = count_cores

    # Error check the parsing
    if (Num_Core_Per_Node == 0):
        Error = False
        Message.append('Unable to find any cores in ' + Input_Filename)

    if (count_sockets == 0):
        Error = False
        Message.append('Unable to find any sockets in ' + Input_Filename)

    # Find the other desired quantities, guarding against a zero socket
    # count (already reported by the error check above)
    if (count_sockets != 0):
        Num_Core_Per_Socket = Num_Core_Per_Node // count_sockets
    else:
        Num_Core_Per_Socket = 0
    Num_Sock_Per_Node = count_sockets

    # Return the information along with the error check
    return(Num_Core_Per_Node, Num_Core_Per_Socket, Num_Sock_Per_Node, Error, Message)

#------------------------------------
# New Function
#------------------------------------

# Parse the conditions input file for P and N_Modes.
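A usage sketch using the lstopo map named in Filename_Generate:

Num_Core_Per_Node, Num_Core_Per_Socket, Num_Sock_Per_Node, Error, Message = \
    functions_main.Find_Hardware('Input/Benchmark/node.xml')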

◆ Find_Nektar_Elements()

def functions_main.Find_Nektar_Elements (Input_Filename)

Definition at line 85 of file functions_main.py.

def Find_Nektar_Elements(Input_Filename):
    # Assumes the module-level imports of functions_main.py:
    # import os; from subprocess import Popen, PIPE

    # Create a folder to hold the files required for the model to run
    output_path = 'Temporary_Files'
    if os.path.exists(output_path):
        cmd_string_clear = 'rm -r Temporary_Files/ \n'
        process = Popen([cmd_string_clear], shell=True, stdout=PIPE, stdin=PIPE)
        process.wait()
        os.mkdir(output_path)

    if not os.path.exists(output_path):
        os.mkdir(output_path)

    # Uncompress the data if need be using Nekmesh
    cmd_string_uncompress = 'Nekmesh ' + Input_Filename + ' Temporary_Files/Nektar_Serial_Mesh.xml:xml:uncompress' + " \n"

    # Run the uncompress command using Python's subprocess module
    process = Popen([cmd_string_uncompress], shell=True, stdout=PIPE, stdin=PIPE)
    process.wait()

    # Open the uncompressed mesh and count the total number of elements
    f = open('Temporary_Files/Nektar_Serial_Mesh.xml', "r")
    element_count = 0
    for line in f:
        a = line.split()

        # Record an element when the strings match (guard against blank lines)
        if (len(a) > 0 and a[0] == '<Q'):
            element_count += 1
    f.close()

    return(element_count)

#------------------------------------
# New Function
#------------------------------------

# Find the files that make up the serial data being provided to the model
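A usage sketch, assuming Nekmesh is available on the PATH and a hypothetical quadrilateral mesh under Input/Mesh/ (the function counts the <Q elements of the uncompressed mesh):

element_count = functions_main.Find_Nektar_Elements('Input/Mesh/channel.xml')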

◆ Find_Nektar_Files()

def functions_main.Find_Nektar_Files (Input_Filename)

Definition at line 123 of file functions_main.py.

def Find_Nektar_Files(Input_Filename):

    # Find the directory holding the timing files (assumes a module-level import os)
    current_directory = os.getcwd()

    directory = current_directory + '/' + Input_Filename

    # Holding list for file names
    Timing_Files = []

    # Find the file names in the given directory
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".txt"):
                Timing_Files.append(os.path.join(file))

    Nektar_Modes = []

    # Loop over the timing file names to parse the Nektar modes
    for i in range(0, len(Timing_Files)):
        a = Timing_Files[i].split('_', 1)
        b = a[1].split('.txt', 1)
        Nektar_Modes.append(int(b[0]))

    # Sort the output by plane number
    (Nektar_Modes, Timing_Files) = zip(*sorted(zip(Nektar_Modes, Timing_Files)))

    # Return the file names
    return(Nektar_Modes, Timing_Files)

#------------------------------------
# New Function
#------------------------------------

# Parse the Nektar file to find timing information.

Referenced by parallel.Run_Parallel_Comparison().
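A usage sketch; it assumes the directory holds timing files named with a trailing mode number, e.g. a hypothetical Timing_8.txt, since the mode is parsed from the text between the first '_' and '.txt':

Nektar_Modes, Timing_Files = functions_main.Find_Nektar_Files('Input/Serial_Input')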

◆ Find_Topologies()

def functions_main.Find_Topologies (PROC_TOT, Num_Modes)

Definition at line 689 of file functions_main.py.

def Find_Topologies(PROC_TOT, Num_Modes):
    PROC_Z = []
    PROC_XY = []

    # Integer division so the range bound below is an int
    PROC_TOT_HALF = PROC_TOT // 2

    for i in range(1, PROC_TOT_HALF + 1):
        if (PROC_TOT % i == 0 and Num_Modes % i == 0):
            PROC_Z.append(i)
            PROC_XY.append(PROC_TOT // i)

    if ((Num_Modes % PROC_TOT) == 0):
        PROC_Z.append(PROC_TOT)
        PROC_XY.append(1)

    return (PROC_XY, PROC_Z)

#------------------------------------
# End of Functions
#------------------------------------

Referenced by parallel.Run_Parallel_Model().
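A worked example: with PROC_TOT = 8 and Num_Modes = 4, only i = 1, 2 and 4 divide both quantities, so:

PROC_XY, PROC_Z = functions_main.Find_Topologies(8, 4)
# PROC_XY == [8, 4, 2], PROC_Z == [1, 2, 4]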

◆ Parse_Benchmark()

def functions_main.Parse_Benchmark (Input_Filename, PROC_BENCHMARK, Num_Core_Per_Socket, Num_Sock_Per_Node)

Definition at line 388 of file functions_main.py.

def Parse_Benchmark(Input_Filename, PROC_BENCHMARK, Num_Core_Per_Socket, Num_Sock_Per_Node):

    # Calculate the number of cores per node
    Num_Core_Per_Node = Num_Core_Per_Socket * Num_Sock_Per_Node

    # Usable grouping sizes to be checked
    Check = [4, 8, 16, 32]

    # Checks to be used in the loop to decide the group size choice
    Check_Node = False
    Check_Socket = False

    # Loop to find the size choice; we require the group size that crosses socket and node divisions
    for i in range(0, len(Check)):
        if (Num_Core_Per_Socket % Check[i] != 0):
            Check_Socket = True

        if (Num_Core_Per_Node % Check[i] != 0):
            Check_Node = True

        # Break when found
        if (Check_Node is True and Check_Socket is True):
            PROC_PER_GROUP = Check[i]
            break

        # Reset the checks
        Check_Node = False
        Check_Socket = False

    # Number of groups (integer division)
    Num_Groups = PROC_BENCHMARK // PROC_PER_GROUP

    # Lists to hold the categorised groups
    concerned_node_groups = []
    concerned_socket_groups = []
    concerned_core_groups = []

    # List to hold the groupings
    Groups = []

    # Counter used in the iteration
    count = 0

    # Reset the checks, now used to record which group concerns which communication combination
    Check_Node = False
    Check_Socket = False

    # Loop over each group and categorise it
    for i in range(0, Num_Groups):
        Groups.append([])

        for j in range(0, PROC_PER_GROUP):
            Groups[i].append(count)
            count += 1

        for j in range(1, PROC_PER_GROUP - 1):
            if (Groups[i][j] % Num_Core_Per_Node == 0):
                concerned_node_groups.append(i)
                Check_Node = True
                continue

            if (Groups[i][j] % Num_Core_Per_Socket == 0 and Check_Node is False):
                concerned_socket_groups.append(i)
                Check_Socket = True
                continue

        if (Check_Node is False and Check_Socket is False):
            concerned_core_groups.append(i)

        Check_Node = False
        Check_Socket = False

    # Open the file to be parsed
    f = open(Input_Filename, "r")

    # List to hold the parsed data
    data = []

    # True/False statements and count set to the default requirements
    count = -1
    Finder_0 = False
    Finder_1 = False
    Finder_2 = False
    Finder_3 = False

    # Parse the data as required; the strings checked are those produced by the benchmarking tool.
    # Clunky but effective
    for line in f:
        parsed = line.split()
        if (parsed == ['#', 'Benchmarking', 'Multi-Exchange']):
            Finder_0 = True
            continue

        if (parsed == ['#', '(', str(Num_Groups), 'groups', 'of', str(PROC_PER_GROUP), 'processes', 'each', 'running', 'simultaneous', ')'] and Finder_0 is True):
            Finder_1 = True
            continue

        if (parsed == ['Group', '#bytes', '#repetitions', 't_min[usec]', 't_max[usec]', 't_avg[usec]', 'Mbytes/sec'] and Finder_1 is True):
            Finder_2 = True
            continue

        if (Finder_1 is True and Finder_2 is True):
            if (parsed == []):
                count += 1
                data.append([[], [], [], [], [], [], []])
                continue
            if (parsed == ['#------------------------------------------------------------------------------------------']):
                break

            data[count][5].append(float(parsed[5]))
            data[count][6].append(float(parsed[6]))

    # Calculate the latencies using the groups
    count_lat_node = 0.0
    count_lat_socket = 0.0
    count_lat_core = 0.0

    for i in range(0, len(concerned_node_groups)):
        index = concerned_node_groups[i]
        count_lat_node += data[0][5][index]

    LAT_Node_To_Node = (count_lat_node/len(concerned_node_groups)) * 1e-06

    for i in range(0, len(concerned_socket_groups)):
        index = concerned_socket_groups[i]
        count_lat_socket += data[0][5][index]

    LAT_Socket_To_Socket = (count_lat_socket/len(concerned_socket_groups)) * 1e-06

    for i in range(0, len(concerned_core_groups)):
        index = concerned_core_groups[i]
        count_lat_core += data[0][5][index]

    LAT_Core_To_Core = (count_lat_core/len(concerned_core_groups)) * 1e-06

    # Calculate the bandwidths using the groups; the message size is chosen by hand,
    # adjust the minus to choose a different size
    count_band_node = 0.0
    count_band_socket = 0.0
    count_band_core = 0.0

    for i in range(0, len(concerned_node_groups)):
        index = concerned_node_groups[i]
        count_band_node += data[count - 3][6][index]

    BW_Node_To_Node = (count_band_node/len(concerned_node_groups)) * 1e06

    for i in range(0, len(concerned_socket_groups)):
        index = concerned_socket_groups[i]
        count_band_socket += data[count - 3][6][index]

    BW_Socket_To_Socket = (count_band_socket/len(concerned_socket_groups)) * 1e06

    for i in range(0, len(concerned_core_groups)):
        index = concerned_core_groups[i]
        count_band_core += data[count - 3][6][index]

    BW_Core_To_Core = (count_band_core/len(concerned_core_groups)) * 1e06

    # Return the desired values
    return(BW_Node_To_Node, LAT_Node_To_Node, BW_Socket_To_Socket, LAT_Socket_To_Socket, BW_Core_To_Core, LAT_Core_To_Core)

#------------------------------------
# New Function
#------------------------------------

# Input the filename and the number of processes you wish to partition by
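A usage sketch chaining the other helpers on this page; Benchmark.pbs, Benchmark.txt and node.xml are the benchmarking files named in Filename_Generate:

PROC_BENCHMARK, Error, Message = functions_main.PBS_Benchmark_Parse('Input/Benchmark/Benchmark.pbs')
(Num_Core_Per_Node, Num_Core_Per_Socket, Num_Sock_Per_Node, Error, Message) = \
    functions_main.Find_Hardware('Input/Benchmark/node.xml')
(BW_Node_To_Node, LAT_Node_To_Node, BW_Socket_To_Socket, LAT_Socket_To_Socket,
 BW_Core_To_Core, LAT_Core_To_Core) = functions_main.Parse_Benchmark(
    'Input/Benchmark/Benchmark.txt', PROC_BENCHMARK, Num_Core_Per_Socket, Num_Sock_Per_Node)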

◆ Parse_Nektar_CG_Benchmark_Output()

def functions_main.Parse_Nektar_CG_Benchmark_Output (Input_Filename)

Definition at line 186 of file functions_main.py.

def Parse_Nektar_CG_Benchmark_Output(Input_Filename):

    # Open the file to be parsed
    f = open(Input_Filename, "r")

    # Parse the desired data
    for line in f:
        a = line.split()
        for i in range(0, len(a)):

            # Reset the data each time as we only want the last entry
            if (a[i] == 'Pressure'):
                Pressure = {}
                Velocity_1 = {}
                Velocity_2 = {}
                Velocity_3 = {}
                var = 1
                continue

            if (a[i] == 'Velocity'):
                var = 2
                continue

            if (a[i] == 'Plane'):
                plane = int(a[i + 1]) + 1
                plane = str(plane)
                continue

            # Append each value of CG to the dictionaries
            if (a[i] == 'CG'):
                if (var == 1):
                    if plane in Pressure.keys():
                        Pressure[plane].append(int(a[i + 4]))
                        continue
                    else:
                        Pressure[plane] = [int(a[i + 4])]

                if (var == 2):
                    if plane in Velocity_1.keys():
                        Velocity_1[plane].append(int(a[i + 4]))
                        var = var + 1
                        continue
                    else:
                        Velocity_1[plane] = [int(a[i + 4])]

                if (var == 3):
                    if plane in Velocity_2.keys():
                        Velocity_2[plane].append(int(a[i + 4]))
                        var = var + 1
                        continue
                    else:
                        Velocity_2[plane] = [int(a[i + 4])]

                if (var == 4):
                    if plane in Velocity_3.keys():
                        Velocity_3[plane].append(int(a[i + 4]))
                        continue
                    else:
                        Velocity_3[plane] = [int(a[i + 4])]

    # Return the dictionaries of CG iterations
    return(Pressure, Velocity_1, Velocity_2, Velocity_3)

#------------------------------------
# New Function
#------------------------------------

# Parse the lstopo-generated file to count socket and core related quantities
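A usage sketch on the serial N_Z output named in Filename_Generate (the file name is a hypothetical placeholder); each returned dictionary maps a plane number, stored as a string, to its list of CG iteration counts from the last recorded step:

Pressure, Velocity_1, Velocity_2, Velocity_3 = \
    functions_main.Parse_Nektar_CG_Benchmark_Output('Input/Serial_Input/Max_N_Z.txt')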

◆ Parse_Nektar_Output()

def functions_main.Parse_Nektar_Output (Input_Filename)

Definition at line 158 of file functions_main.py.

def Parse_Nektar_Output(Input_Filename):

    # Open the file to be parsed
    f = open(Input_Filename, "r")

    # List for outputs
    times = []

    # Parse the desired data
    for line in f:
        a = line.split()
        for i in range(0, len(a)):
            if (a[i] == 'CPU'):
                b = a[i + 2].split('s', 1)
                times.append(float(b[0]))

    # Remove the first two entries, which are not representative of the timings
    times.pop(0)
    times.pop(0)

    # Return the list of times
    return(times)

#------------------------------------
# New Function
#------------------------------------

# Parse the Nektar file to find the CG information for the final timestep
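A usage sketch on a hypothetical solver log; the returned list holds the per-step CPU times with the first two entries discarded:

times = functions_main.Parse_Nektar_Output('Input/Parallel_Input/Timing_8.txt')
mean_time = sum(times) / len(times)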

◆ Partition()

def functions_main.Partition (Input_Filename, PROC_XY)

Definition at line 556 of file functions_main.py.

def Partition(Input_Filename, PROC_XY):
    # Assumes the module-level imports of functions_main.py:
    # import os; from subprocess import Popen, PIPE

    # Create a folder to hold the files required for the model to run
    output_path = 'Temporary_Files'
    if os.path.exists(output_path):
        cmd_string_clear = 'rm -r Temporary_Files/ \n'
        process = Popen([cmd_string_clear], shell=True, stdout=PIPE, stdin=PIPE)
        process.wait()
        os.mkdir(output_path)

    if not os.path.exists(output_path):
        os.mkdir(output_path)

    # Uncompress the data if need be using Nekmesh
    cmd_string_uncompress = 'Nekmesh ' + Input_Filename + ' Temporary_Files/uncompressed_mesh.xml:xml:uncompress' + " \n"

    # Run the uncompress command using Python's subprocess module
    process = Popen([cmd_string_uncompress], shell=True, stdout=PIPE, stdin=PIPE)
    process.wait()

    if (PROC_XY == 1):
        f = open('Temporary_Files/uncompressed_mesh.xml', "r")
        element_count = 0
        for line in f:
            a = line.split()

            # Record an element when the strings match (guard against blank lines)
            if (len(a) > 0 and a[0] == '<Q'):
                element_count += 1
        f.close()
        return([0], [element_count])

    # Run the partitioning part of IncNavierStokesSolver to find how METIS splits the elements across the processes
    cmd_string_partition = 'IncNavierStokesSolver Temporary_Files/uncompressed_mesh.xml --part-only ' + str(PROC_XY) + " \n"

    # Run the partition command using Python's subprocess module
    process = Popen([cmd_string_partition], shell=True, stdout=PIPE, stdin=PIPE)
    process.wait()

    # Now find the directory for the generated partition files
    current_directory = os.getcwd()
    mesh_part_folder = '/Temporary_Files/uncompressed_mesh_xml'

    directory = current_directory + mesh_part_folder

    # Holding list for the file names
    mesh_file = []

    # Find the file names in the given directory
    for root, dirs, files in os.walk(directory):
        for file in files:
            if file.endswith(".xml"):
                mesh_file.append(os.path.join(file))

    # Initialise a list for recording the edges present on each core
    edges = []

    # Initialise a list for recording the elements present on each core
    Num_Elements = []

    # Stores the number of messages process i will send to process j
    dictionary_edges = []

    # Parse all the edge data from the xml files and store them by process in edges[]
    for i in range(0, len(mesh_file)):

        # Open each file
        f = open('Temporary_Files/uncompressed_mesh_xml/' + mesh_file[i], "r")

        # Append and update the variables used for storage
        edges.append([])
        dictionary_edges.append({})
        element_count = 0

        # Iterate over the file, splitting each line into strings
        for line in f:
            a = line.split()

            # Record an edge when the strings match
            if (len(a) > 0 and a[0] == '<E'):
                b = a[1].split('"', 2)
                try:
                    edges[i].append(int(b[1]))
                # Skip edge entries that do not parse as integers
                except:
                    pass

            # Record an element when the strings match
            if (len(a) > 0 and a[0] == '<Q'):
                element_count += 1

        Num_Elements.append(element_count)

    # Initialise the dictionary counters for the cores
    for i in range(0, len(mesh_file)):
        for j in range(0, len(mesh_file)):
            if (j == i):
                dictionary_edges[i][str(j)] = 'Self'
                continue
            dictionary_edges[i][str(j)] = 0

    # Now compare the edge lists between processes to find matches.
    # These correspond to neighbouring elements that must communicate.
    # We record +1 message for a match between process i and process k
    for i in range(0, len(mesh_file)):
        for k in range(0, len(mesh_file)):
            if (i == k):
                continue
            for j in range(0, len(edges[i])):
                a = edges[i][j]
                for n in range(0, len(edges[k])):
                    if (a == edges[k][n]):
                        dictionary_edges[i][str(k)] += 1

    # Put the counted edges into lists for general use later
    Num_Element_Msg = []

    for i in range(0, len(mesh_file)):
        Num_Element_Msg.append([])
        for k in range(0, len(mesh_file)):
            Num_Element_Msg[i].append(dictionary_edges[i][str(k)])

    # Return the dictionary of values to be used later
    return (Num_Element_Msg, Num_Elements)

#------------------------------------
# New Function
#------------------------------------

# Find the possible combinations of PROC_Z and PROC_XY such that a Cartesian grid is formed.

Referenced by parallel.Run_Parallel_Model().
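A usage sketch, assuming Nekmesh and IncNavierStokesSolver are on the PATH and a hypothetical mesh file; Num_Element_Msg[i][k] is the number of shared-edge matches between partitions i and k ('Self' on the diagonal), and Num_Elements[i] is the element count of partition i:

Num_Element_Msg, Num_Elements = functions_main.Partition('Input/Mesh/channel.xml', 4)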

◆ PBS_Benchmark_Parse()

def functions_main.PBS_Benchmark_Parse (Input_Filename)

Definition at line 343 of file functions_main.py.

def PBS_Benchmark_Parse(Input_Filename):

    # Open the input file in .pbs format
    f = open(Input_Filename, 'r')

    # Error check defaults to True
    Error = True
    Message = []

    # Iterate over the file looking for the number of cores chosen by the user
    for line in f:
        a = line.split()
        for i in range(0, len(a)):
            b = a[i].split(':', 2)
            for j in range(0, len(b)):
                c = b[j].split('=')
                if (c[0] == 'select'):
                    try:
                        Num_Node = int(c[1])
                    except:
                        Num_Node = 0
                        Error = False
                        Message.append('Unable to find number of nodes from ' + Input_Filename)
                if (c[0] == 'ncpus'):
                    try:
                        Num_Cores = int(c[1])
                    except:
                        Num_Cores = 0
                        Error = False
                        Message.append('Unable to find number of cores from ' + Input_Filename)

    # Calculate the desired quantity
    PROC_BENCHMARK = Num_Cores * Num_Node

    # Close the file
    f.close()

    # Return the desired values and error information
    return(PROC_BENCHMARK, Error, Message)

#------------------------------------
# New Function
#------------------------------------

# Parse the IBM/Intel MPI benchmarking file to find bandwidths and latencies
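A usage sketch using the benchmark PBS script named in Filename_Generate; PROC_BENCHMARK is the product of the parsed select and ncpus values:

PROC_BENCHMARK, Error, Message = functions_main.PBS_Benchmark_Parse('Input/Benchmark/Benchmark.pbs')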

◆ PBS_Job_Parse()

def functions_main.PBS_Job_Parse (Input_Filename)

Definition at line 50 of file functions_main.py.

def PBS_Job_Parse(Input_Filename):

    # Open the input file in .pbs format
    f = open('Input/' + Input_Filename, 'r')

    # Error check defaults to True
    Error = True
    Message = []

    # Iterate over the file looking for the number of nodes chosen by the user
    for line in f:
        a = line.split()
        for i in range(0, len(a)):
            b = a[i].split(':', 2)
            for j in range(0, len(b)):
                c = b[j].split('=', 1)
                if (c[0] == 'select'):
                    try:
                        Num_Node = int(c[1])
                    except:
                        Num_Node = 0
                        Error = False
                        Message.append('Unable to find number of nodes from ' + Input_Filename)

    # Close the file
    f.close()

    # Return the desired values and error information
    return(Num_Node, Error, Message)

#------------------------------------
# New Function
#------------------------------------

# Function to find the total number of elements for the serial calibration.
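A usage sketch; note that the function prepends 'Input/' to the name it is given, so a hypothetical 'run.pbs' is opened as 'Input/run.pbs':

Num_Node, Error, Message = functions_main.PBS_Job_Parse('run.pbs')
if not Error:
    print(Message)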