diff --git a/benchmarks/apps/hpcg/hpcg.py b/benchmarks/apps/hpcg/hpcg.py
index bb152f0c..e54b1391 100644
--- a/benchmarks/apps/hpcg/hpcg.py
+++ b/benchmarks/apps/hpcg/hpcg.py
@@ -32,9 +32,6 @@ class HPCGBenchmark(SpackTest):
     prerun_cmds.append('cp "$(dirname $(which xhpcg))/hpcg.dat" .')
 
     reference = {
-        'archer2': {
-            'flops': (1000.0, -0.2, None, 'Gflops/seconds'),
-        },
         '*': {
             'flops': (1, None, None, 'Gflops/seconds'),
         }
@@ -100,3 +97,92 @@ class HPCG_LFRic(HPCGBenchmark):
     tags = {"lfric"}
     # lfric app requires extra data - dinodump.dat
     prerun_cmds.append('cp "$(dirname $(which xhpcg))/dinodump.dat" .')
+
+
+# TODO - make a class which inherits the above, then uses Spack to install oneAPI MKL and so on.
+# For the paper on Isambard this is hard-coded below due to issues on the system (see other GitHub issues).
+
+import glob
+import os
+
+from reframe.core.backends import getlauncher
+from reframe.core.pipeline import RunOnlyRegressionTest
+
+
+class HPCGIntelOptimised_Isambard(RunOnlyRegressionTest):
+    valid_systems = ['*']  # Isambard only
+    valid_prog_environs = ['*']  # again, everything is hard-coded for Isambard
+    executable = './xhpcg'
+    num_cpus_per_task = 1
+    num_tasks = required
+    num_tasks_per_node = required
+    time_limit = '40m'
+    maintainers = ['dcaseGH']
+    prerun_cmds.append('module use --append /projects/bristol/modules/intel-oneapi-2023.1.0/tbb/2021.9.0/modulefiles')
+    prerun_cmds.append('module use --append /projects/bristol/modules/intel-oneapi-2023.1.0/mpi/2021.9.0/modulefiles')
+    prerun_cmds.append('module use --append /projects/bristol/modules/intel-oneapi-2023.1.0/mkl/2023.1.0/modulefiles')
+    prerun_cmds.append('module use --append /projects/bristol/modules/intel-oneapi-2023.1.0/compiler/2023.1.0/modulefiles')
+    prerun_cmds.append('module load mkl')
+    prerun_cmds.append('module load mpi')
+    # Strictly we don't need this, but follow the lead above and copy in a .dat file
+    prerun_cmds.append('cp /home/mox-dcase/small.dat hpcg.dat')
+
+    @run_after('run')
+    def set_output_datafile(self):
+        # If other output files were in the stage directory before this run,
+        # make sure we use the most recent one.  Assume a 104 ** 3 grid.
+        possible_outfiles = glob.glob(self.stagedir + "/n104*.txt")
+        if possible_outfiles:
+            ordered_outfiles = sorted(possible_outfiles, key=lambda t: os.stat(t).st_mtime)
+            self.output_data = ordered_outfiles[-1]
+        else:
+            self.output_data = ''  # no data
+
+    @run_before('sanity')
+    def set_sanity_patterns(self):
+        # Check that it was a valid run
+        self.sanity_patterns = sn.assert_found(r'VALID with a GFLOP/s rating of=', self.output_data)
+
+    @run_before('performance')
+    def set_perf_patterns(self):
+        # This performance pattern parses the output of the program to extract
+        # the desired figure of merit.
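+        # A matching line in the n104*.txt output looks like (value illustrative):
+        #   VALID with a GFLOP/s rating of=12.3456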
+        self.perf_patterns = {
+            'flops': sn.extractsingle(
+                r'VALID with a GFLOP/s rating of=(\S+)',
+                self.output_data, 1, float),
+        }
+
+    @run_before('run')
+    def setup_variables(self):
+        # Strictly, HPCG is only intended to run with 1 OpenMP thread,
+        # except in the original version.
+        self.env_vars['OMP_NUM_THREADS'] = f'{self.num_cpus_per_task}'
+
+    @run_before('run')
+    def set_launcher(self):
+        self.job.launcher = getlauncher('mpiexec')()
+
+    @run_before('run')
+    def setup_num_tasks(self):
+        # Default to one task per physical core; max(1, ...) guards the
+        # division if per-core information is unavailable.
+        self.set_var_default(
+            'num_tasks',
+            self.current_partition.processor.num_cpus //
+            max(1, self.current_partition.processor.num_cpus_per_core) //
+            self.num_cpus_per_task)
+        self.set_var_default('num_tasks_per_node',
+                             self.current_partition.processor.num_cpus //
+                             self.num_cpus_per_task)
+
+
+@rfm.simple_test
+class HPCG_IntelAVX_Isambard(HPCGIntelOptimised_Isambard):
+    # Copy the AVX executable and run it
+    tags = {"intel", "avx"}
+    prerun_cmds.append('cp /projects/bristol/modules/intel-oneapi-2023.1.0/mkl/2023.1.0/benchmarks/hpcg/bin/xhpcg_avx xhpcg')
+
+
+@rfm.simple_test
+class HPCG_IntelAVX2_Isambard(HPCGIntelOptimised_Isambard):
+    # Copy the AVX2 executable and run it
+    tags = {"intel", "avx2"}
+    prerun_cmds.append('cp /projects/bristol/modules/intel-oneapi-2023.1.0/mkl/2023.1.0/benchmarks/hpcg/bin/xhpcg_avx2 xhpcg')
+
+
+@rfm.simple_test
+class HPCG_IntelSKX_Isambard(HPCGIntelOptimised_Isambard):
+    # Copy the SKX executable and run it
+    tags = {"intel", "skx"}
+    prerun_cmds.append('cp /projects/bristol/modules/intel-oneapi-2023.1.0/mkl/2023.1.0/benchmarks/hpcg/bin/xhpcg_skx xhpcg')
diff --git a/benchmarks/spack/repo/packages/hpcg_excalibur/package.py b/benchmarks/spack/repo/packages/hpcg_excalibur/package.py
index 87c164a9..6799a617 100644
--- a/benchmarks/spack/repo/packages/hpcg_excalibur/package.py
+++ b/benchmarks/spack/repo/packages/hpcg_excalibur/package.py
@@ -19,8 +19,8 @@ class HpcgExcalibur(MakefilePackage):
 
     version(
         "hpcg_lfric",
-        url="https://github.com/NCAS-CMS/hpcg_27ptStencil/archive/refs/tags/lfric_250523.tar.gz",
-        sha256="fa2824890175489e5ad43e4d30d2fd41334777b4ef9c95d798536fc9b8766229"
+        url="https://github.com/NCAS-CMS/hpcg_27ptStencil/archive/refs/tags/lfric_260723.tar.gz",
+        sha256="38b042d4dcb4c6b33d01b5671f3e80c4b6b5561bed624ba98c2075c99d586c6b"
     )
     version(
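As a sketch of the TODO at the top of the new code: a variant that inherits the
existing Spack-driven HPCGBenchmark and lets Spack provide the oneAPI MKL stack,
instead of hard-coding Isambard module paths. The class name, import path, and
spec string below are assumptions for illustration, not part of this patch:

    # Hypothetical follow-up, not part of this patch: let Spack supply the
    # oneAPI stack rather than the hard-coded `module use` lines above.
    import reframe as rfm

    from hpcg import HPCGBenchmark  # assumed import path for the class above


    @rfm.simple_test
    class HPCG_IntelMKL(HPCGBenchmark):
        tags = {"intel", "mkl"}
        # Assumption: a spec that builds hpcg_excalibur against oneAPI MKL;
        # the exact variant/dependency names would need checking against the repo.
        spack_spec = 'hpcg_excalibur ^intel-oneapi-mkl'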