[main]
testTopDir = /raid/testing/castro-gpu/
webTopDir = /raid/www/Castro/test-suite/gpu/

sourceTree = C_Src

numMakeJobs = 20

suiteName = Castro-gpu

goUpLink = 1

reportActiveTestsOnly = 1

COMP = pgi

add_to_c_make_command = CUDA_VERSION=cc60 COMPILE_CUDA_PATH=/usr/local/cuda-9.2 USE_CUDA=TRUE

use_ctools = 0

purge_output = 1

summary_job_info_field1 = EOS
summary_job_info_field2 = NETWORK

#globalAddToExecString = diffusion.use_mlmg_solver=1 gravity.use_mlmg_solver=1

# MPIcommand should use the placeholders:
#   @host@ to indicate where to put the hostname to run on
#   @nprocs@ to indicate where to put the number of processors
#   @command@ to indicate where to put the command to run
#
# only tests with useMPI = 1 will run in parallel
# nprocs is problem dependent and specified in the individual problem
# sections.

MPIcommand = mpiexec -n @nprocs@ @command@

default_branch = development

# email
sendEmailWhenFail = 0
emailTo = castro-development@googlegroups.com
emailBody = check http://groot.astro.sunysb.edu/Castro/test-suite/gpu/

# slack
slack_post = 1
slack_webhookfile = /home/zingale/.slack.webhook
slack_channel = "#gpu"
slack_username = "i am groot"

[AMReX]
dir = /raid/testing/castro-gfortran/AMReX
branch = gpu

[source]
dir = /raid/testing/castro-gfortran/Castro/
branch = development

# this is a safeguard in case any problem GNUmakefiles hardcode in CASTRO_HOME
comp_string = CASTRO_HOME=@source@

[extra-Microphysics]
dir = /raid/testing/castro-gfortran/Microphysics
branch = development
comp_string = MICROPHYSICS_HOME=@self@

[Sedov-3d]
buildDir = Exec/hydro_tests/Sedov/
inputFile = inputs.starlord.gpu_test
probinFile = probin.starlord
dim = 3
restartTest = 0
useMPI = 0
useOMP = 0
compileTest = 0
doVis = 0
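
# Illustrative note (not part of the suite configuration): based on the
# placeholder rules documented above, a test section that sets useMPI = 1
# with a per-test processor count of 4 would have MPIcommand expand to
# something like
#
#   mpiexec -n 4 ./<test executable> <test inputs file>
#
# The executable and inputs names here are hypothetical; the suite fills
# them in per test via @command@. The Sedov-3d test above has useMPI = 0,
# so it runs serially and no MPI expansion is performed for it.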