[main]
testTopDir = /raid/testing/microphysics-gpu/
webTopDir  = /raid/www/Microphysics/test-suite/gpu/

sourceTree = C_Src

numMakeJobs = 20

suiteName = Microphysics-gpu

goUpLink = 1

reportActiveTestsOnly = 1

COMP = gnu

add_to_c_make_command = NVCC_CCBIN=g++-14 USE_CUDA=TRUE CUDA_ARCH=70 # TINY_PROFILE=TRUE

use_ctools = 0

purge_output = 1

summary_job_info_field1 = EOS
summary_job_info_field2 = NETWORK

#globalAddToExecString = diffusion.use_mlmg_solver=1 gravity.use_mlmg_solver=1

# MPIcommand should use the placeholders:
#   @host@ to indicate where to put the hostname to run on
#   @nprocs@ to indicate where to put the number of processors
#   @command@ to indicate where to put the command to run
#
# only tests with useMPI = 1 will run in parallel.
# nprocs is problem dependent and specified in the individual problem
# sections.

MPIcommand = mpiexec -n @nprocs@ @command@

default_branch = development

# email
sendEmailWhenFail = 0
emailTo = castro-development@googlegroups.com
emailBody = check http://groot.astro.sunysb.edu/Microphysics/test-suite/gpu/

# slack
slack_post = 1
slack_webhookfile = /raid/testing/.slack.webhook
slack_channel = "#gpu"
slack_username = "i am groot"


[AMReX]
dir = /raid/testing/microphysics-gpu/amrex
branch = development


[source]
dir = /raid/testing/microphysics-gpu/Microphysics
branch = development
#branch = new_memory_subch


[test_eos_C-helmholtz]
buildDir = unit_test/test_eos
inputFile = input_eos
dim = 3
link1File = helm_table.dat
useMPI = 0
useOMP = 0
addToCompileString = EOS_DIR=helmholtz
compareFile = test_eos.helmholtz

[test_aprox_rates_C]
buildDir = unit_test/test_aprox_rates
inputFile = input_aprox_rates
dim = 3
link1File = helm_table.dat
useMPI = 0
useOMP = 0
addToCompileString = NETWORK_DIR=aprox19 INTEGRATOR_DIR=VODE
compareFile = test_aprox_rates.helmholtz

[test_screening_C]
buildDir = unit_test/test_screening_templated/
inputFile = inputs
dim = 3
link1File = helm_table.dat
useMPI = 0
useOMP = 0
addToCompileString = NETWORK_DIR=aprox21 INTEGRATOR_DIR=VODE
compareFile = test_screening.screen5

[test_react_aprox13_C]
buildDir = unit_test/test_react/
inputFile = inputs_aprox13
dim = 3
link1File = helm_table.dat
useMPI = 0
useOMP = 0
addToCompileString = NETWORK_DIR=aprox13 INTEGRATOR_DIR=VODE
compareFile = react_aprox13_test_react.VODE

[test_react_C-nova]
buildDir = unit_test/test_react/
inputFile = inputs_nova
dim = 3
link1File = helm_table.dat
useMPI = 0
useOMP = 0
numthreads = 4
addToCompileString = NETWORK_DIR=nova USE_JACOBIAN_CACHING=TRUE
compareFile = react_nova_test_react.VODE

[test_react_C-subch_simple-VODE]
buildDir = unit_test/test_react/
inputFile = inputs_aprox21
dim = 3
link1File = helm_table.dat
useMPI = 0
useOMP = 1
numthreads = 4
addToCompileString = NETWORK_DIR=subch_simple INTEGRATOR_DIR=VODE
compareFile = react_subch_simple_test_react.VODE
runtime_params = prefix=react_subch_simple_ integrator.jacobian=1

[test_react_C-cno-he-burn-34am-VODE]
buildDir = unit_test/test_react/
inputFile = inputs_nova
dim = 3
link1File = helm_table.dat
useMPI = 0
useOMP = 1
numthreads = 4
addToCompileString = NETWORK_DIR=he-burn/cno-he-burn-34am INTEGRATOR_DIR=VODE
compareFile = react_cno_he_burn_34am_test_react.VODE
runtime_params = prefix=react_cno_he_burn_34am_ unit_test.dens_min=5.e4 unit_test.dens_max=5.e5 unit_test.temp_max=2.e8 unit_test.tmax=1.e-6 amrex.the_arena_init_size=0
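
# Usage note (an assumption, not dictated by this file): configurations like
# this one are consumed by the AMReX regression test framework
# (https://github.com/AMReX-Codes/regression_testing), which is typically
# invoked with the config file as its argument, e.g.
#
#   ./regtest.py <this-config-file>.ini
#
# where <this-config-file> is a placeholder for whatever name this file is
# saved under.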