Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
"""Set up and submit an AutoML benchmark (TPOT / auto-sklearn) via arbok.

Builds a Benchmark pointing at a PBS cluster, writes a config file with
per-framework time budgets, and generates job scripts for a set of OpenML
tasks. Submission (qsub) is left commented out so the script is safe to run
as a dry run.
"""
import openml  # imported for OpenML task access (e.g. openml.study) — kept for parity
from arbok.bench import Benchmark

# Benchmark setup: PBS headers, the Python interpreter to run jobs with,
# the root directory where job scripts (.sh files) are written, and the
# names of the config and log files created below.
bench = Benchmark(
    headers="#PBS -lnodes=1:cpu3\n#PBS -lwalltime=15:00:00",
    python_interpreter="/home/jhoof/python/python36/bin/python3",  # Path to interpreter
    root="/home/jhoof/benchmark-test/",
    jobs_dir="jobs",
    config_file="config.json",
    log_file="log.json",
)

# Write the config file consumed by each job. Note the budgets are in
# different units per framework (TPOT: minutes, auto-sklearn: seconds).
config_file = bench.create_config_file(
    # Wrapper parameters
    wrapper={"refit": True, "verbose": False, "retry_on_error": False},
    # TPOT parameters
    tpot={
        "max_time_mins": 60,  # Max total time in minutes
    },
    # Autosklearn parameters
    autosklearn={
        "time_left_for_this_task": 30,  # Max total time in seconds
    },
)

# OpenML task ids to benchmark on. A single task (31) is used here; to run a
# whole suite instead, fetch the id list from an OpenML study, e.g.:
#   tasks = openml.study.get_study(99).tasks
tasks = [31]

# Generate job scripts for the selected classifiers (auto-sklearn only here;
# add "tpot" to the list to benchmark it as well).
bench.create_jobs(tasks, classifiers=["autosklearn"])

# Finally, submit the jobs using qsub (disabled for a dry run).
# bench.submit_jobs()
Add Comment
Please, Sign In to add comment