import openml

from arbok.bench import Benchmark

# We create a benchmark setup where we specify the PBS headers, the Python
# interpreter we want to use, the directory where the jobs (.sh files) are
# stored, and the config file that we create below.
bench = Benchmark(
    headers="#PBS -lnodes=1:cpu3\n#PBS -lwalltime=15:00:00",
    python_interpreter="/home/jhoof/python/python36/bin/python3",  # Path to interpreter
    root="/home/jhoof/benchmark-test/",
    jobs_dir="jobs",
    config_file="config.json",
    log_file="log.json"
)

# Config file
config_file = bench.create_config_file(

    # Wrapper parameters
    wrapper={"refit": True, "verbose": False, "retry_on_error": False},

    # TPOT parameters
    tpot={
        "max_time_mins": 60,  # Max total time in minutes
    },

    # Autosklearn parameters
    autosklearn={
        "time_left_for_this_task": 30,  # Max total time in seconds
    }
)

# Next, we load the tasks we want to benchmark on from OpenML. Study 99
# provides a list of task ids; here we use only task 31.
tasks = [31]
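
# A hedged sketch of loading the full task list instead: openml-python
# exposes studies via openml.study.get_study, and the returned study object
# is assumed here to carry the task ids in its `tasks` attribute.
# study = openml.study.get_study(99)
# tasks = study.tasks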

# Next, we create jobs for the selected classifiers; here only auto-sklearn.
bench.create_jobs(tasks, classifiers=["autosklearn"])
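
# To benchmark TPOT as well (assuming create_jobs accepts multiple
# classifier names in one call, which the list argument suggests):
# bench.create_jobs(tasks, classifiers=["tpot", "autosklearn"])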

# And finally, we submit the jobs using qsub
# bench.submit_jobs()