# Template for a Spark Job Server configuration file
# When deployed, these settings are loaded when the job server starts
#
# Spark Cluster / Job Server configuration
spark {
  # spark.master will be passed to each job's JobContext
  master = "local[4]"
  # master = "mesos://vm28-hulk-pub:5050"
  # master = "yarn-client"

  # Default # of CPUs for jobs to use for Spark standalone cluster
  job-number-cpus = 4

  jobserver {
    port = 8090
    jar-store-rootdir = /tmp/jobserver/jars

    # If true, each SparkContext runs in its own JVM process; if false, all
    # contexts share the job server's JVM
    context-per-jvm = false

    # DAO used to persist uploaded jars, job info and job configs;
    # JobFileDAO keeps them on the local filesystem under filedao.rootdir
    jobdao = spark.jobserver.io.JobFileDAO

    filedao {
      rootdir = /tmp/spark-job-server/filedao/data
    }

    # When using chunked transfer encoding with Scala Stream job results, this is the size of each chunk
    result-chunk-size = 1m
  }

  # Predefined Spark contexts
  # contexts {
  #   my-low-latency-context {
  #     num-cpu-cores = 1          # Number of cores to allocate. Required.
  #     memory-per-node = 512m     # Executor memory per node, -Xmx style, e.g. 512m, 1G, etc.
  #   }
  #   # define additional contexts here
  # }

  # Universal context configuration. These settings can be overridden; see README.md
  context-settings {
    num-cpu-cores = 2          # Number of cores to allocate. Required.
    memory-per-node = 512m     # Executor memory per node, -Xmx style, e.g. 512m, 1G, etc.

    # In case the Spark distribution should be accessed from HDFS (as opposed to being installed on every Mesos slave)
    # spark.executor.uri = "hdfs://namenode:8020/apps/spark/spark.tgz"

    # URIs of jars to be loaded into the classpath for this context; either a list of strings
    # or a single string of comma-separated URIs
    # dependent-jar-uris = ["file:///some/path/present/in/each/mesos/slave/somepackage.jar"]
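    # Illustration only: the same dependent-jar-uris setting given as a single
    # comma-separated string instead of a list (the paths are placeholders)
    # dependent-jar-uris = "file:///some/path/a.jar,file:///some/path/b.jar"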

    # If you wish to pass any settings directly to the sparkConf as-is, add them here in passthrough,
    # such as hadoop connection settings that don't use the "spark." prefix
    passthrough {
      #es.nodes = "192.1.1.1"
    }
  }

  # This needs to match SPARK_HOME for cluster SparkContexts to be created successfully
  # home = "my/path/to/spark-1.6.1-bin-hadoop2.6"
}

# Note that you can use this file to define settings not only for the job server
# but also for your Spark jobs; Spark job configuration merges with this configuration
# file as defaults (see the commented Scala sketch at the end of this file).

akka {
  remote.netty.tcp {
    # This controls the maximum message size, including job results, that can be sent
    # maximum-frame-size = 10 MiB
  }
}
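
# Illustration only, not part of the template: how a Spark job sees the merged
# configuration described in the note above. This is a minimal Scala sketch,
# assuming the spark.jobserver.SparkJob trait, whose validate/runJob methods
# receive a Typesafe Config with this file's settings merged in as defaults.
# The object name and the "input.string" parameter are made-up examples.
#
#   import com.typesafe.config.Config
#   import org.apache.spark.SparkContext
#   import spark.jobserver.{SparkJob, SparkJobInvalid, SparkJobValid, SparkJobValidation}
#
#   object WordCountExample extends SparkJob {
#     // Fail fast if the merged config is missing the expected parameter
#     override def validate(sc: SparkContext, config: Config): SparkJobValidation =
#       if (config.hasPath("input.string")) SparkJobValid
#       else SparkJobInvalid("config parameter input.string is missing")
#
#     // Read the merged config and do the actual Spark work
#     override def runJob(sc: SparkContext, config: Config): Any =
#       sc.parallelize(config.getString("input.string").split(" ").toSeq).countByValue()
#   }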