# Template for a Spark Job Server configuration file
# When deployed these settings are loaded when job server starts
#
# Spark Cluster / Job Server configuration
spark {
  # spark.master will be passed to each job's JobContext
  master = "local[4]"
  # master = "mesos://vm28-hulk-pub:5050"
  # master = "yarn-client"

  # Default # of CPUs for jobs to use for Spark standalone cluster
  job-number-cpus = 4

  jobserver {
    port = 8090
    jar-store-rootdir = /tmp/jobserver/jars
    context-per-jvm = false
    jobdao = spark.jobserver.io.JobFileDAO
    filedao {
      rootdir = /tmp/spark-job-server/filedao/data
    }
    # When using chunked transfer encoding with Scala Stream job results, this is the size of each chunk
    result-chunk-size = 1m
  }
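
  # With the settings above, the server listens on http://localhost:8090 and job jars are
  # uploaded to it over HTTP; a usage sketch (jar path and app name are placeholders):
  #   curl --data-binary @target/my-job.jar localhost:8090/jars/my-app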

  # Predefined Spark contexts
  # contexts {
  #   my-low-latency-context {
  #     num-cpu-cores = 1           # Number of cores to allocate. Required.
  #     memory-per-node = 512m      # Executor memory per node, -Xmx style eg 512m, 1G, etc.
  #   }
  #   # define additional contexts here
  # }
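
  # Contexts can also be created at runtime via the job server's REST API; a sketch, assuming
  # the query parameters mirror the settings above (context name is a placeholder):
  #   curl -d "" 'localhost:8090/contexts/my-context?num-cpu-cores=2&memory-per-node=512m'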

  # Universal context configuration. These settings can be overridden, see README.md
  context-settings {
    num-cpu-cores = 2           # Number of cores to allocate. Required.
    memory-per-node = 512m      # Executor memory per node, -Xmx style eg 512m, 1G, etc.

    # In case the Spark distribution should be accessed from HDFS (as opposed to being installed on every Mesos slave)
    # spark.executor.uri = "hdfs://namenode:8020/apps/spark/spark.tgz"

    # URIs of jars to be loaded into the classpath for this context.
    # dependent-jar-uris accepts either a string list or a single comma-separated string.
    # dependent-jar-uris = ["file:///some/path/present/in/each/mesos/slave/somepackage.jar"]
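    # Equivalent comma-separated string form (paths below are placeholders):
    # dependent-jar-uris = "file:///path/to/first.jar,file:///path/to/second.jar"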

    # If you wish to pass any settings directly to the SparkConf as-is, add them here in passthrough,
    # such as Hadoop connection settings that don't use the "spark." prefix
    passthrough {
      #es.nodes = "192.1.1.1"
    }
  }

  # This needs to match SPARK_HOME for cluster SparkContexts to be created successfully
  # home = "my/path/to/spark-1.6.1-bin-hadoop2.6"
}

# Note that you can use this file to define settings not only for job server,
# but for your Spark jobs as well. Spark job configuration merges with this configuration file as defaults.
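
# A minimal sketch of that merge (the "my-job" block and its key are hypothetical, not part of the
# standard template): values set here act as defaults for your jobs, and the config submitted with
# a job request overrides them.
# my-job {
#   input-path = "/data/default"   # used unless the job request supplies its own my-job.input-path
# }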

akka {
  remote.netty.tcp {
    # This controls the maximum message size, including job results, that can be sent
    # maximum-frame-size = 10 MiB
  }
}
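
# If job results exceed the frame size, uncomment maximum-frame-size above and raise it to fit
# the largest result you expect to send (the 10 MiB figure is just the example from this template).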