# Template for a Spark Job Server configuration file
# When deployed, these settings are loaded when the job server starts.
#
# Spark Cluster / Job Server configuration
spark {

  master = "yarn"

  # Default number of CPUs for jobs to use on a Spark standalone cluster
  job-number-cpus = 2

  jobserver {
    port = 8090

    context-per-jvm = true
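    # With context-per-jvm = true, each SparkContext runs in its own JVM (a separate
    # JobManager process), isolating contexts from one another; setting it to false
    # runs all contexts inside the job server JVM instead.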

    # The default client mode starts a new JobManager on the local machine.
    # You can use mesos-cluster mode, with the REMOTE_JOBSERVER_DIR and MESOS_SPARK_DISPATCHER
    # environment variables set in the xxxx.sh file, to launch the JobManager on a remote node;
    # Mesos then takes responsibility for offering resources to the JobManager process.
    #driver-mode = client

    # Note: JobFileDAO is deprecated as of v0.7.0 because of issues in production and will be
    # removed in a future release; the default is now the H2 file-backed SQL DAO.
    jobdao = spark.jobserver.io.JobSqlDAO
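    # JobSqlDAO keeps job and jar metadata in the relational database configured in the
    # sqldao block below (MySQL in this setup) rather than in flat files on disk.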

    filedao {
      rootdir = /tmp/spark-jobserver/filedao/data
    }

    datadao {
      # Storage directory for files that are uploaded to the server
      # via POST /data commands
      rootdir = /tmp/spark-jobserver/upload
    }

    sqldao {
      # Slick database driver, full classpath
      # slick-driver = slick.driver.H2Driver
      slick-driver = slick.driver.MySQLDriver
      # JDBC driver, full classpath
      #jdbc-driver = org.h2.Driver
      jdbc-driver = com.mysql.jdbc.Driver
      # Directory where the default H2 driver stores its data. Only needed for H2.
      #rootdir = /tmp/spark-jobserver/sqldao/data

      # Full JDBC URL / init string, along with username and password. Sorry, it needs to match the drivers above.
      # Substitutions may be used when launching the job server, but leave them out of this default or tests won't pass.
      #jdbc {
      #  url = "jdbc:h2:file:/tmp/spark-jobserver/sqldao/data/h2-db"
      #  user = ""
      #  password = ""
      #}
      jdbc {
        url = "jdbc:mysql://localhost:3666/spark_jobserver?useSSL=false"
        user = "jobserver"
        password = ""
      }
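      # The spark_jobserver database and the jobserver account referenced above are assumed
      # to already exist and be reachable on port 3666; the tables themselves are created by
      # the Flyway migrations configured at the bottom of this file.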

      # DB connection pool settings
      dbcp {
        #enabled = false
        maxactive = 20
        maxidle = 10
        initialsize = 10
      }
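      # These map to standard DBCP pool settings: maxactive caps open connections, maxidle is
      # the number of idle connections kept around, and initialsize is how many connections
      # are opened at startup.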
    }

    # When using chunked transfer encoding with Scala Stream job results, this is the size of each chunk
    result-chunk-size = 1m
  }

  # Predefined Spark contexts
  # contexts {
  #   my-low-latency-context {
  #     num-cpu-cores = 1          # Number of cores to allocate. Required.
  #     memory-per-node = 512m     # Executor memory per node, -Xmx style, e.g. 512m, 1G, etc.
  #   }
  #   # define additional contexts here
  # }
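  # Contexts defined in the block above are created when the job server starts; jobs can then
  # target one by name through the REST API, e.g.
  #   POST /jobs?appName=myApp&classPath=com.example.MyJob&context=my-low-latency-context
  # (appName and classPath here are placeholder values).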

  # Universal context configuration. These settings can be overridden; see README.md.
  context-settings {
    #num-cpu-cores = 2        # Number of cores to allocate. Required.
    #memory-per-node = 2g     # Executor memory per node, -Xmx style, e.g. 512m, 1G, etc.
    spark.driver.port = 32456
    spark.yarn.jar = "/opt/mapreducelab/spark/jars/*"
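    # Note: spark.yarn.jar (singular) is the Spark 1.x key and expects a single assembly jar;
    # Spark 2.x uses spark.yarn.jars (plural) for a list or glob like the one above, so the
    # appropriate key depends on the Spark version installed under home.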
    spark.dynamicAllocation.enabled = true
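    # Dynamic allocation on YARN normally also requires the external shuffle service:
    # spark.shuffle.service.enabled = true plus the shuffle service running on each NodeManager.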
    spark.executor.memory = 6632m
    spark.yarn.executor.memoryOverhead = 664
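    # memoryOverhead is specified in MB; 664 is roughly 10% of the 6632m executor heap, matching
    # Spark's default of max(384 MB, 10% of executor memory).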

    # In case the Spark distribution should be accessed from HDFS (as opposed to being installed on every Mesos slave)
    # spark.executor.uri = "hdfs://namenode:8020/apps/spark/spark.tgz"

    # URIs of jars to be loaded into the classpath for this context.
    # dependent-jar-uris is a list of strings, or a single comma-separated string.
    # dependent-jar-uris = ["file:///some/path/present/in/each/mesos/slave/somepackage.jar"]

    # Add settings you wish to pass directly to the SparkConf as-is, such as Hadoop connection
    # settings that don't use the "spark." prefix
    passthrough {
      spark.dynamicAllocation.enabled = true
      spark.executor.memory = 6632m
      spark.yarn.executor.memoryOverhead = 664
      spark.cassandra.driver = "org.apache.spark.sql.cassandra"
      spark.cassandra.connection.host = "host1, host2, host3"
      spark.cassandra.connection.port = "9042"
      spark.cassandra.auth.username = "noetl"
      spark.cassandra.auth.password = "noetl"
      spark.driver.allowMultipleContexts = true
    }
  }

  # This needs to match SPARK_HOME for cluster SparkContexts to be created successfully
  home = "/opt/mapreducelab/spark"
}

spray.can.server.parsing.max-content-length = 250m

# Note that you can use this file to define settings not only for the job server,
# but for your Spark jobs as well. Spark job configuration merges with this configuration file as defaults.

akka {
  remote.netty.tcp {
    # This controls the maximum message size, including job results, that can be sent
    maximum-frame-size = 40 MiB
  }
}
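# maximum-frame-size bounds the Akka remote messages exchanged with the per-context JobManager
# processes (relevant with context-per-jvm = true); job results larger than this cannot be
# returned over that channel.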

#client {
#  connecting-timeout = infinite
#}

spray.can.server {
  idle-timeout = 70 s
  request-timeout = 60 s
  parsing.max-content-length = 300m
  verbose-error-logging = "on"
  verbose-error-messages = "on"
}
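# Note: parsing.max-content-length = 300m here takes precedence over the 250m value set further
# up, since the later HOCON definition of the same path wins; idle-timeout is kept above
# request-timeout as spray expects.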

flyway.locations = "db/mysql/migration"
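# The Flyway migration scripts at this classpath location are applied at startup to create and
# upgrade the MySQL schema used by JobSqlDAO.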