SHARE
TWEET

Untitled

a guest Dec 13th, 2016 135 Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. {
  2.   "status": "ERROR",
  3.   "result": {
  4.     "message": "Ask timed out on [Actor[akka://JobServer/user/context-supervisor/1995aeba-com.spmsoftware.distributed.job.TestJob#-1370794810]] after [10000 ms]. Sender[null] sent message of type \"spark.jobserver.JobManagerActor$StartJob\".",
  5.     "errorClass": "akka.pattern.AskTimeoutException",
  6.     "stack": ["akka.pattern.PromiseActorRef$$anonfun$1.apply$mcV$sp(AskSupport.scala:604)", "akka.actor.Scheduler$$anon$4.run(Scheduler.scala:126)", "scala.concurrent.Future$InternalCallbackExecutor$.unbatchedExecute(Future.scala:601)", "scala.concurrent.BatchingExecutor$class.execute(BatchingExecutor.scala:109)", "scala.concurrent.Future$InternalCallbackExecutor$.execute(Future.scala:599)", "akka.actor.LightArrayRevolverScheduler$TaskHolder.executeTask(LightArrayRevolverScheduler.scala:331)", "akka.actor.LightArrayRevolverScheduler$$anon$4.executeBucket$1(LightArrayRevolverScheduler.scala:282)", "akka.actor.LightArrayRevolverScheduler$$anon$4.nextTick(LightArrayRevolverScheduler.scala:286)", "akka.actor.LightArrayRevolverScheduler$$anon$4.run(LightArrayRevolverScheduler.scala:238)", "java.lang.Thread.run(Thread.java:745)"]
  7.   }
  8. }
  9.    
  10. # Template for a Spark Job Server configuration file
  11. # When deployed these settings are loaded when job server starts
  12. #
  13. # Spark Cluster / Job Server configuration
  14. #
  15. spark {
  16.   # spark.master will be passed to each job's JobContext
  17.   master = <spark_master>
  18.  
  19.   # Default # of CPUs for jobs to use for Spark standalone cluster
  20.   job-number-cpus = 4
  21.  
  22.   jobserver {
  23.     port = 8090
  24.  
  25.     context-per-jvm = false
  26.     context-creation-timeout = 100 s
  27.     # Note: JobFileDAO is deprecated from v0.7.0 because of issues in
  28.     # production and will be removed in future, now defaults to H2 file.
  29.     jobdao = spark.jobserver.io.JobSqlDAO
  30.  
  31.     filedao {
  32.       rootdir = /tmp/spark-jobserver/filedao/data
  33.     }
  34.  
  35.     datadao {
  36.       rootdir = /tmp/spark-jobserver/upload
  37.     }
  38.  
  39.     sqldao {
  40.       slick-driver = slick.driver.H2Driver
  41.  
  42.       jdbc-driver = org.h2.Driver
  43.  
  44.       rootdir = /tmp/spark-jobserver/sqldao/data
  45.  
  46.       jdbc {
  47.         url = "jdbc:h2:file:/tmp/spark-jobserver/sqldao/data/h2-db"
  48.         user = ""
  49.         password = ""
  50.       }
  51.  
  52.       dbcp {
  53.         enabled = false
  54.         maxactive = 20
  55.         maxidle = 10
  56.         initialsize = 10
  57.       }
  58.     }
  59.     result-chunk-size = 1m
  60.     short-timeout = 60 s    
  61.   }
  62.  
  63.   context-settings {
  64.     num-cpu-cores = 2           # Number of cores to allocate.  Required.
  65.   memory-per-node = 512m         # Executor memory per node, -Xmx style e.g. 512m, 1G, etc.
  66.  
  67.   }
  68.  
  69. }
  70.  
  71. akka {
  72.   remote.netty.tcp {
  73.     # This controls the maximum message size, including job results, that can be sent
  74.     # maximum-frame-size = 200 MiB
  75.   }
  76. }
  77.  
  78. # check the reference.conf in spray-can/src/main/resources for all defined settings
  79. spray.can.server.parsing.max-content-length = 250m
RAW Paste Data
We use cookies for various purposes including analytics. By continuing to use Pastebin, you agree to our use of cookies as described in the Cookies Policy. OK, I Understand
 
Top