Guest User

Untitled

a guest
Sep 2nd, 2021
13
Never
Not a member of Pastebin yet? Sign Up, it unlocks many cool features!
  1. # This is a "default" Cromwell example that is intended for you to start with
  2. # and edit for your needs. Specifically, you will be interested to customize
  3. # the configuration based on your preferred backend (see the backends section
  4. # below in the file). For backend-specific examples for you to copy paste here,
  5. # please see the cromwell.backend.examples folder in the repository. The files
  6. # there also include links to online documentation (if it exists)
  7.  
  8. # This line is required. It pulls in default overrides from the embedded cromwell
  9. # `reference.conf` (in core/src/main/resources) needed for proper performance of cromwell.
  10. include required(classpath("application"))
  11.  
  12. # Cromwell HTTP server settings
  13. webservice {
  14. #port = 8000
  15. #interface = 0.0.0.0
  16. #binding-timeout = 5s
  17. #instance.name = "reference"
  18. }
  19.  
  20. akka {
  21. # Optionally set / override any akka settings
  22. http {
  23. server {
  24. # Increasing these timeouts allows rest api responses for very large jobs
  25. # to be returned to the user. When the timeout is reached the server would respond
  26. # `The server was not able to produce a timely response to your request.`
  27. # https://gatkforums.broadinstitute.org/wdl/discussion/10209/retrieving-metadata-for-large-workflows
  28. # request-timeout = 20s
  29. # idle-timeout = 20s
  30. }
  31. }
  32. }
  33.  
  34. # Cromwell "system" settings
  35. system {
  36. # If 'true', a SIGINT will trigger Cromwell to attempt to abort all currently running jobs before exiting
  37. #abort-jobs-on-terminate = false
  38.  
  39. # If 'true', a SIGTERM or SIGINT will trigger Cromwell to attempt to gracefully shutdown in server mode,
  40. # in particular clearing up all queued database writes before letting the JVM shut down.
  41. # The shutdown is a multi-phase process, each phase having its own configurable timeout. See the Dev Wiki for more details.
  42. #graceful-server-shutdown = true
  43.  
  44. # Cromwell will cap the number of running workflows at N
  45. #max-concurrent-workflows = 5000
  46.  
  47. # Cromwell will launch up to N submitted workflows at a time, regardless of how many open workflow slots exist
  48. #max-workflow-launch-count = 50
  49.  
  50. # Number of seconds between workflow launches
  51. #new-workflow-poll-rate = 20
  52.  
  53. # Since the WorkflowLogCopyRouter is initialized in code, this is the number of workers
  54. #number-of-workflow-log-copy-workers = 10
  55.  
  56. # Default number of cache read workers
  57. #number-of-cache-read-workers = 25
  58.  
  59. io {
  60. # throttle {
  61. # # Global Throttling - This is mostly useful for GCS and can be adjusted to match
  62. # # the quota available on the GCS API
  63. # #number-of-requests = 100000
  64. # #per = 100 seconds
  65. # }
  66.  
  67. # Number of times an I/O operation should be attempted before giving up and failing it.
  68. #number-of-attempts = 5
  69. }
  70.  
  71. # Maximum number of input file bytes allowed in order to read each type.
  72. # If exceeded a FileSizeTooBig exception will be thrown.
  73. input-read-limits {
  74.  
  75. #lines = 128000
  76.  
  77. #bool = 7
  78.  
  79. #int = 19
  80.  
  81. #float = 50
  82.  
  83. #string = 128000
  84.  
  85. #json = 128000
  86.  
  87. #tsv = 128000
  88.  
  89. #map = 128000
  90.  
  91. #object = 128000
  92. }
  93.  
  94. abort {
  95. # These are the default values in Cromwell, in most circumstances there should not be a need to change them.
  96.  
  97. # How frequently Cromwell should scan for aborts.
  98. scan-frequency: 30 seconds
  99.  
  100. # The cache of in-progress aborts. Cromwell will add entries to this cache once a WorkflowActor has been messaged to abort.
  101. # If on the next scan an 'Aborting' status is found for a workflow that has an entry in this cache, Cromwell will not ask
  102. # the associated WorkflowActor to abort again.
  103. cache {
  104. enabled: true
  105. # Guava cache concurrency.
  106. concurrency: 1
  107. # How long entries in the cache should live from the time they are added to the cache.
  108. ttl: 20 minutes
  109. # Maximum number of entries in the cache.
  110. size: 100000
  111. }
  112. }
  113.  
  114. # Cromwell reads this value into the JVM's `networkaddress.cache.ttl` setting to control DNS cache expiration
  115. dns-cache-ttl: 3 minutes
  116. }
  117.  
  118. workflow-options {
  119. # These workflow options will be encrypted when stored in the database
  120. #encrypted-fields: []
  121.  
  122. # AES-256 key to use to encrypt the values in `encrypted-fields`
  123. #base64-encryption-key: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
  124.  
  125. # Directory where to write per workflow logs
  126. #workflow-log-dir: "cromwell-workflow-logs"
  127.  
  128. # When true, per workflow logs will be deleted after copying
  129. #workflow-log-temporary: true
  130.  
  131. # Workflow-failure-mode determines what happens to other calls when a call fails. Can be either ContinueWhilePossible or NoNewCalls.
  132. # Can also be overridden in workflow options. Defaults to NoNewCalls. Uncomment to change:
  133. #workflow-failure-mode: "ContinueWhilePossible"
  134.  
  135. default {
  136. # When a workflow type is not provided on workflow submission, this specifies the default type.
  137. #workflow-type: WDL
  138.  
  139. # When a workflow type version is not provided on workflow submission, this specifies the default type version.
  140. #workflow-type-version: "draft-2"
  141.  
  142. # To set a default hog group rather than defaulting to workflow ID:
  143. #hogGroup: "static"
  144. }
  145. }
  146.  
  147. # Optional call-caching configuration.
  148. call-caching {
  149. # Allows re-use of existing results for jobs you've already run
  150. # (default: false)
  151. #enabled = false
  152.  
  153. # Whether to invalidate a cache result forever if we cannot reuse them. Disable this if you expect some cache copies
  154. # to fail for external reasons which should not invalidate the cache (e.g. auth differences between users):
  155. # (default: true)
  156. #invalidate-bad-cache-results = true
  157.  
  158. # The maximum number of times Cromwell will attempt to copy cache hits before giving up and running the job.
  159. #max-failed-copy-attempts = 1000000
  160.  
  161. # blacklist-cache {
  162. # # The call caching blacklist cache is off by default. This cache is used to blacklist cache hits based on cache
  163. # # hit ids or buckets of cache hit paths that Cromwell has previously failed to copy for permissions reasons.
  164. # enabled: true
  165. #
  166. # # A blacklist grouping can be specified in workflow options which will inform the blacklister which workflows
  167. # # should share a blacklist cache.
  168. # groupings {
  169. # workflow-option: call-cache-blacklist-group
  170. # concurrency: 10000
  171. # ttl: 2 hours
  172. # size: 1000
  173. # }
  174. #
  175. # buckets {
  176. # # Guava cache concurrency.
  177. # concurrency: 10000
  178. # # How long entries in the cache should live from the time of their last access.
  179. # ttl: 20 minutes
  180. # # Maximum number of entries in the cache.
  181. # size: 1000
  182. # }
  183. #
  184. # hits {
  185. # # Guava cache concurrency.
  186. # concurrency: 10000
  187. # # How long entries in the cache should live from the time of their last access.
  188. # ttl: 20 minutes
  189. # # Maximum number of entries in the cache.
  190. # size: 100000
  191. # }
  192. #
  193. # }
  194. }
  195.  
  196. # Google configuration
  197. google {
  198.  
  199. #application-name = "cromwell"
  200.  
  201. # Default: just application default
  202. #auths = [
  203.  
  204. # Application default
  205. #{
  206. # name = "application-default"
  207. # scheme = "application_default"
  208. #},
  209.  
  210. # Use a static service account
  211. #{
  212. # name = "service-account"
  213. # scheme = "service_account"
  214. # Choose between PEM file and JSON file as a credential format. They're mutually exclusive.
  215. # PEM format:
  216. # service-account-id = "my-service-account"
  217. # pem-file = "/path/to/file.pem"
  218. # JSON format:
  219. # json-file = "/path/to/file.json"
  220. #}
  221.  
  222. # Use service accounts provided through workflow options
  223. #{
  224. # name = "user-service-account"
  225. # scheme = "user_service_account"
  226. #}
  227. #]
  228. }
  229.  
  230. docker {
  231. hash-lookup {
  232. # Set this to match your available quota against the Google Container Engine API
  233. #gcr-api-queries-per-100-seconds = 1000
  234.  
  235. # Time in minutes before an entry expires from the docker hashes cache and needs to be fetched again
  236. #cache-entry-ttl = "20 minutes"
  237.  
  238. # Maximum number of elements to be kept in the cache. If the limit is reached, old elements will be removed from the cache
  239. #cache-size = 200
  240.  
  241. # How should docker hashes be looked up. Possible values are "local" and "remote"
  242. # "local": Lookup hashes on the local docker daemon using the cli
  243. # "remote": Lookup hashes on docker hub, gcr, gar, quay
  244. #method = "remote"
  245. }
  246. }
  247.  
  248. engine {
  249. # This instructs the engine which filesystems are at its disposal to perform any IO operation that it might need.
  250. # For instance, WDL variables declared at the Workflow level will be evaluated using the filesystems declared here.
  251. # If you intend to be able to run workflows with this kind of declarations:
  252. # workflow {
  253. # String str = read_string("gs://bucket/my-file.txt")
  254. # }
  255. # You will need to provide the engine with a gcs filesystem
  256. # Note that the default filesystem (local) is always available.
  257. filesystems {
  258. # gcs {
  259. # auth = "application-default"
  260. # # Google project which will be billed for the requests
  261. # project = "google-billing-project"
  262. # }
  263. # oss {
  264. # auth {
  265. # endpoint = ""
  266. # access-id = ""
  267. # access-key = ""
  268. # security-token = ""
  269. # }
  270. # }
  271. local {
  272. #enabled: true
  273. }
  274. }
  275. }
  276.  
  277. # You probably don't want to override the language factories here, but the strict-validation and enabled fields might be of interest:
  278. #
  279. # `enabled`: Defaults to `true`. Set to `false` to disallow workflows of this language/version from being run.
  280. # `strict-validation`: Specifies whether workflows fail if the inputs JSON (or YAML) file contains values which the workflow did not ask for (and will therefore have no effect).
  281. languages {
  282. WDL {
  283. versions {
  284. "draft-2" {
  285. # language-factory = "languages.wdl.draft2.WdlDraft2LanguageFactory"
  286. # config {
  287. # strict-validation: true
  288. # enabled: true
  289. # caching {
  290. # # WDL Draft 2 namespace caching is off by default, this value must be set to true to enable it.
  291. # enabled: false
  292. # # Guava cache concurrency
  293. # concurrency: 2
  294. # # How long entries in the cache should live from the time of their last access.
  295. # ttl: 20 minutes
  296. # # Maximum number of entries in the cache (i.e. the number of workflow source + imports => namespace entries).
  297. # size: 1000
  298. # }
  299. # }
  300. }
  301. # draft-3 is the same as 1.0 so files should be able to be submitted to Cromwell as 1.0
  302. # "draft-3" {
  303. # language-factory = "languages.wdl.draft3.WdlDraft3LanguageFactory"
  304. # config {
  305. # strict-validation: true
  306. # enabled: true
  307. # }
  308. # }
  309. "1.0" {
  310. # 1.0 is just a rename of draft-3, so yes, they really do use the same factory:
  311. # language-factory = "languages.wdl.draft3.WdlDraft3LanguageFactory"
  312. # config {
  313. # strict-validation: true
  314. # enabled: true
  315. # }
  316. }
  317. }
  318. }
  319. CWL {
  320. versions {
  321. "v1.0" {
  322. # language-factory = "languages.cwl.CwlV1_0LanguageFactory"
  323. # config {
  324. # strict-validation: false
  325. # enabled: true
  326. # }
  327. }
  328. }
  329. }
  330. }
  331. # Here is where you can define the backend providers that Cromwell understands.
  332. # The default is a local provider.
  333. # To add additional backend providers, you should copy paste additional backends
  334. # of interest that you can find in the cromwell.example.backends folder
  335. # folder at https://www.github.com/broadinstitute/cromwell
  336. # Other backend providers include SGE, SLURM, Docker, udocker, Singularity, etc.
  337. # Don't forget you will need to customize them for your particular use case.
  338. backend {
  339. # Override the default backend.
  340. default = "LocalWithDocker"
  341.  
  342. # The list of providers.
  343. providers {
  344.  
  345. # The local provider is included by default in the reference.conf. This is an example.
  346.  
  347. # Define a new backend provider.
  348. LocalWithDocker {
  349. # The actor that runs the backend. In this case, it's the Shared File System (SFS) ConfigBackend.
  350. actor-factory = "cromwell.backend.impl.sfs.config.ConfigBackendLifecycleActorFactory"
  351.  
  352. # The backend custom configuration.
  353. config {
  354.  
  355. # Optional limits on the number of concurrent jobs
  356. #concurrent-job-limit = 5
  357.  
  358. # The list of possible runtime custom attributes.
  359. runtime-attributes = """
  360. String? docker
  361. String? docker_user
  362. String? cpu
  363. String? memory_mb
  364. """
  365.  
  366. # Submit string when there is no "docker" runtime attribute.
  367. submit = "/usr/bin/env bash ${script}"
  368.  
  369. # Submit string when there is a "docker" runtime attribute.
  370. submit-docker = """
  371. docker run \
  372. --rm -i \
  373. ${"--cpus=" + cpu} \
  374. ${"--memory=" + memory_mb + "m" } \
  375. --user 1081:1123 \
  376. --entrypoint ${job_shell} \
  377. -v ${cwd}:${docker_cwd} \
  378. ${docker} ${script}
  379. """
  380.  
  381. # Root directory where Cromwell writes job results. This directory must be
  382. # visible and writeable by the Cromwell process as well as the jobs that Cromwell
  383. # launches.
  384. root = "cromwell-executions"
  385.  
  386. # Root directory where Cromwell writes job results in the container. This value
  387. # can be used to specify where the execution folder is mounted in the container.
  388. # It is used for the construction of the docker_cwd string in the submit-docker
  389. # value above.
  390. dockerRoot = "/cromwell-executions"
  391.  
  392. # File system configuration.
  393. filesystems {
  394.  
  395. # For SFS backends, the "local" configuration specifies how files are handled.
  396. local {
  397.  
  398. # Try to hard link (ln), then soft-link (ln -s), and if both fail, then copy the files.
  399. localization: [
  400. "hard-link", "soft-link", "copy"
  401. ]
  402.  
  403. # Call caching strategies
  404. caching {
  405. # When copying a cached result, what type of file duplication should occur. Attempted in the order listed below:
  406. duplication-strategy: [
  407. "hard-link", "soft-link", "copy"
  408. ]
  409.  
  410. # Possible values: file, path
  411. # "file" will compute an md5 hash of the file content.
  412. # "path" will compute an md5 hash of the file path. This strategy will only be effective if the duplication-strategy (above) is set to "soft-link",
  413. # in order to allow for the original file path to be hashed.
  414. hashing-strategy: "file"
  415.  
  416. # When true, will check if a sibling file with the same name and the .md5 extension exists, and if it does, use the content of this file as a hash.
  417. # If false or the md5 does not exist, will proceed with the above-defined hashing strategy.
  418. check-sibling-md5: false
  419. }
  420. }
  421. }
  422.  
  423. # The defaults for runtime attributes if not provided.
  424. default-runtime-attributes {
  425. failOnStderr: false
  426. continueOnReturnCode: 0
  427. cpu: 60
  428. memory: "100000 MB"
  429. }
  430. }
  431. }
  432. }
  433. }
  434.  
  435. services {
  436. MetadataService {
  437.  
  438. # This class is the "default" database backed implementation:
  439. # class = "cromwell.services.metadata.impl.MetadataServiceActor"
  440. # config {
  441. # # For the standard MetadataService implementation, cromwell.services.metadata.impl.MetadataServiceActor:
  442. # # Set this value to "Inf" to turn off metadata summary refresh. The default value is currently "1 second".
  443. # metadata-summary-refresh-interval = "1 second"
  444. #
  445. # # Set this value to the maximum number of metadata rows to be considered per summarization cycle.
  446. # metadata-summary-refresh-limit = 5000
  447. #
  448. # # For higher scale environments, e.g. many workflows and/or jobs, DB write performance for metadata events
  449. # # can be improved by writing to the database in batches. Increasing this value can dramatically improve overall
  450. # # performance but will both lead to a higher memory usage as well as increase the risk that metadata events
  451. # # might not have been persisted in the event of a Cromwell crash.
  452. # #
  453. # # For normal usage the default value of 200 should be fine but for larger/production environments we recommend a
  454. # # value of at least 500. There'll be no one size fits all number here so we recommend benchmarking performance and
  455. # # tuning the value to match your environment.
  456. # db-batch-size = 200
  457. #
  458. # # Periodically the stored metadata events will be forcibly written to the DB regardless of if the batch size
  459. # # has been reached. This is to prevent situations where events wind up never being written to an incomplete batch
  460. # # with no new events being generated. The default value is currently 5 seconds
  461. # db-flush-rate = 5 seconds
  462. #
  463. # # Kill metadata SQL queries that run so long that the associated request will likely already have timed out.
  464. # # The intention is to return resources to the system within a reasonable timeframe to avoid OOM incidents.
  465. # # See also `akka.http.server.request-timeout`.
  466. # metadata-read-query-timeout = "Inf"
  467. #
  468. # # Limit the number of rows from METADATA_ENTRY that will be fetched to produce metadata responses.
  469. # # This limit takes into account the effects of `includeKey`, `excludeKey` and `includeSubworkflows`
  470. # # request parameters; only the rows required to be retrieved from the database to compose the response
  471. # # count against this limit.
  472. # metadata-read-row-number-safety-threshold = 1000000
  473. # }
  474.  
  475. # Alternative 1: Pub sub implementation:
  476. # class = "cromwell.services.metadata.impl.MetadataServiceActor"
  477. # config {
  478. # # For the Google PubSub MetadataService implementation: cromwell.services.metadata.impl.pubsub.PubSubMetadataServiceActor:
  479. # # Google project
  480. # project = "my-project"
  481. # # The auth *must* be a service-account auth with JSON auth.
  482. # auth = "service-account"
  483. # # The PubSub topic to write to. Will be created if it doesn't already exist. Defaults to "cromwell-metadata"
  484. # topic = "cromwell-metadata"
  485. # # An optional PubSub subscription name. If supplied and if it doesn't already exist, it will be created and
  486. # # subscribed to the topic
  487. # # subscription = "optional-subscription"
  488. # # An application name to set on your PubSub interaction.
  489. # appName = "cromwell"
  490. # }
  491. }
  492.  
  493. Instrumentation {
  494. # StatsD - Send metrics to a StatsD server over UDP
  495. # class = "cromwell.services.instrumentation.impl.statsd.StatsDInstrumentationServiceActor"
  496. # config {
  497. # hostname = "localhost"
  498. # port = 8125
  499. # prefix = "" # can be used to prefix all metrics with an api key for example
  500. # flush-rate = 1 second # rate at which aggregated metrics will be sent to statsd
  501. # }
  502.  
  503. # Stackdriver - Send metrics to Google's monitoring API
  504. # class = "cromwell.services.instrumentation.impl.stackdriver.StackdriverInstrumentationServiceActor"
  505. # config {
  506. # # auth scheme can be `application_default` or `service_account`
  507. # auth = "service-account"
  508. # google-project = "my-project"
  509. # # rate at which aggregated metrics will be sent to Stackdriver API, must be 1 minute or more.
  510. # flush-rate = 1 minute
  511. # # below 3 keys are attached as labels to each metric. `cromwell-perf-test-case` is specifically meant for perf env.
  512. # cromwell-instance-identifier = "cromwell-101"
  513. # cromwell-instance-role = "role"
  514. # cromwell-perf-test-case = "perf-test-1"
  515. # }
  516. }
  517. HealthMonitor {
  518. config {
  519.  
  520. #####
  521. # Choose what to monitor:
  522. #####
  523.  
  524. # If you want to check the availability of the PAPI or PAPIv2 services, list them here.
  525. # If provided, all values here *MUST* be valid PAPI or PAPIv2 backend names in the Backends stanza.
  526. # NB: requires 'google-auth-name' to be set
  527. # check-papi-backends: [ PAPIv2 ]
  528.  
  529. # If you want to check connection to GCS (NB: requires 'google-auth-name' and 'gcs-bucket-to-check' to be set):
  530. # check-gcs: true
  531.  
  532. # If you want to check database connectivity:
  533. # check-engine-database: true
  534.  
  535. # If you want to check dockerhub availability:
  536. # check-dockerhub: true
  537.  
  538. #####
  539. # General health monitor configuration:
  540. #####
  541.  
  542. # How long to wait between status check sweeps
  543. # check-refresh-time = 5 minutes
  544.  
  545. # For any given status check, how long to wait before assuming failure
  546. # check-timeout = 1 minute
  547.  
  548. # For any given status datum, the maximum time a value will be kept before reverting back to "Unknown"
  549. # status-ttl = 15 minutes
  550.  
  551. # For any given status check, how many times to retry a failure before setting status to failed. Note this
  552. # is the number of retries before declaring failure, not the total number of tries which is 1 more than
  553. # the number of retries.
  554. # check-failure-retry-count = 3
  555.  
  556. # For any given status check, how long to wait between failure retries.
  557. # check-failure-retry-interval = 30 seconds
  558.  
  559. #####
  560. # GCS- and PAPI-specific configuration options:
  561. #####
  562.  
  563. # The name of an authentication scheme to use for e.g. pinging PAPI and GCS. This should be either an application
  564. # default or service account auth, otherwise things won't work as there'll not be a refresh token where you need
  565. # them.
  566. # google-auth-name = application-default
  567.  
  568. # A *bucket* in GCS to periodically stat to check for connectivity. This must be accessible by the auth mode
  569. # specified by google-auth-name
  570. # NB: This is a *bucket name*, not a URL and not an *object*. With 'some-bucket-name', Cromwell would ping gs://some-bucket-name
  571. # gcs-bucket-to-check = some-bucket-name
  572. }
  573. }
  574. LoadController {
  575. config {
  576. # The load controller service will periodically look at the status of various metrics its collecting and make an
  577. # assessment of the system's load. If necessary an alert will be sent to the rest of the system.
  578. # This option sets how frequently this should happen
  579. # To disable load control, set this to "Inf"
  580. # control-frequency = 5 seconds
  581. }
  582. }
  583. }
  584.  
  585. database {
  586. # mysql example
  587. #driver = "slick.driver.MySQLDriver$"
  588.  
  589. # see all possible parameters and default values here:
  590. # http://slick.lightbend.com/doc/3.2.0/api/index.html#slick.jdbc.JdbcBackend$DatabaseFactoryDef@forConfig(String,Config,Driver):Database
  591. #db {
  592. # driver = "com.mysql.jdbc.Driver"
  593. # url = "jdbc:mysql://host/cromwell?rewriteBatchedStatements=true"
  594. # user = "user"
  595. # password = "pass"
  596. # connectionTimeout = 5000
  597. #}
  598.  
  599. # For batch inserts the number of inserts to send to the DB at a time
  600. # insert-batch-size = 2000
  601.  
  602. migration {
  603. # For databases with a very large number of symbols, selecting all the rows at once can generate a variety of
  604. # problems. In order to avoid any issue, the selection is paginated. This value sets how many rows should be
  605. # retrieved and processed at a time, before asking for the next chunk.
  606. #read-batch-size = 100000
  607.  
  608. # Because a symbol row can contain any arbitrary wdl value, the amount of metadata rows to insert from a single
  609. # symbol row can vary from 1 to several thousands (or more). To keep the size of the insert batch from growing out
  610. # of control we monitor its size and execute/commit when it reaches or exceeds writeBatchSize.
  611. #write-batch-size = 100000
  612. }
  613.  
  614. # To customize the metadata database connection, create a block under `database` with the metadata database settings.
  615. #
  616. # For example, the default database stores all data in memory. This commented out block would store `metadata` in an
  617. # hsqldb file, without modifying the internal engine database connection.
  618. #
  619. # The value `${uniqueSchema}` is always replaced with a unique UUID on each Cromwell startup.
  620. #
  621. # This feature should be considered experimental and likely to change in the future.
  622.  
  623. #metadata {
  624. # profile = "slick.jdbc.HsqldbProfile$"
  625. # db {
  626. # driver = "org.hsqldb.jdbcDriver"
  627. # url = "jdbc:hsqldb:file:metadata-${uniqueSchema};shutdown=false;hsqldb.tx=mvcc"
  628. # connectionTimeout = 3000
  629. # }
  630. #}
  631.  
  632. # Postgresql example
  633. #database {
  634. # profile = "slick.jdbc.PostgresProfile$"
  635. # db {
  636. # driver = "org.postgresql.Driver"
  637. # url = "jdbc:postgresql://localhost:5432/cromwell"
  638. # user = ""
  639. # password = ""
  640. # port = 5432
  641. # connectionTimeout = 5000
  642. # }
  643. #}
  644. }
  645.  
  646.  
RAW Paste Data