Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
druid.extensions.coordinates=["org.apache.hadoop:hadoop-client:2.4.0", "io.druid.extensions:druid-kafka-eight","io.druid.extensions:mysql-metadata-storage", "io.druid.extensions:druid-hdfs-storage"]
# Zookeeper
druid.zk.service.host=kafka.advertine.com:2181/kafka081

# Metadata Storage (mysql)
druid.metadata.storage.type=mysql
druid.metadata.storage.connector.connectURI=jdbc\:mysql\://web.advertine.com\:3306/druid
druid.metadata.storage.connector.user=root
druid.metadata.storage.connector.password=
druid.metadata.storage.connector.createTables=true
# Deep storage (local filesystem for examples - don't use this in production)
#druid.storage.type=local
#druid.storage.storage.storageDirectory=/tmp/druid/localStorage

druid.cache.type=local
druid.cache.sizeInBytes=300000000
# Indexing service discovery
druid.selectors.indexing.serviceName=overlord

# Monitoring (disabled for examples)
#druid.monitoring.monitors=["com.metamx.metrics.JvmMonitor"]

# Metrics logging (disabled for examples)
druid.emitter=noop
# We enable using the local query cache here
druid.broker.cache.useCache=true
druid.broker.cache.populateCache=true

# Bump these up only for faster nested groupBy
druid.processing.buffer.sizeBytes=1000000000
druid.processing.numThreads=4
#druid.host=localhost
druid.port=9084
druid.service=historical

# We can only scan 1 segment in parallel with these configs.
# Our intermediate buffer is also very small so longer topNs will be slow.
druid.segmentCache.locations=[{"path": "/home/druid/cache/historical1/indexCache", "maxSize": 10000000000}]
druid.server.maxSize=100000000000

druid.storage.type=hdfs
druid.storage.storageDirectory=/user/druid
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement