Advertisement
Not a member of Pastebin yet?
Sign Up,
it unlocks many cool features!
- #port=9876
- #S1S NODES
- data.nodes=127.0.0.1:9876
- aggregation.nodes=
- #S1S NODES END
- #NODE TYPE ALLOWED WORKERS
- data.nodes.workers=qresult,indexer,searcher,score,mapping,getmapping,getids,flush,clusterinfo,shutdown,clearcache,admin,merger,translate,dictindexer
- aggregation.nodes.workers=
- #NODE TYPE ALLOWED WORKERS END
- worker.merger.class=br.com.s1mbi0se.s1search.serverworkers.MergerWorker
- worker.merger.threadpoolsize=10
- worker.admin.class=br.com.s1mbi0se.s1search.serverworkers.AdminWorker
- worker.admin.threadpoolsize=10
- worker.qresult.class=br.com.s1mbi0se.s1search.serverworkers.JSONResultQueryRequestWorker
- worker.qresult.threadpoolsize=10
- worker.indexer.class=br.com.s1mbi0se.s1search.serverworkers.JSONIndexRequestWorker
- worker.indexer.threadpoolsize=10
- worker.searcher.class=br.com.s1mbi0se.s1search.serverworkers.JSONSearchRequestWorker
- worker.searcher.threadpoolsize=10
- worker.score.class=br.com.s1mbi0se.s1search.serverworkers.JSONScoreQueryRequestWorker
- worker.score.threadpoolsize=10
- worker.mapping.class=br.com.s1mbi0se.s1search.serverworkers.JSONMappingRequestWorker
- worker.mapping.threadpoolsize=1
- worker.getmapping.class=br.com.s1mbi0se.s1search.serverworkers.JSONGetMappingRequestWorker
- worker.getmapping.threadpoolsize=1
- worker.getids.class=br.com.s1mbi0se.s1search.serverworkers.JSONGetIdRequestWorker
- worker.getids.threadpoolsize=1
- worker.aggs.class=br.com.s1mbi0se.s1search.serverworkers.JSONAggregationRequestWorker
- worker.aggs.threadpoolsize=1
- worker.asqueueindexer.class=br.com.s1mbi0se.s1search.serverworkers.ASQueueIndexWorker
- worker.asqueueindexer.threadpoolsize=1
- worker.asqueueindexer.usecommitlog=true
- worker.asqueueindexer.commitlogdir=s1index/data/commitlog
- worker.asqueueindexer.damcommitlogsize=50000
- worker.sqsdictindexer.class=br.com.s1mbi0se.s1search.serverworkers.SQSDictIndexerWorker
- worker.sqsdictindexer.threadpoolsize=1
- worker.randompopulator.class=br.com.s1mbi0se.s1search.serverworkers.JSONRandomPopulatorRequestWorker
- worker.randompopulator.threadpoolsize=1
- worker.flush.class=br.com.s1mbi0se.s1search.serverworkers.JSONFlushRequestWorker
- worker.flush.threadpoolsize=1
- worker.clusterinfo.class=br.com.s1mbi0se.s1search.serverworkers.JSONClusterInfoWorker
- worker.clusterinfo.threadpoolsize=1
- default.worker.threadpool.size=1
- #SERVER CONFIGS END
- #AGG TYPE CLASSES
- agg.type.terms.class=br.com.s1mbi0se.s1search.index.aggs.TermsAggregation
- #AGG TYPE CLASSES END
- #INDEX CONFIGS
- index.dir.data=s1index/data/node{S1SNODEID}
- index.dir.mapping=s1index/mapping/node{S1SNODEID}
- index.dir.dictionary=s1index/dictionary/node{S1SNODEID}
- index.dir.merge=s1index/merge/node{S1SNODEID}
- index.dir.obsolete=s1index/obsolete/node{S1SNODEID}
- index.dir.rec_id=s1index/rec_id/node{S1SNODEID}
- index.qtdofids=2000000
- index.bitmap.treeimplementation=br.com.s1mbi0se.s1search.bitmap.binarytree.BitmapTreeImpl
- index.value.treeimplementation=br.com.s1mbi0se.s1search.bitmap.binarytree.ValueTreeImpl
- index.timefrequency.treeimplementation=br.com.s1mbi0se.s1search.bitmap.binarytree.TimeFrequencyPostingListTreeImpl
- index.invertedindex.treeimplementation=br.com.s1mbi0se.s1search.bitmap.binarytree.SimplePostingListTreeImpl
- index.treeimplementation=br.com.s1mbi0se.s1search.bitmap.binarytree.TreeImpl
- index.freqgroupimplementation=br.com.s1mbi0se.s1search.bitmap.binarytree.FreqGroupImpl
- index.groups=prefix,subprefix
- index.time_segmentation=yyyyMMdd
- index.defaultblocksize=50
- #MergeWorker Information
- index.enablestoremerged=false
- merge.startingday=3
- merge.shedule=false
- #it should be dynamic. The servers should communicate the last id to each other
- mvp.maxbitmapsize=8192
- mvp.qtdofshards=2
- mvp.nodeid={S1SNODEID}
- index.threadsperquery=10
- index.sizeof.tempblock=5
- indexer.threadpool=10
- writer.flush.time=10000
- search.usegloballock=true
- search.globallockfile=_globallock
- index.resize=100
- #INDEX CONFIGS END
- #FIELD CONFIGS
- #FIELD CONFIGS END
- #INTERNAL FIELD CONFIGS
- #INTERNAL FIELD CONFIGS END
- #TEST CONFIGS
- s1db.connstr=jdbc:mysql://dictionary.c0dixxbt7tg7.us-east-1.rds.amazonaws.com:3306/index_tests
- #TEST CONFIGS END
- #DICTIONARY
- dict.flush.max_items=500
- dict.flush.interval=30
- dict.flush.max_counter=500
- dict.flush.entries_per_msg=200
- dict.sqs.queue.prefix=test_index_dict_
- dict.sqs.queue.timeout=1800
- dict.sqs.accesskey=AKIAJ2EKYJPF2N6NACFA
- dict.sqs.secretkey=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
- dict.indexer.dbname=new_agg_source
- dict.indexer.host=localhost:3308
- dict.indexer.user=root
- dict.indexer.pass=123
- dict.indexer.jdbc.driver=com.mysql.jdbc.Driver
- dict.indexer.jdbc.prefix=mysql
- dict.indexer.dbworkers=1
- dict.indexer.flush.interval=60
- dict.indexer.pct.max.memory=50
- dict.searcher.dbname=new_agg_source
- dict.searcher.host=localhost:3308
- dict.searcher.user=root
- dict.searcher.pass=123
- dict.searcher.jdbc.driver=com.mysql.jdbc.Driver
- dict.searcher.jdbc.prefix=mysql
- dict.searcher.dbworkers=30
- dict.searcher.isolationlevel=1
- #DICTIONARY END
- nodes.communicator.connpernode=1
- enable.statslog=false
- disable.dictionary=true
- record_results=false
- # metrics
- metrics.graphite_server=grafana.s1mbiose.com
- metrics.graphite_port=3000
- metrics.prefix=s1stest
- metrics.cardinality.blacklisted_fields=_[a-zA-Z_$0-9]+,cookieid,cookieids
Advertisement
Add Comment
Please, Sign In to add comment
Advertisement